code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import socket
import sys

# Address of the remote server to connect to (this is a client, so HOST
# is the peer's address, not a bind interface).
HOST = '192.168.3.11'
PORT = 3000  # Arbitrary non-privileged port

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print('Socket created')
# +
try:
    s.connect((HOST, PORT))
except socket.error as msg:
    # BUG FIX: this is a connect(), not a bind(); the original message said
    # "Bind failed". Also ported Python-2-only `except E, msg` / print
    # statements to syntax valid on both Python 2.6+ and Python 3.
    print('Connection failed. Error : ' + str(msg))
    sys.exit()
print('Socket connected')
# -
# -
| cics socket.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Write a Python Program to Display Fibonacci Sequence Using Recursion?
# +
def give_fibonacci(first_term, second_term, number_of_terms):
    """Print terms 3..number_of_terms of the Fibonacci-like sequence
    seeded with first_term and second_term.

    The caller is expected to print the two seed terms itself; this
    function prints nothing when number_of_terms < 3.
    """
    # Slide a two-term window along the sequence, printing each new term.
    for _ in range(number_of_terms - 2):
        first_term, second_term = second_term, first_term + second_term
        print(second_term)
# Interactive driver: read the two seed terms and the desired length.
first_term = int(input("Enter the first term"))
second_term = int(input("Enter the second term"))
number_of_terms = int(input("Enter the number of terms you want"))
# Echo the two seeds; give_fibonacci prints the remaining terms.
print(first_term)
print(second_term)
give_fibonacci(first_term,second_term,number_of_terms)
# -
# # Write a Python Program to Find Factorial of Number Using Recursion?
# +
def get_factorial(num):
    """Return num! computed recursively.

    Args:
        num: non-negative integer.

    Returns:
        The factorial of num (1 when num == 0).

    Raises:
        ValueError: if num is negative. (The original recursed without
        bound on negative input and died with RecursionError.)
    """
    if num < 0:
        raise ValueError("factorial is undefined for negative numbers")
    if num == 0:
        return 1
    return num * get_factorial(num - 1)
# Interactive driver: read a number and report its factorial.
num = int(input("Enter the number whose factorial you want"))
print(f"The factorial of {num} is {get_factorial(num)}")
# -
# # Write a Python Program to calculate your Body Mass Index?
# +
# body mass index = mass in kg / (height in meter)^2
def get_bmi(mass, height):
    """Return the Body Mass Index for *mass* (kg) and *height* (m)."""
    height_squared = height ** 2
    return mass / height_squared
# Interactive driver for get_bmi.
# BUG FIX: use float() instead of int() — int("1.75") raises ValueError,
# yet heights in meters are almost always fractional (and mass can be too).
mass = float(input("Enter the mass in kg"))
height = float(input("Enter the height in meter"))
print(f"The BMI for given mass {mass} kg and given height {height} meter is {get_bmi(mass,height):.6f}")
# -
# # Write a Python Program to calculate the natural logarithm of any number?
# +
import math
# Interactive driver: report the natural log of a number.
# BUG FIX: the prompt promises "any number", but int() rejected values
# like 2.5 — use float(). Note math.log raises ValueError for input <= 0.
num = float(input("Enter the number whose natural log you want"))
print(f"The natural logarithm of {num} is {math.log(num):.5f}")
# -
# # Write a Python Program for cube sum of first n natural numbers?
# +
def cube_sum(num):
    """Return the sum of cubes 0**3 + 1**3 + ... + num**3."""
    total = 0
    for value in range(num + 1):
        total += value ** 3
    return total
# Interactive driver: report the cube sum 0^3 + ... + num^3.
num = int(input("Enter the number upto which you want cube sum"))
print(f"Cube sum upto {num} is {cube_sum(num)}")
# -
| Assignment-6.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# # Solutions to Exercises
#
# For each exercise, the solutions below show one possible way of solving it, but you might have used a different approach, and that's great! There is almost always more than one way to solve any particular problem in Python.
#
# **Note**: Since this notebook is in the `solutions` sub-folder, the file paths are slightly different than the lessons notebooks which are in the main project folder—here we have `'../data/some_file.csv'` instead of `'data/some_file.csv'`.
#
# ## Initial Setup
# +
import pandas as pd
# %matplotlib inline
# + [markdown] slideshow={"slide_type": "slide"}
# ---
# # Exercise 1.1
#
# Let's explore `'data/weather_airports_24hr_snapshot.csv'`, which contains a 24 hour snapshot of Environment Canada weather measurements at major airport stations around Canada.
#
# a) Read the CSV file into a new DataFrame `weather_all` and display the first 10 rows.
# -
# Ex 1.1 a) load the 24-hour airport snapshot; path is relative to solutions/
weather_all = pd.read_csv('../data/weather_airports_24hr_snapshot.csv')
weather_all.head(10)
# + [markdown] slideshow={"slide_type": "slide"}
# b) How many rows and columns does `weather_all` have?
# -
# .shape is a (rows, columns) tuple
weather_all.shape
# - 480 rows and 15 columns
# + [markdown] slideshow={"slide_type": "slide"}
# c) Display the names and data types of each column.
# -
# .dtypes is a Series mapping column name -> dtype
weather_all.dtypes
# + [markdown] slideshow={"slide_type": "slide"}
# ---
# # Exercise 1.2
#
# If you haven't already, read the file `'data/weather_airports_24hr_snapshot.csv'` into a new DataFrame `weather_all`.
#
# a) What are the warmest and coldest temperatures in this data?
# -
# Warmest temperature
weather_all['Temp (deg C)'].max()
# Coldest temperature
weather_all['Temp (deg C)'].min()
# Or we can use describe to display both min and max at once (plus some extra stuff)
weather_all['Temp (deg C)'].describe()
# + [markdown] slideshow={"slide_type": "slide"}
# b) How many unique station names are in this data? Display a list of the unique names.
# -
# Number of unique station names
weather_all['Station Name'].nunique()
# The unique station names are:
weather_all['Station Name'].unique()
# + [markdown] slideshow={"slide_type": "slide"}
# c) What is the most common weather category in the `'Conditions'` column? How many unique categories are there?
# -
# Use value_counts to display the counts for each category
# (value_counts sorts descending by default, so the top row is the mode)
weather_all['Conditions'].value_counts()
# - The most common weather category is "Mostly Cloudy"
# Number of unique categories (nunique excludes NaN by default)
weather_all['Conditions'].nunique()
# + [markdown] slideshow={"slide_type": "slide"}
# d) Add a column with the wind speed in miles per hour (multiply the wind speed in km/h by 0.62137). Save the data from columns `'Station Name'`, `'Datetime (Local Standard)'`, `'Wind Spd (km/h)'`, and your new column of wind speed in miles per hour, to a CSV file.
# -
# Add the new column (vectorized: scales the whole km/h column elementwise)
weather_all['Wind Spd (mph)'] = 0.62137 * weather_all['Wind Spd (km/h)']
weather_all.head()
# +
# Specify columns to include
columns = ['Station Name',
           'Datetime (Local Standard)',
           'Wind Spd (km/h)',
           'Wind Spd (mph)']
# Extract the subset of columns
winds = weather_all[columns]
winds.head()
# -
# Save to CSV file
# (Since this notebook is in the "solutions" sub-folder, the file path
# to the data folder is modified accordingly)
# NOTE(review): the file name says "YVR" but this subset contains every
# station, not just Vancouver — confirm the intended output name.
winds.to_csv('../data/winds_YVR.csv', index=False)
# + [markdown] slideshow={"slide_type": "-"}
# ##### Bonus exercises
#
# Create a variable `conditions` corresponding to the `'Conditions'` column of `weather_all`. We'll use this variable in each of the following exercises.
#
# e) What type of object is returned by `conditions.value_counts()`? Can you think of a method that could be applied to this output so that it returns only the counts for the top `n` values? How about the bottom `n` values?
# - Display only the counts for the 5 most common weather categories in `conditions`
# - Display only the counts for the 5 least common weather categories in `conditions`
# -
# Keep the Conditions column as a standalone Series for the bonus parts
conditions = weather_all['Conditions']
counts = conditions.value_counts()
type(counts)
# - The output of `value_counts` is a `pandas` Series, so we can use the `head` and `tail` methods for the top `n` and bottom `n` values.
# Top 5
counts.head()
# Bottom 5
counts.tail()
# + [markdown] slideshow={"slide_type": "-"}
# f) Use `conditions.value_counts?` to check out the documentation for the `value_counts` method. Experiment with the `normalize`, `sort` and `dropna` keyword arguments. How does the output change when you change these arguments?
# +
# conditions.value_counts?
# -
# normalize=True
conditions.value_counts(normalize=True)
# - The `normalize=True` option returns the counts expressed as a fraction of the total
# sort=False
conditions.value_counts(sort=False)
# - The `sort=False` option returns unsorted counts
# dropna=False
conditions.value_counts(dropna=False)
# - The `dropna=False` option includes missing values (`NaN`) — we can see that there is one missing value in the `conditions` Series.
# + [markdown] slideshow={"slide_type": "-"}
# g) `pandas` Series have a few *accessors*, which are attributes that [act like an interface to additional methods](https://realpython.com/python-pandas-tricks/#3-take-advantage-of-accessor-methods). With a Series of text data, like `conditions`, the `str` accessor allows you to apply string methods such as `upper`, `lower`, `strip`, `replace`, etc. to all the items in the Series.
#
# - Check out some of the documentation with `conditions.str?` and `conditions.str.upper?`.
# - Create a new Series with the weather categories converted to upper case.
# - Create a new Series with any instance of the string `'Snow'` in a weather category replaced with the string `'SNOW!!!'`.
# - For both of these new Series, use `value_counts` or `unique` methods to verify that the output is what you were expecting.
# +
# Display documentation
# conditions.str?
# +
# Display documentation
# conditions.str.upper?
# -
# Weather categories converted to upper case
# (the .str accessor applies the string method elementwise, skipping NaN)
conditions_upper = conditions.str.upper()
conditions_upper.head()
# Check the transformed data with value_counts
conditions_upper.value_counts()
# - As expected, all the weather categories have been converted to upper case, and the counts match up with the counts for the original data in `weather_all['Conditions']`.
# Weather categories with "Snow" replaced by "SNOW!!!"
conditions_exciting = conditions.str.replace('Snow', 'SNOW!!!')
conditions_exciting.head()
# Check the transformed data with value_counts
conditions_exciting.value_counts()
# - As expected, the "Snow" category has been transformed to "SNOW!!!" and the "Snow,Blowing Snow" category has been transformed to "SNOW!!!,BlowingSNOW!!!", and the counts match up with the counts for the original data in `weather_all['Conditions']`.
# + [markdown] slideshow={"slide_type": "slide"}
# ---
# # Exercise 2.1
#
# a) Create a new DataFrame which contains only the rows of `weather_all` where the station name is `'SASKATOON'`
# +
# Create a filter (boolean Series: True where the station is Saskatoon)
saskatoon = weather_all['Station Name'] == 'SASKATOON'
# Use the filter to extract the subset to a new DataFrame weather_sk
weather_sk = weather_all[saskatoon]
weather_sk.head()
# + [markdown] slideshow={"slide_type": "slide"}
# b) Display the Saskatoon data sorted on the `'Wind Spd (km/h)'` column in descending order. What is the fastest wind speed and what are the corresponding datetime and temperature?
# -
# Sort on wind speed in descending order
weather_sk.sort_values('Wind Spd (km/h)', ascending=False)
# - The fastest wind speed is 26 km/h at local time 2018-05-21 18:00, and temperature 28.6 C.
# + [markdown] slideshow={"slide_type": "slide"}
# c) Compute the mean values for the Saskatoon data.
# -
# NOTE(review): on pandas >= 2.0, DataFrame.mean() raises on non-numeric
# columns; this may need weather_sk.mean(numeric_only=True) — confirm the
# pandas version this course targets.
weather_sk.mean()
# + [markdown] slideshow={"slide_type": "slide"}
# ---
# # Exercise 2.2
#
# For this exercise, we'll be working with `'data/bc-popular-girls-names.csv'`:
# - 100 years of baby girl names in British Columbia (1915-2014)
# - The data includes every first name that was chosen five or more times in a given year
# - Each row corresponds to one name and one year
# - `'Count'` column: total count for that name in that year
# - `'Fraction'` column: this name's share of the total of all baby girls for that year
#
# a) Read the data file into a DataFrame and display summary statistics with the `describe` method.
#
# - (i) What are the range of values (minimum and maximum) for the `'Year'`, `'Count'`, and `'Fraction'` columns?
# - (ii) The maximum value of `'Count'` represents the highest count of baby girls with the same name in a single year — what were this name and year?
# - (iii) The maximum value of `'Fraction'` represents the highest _fraction_ (out of the total births for that year) of baby girls with the same name; what were this name and year? How do the count and fraction for this name and year compare with the values for the name/year from part (ii)?
# -
# Read the data (100 years of BC girls' names; one row per name per year)
girls = pd.read_csv('../data/bc-popular-girls-names.csv')
girls.head()
# Display summary statistics
girls.describe()
# - The range of years is 1915-2014, the range of counts is 5-765, and the range of fractions is 0.00029-0.068581.
# Use sorting to find the name and year corresponding to the maximum count
girls.sort_values('Count', ascending=False).head()
# - The highest count of baby girls with the same name in a single year was for the name Jennifer in 1984.
# Use sorting to find the name and year corresponding to the maximum fraction
girls.sort_values('Fraction', ascending=False).head()
# - The highest fraction of baby girls with the same name in a single year was for the name Mary in 1922, with a share of about 6.9% of the total, and a count of 289. In contrast, the name Jennifer in 1984 accounted for about 4.5% of the total, with a count of 765. It's interesting that the most popular girl name in 1922 comprised a much larger share of the total compared to the fraction for the name Jennifer in 1984.
# + [markdown] slideshow={"slide_type": "slide"}
# b) Aggregate the data by grouping on the `'Year'` column and taking the `sum`. The `'Count'` column of the resulting DataFrame represents the total number of baby girls per year in this data—create a line plot of this column.
# -
# Group by year and aggregate with sum
# NOTE(review): summing every column includes the string 'Name' column on
# newer pandas; may need .sum(numeric_only=True) there — confirm version.
girls_per_year = girls.groupby('Year').sum()
girls_per_year.head()
# Create a line plot using the kind='line' keyword argument
girls_per_year['Count'].plot(kind='line');
# + [markdown] slideshow={"slide_type": "slide"}
# c) Compute the grand total for each name over the whole 100 years by grouping on `'Name'` and taking the `sum`. Extract the `'Count'` column from this DataFrame (it's the only column that is meaningful in this aggregation) and use this Series to find the top 10 most common girls names and plot their grand totals in a bar chart.
# -
# Grand total (sum) for each name
grand_totals = girls.groupby('Name').sum()
grand_totals.head()
# +
# Create sorted Series of counts
sorted_counts = grand_totals['Count'].sort_values(ascending=False)
# Plot the first ten rows
sorted_counts.head(10).plot(kind='bar');
# + [markdown] slideshow={"slide_type": "slide"}
# d) Create a filter to extract the data rows for the name `'MARY'`. To explore the popularity of this name over time, create a line plot of this name's share of the total in each year by plotting the `'Fraction'` column vs. the `'Year'` column, with the name `'MARY'` as title.
# - Repeat the above steps for `'JENNIFER'` and any other name(s) of interest (if they're in the data)
# - How do the trends over time for the name Mary compare with the name Jennifer?
#
# _Hint: Check out the documentation for the `plot` method of a DataFrame. You'll see a `title` keyword argument, as well as a couple of extra keyword arguments (`x` and `y`) that aren't in `plot` method for a Series._
# +
# Create a filter (names in this dataset are stored upper-case)
mary = girls['Name'] == 'MARY'
# Create a line plot for the subset
girls[mary].plot(x='Year', y='Fraction', title='MARY');
# +
# Repeat the above steps for another name
name = 'JENNIFER'
name_filter = girls['Name'] == name
# Create a line plot for the subset
girls[name_filter].plot(x='Year', y='Fraction', title=name);
# -
# - The names Mary and Jennifer have very different patterns of popularity over time! Mary was hugely popular in the beginning of this 100-year period, but has been on a fairly steady decline since then, whereas Jennifer had a big peak in the 1970s-1990s, but was much less popular before and after.
# + [markdown] slideshow={"slide_type": "slide"}
# ##### Bonus Exercises
#
# e) Most popular names in recent years: Compute grand totals for each name, as in part (c), but only including the years from 2000 onwards. What are the top 10 most common girls names from 2000-present?
# +
# Create a filter and extract subset of rows from the data
# (only the years from 2000 onwards)
recent = girls['Year'] >= 2000
girls_recent = girls[recent]
# Group by name and aggregate with sum
totals_recent = girls_recent.groupby('Name').sum()
# Sort the counts and display the top 10
recent_counts = totals_recent['Count'].sort_values(ascending=False)
recent_counts.head(10)
# + [markdown] slideshow={"slide_type": "slide"}
# f) Trends in name diversity: Compute the number of unique names per year and plot this data as a line chart. How has name diversity evolved over time?
#
# _Hint: The data is organized such that for each year, there is exactly one row for each unique name in that year, so grouping on `'Year'` and aggregating with `count` will give the unique count we're looking for._
# +
# Group by year and aggregate with count
# (one row per unique name per year, so count == number of unique names)
names_per_year = girls.groupby('Year').count()
# The three columns all contain the same data, so we can pick any one of them
names_per_year = names_per_year['Name']
names_per_year.head()
# -
names_per_year.plot(kind='line');
# Fewest number of unique names
names_per_year.min()
# Largest number of unique names
names_per_year.max()
# - The number of unique names has been growing drastically over time! From a low of only 153 unique names near the start of the 100-year period, up to nearly 800 unique names in recent years.
# + [markdown] slideshow={"slide_type": "slide"}
# g) Compute 100-year summary statistics grouped by name as in part (c), but with the following modifications:
# - Use the `agg` function to calculate the following statistics for each name:
# - For the `'Year'` column: earliest year, latest year, number of years
# - For the `'Count'` column: grand total (sum), lowest yearly count, highest yearly count, mean yearly count
# - Display a random sampling of 50 rows of the resulting DataFrame with the `sample` method.
# +
# Define a dictionary with aggregations for each column
# (agg accepts {column: [list of aggregation names]})
agg_dict = {'Year' : ['min', 'max', 'count'],
            'Count' : ['sum', 'min', 'max', 'mean']}
# Use the agg method for aggregation
summary = girls.groupby('Name').agg(agg_dict)
# Use the sample method to display a random sampling
# The random_state=1 argument specifies a seed for the
# random number generator, so that the samples don't keep
# changing each time we run the code
summary.sample(50, random_state=1)
# -
# - You could explore this aggregated data in many neat ways, such as finding which names have been used every year in the 100-year period, or names which were used only in the first 50 years but not the second 50 years and vice versa, and so on!
# + [markdown] slideshow={"slide_type": "slide"}
# ---
#
# # Exercise 3.1
# -
# Per-station means used throughout Exercise 3.
# NOTE(review): as in Ex 2.1, newer pandas may need .mean(numeric_only=True).
weather_mean = weather_all.groupby('Station Name').mean()
weather_mean.head()
# + [markdown] slideshow={"slide_type": "slide"}
# a) Select the cell in row 2, column 6 of `weather_mean` using `iloc`
# -
weather_mean.iloc[2, 6]
# + [markdown] slideshow={"slide_type": "slide"}
# b) Same as a) but using `loc`
# -
# loc is label-based: row label is the station, column label the measurement
weather_mean.loc['EDMONTON', 'Pressure (kPa)']
# + [markdown] slideshow={"slide_type": "slide"}
# c) Use `iloc` to select the cell in `weather_mean` containing the value of the relative humidity in Halifax.
# -
weather_mean.iloc[4, 2]
# + [markdown] slideshow={"slide_type": "slide"}
# d) Select the same cell as in c) but using `loc`
# -
weather_mean.loc['HALIFAX', 'Rel Hum (%)']
# + [markdown] slideshow={"slide_type": "slide"}
# # Exercise 3.2
#
# a) Use `iloc` to select rows 15 to end and the first 3 columns of `weather_mean`
# -
weather_mean.iloc[15:, :3]
# + [markdown] slideshow={"slide_type": "-"}
# b) Use `iloc` to select the every fourth row (starting from 0) and columns 4, 3, and 0 of weather_mean
# -
weather_mean.iloc[::4, [4, 3, 0]]
# + [markdown] slideshow={"slide_type": "slide"}
# # Exercise 3.3
#
# a) Use `loc` to select the wind speed and wind direction in Toronto, Montreal, and Ottawa
# -
stations = ['TORONTO', 'MONTREAL', 'OTTAWA']
weather_mean.loc[stations, ['Wind Spd (km/h)', 'Wind Dir (deg)']]
# b) Use `loc` to select rows where the relative humidity is less than 50%, and a column slice from `'Temp (deg C)'` through `'Rel Hum (%)'` (inclusive)
# (unlike iloc, a loc label slice includes the end label)
low_rh = weather_mean['Rel Hum (%)'] < 50
weather_mean.loc[low_rh, 'Temp (deg C)':'Rel Hum (%)']
| solutions/solutions-to-exercises.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: mlops
# language: python
# name: mlops
# ---
import torch
from torch import nn, optim
# +
# Define model
class TheModelClass(nn.Module):
    """LeNet-style CNN: expects (N, 3, 32, 32) input, returns (N, 10) logits."""

    def __init__(self):
        super(TheModelClass, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        # 16 channels * 5 * 5 spatial after two conv+pool stages on 32x32 input
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        # BUG FIX: the original used F.relu but never imported
        # torch.nn.functional as F, so forward() raised NameError.
        # torch.relu is equivalent and already in scope.
        x = self.pool(torch.relu(self.conv1(x)))
        x = self.pool(torch.relu(self.conv2(x)))
        x = x.view(-1, 16 * 5 * 5)
        x = torch.relu(self.fc1(x))
        x = torch.relu(self.fc2(x))
        x = self.fc3(x)
        return x
# Initialize model
model = TheModelClass()
# Initialize optimizer (SGD with momentum)
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
# Print model's state_dict
# (maps each parameter/buffer name to its tensor; we show just the sizes)
print("Model's state_dict:")
for param_tensor in model.state_dict():
    print(param_tensor, "\t", model.state_dict()[param_tensor].size())
# Print optimizer's state_dict
# (hyperparameters and per-parameter state, e.g. momentum buffers)
print("Optimizer's state_dict:")
for var_name in optimizer.state_dict():
    print(var_name, "\t", optimizer.state_dict()[var_name])
# -
| examples/notebooks/Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/AstitvaSharma/ML_Algorithms/blob/main/data_preprocessing_tools.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="37puETfgRzzg"
# # Data Preprocessing Tools
# + [markdown] id="EoRP98MpR-qj"
# ## Importing the libraries
# + id="nhLNqFwu8gGa"
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# + [markdown] id="RopL7tUZSQkT"
# ## Importing the dataset
# + id="iINyGQMA8rs2"
# Load the dataset: x = feature matrix (all but the last column),
# y = target vector (last column), both as NumPy arrays via .values
dataset = pd.read_csv("Data.csv")
x = dataset.iloc[:, :-1].values
y = dataset.iloc[:, -1].values
# + colab={"base_uri": "https://localhost:8080/"} id="8wk1Cxkx_iQ5" outputId="37fac68b-412a-41fd-e08f-f9546f5eef2f"
print(x)
# + colab={"base_uri": "https://localhost:8080/"} id="GjTkcdA9_ycR" outputId="7cb1820b-0abb-4f64-f864-2b9ed0f18edf"
print(y)
# + [markdown] id="nhfKXNxlSabC"
# ## Taking care of missing data
# + id="YaYfCV98Dqx5"
from sklearn.impute import SimpleImputer
# Replace NaNs with the column mean in columns 1:3
# (presumably the numeric Age/Salary columns — confirm against Data.csv)
imputer = SimpleImputer(missing_values=np.nan, strategy='mean')
imputer.fit(x[:, 1:3])
x[:, 1:3]=imputer.transform(x[:, 1:3])
# + colab={"base_uri": "https://localhost:8080/"} id="TuJvhWr-Eg19" outputId="595454b2-f88c-4d8d-9785-604ecae8650f"
print(x)
# + [markdown] id="CriG6VzVSjcK"
# ## Encoding categorical data
# + [markdown] id="AhSpdQWeSsFh"
# ### Encoding the Independent Variable
# + id="GwnincOVUvIn"
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder
# One-hot encode column 0 (the categorical feature); remainder='passthrough'
# keeps the other columns unchanged, appended after the dummy columns.
ct = ColumnTransformer(transformers =[('encoders', OneHotEncoder(),[0])] , remainder='passthrough')
x = np.array(ct.fit_transform(x))
# + colab={"base_uri": "https://localhost:8080/"} id="9AtCHQcmB-bp" outputId="bf4c41e5-d5ed-4904-ae38-c0bd412118d3"
print(x)
# + [markdown] id="DXh8oVSITIc6"
# ### Encoding the Dependent Variable
# + id="843yhw27DuWX"
from sklearn.preprocessing import LabelEncoder
# Encode the (binary) target labels as integers 0/1
le = LabelEncoder()
y = le.fit_transform(y)
# + id="DW7dbY_LECYT" outputId="1d860f38-8b7e-42ff-dc45-36dc288bc52d" colab={"base_uri": "https://localhost:8080/"}
print(y)
# + [markdown] id="qb_vcgm3qZKW"
# ## Splitting the dataset into the Training set and Test set
# + id="L_ek1vBvTfc7"
from sklearn.model_selection import train_test_split
# 80/20 train/test split; fixed random_state for reproducibility
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.2, random_state = 1)
# + colab={"base_uri": "https://localhost:8080/"} id="gXHoa4LDUCCX" outputId="ce57e6f2-54a4-4f98-a0f5-87b3bf5621a9"
print(x_train)
# + colab={"base_uri": "https://localhost:8080/"} id="MNSPrZKTUHUG" outputId="9348ecfc-2ba6-4a8a-bf77-2b3a49368133"
print(x_test)
# + colab={"base_uri": "https://localhost:8080/"} id="gFNGxz8WUJ5y" outputId="4a088b26-2d93-4160-a3c9-b7e9348896a8"
print(y_train)
# + colab={"base_uri": "https://localhost:8080/"} id="VYurkWsUUMlc" outputId="acdffaba-530f-4a0a-8e66-751d6c2be8e9"
print(y_test)
# + [markdown] id="TpGqbS4TqkIR"
# ## Feature Scaling
# + id="RJCfj_XmaJOm"
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
# Standardize only columns 3: (presumably the numeric features; the first
# columns are the one-hot dummies, left unscaled). Note the scaler is fit
# on the training set only and reused on the test set to avoid leakage.
x_train[:, 3:] = sc.fit_transform(x_train[:, 3:] )
x_test[:, 3:] = sc.transform(x_test[:, 3:])
# + colab={"base_uri": "https://localhost:8080/"} id="RW6ffqe_a4Da" outputId="8a1764d5-1a7f-41bc-a212-5da39380d12b"
print(x_train)
# + colab={"base_uri": "https://localhost:8080/"} id="Aq-SyxMba6mu" outputId="ee342c9b-a5a0-4be6-bf25-f2ecbd9c0829"
print(x_test)
| data_preprocessing_tools.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Adapted for use with nbdev & Google Colab from [xstreamsrl Jupytemplate](https://github.com/donlelef/jupytemplate).
# +
#default_exp ensemble1
# -
#hide
#test_flag_colab
# Mount Google Drive when running on Colab (prompts for authorization);
# raises ImportError outside Colab where google.colab is unavailable.
from google.colab import drive
drive.mount('/content/drive')
#hide
# Install nbdev/fastcore on Colab if they are missing.
# BUG FIX: the original used a bare `except:` whose body contained only the
# commented-out jupytext magics, leaving the except clause empty — invalid
# as a .py file. Narrowed to ImportError and added an explicit `pass`.
try:
    import nbdev
except ImportError:
    # !pip install nbdev
    # !pip install fastcore
    pass
# +
#hide
# # %cd /content/drive/My Drive/<project directory>
# -
#hide
# not deps but we need them to use nbdev and run tests
from nbdev import *
from nbdev.showdoc import *
from fastcore.test import *
# # Ensemble1
#
# > Model ensembles.
# ##
#
# **Title**
#
# *The title of the notebook (above) should be coherent with file name (at the top) and, like the filename, follow the nbdev naming convention. For example, notebook '01' to be added to the project by author 'AB' (initials) and containing code for 'data_exploration' might have the filename `01_AB_data_exploration.ipynb` or `AB_01_data_exploration.ipynb` and title '01_AB_Data_Exploration' or 'AB_01_Data_Exploration'.*
#
# *Note that this title will also become the title of the notebook in the documentation.*
#
# **Purpose**
#
# *State the purpose of the notebook.*
#
# **Methodology**
#
# *Quickly describe assumptions and processing steps.*
#
# **WIP - improvements**
#
# *Use this section only if the notebook is not final.*
#
# **Notable TODOs:**
#
# *todo 1;*
# *todo 2;*
# *todo 3*.
#
# **Results**
#
# *Describe and comment the most important results.*
#
# **Suggested next steps**
#
# *State suggested next steps, based on results obtained in this notebook*
# ## Setup
#
# Import all the required **Python** libraries
# +
#test_flag_deps
# general
import os, pickle, pathlib
# Data manipulation
import pandas as pd
import numpy as np
# Options for pandas
pd.options.display.max_columns = 50
pd.options.display.max_rows = 30
# Visualizations
# BUG FIX: the original had `import matplotlib as plt`, binding the top-level
# matplotlib package (no plot()/show()/etc.) to the conventional pyplot alias.
import matplotlib.pyplot as plt
import plotly
import plotly.graph_objs as go
import plotly.offline as ply
plotly.offline.init_notebook_mode(connected=True)
import altair as alt
import cufflinks as cf
# Route pandas .iplot() through cufflinks in offline mode
cf.go_offline(connected=True)
cf.set_config_file(theme='white')
# -
#
# Import all required **local** libraries.
# +
# Include local library paths
import sys
# sys.path.append('path/to/local/lib') # put path to local libraries in system path
# Import local libraries # now local libraries in a non_local directory ('lib') can be easily imported
from nbds_colab.common import *
from nbds_colab.vis import *
from nbds_colab.dstemplate1 import *
from nbds_colab.dstemplate2 import *
from nbds_colab.dstemplate3 import *
# -
# ## Parameter definition
# Set all relevant parameters for our notebook. By convention, parameters are uppercase, while all the
# other variables follow Python's guidelines.
# Example notebook-level parameter (uppercase by convention per the template);
# replace with the notebook's real parameters.
PARAM = 0
# ## Data import
# Retrieve required data for analysis.
# ## Data processing
# Put here the core of the notebook. Feel free to further split this section into subsections and adjust heading markdown as fits the project.
#export
def ensemble1_test(test_msg):
    "Echo *test_msg* back unchanged (smoke-test hook for the ensemble1 module)."
    echoed = test_msg
    return echoed
ensemble1_test('ensemble1')
# Test local imports
# Each *_test helper echoes its argument, so these asserts verify that the
# local nbds_colab modules imported above are wired up correctly.
assert common_test('from common') == 'from common'
assert vis_test('from vis') == 'from vis'
assert template1_test('from template1') == 'from template1'
assert template2_test('from template2') == 'from template2'
assert template3_test('from template3') == 'from template3'
# ## References
# Relevant references:
# 1. author1, article1, journal1, year1, url1
# 2. author2, article2, journal2, year2, url2
| 08_ensemble1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # IT Academy - Data Visualization with Python & R
# ## [M1 T01: Scatter Plot](https://github.com/jesussantana/data_visualization_python/blob/main/Module%201%20-%20%20Introduction/M1_T01_%20Scatter_%20Plot.ipynb)
# ### [Github Data Visualization](https://github.com/jesussantana/data_visualization_python)
#
#
# [](https://www.python.org/)
#
# [](https://jupyter.org/try)
#
# [](https://www.linkedin.com/in/chus-santana/)
#
# [](https://github.com/jesussantana)
# +
import numpy as np
import pandas as pd
import seaborn as sns
# ^^^ pyforest auto-imports - don't write above this line
# Pandas configuration
# ==============================================================================
pd.set_option('display.max_columns', None)
# Graphics
# ==============================================================================
from matplotlib import style
import matplotlib.ticker as ticker
# BUG FIX: `plt` is used below (rcParams, style) but pyplot was never
# imported — the notebook relied on pyforest auto-importing it. Import it
# explicitly so the file also runs without pyforest.
import matplotlib.pyplot as plt
# Matplotlib configuration
# ==============================================================================
plt.rcParams['image.cmap'] = "bwr"
#plt.rcParams['figure.dpi'] = "100"
plt.rcParams['savefig.bbox'] = "tight"
style.use('ggplot') or plt.style.use('ggplot')
# %matplotlib inline
# Seaborn configuration
# ==============================================================================
sns.set_theme(style='darkgrid', palette='deep')
dims = (20, 16)
# Warnings configuration
# ==============================================================================
import warnings
warnings.filterwarnings('ignore')
# Folder configuration
# ==============================================================================
from os import path
import sys
# Make ../scripts importable without duplicating the entry
new_path = '../scripts/'
if new_path not in sys.path:
    sys.path.append(new_path)
# +
# Path folder configuration
# ===============================================================================
# NOTE(review): `path` here shadows `from os import path` imported above —
# harmless in this notebook, but consider renaming (e.g. data_dir).
path = '../data/'
file = 'external/sample_data.csv'
# The CSV has no header row, so name the two columns explicitly
df_raw = pd.read_csv(path+file, names=['X','Y'])
# -
# Work on a copy so df_raw keeps the pristine load
df = df_raw.copy()
# ## Exploratory analysis
# Quick exploratory look: sample rows, dtypes/memory, dimensions, type
df.head()
df.info()
df.shape
type(df)
# Missing-value count per column, sorted ascending
df.isna().sum().sort_values()
# ## Numerical variables
# Summary statistics for the numeric columns only
df.select_dtypes(include=['float64', 'int']).describe()
# +
# Distribution graph for each numerical variable
# ==============================================================================
# Adjust number of subplots based on the number of columns
fig, axes = plt.subplots(ncols=2, nrows=1, figsize=(20, 10))
axes = axes.flat
columnas_numeric = df.select_dtypes(include=['float64', 'int']).columns
for i, colum in enumerate(columnas_numeric):
    sns.histplot(
        data = df,
        x = colum,
        stat = "count",
        kde = True,
        # cycle through the style's color cycle (doubled so the index
        # can exceed the cycle length without raising IndexError)
        color = (list(plt.rcParams['axes.prop_cycle'])*2)[i]["color"],
        line_kws= {'linewidth': 2},
        alpha = 0.3,
        ax = axes[i]
    )
    axes[i].set_title(colum, fontsize = 16, fontweight = "bold")
    axes[i].tick_params(labelsize = 16)
    axes[i].set_xlabel("")
fig.tight_layout()
# leave headroom for the shared figure title
plt.subplots_adjust(top = 0.9)
fig.suptitle('Distribution Numerical Variable', fontsize = 30, fontweight = "bold")
plt.savefig("reports/figures/Distribution_Numerical_Variable.png")
# -
# ## Numerical variables correlation
# +
# Correlation between numeric columns
# ==============================================================================
def tidy_corr_matrix(corr_mat):
    """Reshape a square correlation matrix into long ('tidy') form.

    Drops the self-correlation diagonal, adds an |r| column, and returns
    the rows sorted by absolute correlation strength (strongest first).
    Columns of the result: variable_1, variable_2, r, abs_r.
    """
    tidy = corr_mat.stack().reset_index()
    tidy.columns = ['variable_1', 'variable_2', 'r']
    off_diagonal = tidy['variable_1'] != tidy['variable_2']
    tidy = tidy.loc[off_diagonal, :]
    tidy['abs_r'] = tidy['r'].abs()
    return tidy.sort_values('abs_r', ascending=False)
corr_matrix = df.select_dtypes(include=['float64', 'int']).corr(method='pearson')  # pairwise Pearson r of numeric columns
tidy_corr_matrix(corr_matrix).head(20)  # show the 20 strongest pairwise correlations
# +
# Heatmap matrix of correlations
# ==============================================================================
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(20, 10))
# BUG FIX: the original chained .corr() twice, which computed the correlation
# of the correlation matrix instead of the correlation of the data itself.
corr = df.select_dtypes(include=['float64', 'int']).corr(method='pearson')
# Getting the Upper Triangle of the co-relation matrix
matrix = np.triu(corr)
# using the upper triangle matrix as mask (show each pair only once)
sns.heatmap(corr,
            annot=True,
            mask=matrix,
            cmap=sns.diverging_palette(240, 10, s=80, l=55, n=9),
            annot_kws = {"size": 10})
ax.set_xticklabels(
    ax.get_xticklabels(),
    rotation = 45,
    horizontalalignment = 'right',
)
ax.set_yticklabels(
    ax.get_yticklabels(),
    rotation = 0,
    horizontalalignment = 'right',
)
ax.tick_params(labelsize = 15)
fig.suptitle('Heatmap Correlation Matrix', fontsize = 30, fontweight = "bold")
plt.savefig("reports/figures/Heatmap_Matrix_Correlations.png")
# +
# Correlation graph for each numerical variable with Response variable
# ==============================================================================
# Adjust number of subplots based on the number of columns
# NOTE(review): regplot is always given x=df.X, y=df.Y, so both subplots are
# identical regardless of `colum` — confirm whether x should be df[colum].
fig, axes = plt.subplots(ncols=2, nrows=1, figsize=(20, 10))
axes = axes.flat
columnas_numeric = df.select_dtypes(include=['float64', 'int']).columns
for i, colum in enumerate(columnas_numeric):
    sns.regplot(
        x = df.X,
        y = df.Y,
        color = "navy",
        marker = '.',
        scatter_kws = {"alpha":0.4},
        line_kws = {"color":"r","alpha":0.7},
        ax = axes[i]
    )
    axes[i].set_title(f" {colum}", fontsize = 16, fontweight = "bold")
    #axes[i].ticklabel_format(style='sci', scilimits=(-4,4), axis='both')
    # Engineering notation (k, M, ...) keeps large tick values readable
    axes[i].yaxis.set_major_formatter(ticker.EngFormatter())
    axes[i].xaxis.set_major_formatter(ticker.EngFormatter())
    axes[i].tick_params(labelsize = 16)
    axes[i].set_xlabel("")
    axes[i].set_ylabel("")
    #if (i-1 >= len(columnas_numeric)-1): break
# Empty axes are removed
"""for i in [8]:
    fig.delaxes(axes[i])"""
fig.tight_layout()
plt.subplots_adjust(top=0.9)
fig.suptitle('Correlation', fontsize = 30, fontweight = "bold")
plt.savefig("reports/figures/Correlation_Each_Numerical_Variable.png")
# +
# Scatter Plot
# ==============================================================================
# Raw X-vs-Y scatter on a square canvas, saved alongside the other figures.
fig, ax = plt.subplots(figsize=(8, 8))
ax.scatter(df.X, df.Y, color='darkred')
ax.set_xlabel('X')
ax.set_ylabel('Y')
fig.suptitle('M1_T01_ Scatter_Plot', fontsize=30, fontweight="bold")
plt.savefig("reports/figures/M1_T01_ Scatter_Plot.png")
plt.show()
# -
| Module-1-Introduction/M1_T01_ Scatter_Plot.ipynb |
% ---
% jupyter:
% jupytext:
% text_representation:
% extension: .m
% format_name: light
% format_version: '1.5'
% jupytext_version: 1.14.4
% kernelspec:
% display_name: Octave
% language: octave
% name: octave
% ---
% + [markdown] slideshow={"slide_type": "slide"}
% [**Dr. <NAME>**](mailto:<EMAIL>), _Lecturer in Biomedical Engineering_
%
% National University of Ireland Galway.
%
% ---
% + [markdown] slideshow={"slide_type": "skip"}
% \newpage
% + [markdown] slideshow={"slide_type": "skip"}
% This is an [Octave](https://www.gnu.org/software/octave/) (an open source alternative to MATLAB) [Jupyter notebook](https://jupyter.org/)
% + [markdown] slideshow={"slide_type": "skip"}
% \newpage
% + [markdown] slideshow={"slide_type": "slide"}
% # Hyperelastic materials
%
% ## Introduction
% * So-called __hyperelastic__ formulations are non-linear constitutive (material behaviour) "laws" which are useful to describe nonlinear elastic materials undergoing large (finite strain) deformation.
%
%
% * In hyperelasticity the constitutive or material law is defined by a so __strain energy density__ function often denoted by a $W$ or $\Psi$ symbol.
%
%
% * $\Psi$ is a __scalar function__ (so not a tensor or vector valued function).
%
%
% * The strain energy density function has units of __energy per unit volume__ such as $J/m^3$.
%
%
% * However if one recalls that $J$ can be written in terms of $Nm$, then we see that $J/m^3=Nm/m^3=N/m^2$, which means we may equivalently say that $\Psi$ has __units of stress__.
% + [markdown] slideshow={"slide_type": "slide"}
% ## Stress computation
% * Derivatives of $\Psi$ with a deformation metric provide a stress metric (there are different types of strains each with their own _work conjugate_ stress type).
%
%
% * For instance, the second _Piola-Kirchhoff stress_ $\mathbf{S}$ is obtained as the derivative with respect to the _Green-Lagrange strain_ $\mathbf{E}$
% $$\mathbf{S}=\frac{\partial \Psi}{\partial \mathbf{E}}$$
%
%
% * We tend to focus on the _true stress_ or __Cauchy stress__ $\boldsymbol{\sigma}$, which is obtained with the aid of the _deformation gradient tensor_ $\mathbf{F}$:
%
% $$\boldsymbol{\sigma}=J^{-1}\mathbf{F}\mathbf{S}\mathbf{F}^\top$$
%
% * In some cases formulations are specified using the _principal stretches_ $\lambda_i$. These may also be used to derive (principal) stresses, e.g.:
% $$\sigma_i=J^{-1} \lambda_i \frac{\partial \Psi}{\partial \lambda_i}$$
% -
% $$\Psi=\sum_{a=1}^{N} \frac{c_a}{m_a^2}\bigg(\lambda_1^{m_a}+\lambda_2^{m_a}+\lambda_3^{m_a}-3\bigg)$$
%
% $$\Psi=\sum_{a=1}^{N} \frac{c_a}{m_a^2}\bigg(\lambda_1^{m_a}-1+\lambda_2^{m_a}+\lambda_3^{m_a}-2\bigg)$$
%
% $$\Psi=\sum_{a=1}^{N} \frac{c_a}{m_a^2}\bigg(\lambda_1^{m_a}-1+\lambda_2^{m_a}-1+\lambda_3^{m_a}-1\bigg)$$
%
%
% $$\Psi=\sum_{a=1}^{N} \frac{c_a}{m_a^2} \bigg( \big( \lambda_1^{m_a}-1 \big)+\big( \lambda_2^{m_a}-1 \big)+ \big(\lambda_3^{m_a}-1\big) \bigg)$$
%
% $$\Psi=\sum_{a=1}^{N} \frac{c_a}{m_a} \frac{1}{m_a}\bigg( \big( \lambda_1^{m_a}-1 \big)+\big( \lambda_2^{m_a}-1 \big)+ \big(\lambda_3^{m_a}-1\big) \bigg)$$
%
% $$\Psi=\sum_{a=1}^{N} \frac{c_a}{m_a} \bigg( \frac{1}{m_a}\big( \lambda_1^{m_a}-1 \big)+ \frac{1}{m_a}\big( \lambda_2^{m_a}-1 \big)+ \frac{1}{m_a}\big(\lambda_3^{m_a}-1\big) \bigg)$$
%
% $$\Psi=\sum_{a=1}^{N} \frac{c_a}{m_a} \bigg( \sum_{i=1}^{3} \frac{1}{m_a}\big( \lambda_i^{m_a}-1 \big) \bigg)$$
%
% * Now one may recognize these as the Seth-Hill class of strains $\mathbf{E}^{(m_a)}$
% $$E^{(m_a)}_i=\frac{1}{m_a}(\lambda_i^{m_a}-1)$$
%
% * For instance $m_a$ gets us the Green-Lagrange strain:
% $$E_i=\frac{1}{2}(\lambda_i^{2}-1)$$
%
% $$\sum_{i=1}^{3} \frac{1}{m_a}\big( \lambda_i^{m_a}-1 \big)=\mathrm{tr}(\mathbf{E}^{(m_a)})$$
%
% * Furthermore we may recognize that the sum of such parts for is actually the trace of such a strain tensor leading to:
% $$\Psi=\sum_{a=1}^{N} \frac{c_a}{m_a}\mathrm{tr}(\mathbf{E}^{(m_a)})$$
%
%
%
%
%
% + [markdown] slideshow={"slide_type": "slide"}
% ## Three types of hyperelastic formulations
% * Three types of hyperelastic formulation types are treated here with a focus on the Ogden formulation:
%
% 1. __Constrained__ formulations (a.k.a "incompressible" formulations)
% $$\Psi(\lambda_1,\lambda_2,\lambda_3)=\sum_{a=1}^{N} \frac{c_a}{m_a^2}\big(\lambda_1^{m_a}+\lambda_2^{m_a}+\lambda_3^{m_a}-3\big)$$
%
% 2. __Unconstrained__ or __coupled__ formulations (a.k.a "compressible" formulations)
% $$\Psi(\lambda_1,\lambda_2,\lambda_3)=\frac{\kappa'}{2}(J-1)^2 + \sum_{a=1}^{N} \frac{c_a}{m_a^2}\big(\lambda_1^{m_a}+\lambda_2^{m_a}+\lambda_3^{m_a}-3 - m_a \ln{(J)}\big)$$
%
% 3. __Uncoupled__ formulations (a.k.a "nearly incompressible" formulations)
% $$\Psi(\tilde{\lambda}_1,\tilde{\lambda}_2,\tilde{\lambda}_3)=\frac{\kappa}{2}\ln{(J)}^2 + \sum_{a=1}^{N} \frac{c_a}{m_a^2}\big(\tilde{\lambda}_1^{m_a}+\tilde{\lambda}_2^{m_a}+\tilde{\lambda}_3^{m_a}-3\big)$$
%
%
% * This notebook discusses these and provides example implementations for uniaxial loading and first order $N=1$ __Ogden hyperelastic__ formulations.
%
% For more background information see chapter 6 Hyperelasticity" in Holzapfel's book: _<NAME>, Nonlinear solid mechanics: A continuum approach for engineering. John Wiley & Sons Ltd., 2000._
% + [markdown] slideshow={"slide_type": "slide"}
% ### Anatomy of the Ogden formulation
%
% * Typically the Ogden formulation looks something like this (constrained form shown here; implementations vary depending on software):
% $$\Psi(\lambda_1,\lambda_2,\lambda_3)=\sum_{a=1}^{N} \frac{c_a}{m_a^2}\big(\lambda_1^{m_a}+\lambda_2^{m_a}+\lambda_3^{m_a}-3\big)$$
%
% * If we "distribute" the -3 as a set of -1's, and work a factor $\frac{1}{m_a}$ into the summation we can see the above is equivalent to:
% $$\Psi(\lambda_1,\lambda_2,\lambda_3)=\sum_{a=1}^{N} \frac{c_a}{m_a}\big(\frac{1}{m_a}(\lambda_1^{m_a}-1) + \frac{1}{m_a}(\lambda_2^{m_a}-1)+\frac{1}{m_a}(\lambda_3^{m_a}-1) \big)$$
%
% * Now one may recognize these as the Seth-Hill class of strains
% $$E^{(m_a)}_i=\frac{1}{m_a}(\lambda_i^{m_a}-1)$$
%
% * For instance using $m_a=2$ makes it use the Green-Lagrange strain $\mathbf{E}$
% $$E_i=\frac{1}{2}(\lambda_i^{2}-1)$$
%
% * Furthermore we may recognize that the sum of such parts for is actually the trace of such a strain tensor leading to:
% $$\Psi=\sum_{a=1}^{N} \frac{c_a}{m_a}\mathrm{tr}(\mathbf{E}^{(m_a)})$$
%
% * So the Ogden formulation is a powerful law where we define energies by scaling (multiplying) the trace of a chosen _"strain type"_ (defined by $m_a$) by a stiffness parameter $c_a$. Summing lots of terms ($N>1$) allows one to capture complex stiffening behaviour.
% + [markdown] slideshow={"slide_type": "slide"}
% * The Ogden formulation can also conveniently be used as the __"mother" of many other formulations__
%
%
% * Using $N=1$ and $m_1=2$ makes the Ogden formulation reduce to a __Neo-Hookean__ formulation
% $$\Psi(\lambda_1,\lambda_2,\lambda_3)=\frac{c_1}{4}\big(\lambda_1^{2}+\lambda_2^{2}+\lambda_3^{2}-3\big)=\frac{c_1}{4}\big(I_1-3\big)=\frac{c_1}{2}\mathrm{tr}(\mathbf{E})$$
%
%
% * The Neo-Hookean is one of the simplest hyperelastic formulations and is named after the fact that it can be thought of as an extension of Hooke's law to non-linear solid mechanics (it reduces to Hooke's law for infinitesimal strains).
%
%
% * Using $N=2$ and $m_1=-m_2=2$ makes the Ogden formulation reduce to a __Mooney-Rivlin__ formulation (if $J=1$)
% $$\Psi(\lambda_1,\lambda_2,\lambda_3)=\frac{c_1}{4}\big(\lambda_1^{2}+\lambda_2^{2}+\lambda_3^{2}-3\big)+\frac{c_2}{4}\big(\lambda_1^{-2}+\lambda_2^{-2}+\lambda_3^{-2}-3\big)=\frac{c_1}{4}\big(I_1-3\big)+\frac{c_2}{4}\big(I_2-3\big)$$
%
%
% _In the above $I_1$ and $I_2$ are known as the first and second invariants of the right Cauchy green tensor $\mathbf{C}$. These often appear in the literature._
% + [markdown] slideshow={"slide_type": "skip"}
% \newpage
% + [markdown] slideshow={"slide_type": "slide"}
% #### Defining shared variables used by the example numerical implementations
% + slideshow={"slide_type": "fragment"} magic_args="Define parameters common to all examples"
%Define material parameters for a first order (N=1) Ogden model
N=1; %The Ogden law order (number of terms in the strain energy summation)
c1=1; %The shear modulus like parameter (overall stiffness scale)
m1=12; %The non-linearity parameter (exponent on the stretches)
kp=1000; %Bulk modulus like parameter kappa' (used in the unconstrained/coupled model below)
k=kp; %Bulk modulus kappa (used for uncoupled model)
%Derive applied stretch
appliedStretch=1.3; %Set applied stretch (30% uniaxial extension)
nDataPoints=50; %Number of data points to use for evaluation and graph
lambda_3=linspace(1,appliedStretch,nDataPoints); %The 3 direction (loading) stretch
% + [markdown] slideshow={"slide_type": "skip"}
% \newpage
% + [markdown] slideshow={"slide_type": "slide"}
% ## Constrained formulations
% * The word "constrained" relates to the fact that incompressible behaviour (no volume change) is enforced in the formulation.
%
% * These formulations are not really used in FEA and instead serve as means to easily derive analytical solutions for incompressible behaviour "by hand".
%
%
% + [markdown] slideshow={"slide_type": "slide"}
% ### The constrained Ogden formulation
% * The constrained Ogden formulation is often presented as:
% $$\Psi(\lambda_1,\lambda_2,\lambda_3)=\sum_{a=1}^{N} \frac{c_a}{m_a^2}\big(\lambda_1^{m_a}+\lambda_2^{m_a}+\lambda_3^{m_a}-3\big)$$
%
% * However, something is missing in the above, namely the treatment of the hydrostatic pressure $p$ and its contribution.
% $$\Psi(\lambda_1,\lambda_2,\lambda_3,p)=U(p) + \sum_{a=1}^{N} \frac{c_a}{m_a^2}\big(\lambda_1^{m_a}+\lambda_2^{m_a}+\lambda_3^{m_a}-3\big)$$
%
% * For these constrained forms the contribution $U(p)$ is not derived from the constitutive equation but is instead determined using the boundary conditions.
%
% + [markdown] slideshow={"slide_type": "slide"}
% * Below an example for uniaxial loading is presented.
%
% * The uniaxial load (e.g. a tensile or compressive stretch) is here specified in the 3rd (or Z) direction, which means $\lambda_3 \neq 1$.
%
% * Using the "incompressiblity" and uniaxial loading assumption we can formulate some useful relations to help solve for the stress.
%
% * First of all, the uniaxial conditions mean the other "lateral" stretches are equivalent:
% $$\lambda_1=\lambda_2$$
%
% * Secondly, if the material is truly incompressible we have $J=\lambda_1 \lambda_2 \lambda_3 = 1$, and since $\lambda_1=\lambda_2$ we can derive:
%
% $$J= \lambda_1 \lambda_1 \lambda_3 = \lambda_1^2 \lambda_3 = 1 \rightarrow \lambda_1=\lambda_2= \sqrt{\frac{1}{\lambda_3}}=\lambda_3^{-\frac{1}{2}}$$
%
% * Thirdly, for uniaxial conditions there is only one non-zero stress, the applied stress $\sigma_3=\sigma_{33}$, therefore:
% $$\sigma_1=\sigma_2=\sigma_{11}=\sigma_{22}=0$$
%
% * So now with an assumed $J=1$, the ability to express all stretches in terms of $\lambda_3$ (the known applied stretch), and the fact that $\sigma_1=\sigma_2=0$, we are ready to start tackling the full stress evaluation.
%
% + [markdown] slideshow={"slide_type": "slide"}
% * First the Cauchy stress tensor $\boldsymbol{\sigma}$ is defined as:
% $$\boldsymbol{\sigma}=\bar{\boldsymbol{\sigma}}-\bar{p}\mathbf{I}$$
%
% * The contribution $\bar{\boldsymbol{\sigma}}$ is derived from the constitutive equation:
% $$\Psi(\lambda_1,\lambda_2,\lambda_3)=\sum_{a=1}^{N} \frac{c_a}{m_a^2}\big(\lambda_1^{m_a}+\lambda_2^{m_a}+\lambda_3^{m_a}-3\big)$$
%
% and is obtained from:
% $$\bar{\sigma}_i=\lambda_i \frac{\partial \Psi}{\partial \lambda_i}$$
%
% * Leading to:
% $$\bar{\sigma}_i=\sum_{a=1}^{N} \frac{c_a}{m_a}\lambda_i^{m_a}$$
% + [markdown] slideshow={"slide_type": "slide"}
% * The next step is to determine $\bar{p}$ in this relation:
% $$\boldsymbol{\sigma}=\bar{\boldsymbol{\sigma}}-\bar{p}\mathbf{I}$$
%
% * First lets rewrite the above in terms of the principal components $\sigma_i$
% $$\sigma_i=-\bar{p} +\sum_{a=1}^{N} \frac{c_a}{m_a}\lambda_i^{m_a}$$
%
% * Next we use $\sigma_1=\sigma_2=0$ to derive an expression for $\bar{p}$:
% $$\sigma_1=\sigma_2=-\bar{p} + \sum_{a=1}^{N} \frac{c_a}{m_a}\lambda_1^{m_a}=0$$
%
% $$\rightarrow \bar{p}=\sum_{a=1}^{N} \frac{c_a}{m_a} \lambda_1^{m_a}$$
% + [markdown] slideshow={"slide_type": "slide"}
% * Finally, implementing $\lambda_1=\lambda_2=\lambda_3^{-\frac{1}{2}}$ leads to:
% $$\rightarrow \bar{p}=\sum_{a=1}^{N} \frac{c_a}{m_a}(\lambda_3^{-\frac{1}{2}})^{m_a}=\sum_{a=1}^{N} \frac{c_a}{m_a} \lambda_3^{-\frac{m_a}{2}} $$
%
% * Which therefore allows for the formulation of an expression for $\bar{\sigma}_3$:
% $$\sigma_3=-\bar{p} + \sum_{a=1}^{N} \frac{c_a}{m_a} \lambda_3^{m_a}=-\sum_{a=1}^{N} \frac{c_a}{m_a} \lambda_3^{-\frac{m_a}{2}} + \sum_{a=1}^{N} \frac{c_a}{m_a} \lambda_3^{m_a}$$
%
% * Which can be simplified to:
% $$\sigma_3=\sum_{a=1}^{N} \frac{c_a}{m_a}\big(\lambda_3^{m_a} - \lambda_3^{-\frac{m_a}{2}} \big)$$
%
% + [markdown] slideshow={"slide_type": "slide"}
% * The full Cauchy stress tensor can then be written as:
% $$\boldsymbol{\sigma}=\sigma_3 \begin{bmatrix} 0 & 0 & 0 \\ 0 & 0 & 0 \\ 0 & 0 & 1\end{bmatrix}=\sum_{a=1}^{N} \frac{c_a}{m_a}\big(\lambda_3^{m_a} - \lambda_3^{-\frac{m_a}{2}} \big) \begin{bmatrix} 0 & 0 & 0 \\ 0 & 0 & 0 \\ 0 & 0 & 1\end{bmatrix}$$
%
%
% * Note that although $\bar{p}$ is a type of pressure contribution, it should not be confused with the full hydrostatic pressure $p$ which is derived from:
% $$p=-\frac{1}{3}\mathrm{tr}{(\boldsymbol{\sigma})}=-\frac{\sigma_3}{3}$$
%
% + [markdown] slideshow={"slide_type": "skip"}
% \newpage
% + [markdown] slideshow={"slide_type": "slide"}
% ### Numerical implementation
% #### Compute stresses
% + slideshow={"slide_type": "fragment"} magic_args="The constrained formulation"
%Direct stress computation for the constrained (incompressible) Ogden model:
%sigma_3 = (c1/m1)*(lambda_3^m1 - lambda_3^(-m1/2)) as derived above (N=1)
S3=(c1/m1).*(lambda_3.^m1-lambda_3.^(-m1/2));
S1=zeros(size(S3)); %Lateral stresses vanish under uniaxial loading
S2=zeros(size(S3));
%Compute Jacobian for plotting
lambda_1=sqrt(1./lambda_3); %Incompressibility gives lambda_1=lambda_2=lambda_3^(-1/2)
lambda_2=lambda_1; %Equal lateral stretches due to uniaxial loading
J=lambda_1.*lambda_2.*lambda_3; %Should evaluate to 1 everywhere (incompressible)
% + [markdown] slideshow={"slide_type": "slide"}
% #### Visualize stresses
% + slideshow={"slide_type": "fragment"}
%Visualize the three principal Cauchy stresses for the constrained formulation
figure; hold on;
titleText=['Constrained form. Cauchy stress, min: ',num2str(min(S3(:))),...
', max: ',num2str(max(S3(:)))];
title(titleText); %Add title
%Plot each principal stress with its own color and decreasing line width
stressSet={S1,S2,S3}; %sigma_1 and sigma_2 are zero under uniaxial loading
lineSpecs={'r-','g-','b-'};
lineWidths=[20 15 10];
for iCurve=1:3
hCurves(iCurve)=plot(lambda_3,stressSet{iCurve},lineSpecs{iCurve},...
'LineWidth',lineWidths(iCurve));
end
hl=legend(hCurves,{'\sigma_1','\sigma_2','\sigma_3'}); %Add legend
set(hl,'FontSize',15,'Location','NorthEastOutside','Box','off'); %Adjust legend
axis tight; axis square; grid on; box on;
set(gca,'FontSize',15);
% + [markdown] slideshow={"slide_type": "slide"}
% #### Visualize Jacobian
% + slideshow={"slide_type": "fragment"}
%Visualize Jacobian of the constrained formulation (should be identically 1)
figure; hold on;
title(['Constrained form. Jacobian, min: ',num2str(min(J(:))),...
', max: ',num2str(max(J(:)))]); %Add title
h1=plot(lambda_3,J,'k-','LineWidth',20); %Jacobian J versus applied stretch
hl=legend([h1],{'J'}); %Add legend
set(hl,'FontSize',15,'Location','NorthEastOutside','Box','off'); %Adjust legend
axis tight; axis square; grid on; box on;
set(gca,'FontSize',15);
% + [markdown] slideshow={"slide_type": "skip"}
% \newpage
% + [markdown] slideshow={"slide_type": "slide"}
% ## Unconstrained formulations
% These formulations are also known as coupled formulations (some literature refers to these formulations as "compressible").
%
% * The unconstrained Ogden formulation is given by
%
% $$\Psi(\lambda_1,\lambda_2,\lambda_3)=\frac{\kappa'}{2}(J-1)^2 + \sum_{a=1}^{N} \frac{c_a}{m_a^2}\big(\lambda_1^{m_a}+\lambda_2^{m_a}+\lambda_3^{m_a}-3 - m_a \ln{(J)}\big)$$
%
% * The principal Cauchy stresses $\sigma_i$ can be computed from:
%
% $$\sigma_i=J^{-1} \lambda_i \frac{\partial \Psi}{\partial \lambda_i}$$
%
% * Leading to:
%
% $$\sigma_i=\kappa' \big( J-1 \big) + J^{-1} \sum_{a=1}^{N} \frac{c_a}{m_a}\big(\lambda_i^{m_a} - 1 \big)$$
%
% + [markdown] slideshow={"slide_type": "slide"}
% ### Step-by-step derivation:
%
% 1. First compute
% $$J^{-1} \lambda_i \frac{\partial}{\partial \lambda_i} \big( \frac{\kappa'}{2}(J-1)^2 \big)$$
%
% 2. Take derivative with respect to $\lambda_3$ and use symmetry
% $$=J^{-1} \lambda_3 \frac{\partial}{\partial \lambda_3} \big( \frac{\kappa'}{2}(\lambda_1\lambda_2\lambda_3-1)^2 \big)$$
%
% 3. Expand square
% $$=J^{-1} \lambda_3 \frac{\partial}{\partial \lambda_3} \big( \frac{\kappa'}{2}(\lambda_1^2\lambda_2^2\lambda_3^2-2\lambda_1\lambda_2\lambda_3+1) \big)$$
% + [markdown] slideshow={"slide_type": "slide"}
% 4. Evaluate derivative
% $$=J^{-1} \lambda_3 \frac{\partial}{\partial \lambda_3} \big( \frac{\kappa'}{2}(\lambda_1^2\lambda_2^2\lambda_3^2-2\lambda_1\lambda_2\lambda_3+1) \big) = J^{-1} \lambda_3 \big( \frac{\kappa'}{2}(2\lambda_1^2\lambda_2^2\lambda_3-2\lambda_1\lambda_2) \big)$$
%
% 5. Remove factor of 2
% $$=J^{-1} \lambda_3 \kappa' \big( \lambda_1^2\lambda_2^2\lambda_3-\lambda_1\lambda_2 \big)$$
%
% 6. Work in factor $\lambda_3$
% $$=J^{-1} \kappa' \big( \lambda_1^2\lambda_2^2\lambda_3^2-\lambda_1\lambda_2\lambda_3 \big)$$
%
% 7. Recognize $J$ and $J^2$
% $$=J^{-1} \kappa' \big( J^2-J \big)$$
%
% 8. Process division by $J$ (multiply by $J^{-1}$). This result holds for any $\lambda_i$
% $$=\kappa' \big( J-1 \big)$$
%
% + [markdown] slideshow={"slide_type": "slide"}
% 9. Now compute the next part:
% $$J^{-1} \lambda_i \frac{\partial}{\partial \lambda_i} \bigg( \sum_{a=1}^{N} \frac{c_a}{m_a^2}\big(\lambda_1^{m_a}+\lambda_2^{m_a}+\lambda_3^{m_a}-3 - m_a \ln{(J)}\big) \bigg)$$
%
% 10. First notice that summation can be moved:
%
% $$=\sum_{a=1}^{N} J^{-1} \lambda_i \frac{\partial}{\partial \lambda_i} \bigg( \frac{c_a}{m_a^2}\big(\lambda_1^{m_a}+\lambda_2^{m_a}+\lambda_3^{m_a}-3 - m_a \ln{(J)}\big) \bigg)$$
%
% 11. Next take derivative with respect to $\lambda_3$ and aim to use symmetry with respect to any stretch
%
% $$=\sum_{a=1}^{N} J^{-1} \lambda_3 \frac{\partial}{\partial \lambda_3} \bigg( \frac{c_a}{m_a^2}\big(\lambda_1^{m_a}+\lambda_2^{m_a}+\lambda_3^{m_a}-3 - m_a \ln{(J)}\big) \bigg)$$
%
% 12. Use $\frac{\partial}{\partial \lambda_i}(\lambda_i^{m_a})=m_a \lambda_i^{m_a-1}$ and $\ln{(J)}=\ln{(\lambda_1\lambda_2\lambda_3)}=\ln{(\lambda_1)}+\ln{(\lambda_2)}+\ln{(\lambda_3)}$
%
% $$=\sum_{a=1}^{N} J^{-1} \lambda_3 \frac{c_a}{m_a^2}\big( m_a \lambda_3^{m_a-1} - \frac{m_a}{\lambda_3} \big)$$
% + [markdown] slideshow={"slide_type": "slide"}
% 13. Multiply by $\lambda_3$ and move $J^{-1}$
% $$=J^{-1} \sum_{a=1}^{N} \frac{c_a}{m_a^2}\big( m_a \lambda_3^{m_a} - m_a \big)$$
%
% 14. Simplify by removing $m_a$ factor
% $$=J^{-1} \sum_{a=1}^{N} \frac{c_a}{m_a}\big(\lambda_3^{m_a} - 1 \big)$$
%
%
% 15. Generalise for any $\lambda_i$:
% $$=J^{-1} \sum_{a=1}^{N} \frac{c_a}{m_a}\big(\lambda_i^{m_a} - 1 \big)$$
%
% 16. Combine step 8 and 15 to produce overall result:
% $$\sigma_i=\kappa' \big( J-1 \big) + J^{-1} \sum_{a=1}^{N} \frac{c_a}{m_a}\big(\lambda_i^{m_a} - 1 \big)$$
% + [markdown] slideshow={"slide_type": "slide"}
% ### How to compute stresses?
% * The stress equations have the unknown $J$ as well as $\lambda_1$ and $\lambda_2$:
% $$\sigma_i=\kappa' \big( J-1 \big) + J^{-1} \sum_{a=1}^{N} \frac{c_a}{m_a}\big(\lambda_i^{m_a} - 1 \big)$$
%
% * The uniaxial loading conditions and boundary conditions help simplify this to a single unknown
%
% * First of all uniaxial loading in the 3rd or Z-direction means $$\lambda_1=\lambda_2$$
%
% * Next we can use the definition of the Jacobian to come to expressions for $\lambda_1$ and $\lambda_2$
%
% * Since we have $J=\lambda_1 \lambda_2 \lambda_3$, and $\lambda_1=\lambda_2$ we can derive:
% $$J=\lambda_1 \lambda_2 \lambda_3 = \lambda_1 \lambda_1 \lambda_3 = \lambda_1^2 \lambda_3 $$
% $$\rightarrow \lambda_1=\lambda_2= \sqrt{\frac{J}{\lambda_3}}$$
% + [markdown] slideshow={"slide_type": "slide"}
% * The above shows that although $\lambda_3$ is known, knowledge of $J$ is required in order to determine $\lambda_1$ and $\lambda_2$. Or conversely $\lambda_1$ (or $\lambda_2$) needs to be determined, allowing for the computation of $J$. Either way, one unknown remains.
%
%
% * To solve for the unknown $J$ we may use the fact that $\sigma_1=\sigma_2=0$
% $$\sigma_1=\kappa' \big( J-1 \big) + J^{-1} \sum_{a=1}^{N} \frac{c_a}{m_a}\big(\lambda_1^{m_a} - 1 \big)=0$$
%
% * If we solve for $J$ we can use $\lambda_1=\sqrt{\frac{J}{\lambda_3}}$ and write:
% $$\sigma_1=\kappa' \big( J-1 \big) + J^{-1} \sum_{a=1}^{N} \frac{c_a}{m_a}\bigg(\bigg(\frac{J}{\lambda_3}\bigg)^{\frac{m_a}{2}} - 1 \bigg)$$
%
%
% * Or if instead we solve for $\lambda_1$ we can use $J=\lambda_1^2\lambda_3$ and write:
% $$\sigma_1=\kappa' \big( (\lambda_1^2 \lambda_3 )-1 \big) + \frac{1}{\lambda_1^2 \lambda_3} \sum_{a=1}^{N} \frac{c_a}{m_a}\big(\lambda_1^{m_a} - 1 \big)=0$$
%
%
% * Solving these is not trivial but numerical solutions are derived below for $J$
% + [markdown] slideshow={"slide_type": "skip"}
% \newpage
% + [markdown] slideshow={"slide_type": "slide"}
% ### Numerical implementation
% #### Compute stresses
% + slideshow={"slide_type": "fragment"} magic_args="The unconstrained or coupled formulation "
% One approach: define S1 as a function of J and find its root.
% fzero locates the J for which S1(J)=0 (traction-free lateral faces).
%Compute Jacobian given boundary conditions S1=S2=0
J=zeros(size(lambda_3)); %Initialize an array of J values which are all zeros
for q=1:1:nDataPoints %Loop over all data points
%Lateral-stress residual as a function of J for the current applied stretch,
%using lambda_1=sqrt(J/lambda_3) to eliminate the lateral stretch
S1_fun=@(J) kp*(J-1)+(1/J)*(c1/m1)*((sqrt(J/lambda_3(q)).^m1)-1);
%Find Jacobian for zero stress; J=1 (incompressible guess) as initial value
J(q)=fzero(S1_fun,1); %Find root of nonlinear function
end
%Compute transverse stretches using the solved J values
lambda_1=sqrt(J./lambda_3);
lambda_2=lambda_1; %Due to uniaxial loading
%Compute principal stresses (note, these are not ordered)
S1=kp*(J-1)+(1./J).*(c1/m1).*((lambda_1.^m1)-1);
S2=kp*(J-1)+(1./J).*(c1/m1).*((lambda_2.^m1)-1);
S3=kp*(J-1)+(1./J).*(c1/m1).*((lambda_3.^m1)-1);
% + [markdown] slideshow={"slide_type": "slide"}
% #### Visualize stresses
% + slideshow={"slide_type": "fragment"}
%Visualize stress graphs for the unconstrained (coupled) formulation
figure; hold on;
title(['Unconstrained form. Cauchy stress, min: ',num2str(min(S3(:))),...
', max: ',num2str(max(S3(:)))]); %Add title
h1=plot(lambda_3,S1,'r-','LineWidth',20); %The 1 direction principal stress
h2=plot(lambda_3,S2,'g-','LineWidth',15); %The 2 direction principal stress
h3=plot(lambda_3,S3,'b-','LineWidth',10); %The 3 direction principal stress
hl=legend([h1 h2 h3],{'\sigma_1','\sigma_2','\sigma_3'}); %Add legend
set(hl,'FontSize',15,'Location','NorthEastOutside','Box','off'); %Adjust legend
axis tight; axis square; grid on; box on;
set(gca,'FontSize',15);
% + [markdown] slideshow={"slide_type": "slide"}
% #### Visualize Jacobian
% + slideshow={"slide_type": "fragment"}
%Visualize Jacobian of the unconstrained formulation
figure; hold on;
title(['Unconstrained form. Jacobian, min: ',num2str(min(J(:))),...
', max: ',num2str(max(J(:)))]); %Add title
h1=plot(lambda_3,J,'k-','LineWidth',20); %Jacobian J versus applied stretch (near 1 = nearly incompressible)
hl=legend([h1],{'J'}); %Add legend
set(hl,'FontSize',15,'Location','NorthEastOutside','Box','off'); %Adjust legend
axis tight; axis square; grid on; box on;
set(gca,'FontSize',15);
% + [markdown] slideshow={"slide_type": "slide"}
% #### Alternative solving method featuring interpolation
% + slideshow={"slide_type": "fragment"}
% Alternative: evaluate S1 over a range of candidate J values; where the curve
% crosses the x-axis S1(J)=0, and that crossing is approximated by interpolation.
%Compute Jacobian given boundary conditions S1=S2=0
nTestPoints=100; %Set up a number of test values (more=better but slower)
J_test=linspace(0.9,1.1,nTestPoints); %The test J values; NOTE(review): assumes the root lies in [0.9,1.1] — confirm for other parameter sets
J=zeros(size(lambda_3)); %Initialize an array of J values which are all zeros
for q=1:1:nDataPoints %Loop over all data points
%Compute test stresses for all candidate J at the current applied stretch
S1_test=kp*(J_test-1)+(1./J_test).*(c1/m1).*((sqrt(J_test./lambda_3(q)).^m1)-1);
%Find Jacobian for S1(J)=0 using interpolation (inverse lookup of the zero crossing)
% J(q)=interp1(S1_test,J_test,0,'linear'); %linear interpolation
J(q)=interp1(S1_test,J_test,0,'pchip'); %piece-wise cubic hermite interpolation
end
%Compute transverse stretches using the solved J values
lambda_1=sqrt(J./lambda_3);
lambda_2=lambda_1; %Due to uniaxial loading
%Compute principal stresses (note, these are not ordered)
S1=kp*(J-1)+(1./J).*(c1/m1).*((lambda_1.^m1)-1);
S2=kp*(J-1)+(1./J).*(c1/m1).*((lambda_2.^m1)-1);
S3=kp*(J-1)+(1./J).*(c1/m1).*((lambda_3.^m1)-1);
% + [markdown] slideshow={"slide_type": "slide"}
% #### Visualize stresses
% + slideshow={"slide_type": "fragment"}
%Visualize stress graphs (interpolation-based solution)
figure; hold on;
title(['Unconstrained form. Cauchy stress, min: ',num2str(min(S3(:))),...
', max: ',num2str(max(S3(:)))]); %Add title
h1=plot(lambda_3,S1,'r-','LineWidth',20); %The 1 direction principal stress
h2=plot(lambda_3,S2,'g-','LineWidth',15); %The 2 direction principal stress
h3=plot(lambda_3,S3,'b-','LineWidth',10); %The 3 direction principal stress
hl=legend([h1 h2 h3],{'\sigma_1','\sigma_2','\sigma_3'}); %Add legend
set(hl,'FontSize',15,'Location','NorthEastOutside','Box','off'); %Adjust legend
axis tight; axis square; grid on; box on;
set(gca,'FontSize',15);
% + [markdown] slideshow={"slide_type": "slide"}
% #### Visualize Jacobian
% + slideshow={"slide_type": "fragment"}
%Visualize Jacobian (interpolation-based solution)
figure; hold on;
title(['Unconstrained form. Jacobian, min: ',num2str(min(J(:))),...
', max: ',num2str(max(J(:)))]); %Add title
h1=plot(lambda_3,J,'k-','LineWidth',20); %Jacobian J versus applied stretch
hl=legend([h1],{'J'}); %Add legend
set(hl,'FontSize',15,'Location','NorthEastOutside','Box','off'); %Adjust legend
axis tight; axis square; grid on; box on;
set(gca,'FontSize',15);
% + [markdown] slideshow={"slide_type": "skip"}
% \newpage
% + [markdown] slideshow={"slide_type": "slide"}
% ## Uncoupled formulations
% * Given the numerical difficulties in handling truly incompressible behaviour (theoretically requiring $\kappa=\infty$), a special class of constitutive formulations has been developed, referred to as _uncoupled_ formulations.
%
%
% * These uncoupled formulations are useful to model nearly-incompressible behaviour
%
%
% * The term _uncoupled_ relates to the fact that the strain energy density $\Psi$ is split into two additively separated parts, namely:
% 1. A purely _deviatoric_ (or isochoric = no volume change) part relating to shape change only $\Psi_{dev}$
% 1. A purely _volumetric_ part relating to volume change only $\Psi_{vol}$
%
% $$\Psi=\Psi_{dev}+\Psi_{vol}$$
% + [markdown] slideshow={"slide_type": "slide"}
% ### Uncoupling the deformation
% * To accommodate the split, special shape- and volume-changing deformation metrics are required.
%
%
% * The Jacobian or volume ratio $J$ is already suitable to describe volume change ($J=0.9$ means 10% volume loss, $J=1.1$ means 10% volume gain).
%
% * From the definition $J=\lambda_1\lambda_2\lambda_3$ one could imagine a single "spherical" average stretch $\lambda$ which is the same in all directions such that:
% $$J=\lambda_1\lambda_2\lambda_3=\lambda\lambda\lambda=\lambda^3 \rightarrow \lambda=J^{\frac{1}{3}}$$
%
% * To "take away" the effect of this spherical volume changing stretch $\lambda$ from each of the stretches we can multiply them by $\frac{1}{\lambda}=J^{-\frac{1}{3}}$:
% $$\tilde{\lambda}_i=J^{-\frac{1}{3}}\lambda_i$$
%
% * This introduces the _deviatoric stretches_ denoted $\tilde{\lambda}_i$
%
% * We can check if these deviatoric stretches really only change the shape by computing $\tilde{J}$ which should be 1 in magnitude for all stretches:
% $$\tilde{J}=\tilde{\lambda}_1\tilde{\lambda}_2\tilde{\lambda}_3=J^{-\frac{1}{3}}\lambda_1J^{-\frac{1}{3}}\lambda_2J^{-\frac{1}{3}}\lambda_3=J^{-\frac{1}{3}}J^{-\frac{1}{3}}J^{-\frac{1}{3}}\lambda_1\lambda_2\lambda_3=\frac{1}{J}J=1$$
% + [markdown] slideshow={"slide_type": "slide"}
% ### The uncoupled Ogden formulation
% * The uncoupled Ogden formulation is given as:
% $$\Psi(\tilde{\lambda}_1,\tilde{\lambda}_2,\tilde{\lambda}_3)=\frac{\kappa}{2}\ln{(J)}^2 + \sum_{a=1}^{N} \frac{c_a}{m_a^2}\big(\tilde{\lambda}_1^{m_a}+\tilde{\lambda}_2^{m_a}+\tilde{\lambda}_3^{m_a}-3\big)$$
%
% * Where
% $$\Psi_{vol}=\frac{\kappa}{2}\ln{(J)}^2$$
% and
% $$\Psi_{dev}=\sum_{a=1}^{N} \frac{c_a}{m_a^2}\big(\tilde{\lambda}_1^{m_a}+\tilde{\lambda}_2^{m_a}+\tilde{\lambda}_3^{m_a}-3\big)$$
%
% * The principal Cauchy stresses $\sigma_i$ can be computed from:
%
% $$\boldsymbol{\sigma}=\boldsymbol{\sigma}_{vol}+\boldsymbol{\sigma}_{dev}$$
% + [markdown] slideshow={"slide_type": "slide"}
% * The volumetric stress $\boldsymbol{\sigma}_{vol}$ is derived from:
% $$\boldsymbol{\sigma}_{vol}=p\mathbf{I}$$
%
% where the hydrostatic pressure is now derived directly from the constitutive equation:
%
% $$p=\frac{\partial \Psi_{vol}}{\partial J}$$
% resulting in:
% $$\boldsymbol{\sigma}_{vol}=\kappa\frac{\ln{(J)}}{J}\mathbf{I}$$
%
% + [markdown] slideshow={"slide_type": "slide"}
% * The deviatoric stress $\boldsymbol{\sigma}_{dev}$ is derived from:
% $${\sigma_{dev_i}}=J^{-1} \lambda_i \frac{\partial \Psi_{dev}}{\partial \lambda_i}=J^{-1} \bigg( \tilde{\lambda}_i \frac{\partial \Psi_{dev}}{\partial \tilde{\lambda}_i} -\frac{1}{3}\sum_{j=1}^3\tilde{\lambda}_j \frac{\partial \Psi_{dev}}{\partial \tilde{\lambda}_j } \bigg)$$
%
%
% * Since $J=\lambda_1 \lambda_2 \lambda_3$, and $\lambda_1=\lambda_2$ (due to uniaxial loading in the 3rd direction) we can derive:
%
% $$J=\lambda_1 \lambda_2 \lambda_3 = \lambda_1 \lambda_1 \lambda_3 = \lambda_1^2 \lambda_3 $$
% $$\rightarrow \lambda_1=\lambda_2= \sqrt{\frac{J}{\lambda_3}}$$
%
% * Using
% $$\lambda_i \frac{\partial \Psi_{dev}}{\partial \tilde{\lambda}_i}=\sum_{a=1}^{N} \frac{c_a}{m_a}\tilde{\lambda}_i^{m_a}$$
%
% we can formulate
% $$\sigma_{dev_i}=J^{-1} \sum_{a=1}^{N} \frac{c_a}{m_a} \bigg( \tilde{\lambda}_i^{m_a} -\frac{1}{3} \bigg( \tilde{\lambda}_1^{m_a} + \tilde{\lambda}_2^{m_a} + \tilde{\lambda}_3^{m_a} \bigg) \bigg)$$
% + [markdown] slideshow={"slide_type": "slide"}
% $$\sigma_{dev_i}=J^{-1} \sum_{a=1}^{N} \frac{c_a}{m_a} \bigg( \tilde{\lambda}_i^{m_a} -\frac{1}{3} \bigg( \tilde{\lambda}_1^{m_a} + \tilde{\lambda}_2^{m_a} + \tilde{\lambda}_3^{m_a} \bigg) \bigg)$$
%
% * And using $\lambda_1=\lambda_2= \sqrt{\frac{J}{\lambda_3}}$
%
% $$\sigma_{dev_i}=J^{-1} \sum_{a=1}^{N} \frac{c_a}{m_a} \bigg( \tilde{\lambda}_i^{m_a} -\frac{1}{3} \bigg( 2\bigg(\frac{J}{\lambda_3}\bigg)^{\frac{m_a}{2}} + \tilde{\lambda}_3^{m_a} \bigg) \bigg)$$
%
% * Leading to:
%
% $$\sigma_i=\kappa\frac{\ln{(J)}}{J} + J^{-1} \sum_{a=1}^{N} \frac{c_a}{m_a} \bigg( \tilde{\lambda}_i^{m_a} -\frac{1}{3} \bigg( 2\bigg(\frac{J}{\lambda_3}\bigg)^{\frac{m_a}{2}} + \tilde{\lambda}_3^{m_a} \bigg) \bigg)$$
%
% * Numerical methods are now needed to solve for J such that $\sigma_1=\sigma_2=0$
% + [markdown] slideshow={"slide_type": "slide"}
% * **Note/tip**: To achieve nearly incompressible behaviour ($J\approx1$), the bulk modulus $\kappa$ is often set several orders of magnitude higher than the effective shear modulus (e.g. $c_1$ here). The codes here use $\kappa=1000 c_1$.
% + [markdown] slideshow={"slide_type": "skip"}
% \newpage
% + [markdown] slideshow={"slide_type": "slide"}
% ### Numerical implementation
% #### Compute stresses
% + slideshow={"slide_type": "fragment"} magic_args="The uncoupled formulation "
% One approach is to define a function for S1 and to find the J for which it is zero.
% For this application the fzero function is useful to find J for S1(J)=0.
%Compute Jacobian given boundary conditions S1=S2=0 (uniaxial loading in
%direction 3 leaves the transverse directions traction free)
J=zeros(size(lambda_3)); %Initialize an array of J values which are all zeros
for q=1:1:nDataPoints %Loop over all data points
    %Create stress function with current lambda; lambda_1=sqrt(J/lambda_3)
    %follows from J=lambda_1^2*lambda_3 under uniaxial loading (see slides)
    %NOTE(review): the stretches used here are the plain lambda_i, not the
    %deviatoric lambda-tilde of the derivation above -- confirm intended
    S1_fun=@(J) k*(log(J)/J)+(1/J)*(c1/m1)*(sqrt(J/lambda_3(q))^m1...
        -((1/3)*(2*(J/lambda_3(q))^(m1/2)+lambda_3(q)^m1)));
    %Find Jacobian for zero stress, use J=1 as initial
    J(q)=fzero(S1_fun,1); %Find root of nonlinear function
end
%Compute transverse stretches using J values
lambda_1=sqrt(J./lambda_3);
lambda_2=lambda_1; %Due to uniaxial loading
%Compute principal stresses (note, these are not ordered)
S1=k*(log(J)./J)+(1./J).*(c1/m1).*(lambda_1.^m1-((1/3)*(lambda_1.^m1+lambda_2.^m1+lambda_3.^m1)));
S2=k*(log(J)./J)+(1./J).*(c1/m1).*(lambda_2.^m1-((1/3)*(lambda_1.^m1+lambda_2.^m1+lambda_3.^m1)));
S3=k*(log(J)./J)+(1./J).*(c1/m1).*(lambda_3.^m1-((1/3)*(lambda_1.^m1+lambda_2.^m1+lambda_3.^m1)));
% + [markdown] slideshow={"slide_type": "slide"}
% #### Visualize stresses
% + slideshow={"slide_type": "fragment"}
%Plot the three principal Cauchy stress curves against the applied stretch
figure; hold on;
titleStr=['Uncoupled form. Cauchy stress, min: ',num2str(min(S3(:))),...
    ', max: ',num2str(max(S3(:)))];
title(titleStr); %Report the loading-direction stress range in the title
hS1=plot(lambda_3,S1,'r-','LineWidth',20); %Principal stress in direction 1
hS2=plot(lambda_3,S2,'g-','LineWidth',15); %Principal stress in direction 2
hS3=plot(lambda_3,S3,'b-','LineWidth',10); %Principal stress in direction 3 (loading)
hl=legend([hS1 hS2 hS3],{'\sigma_1','\sigma_2','\sigma_3'}); %Add legend
set(hl,'FontSize',15,'Location','NorthEastOutside','Box','off'); %Style legend
axis tight; axis square; grid on; box on; %Tight limits first, then square aspect
set(gca,'FontSize',15); %Enlarge axis tick labels
% + [markdown] slideshow={"slide_type": "slide"}
% #### Visualize Jacobian
% + slideshow={"slide_type": "fragment"}
%Plot the volume ratio J against the applied stretch lambda_3
figure; hold on;
titleStr=['Uncoupled form. Jacobian, min: ',num2str(min(J(:))),...
    ', max: ',num2str(max(J(:)))];
title(titleStr); %Report the J range in the title
hJ=plot(lambda_3,J,'k-','LineWidth',20); %Jacobian (volume ratio) curve
hl=legend(hJ,{'J'}); %Add legend
set(hl,'FontSize',15,'Location','NorthEastOutside','Box','off'); %Style legend
axis tight; axis square; grid on; box on; %Tight limits first, then square aspect
set(gca,'FontSize',15); %Enlarge axis tick labels
| notebooks/nbx_Ogden.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
# Fix the RNG so both synthetic clouds are reproducible; the draw order below
# (class1 first, then class2) must not change or the samples change too.
np.random.seed(2343243)
# Class 1: 100 points from a spherical Gaussian centred at the origin.
mean_vec1 = np.array([0,0,0])
cov_mat1 = np.array([[1,0,0],[0,1,0],[0,0,1]])
class1 = np.random.multivariate_normal(mean_vec1, cov_mat1, 100)
# Class 2: same identity covariance, shifted to (1, 1, 1) so the clouds overlap.
mean_vec2 = np.array([1,1,1])
cov_mat2 = np.array([[1,0,0],[0,1,0],[0,0,1]])
class2 = np.random.multivariate_normal(mean_vec2, cov_mat2, 100)
# +
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D, proj3d

# 3D scatter of the two synthetic Gaussian clouds: circles for class 1,
# triangles for class 2.
figure3d = plt.figure(figsize=(8, 8))
axes3d = figure3d.add_subplot(111, projection='3d')
for cloud, marker in ((class1, 'o'), (class2, '^')):
    axes3d.plot(cloud[:, 0], cloud[:, 1], cloud[:, 2], marker)
plt.show()
# -
# Stack both classes into one (200, 3) array and project onto the two leading
# principal components.
all_data = np.concatenate((class1, class2))
pca = PCA(n_components = 2)
transformed_data = pca.fit_transform(all_data)
# Bare expressions below are notebook display statements (projected data and
# the two principal directions).
transformed_data
pca.components_
# First 100 rows are class 1 (circles), last 100 are class 2 (triangles).
plt.plot(transformed_data[0:100,0],transformed_data[0:100,1],"o")
plt.plot(transformed_data[100:200,0],transformed_data[100:200,1],"^")
plt.show()
# Map the 2D projection back into the original 3D space; the reconstructed
# points all lie on the plane spanned by the two principal components.
X_approx = pca.inverse_transform(transformed_data)
fig = plt.figure(figsize=(8,8))
ax = fig.add_subplot(111,projection='3d')
ax.plot(X_approx[:, 0], X_approx[:, 1], X_approx[:, 2], '^')
plt.show()
# Plane-membership check: the rank-2 PCA reconstruction lies on a plane, so
# a*x + b*y + c*z should evaluate to (approximately) the same constant for
# every reconstructed point. (Coefficients presumably derived from
# pca.components_ -- TODO confirm.)
a, b, c = -0.409689, 7.2827, -7.1008
i = 10
a * X_approx[i, 0] + b * X_approx[i, 1] + c * X_approx[i, 2]
| Lecture 19 PCA/Applying PCA on 3 D data/PCA-3D.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Sample Data Description(Japanese Only)
#
# I'm sorry to write this part only in Japanese.
#
# 申し訳ないが,先ずは日本語のみでサンプルデータについて説明をする.
# 今回,ディレクトリの中には,サイト上で2018年5月3日現在に取得可能な2016年から,2018年5月2日分までをファイル上にuploadしている.
#
# +
# -*- coding: utf8 -*-
import pandas as pd

# Read the two yearly power-demand CSV exports into pandas DataFrames.
pd_2017 = pd.read_csv('./data/external/power_demand2017.csv')
pd_2018 = pd.read_csv('./data/external/power_demand2018.csv')
# -
# ごく普通の時系列の処理 (just ordinary time-series processing)
# Fixed: this line was bare text inside a code cell, which is a SyntaxError
# under the notebook's Python 2 kernel (and a NameError under Python 3).
# ## 2017年の電力需要の結果
# +
#first, plot sample data
import matplotlib.pyplot as plt
# %matplotlib inline
import seaborn as sns

# Time-series plot of the 2017 demand column. NOTE(review): sns.tsplot was
# removed in newer seaborn; this notebook targets the older API -- confirm
# the pinned seaborn version before rerunning.
plt.figure(figsize=(8, 2), dpi=160)
style_rc = {"lines.linewidth": 0.2, 'grid.linestyle': '--', 'grid.linewidth': 0.1}
sns.set('talk', 'whitegrid', 'dark', font_scale=0.5, rc=style_rc)
sns.tsplot(pd_2017['DEMAND(10GW)'])
# -
# ### 2018年の電力需要の結果
# Same styling as the 2017 figure, applied to the 2018 demand series.
plt.figure(figsize=(8, 2), dpi=160)
style_rc = {"lines.linewidth": 0.2, 'grid.linestyle': '--', 'grid.linewidth': 0.1}
sns.set('talk', 'whitegrid', 'dark', font_scale=0.5, rc=style_rc)
sns.tsplot(pd_2018['DEMAND(10GW)'])
| notebooks/data_summary.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Making some Mars maps with pygmt (extended)
#
# This tutorial page covers the basics of creating some maps and 3D plot of Mars (yes! Mars). The idea here is to demonstrate that you can use a simple sequence of commands with PyGMT, a Python wrapper for the Generic Mapping Tools (GMT), and with some public data about the topography of Mars, create your own maps, as well as compare this topography with what we know of our own planet Earth.
#
# ## first, some options
#
# You can run this notebook using your local pygmt installation, or via Binder, or even Google Colaboratory. See comments for each option below.
# **A)** A short note if you are using COLAB
#
# The version of python in COLAB is different from what the newer GMT needs to install along with pygmt. So, one way around this problem is to reinstall GMT from scratch, along with other important packages. This is done with this block of commands below.
#
# **comment out the first line of the block (%%script echo skipping) if you want to use colab**
# + colab={"base_uri": "https://localhost:8080/"} id="nnMDokWZuQyN" outputId="0be69909-5cb9-4d8c-acfb-1b0b6b303b8f" magic_args="echo skipping" language="script"
#
# # because I like to enjoy my coffee in silence, it takes time.
# # (3 runs averaged 6 minutes to install everything ! keep drinking your coffee)
# # comment the %%capture line if you want to see the colab VM working
# %%capture
# !sudo apt update
# !sudo apt upgrade -y
# !sudo apt install -y build-essential cmake libcurl4-gnutls-dev libnetcdf-dev gdal-bin libgdal-dev libfftw3-dev libpcre3-dev liblapack-dev libblas-dev libglib2.0-dev ghostscript ghostscript-x graphicsmagick ffmpeg xdg-utils
# # clone gmt from source
# !git clone --depth 50 https://github.com/GenericMappingTools/gmt
# # cmake everything
# !cmake /content/gmt
# # build and install
# !cmake --build . --target install
#
# # and last but not least
# !pip install pygmt
#
# # and if you don't believe in it
# !gmt --version
# !python --version
# + colab={"base_uri": "https://localhost:8080/"} id="oIdVNbxrr1lw" outputId="05379551-c66c-4d0b-917c-74cc25fb8942" tags=["remove-stdout"]
# Also, if you are in colab or trying from your jupyter, you will need the Mars Topography (MOLA) already in Netcdf
# a copy of the original file distributed from the Mars Climate Database,
# from the European Space Agency under ESTEC contract 11369/95/NL/JG(SC) and Centre National D'Etude Spatial
# is in the gdrive.
# !gdown 1fDzz8AxR1T58y0IGPhmbb1ZwrTLckp2G
# -
# **B)** Now, if you are using Binder or in your local jupyter
#
# You just skip the block above. Make sure you have the `mola32.nc` in your folder.
#
# %matplotlib inline
# ## Mars dataset
#
# First, we open the `mola32.nc` file using xarray. Note the longitudes are from 0-360°, latitudes are distributed from North to South and the `alt`variable is the MOLA Topography at 32 pixels/degree built from original MOLA file `megt90n000fb.img`.
# + id="IImGpHBZrwG0"
import xarray as xr
# MOLA topography at 32 pixels/degree; longitudes run 0-360 and latitudes are
# stored north-to-south (stated in the notebook text above).
dset_mars = xr.open_dataset('mola32.nc')
dset_mars
# -
# Just like any other notebook with pygmt, we import the library and manipulate other data. To make a map of the entire Martian surface without a lot of time and memory, let's reduce the resolution using `grdsample`. We also take the opportunity to transform an `alt` variable into a `float` to be used in maps.
# + colab={"base_uri": "https://localhost:8080/"} id="XFfzKq4-42S2" outputId="e1697b32-8a4c-46c6-cccf-bed9c5f9d421"
import pygmt
# convert from int16 to float so GMT treats the grid as continuous elevation
dset_mars_topo = dset_mars.alt.astype(float)
# A global Mars map is interesting, but we need a coarser resolution so we do not consume all memory.
# translate changes from grid to pixel registration and spacing resamples to 1 degree resolution
dset_mars_topo = pygmt.grdsample(grid=dset_mars_topo,translate=True,spacing=[1,1])
# don't be worried about the warnings.
# -
# Here we can create a map of the entire Martian surface, in the same projections we use for our planet.
# + colab={"base_uri": "https://localhost:8080/", "height": 50} id="4jmXhEsk0kYi" outputId="cfeef10d-e42f-4ce4-9d21-97c1c6828d72"
# Whole-planet map of the downsampled MOLA grid, cylindrical stereographic.
fig = pygmt.Figure()
fig.grdimage(grid=dset_mars_topo,region='g',frame=True,projection='Cyl_stere/0/0/12c')
# you can try with different cylindrical or miscellaneous projections
# see at https://www.pygmt.org/dev/projections/index.html
# some ideas: Eckert IV = Kf; Hammer = H; Mollweide = W
fig.colorbar(frame=["a5000", "x+lElevation", "y+lm"])
fig.show()
# -
# A very interesting feature is Mount Olympus (Olympus Mons - see more details at https://mars.nasa.gov/resources/22587/olympus-mons), centered at approximately 19°N and 133°W, with a height of 22 km (14 miles) and approximately 700 km (435 miles) in diameter. Let's use the original dataset at 32 pixels/degree resolution and plot a (not so interesting) map with `xarray`.
# + colab={"base_uri": "https://localhost:8080/", "height": 297} id="ypeLac-gtjVI" outputId="f9013715-c01e-484c-b63a-8714852ea755"
# Olympus Mons is located in these slices of 12 degrees of latitude and 30 degrees of longitude.
# slice(25,13) is descending on purpose: the latitudes in this file run north-to-south.
# We cut the region of interest and convert the original int16 "alt" data to float (for the grid).
dset_olympus = dset_mars.sel(latitude=slice(25,13),longitude=slice(210,240)).alt.astype('float')
dset_olympus.plot()
# -
# We use the same sequence as other pygmt tutorial notebooks to make a map.
# + colab={"base_uri": "https://localhost:8080/", "height": 301} id="4xPXldUJsbha" outputId="4e68b731-68ff-46d2-f5b2-3fcd39c46f7f"
# first things, first: a flat Mercator map of the Olympus Mons cut-out
fig = pygmt.Figure()
# note I can add projection, after cmap and after, frame (and control frame)
fig.grdimage(grid=dset_olympus,projection='M12c',frame='a5f1',cmap='geo')
# also, I can add a colorbar (later)
fig.colorbar(frame=["a2500", "x+lElevation", "y+lm"])
fig.show()
# -
# And we're going to add some perspective, as well as a more interesting color scale. For ease of understanding, let's separate the region of interest with the same cutout that we created the base of the Olympus Mons topography dataset.
#
# **A few notes**
#
# `zsize` is a bit critical here because the volcano is very big (28 km if we consider -5000 to +23000 m). Likewise, `perspective=[150.45]` was chosen attempting (it's a matter of taste) and depends of which flank of the volcano you want to show. But this choice has to be made according to `shading` since to give a good 3D impression, the lighting must be adjusted according to the elevation and azimuth of the perspective. Finally, the pen outline is made smooth and small to enhance the contours of the topography.
#
# Finally, let's make a combined map showing the planet in an inset in the upper right corner. We use the same bounding box coordinates used to cut out the topography, drawing in red on the map. Obviously here the color scale is the same.
# + colab={"base_uri": "https://localhost:8080/", "height": 283} id="GwCV6HIJ_Aa6" outputId="7ccb2823-bbb8-4921-8ebd-e07cf3ba9ddc"
# a little perspective
fig = pygmt.Figure()
# Shared colour map: -5000 m (base plane) up to 24 km, which comfortably
# spans Olympus Mons' ~22 km summit.
topo_cpt = pygmt.makecpt(cmap='sealand', series='-5000/24000/1000', continuous=True)
frame = ["xa5f1", "ya5f1", "z5000+lmeters", "wSEnZ"]
fig.grdview(grid=dset_olympus,
            region=[210, 240, 13, 25, -5000, 23000],
            frame=frame,
            perspective=[150, 45],   # azimuth/elevation picked to show this flank
            projection='M18c',
            zsize='4c',
            surftype='s',
            cmap=topo_cpt,
            plane="-5000+ggrey",     # grey reference plane at -5000 m
            shading='+a100+nt1',     # lighting tuned to the chosen perspective
            # Set the contour pen thickness to "0.1p"
            contourpen="0.1p",)
fig.colorbar(perspective=True, frame=["a5000", "x+lElevation", "y+lm"])
# Footprint of the cut-out, drawn in red on the global inset below.
bounds = [[210., 13.],
          [210., 25.],
          [240., 25.],
          [240., 13.],
          [210., 13.]]
with fig.inset(position="JTR+w3.5c+o0.2c", margin=0, box=None):
    # Create a figure in the inset using the global projection centered at Olympus Mons
    # (fixed: the projection string carried a stray trailing double quote)
    fig.grdimage(grid=dset_mars_topo, region='g', frame='g', projection='G225/19/3.5c')
    fig.plot(bounds, pen="1p,red")
fig.show()
# -
# ## Now, how about Hawaii?
#
# When we read about Olympus Mons, it is usually compared to Everest here on Earth. However, the most interesting thing is to compare it with another mountain range taking as a reference the abyssal seabed (without the ocean) - Hawaii. Interestingly, in terms of latitudes and longitudes on the planet, these two features are in almost the same position. To match the approximate dimensions, let's crop a sample of the `Earth Global Relief` using `pygmt.datasets` with slices of 12 degrees of latitude and 30 degrees of longitude.
# + colab={"base_uri": "https://localhost:8080/"} id="XkeCs3NaLSbY" outputId="920cd17f-cbe1-43d6-b6b2-7cfce418bfb8"
# get the Earth Global Relief around Hawaii, matching the Olympus Mons
# cut-out size (12 degrees of latitude by 30 of longitude)
topo_hawaii = pygmt.datasets.load_earth_relief(region=[-170,-140,13,25],resolution="05m")
# and get the whole Earth at the same resolution as our low-resolution Mars dataset
topo_globe = pygmt.datasets.load_earth_relief(region=[-180,180,-90,90],resolution="01d")
# -
# And we use the same sequence as above to make a map.
# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="SUsRWFJ-MZ3Y" outputId="1830e744-b7a8-4c51-b543-c403716d8785"
# second things, second: flat Mercator map of the Hawaii cut-out, mirroring
# the Olympus Mons map above
fig = pygmt.Figure()
# note I can add projection, after cmap and after, frame (and control frame)
fig.grdimage(grid=topo_hawaii,projection='M12c',frame='a5f1',cmap='geo')
# also, I can add a colorbar (later)
fig.colorbar(frame=["a2500", "x+lElevation", "y+lm"])
fig.show()
# -
# **Another few notes**
#
# As we want to make a comparison, let's keep the same color scale as Mars, still using as a basis for the Z plane, -5000 meters (see the line `plane="-5000+ggrey"` exactly like the map above. The inset in the upper right corner is the same and we adjust the bounding box coordinates used to cut out the topography, drawing in red on the map.
# + colab={"base_uri": "https://localhost:8080/", "height": 283} id="Q4IiufkLG5cW" outputId="57e43215-a8b5-45e2-c9ea-9544eaea1ef8"
fig = pygmt.Figure()
# Same styling as the Olympus Mons view so the two reliefs compare directly:
# identical colour map, perspective, lighting and -5000 m base plane.
frame = ["xa5f1", "ya5f1", "z5000+lmeters", "wSEnZ"]
topo_cpt = pygmt.makecpt(cmap='sealand', series='-5000/24000/1000', continuous=True)
fig.grdview(grid=topo_hawaii,
            region=[-170, -140, 13, 25, -5000, 23000],
            frame=frame,
            perspective=[150, 45],
            projection='M15c',
            zsize='4c',
            surftype='s',
            cmap=topo_cpt,
            plane="-5000+ggrey",
            shading='+a100+nt1',
            # Set the contour pen thickness to "0.1p"
            contourpen="0.1p",)
fig.colorbar(perspective=True, frame=["a5000", "x+lElevation", "y+lm"])
# Footprint of the cut-out, drawn in red on the global inset below.
bounds = [[-170., 13.],
          [-170., 25.],
          [-140., 25.],
          [-140., 13.],
          [-170., 13.]]
with fig.inset(position="JTR+w3.5c+o0.2c", margin=0, box=None):
    # Inset: global orthographic view centered on the Hawaii cut-out
    # (fixed: the copied comment wrongly said Olympus Mons, and the
    # projection string carried a stray trailing double quote)
    fig.grdimage(grid=topo_globe, region='g', frame='g', projection='G-160/19/3.5c')
    fig.coast(region='g', shorelines="thin", frame="g")
    fig.plot(bounds, pen="1p,red")
fig.show()
# -
# ## Combining the two maps side by side
#
# Basically it's the same blocks as above, just using `pygmt`'s `Figure.set_panel` mechanism to tile.
# + colab={"base_uri": "https://localhost:8080/", "height": 166} id="UiBEecTSRp6T" outputId="1bd76a70-5892-40c8-8209-5414e6e4a46f"
# Side-by-side comparison: Olympus Mons (panel a) vs Hawaii (panel b) with the
# same colour map, vertical exaggeration and -5000 m reference plane.
fig = pygmt.Figure()
with fig.subplot(
    nrows=1, ncols=2, figsize=("28c", "16c"), autolabel=True, margins="1c"
):
    with fig.set_panel(panel=0):
        topo_cpt = pygmt.makecpt(cmap='sealand',series=f'-5000/24000/1000',continuous=True)
        frame = ["xa5f1","ya5f1", "z5000+lmeters", "wSEnZ"]
        fig.grdview(grid=dset_olympus,
                    region=[210,240,13,25,-5000,23000],
                    frame=frame,
                    perspective=[150,45],
                    projection='M',
                    zsize='4c',
                    surftype='s',
                    cmap=topo_cpt,
                    plane="-5000+ggrey",
                    shading='+a100+nt1',
                    # Set the contour pen thickness to "0.1p"
                    contourpen="0.1p",)
        # we don't need the colormap in both figures
        #fig.colorbar(perspective=True, frame=["a5000", "x+lElevation", "y+lm"])
    with fig.set_panel(panel=1):
        # Rebuild the (identical) frame and CPT inside this panel's context.
        frame = ["xa5f1","ya5f1", "z5000+lmeters", "wSEnZ"]
        topo_cpt = pygmt.makecpt(cmap='sealand',series=f'-5000/24000/1000',continuous=True)
        fig.grdview(grid=topo_hawaii,
                    region=[-170,-140,13,25,-5000,23000],
                    frame=frame,
                    perspective=[150,45],
                    projection='M',
                    zsize='4c',
                    surftype='s',
                    cmap=topo_cpt,
                    plane="-5000+ggrey",
                    shading='+a100+nt1',
                    # Set the contour pen thickness to "0.1p"
                    contourpen="0.1p",)
        # Single shared colour bar, attached to the second panel.
        fig.colorbar(perspective=True, frame=["a5000", "x+lElevation", "y+lm"])
fig.show()
# -
# ## Bonus map
#
# Recently the rover Zhurong from the Tianwen-1 mission landed successfully at 109.926°E, 25.066°N, in southern Utopia Planitia on Mars (check out the article: Ye et al. (2021). Geomorphologic exploration targets at the Zhurong landing site in the southern Utopia Planitia of Mars. Earth and Planetary Science Letters, 576, 117199. https://doi.org/10.1016/j.epsl.2021.117199). We can create a map of the region with the landing point.
#
# First, let's locate Utopia Planitia. Take a look at Figure 1 by Ye et al. (2021).
# +
fig = pygmt.Figure()
# Orthographic view centered at the Zhurong landing site
# (fixed: the projection string carried a stray trailing double quote)
fig.grdimage(grid=dset_mars_topo, region='g', frame='g',
             projection='G109.926/25.066/12c', shading='+a100+nt1')
zhurong = [109.926, 25.066]
Olympus = [360-210, 19.0]  # position for Olympus Mons - see the left border of the area
# and we drop a "star" in the landing site and write with a small displacement of text
fig.plot(x=zhurong[0], y=zhurong[1], style="a0.5c", pen="1p,black", color="darkorange")
fig.text(x=zhurong[0]+5, y=zhurong[1]+5, text="Zhurong", font='10p,Helvetica-Bold')
# Fixed: the redacted "<NAME>" placeholder referred to Olympus Mons (see the
# Olympus position variable above).
fig.text(x=Olympus[0], y=Olympus[1], text="Olympus Mons", font='10p,Helvetica-Bold')
fig.colorbar(frame=["a5000", "x+lElevation", "y+lm"])
fig.show()
# -
# # additional maps
#
# 1. You can use the same strategy as above to make a 3D map of the Zhurong landing and exploration area
# 2. Note that in this case you should use the MOLA dataset with the highest resolution.
# 3. Test different color palettes to see the result, and don't forget to manipulate perspective and shading accordingly.
#
# We hope you enjoyed it.
| book/mars_maps_extended.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # 50-audio-training
# > Starting to use audio for training
#
# In this notebook, we use a few custom labeled transcripts (see [Issue #49](https://github.com/vanderbilt-data-science/wise/issues/49) for details) to extract subsegments of the audio files which correspond to the labels. For this reason, we can now use this to directly train the head of a Wav2Vec2 Sequence Classification model. We'll look into subsetting the data reliably and training the models below.
# +
#all_no_test
#default_exp audio_modeling
# +
#export
#modeling imports
from transformers import Wav2Vec2Processor, Wav2Vec2ForSequenceClassification, pipeline, TrainingArguments, Trainer
from datasets import load_metric
import torch
import soundfile as sf
import torch
import librosa
#ds imports
import pandas as pd
import numpy as np
#python imports
import os.path
import glob
import re
import warnings
# -
# # Organize data
# First, we need to have the data in some sort of reasonable form. We'll make some functions here that can help us out with this.
#file constants: cluster paths for the hand-labelled CSVs and resampled audio
base_prefix = '/data/p_dsi/wise/data/'
sample_csv_dir = base_prefix + 'test_files/'
audio_dir = base_prefix + 'resampled_audio_16khz/'
# id of the one classroom recording used throughout this notebook
test_audio_id = '055-1'
# audio sampling rate in Hz (matches the resampled_audio_16khz directory)
sampling_rate = 16000
# ## Read in sampled csv
# Currently, we're just going to take a look at a few files that have been hand-labeled with timestamps provided. Let's check out just one to start out with.
# Collect the hand-labelled transcript CSVs; the bare len() below is a
# notebook display statement.
available_csvs = glob.glob(sample_csv_dir + '*.csv')
len(available_csvs)
# +
#print some info about which labelled file we are inspecting
print('Using file:', available_csvs[0])

#read dataframe and preview (display() is an IPython builtin)
ts_df = pd.read_csv(available_csvs[0])
display(ts_df.head())
ts_df.shape
# -
# Things are looking as expected here. We can clearly see that we'll have to do some work on the timestamp to get it into a sampling index.
# ## Conversion of timestamp to sampling index
# Here, we'll make some functions to help with the generation of the sampling index.
#export
def timestamp2index(ts, round_type = 'ceil', sampling_rate=16000):
    '''
    Function timestamp2index: converts a timestamp with format dd:dd.ddd to an index given the sampling rate
    ts: string of timestamp in format minutes:seconds.milliseconds (e.g. '01:01.500')
    round_type (default 'ceil'): string of rounding to perform; can be 'ceil' or 'floor'
        (anything other than 'floor' falls back to 'ceil')
    sampling_rate (default 16000): integer of the sampling rate (in Hz) of the audio
    Returns: integer of index of converted timestamp or None if formatted incorrectly
    '''
    # Escaped \. so only a literal decimal point separates seconds from
    # milliseconds (the previous unescaped '.' matched any character there).
    ts_pat = re.compile(r'(\d{1,2}):(\d{1,2})\.(\d{1,3})')
    ts_match = ts_pat.match(ts)
    # Warn rather than raise so a single malformed row doesn't stop a batch.
    if ts_match is None:
        warnings.warn('There is an issue with value: {0} and it could not be converted.'.format(ts))
        return None
    # ljust zero-pads on the right, so '5' -> '500' -> 0.500 seconds.
    ts_seconds = 60*int(ts_match.group(1)) + int(ts_match.group(2)) + int(ts_match.group(3).ljust(3,'0'))/1000
    # Pick the rounding direction; ceil for clip ends, floor for clip starts.
    round_func = np.floor if round_type == 'floor' else np.ceil
    # Create index and apply rounding.
    ts_ind = int(round_func(ts_seconds * sampling_rate))
    return ts_ind
#A few unit tests: zero, whole minute, whole second, fractional second, and a
#two-digit millisecond field
ts_utests = ['00:00.000',
             '01:00.000',
             '00:01.000',
             '00:00.500',
             '01:01.50']
# Use a plain loop for the side effect: the original list comprehension built
# a throwaway list of Nones just to call print().
for uts in ts_utests:
    print('Timestamp:', uts, 'Index:', timestamp2index(uts))
# Fantastic. This appears to work correctly. Let's add this onto the data, then.
# Convert the labelled timestamps to sample indices: floor the starts and
# ceil the ends so each clip fully covers its labelled span.
ts_df['start_index'] = ts_df['start_timestamp'].apply(timestamp2index, round_type='floor')
ts_df['end_index'] = ts_df['end_timestamp'].apply(timestamp2index, round_type='ceil')
ts_df.head(3)
# Fantastic. It looks like things are looking good in terms of reading the data.
# ## Adding on integer label
# We also need to have an integer label in the dataset. Let's make and add that here.
# +
#Create dictionary mapping integer class ids to behaviour-code labels
label_dict = {0:"PRS", 1:"OTR", 2:"NEU", 3:"REP"}
#Invert original: label string -> integer id, used as model targets
rev_label_dict = {value:key for key, value in label_dict.items()}
rev_label_dict
#Substitute in dataframe: add the integer label column alongside the string one
ts_df['i_label'] = ts_df['label'].replace(rev_label_dict)
# -
# # Preparing Inputs to Model
# Here, we'll use the facebook wav2vec2 models, but we need to do some prep on the inputs to make sure things will go well. Let's check it out.
# ## Split the data
# We're going to choose to just randomly split the data willy nilly. Let's check this out.
# +
#randomly permute (seeded, so the split is reproducible)
arr_df = ts_df.sample(frac=1, random_state=2021)

#assign split based on physical location after reordering: rows past ~80% of
#the shuffled frame become the held-out split (split==1)
arr_df = arr_df.reset_index()
arr_df = arr_df.rename(columns={'index':'true_order'})
arr_df['split'] = (arr_df.index>np.ceil(len(arr_df)*0.8)).astype(int)
arr_df.head(3)
# -
# ## Pre-process audio data
#read audio data (assumes the file is already resampled to 16 kHz, per the
#directory name -- TODO confirm class_sr == sampling_rate)
class_audio, class_sr = sf.read(audio_dir + test_audio_id + '.wav')
#get subsets of audio as a list, one clip per labelled (start, end) span
audio_clips_train = [class_audio[start:end] for start, end in arr_df.query('split==0')[['start_index', 'end_index']].values]
audio_clips_test = [class_audio[start:end] for start, end in arr_df.query('split==1')[['start_index', 'end_index']].values]
#this looks about right: clip counts and first-clip lengths for both splits
print(len(audio_clips_train))
print(len(audio_clips_train[0]))
print(len(audio_clips_test))
print(len(audio_clips_test[0]))
#load processor (feature extractor + tokenizer bundle for wav2vec2)
processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
#process inputs appropriately: pad every clip to the longest in its batch and
#return PyTorch tensors
train_inputs = processor(audio_clips_train, return_tensors="pt", padding="longest", sampling_rate=sampling_rate)
test_inputs = processor(audio_clips_test, return_tensors="pt", padding="longest", sampling_rate=sampling_rate)
# # Train the model
# Now, we have all of our inputs ready, let's try to train this model!
#helpers for class size and class names (4 behaviour codes in label_dict)
no_classes = len(label_dict)
# +
#Create custom Datasets Class
class CustomDataset(torch.utils.data.Dataset):
    """Map-style dataset pairing processor encodings with integer labels."""

    def __init__(self, encodings, labels):
        self.encodings = encodings
        self.labels = labels

    def __getitem__(self, idx):
        # One tensor per encoding field, plus the label under 'labels'
        # (the key the HF Trainer expects).
        sample = {}
        for name, values in self.encodings.items():
            sample[name] = torch.tensor(values[idx])
        sample['labels'] = torch.tensor(self.labels[idx])
        return sample

    def __len__(self):
        return len(self.labels)
#Create datasets from encodings; the split==0/split==1 masks match the masks
#used when the audio clips were extracted, so labels line up with clips
train_dataset = CustomDataset(train_inputs, arr_df.query('split==0')['i_label'].tolist())
val_dataset = CustomDataset(test_inputs, arr_df.query('split==1')['i_label'].tolist())
# -
# ## Create model for task
# Pretrained wav2vec2 backbone plus a freshly initialised sequence
# classification head sized for our 4 labels (the warning about newly
# initialised weights is expected -- that's the untrained head).
model = Wav2Vec2ForSequenceClassification.from_pretrained("facebook/wav2vec2-base-960h", num_labels=no_classes, id2label=label_dict)
# We see the error above and we're happy to see it. This means that we've added the "Sequence Classification" part onto the base and it realizes that the assigned weights are meaningless. Perfect!
# ## Setup and model training
# +
#set parameters around training: evaluate and log once per epoch, tiny
#batches to fit long padded audio clips in memory
training_args = TrainingArguments(
    "test_trainer",
    num_train_epochs=3,
    logging_strategy='epoch',
    evaluation_strategy='epoch',
    per_device_train_batch_size=3,
    per_device_eval_batch_size=3,
    report_to='all',
)

#define the metric; we use accuracy here but we shouldn't
#(accuracy is misleading for imbalanced label sets)
metric = load_metric("accuracy")

#function to calculate metrics
def compute_metrics(eval_pred):
    """Map the Trainer's (logits, labels) pair to the chosen metric dict."""
    logits, labels = eval_pred
    return metric.compute(predictions=np.argmax(logits, axis=-1), references=labels)
# -
#train the model head on the labelled clips, then score the train split
trainer = Trainer(
    model=model,
    args=training_args,
    tokenizer=processor,
    train_dataset=train_dataset,
    eval_dataset=val_dataset,
    compute_metrics=compute_metrics,
)
trainer.train()
trainer.evaluate(train_dataset)
# Well! This is pretty exciting! We can train the model, which is great! The performance, on the other hand, is terrible. There are many ways I think this can be remedied, the first of which would be running more epochs. We'll take a look!
| 50-audio-training.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import json
import requests
import csv
import pandas as pd
import os
import matplotlib.pylab as plt
# %matplotlib inline
import boto3
from boto3.s3.transfer import S3Transfer
import sys
def readFile():
    """Load the combined indicator time-series CSV into a DataFrame.

    Returns a pandas DataFrame with (at least) the IndicatorCode, CountryCode
    and Value columns used by the per-country wrangling functions below.
    """
    # Removed an unused homepath = os.path.expanduser('~') local.
    # low_memory=False reads the file in one pass so mixed-type columns get a
    # consistent dtype.
    indicator_data = pd.read_csv('./Data/TimeSeries/Indicators_TimeSeries_Combined.csv', \
                                 low_memory=False)
    return indicator_data
# # Handling Missing values for Argentina
def argentina():
indicator_data = readFile()
argentina_df_ind1 = indicator_data[(indicator_data['IndicatorCode'].isin(['AG.LND.AGRI.ZS'])) & \
(indicator_data['CountryCode'] == 'AR')]
argentina_df_ind2 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.DYN.CBRT.IN'])) & \
(indicator_data['CountryCode'] == 'AR')]
argentina_df_ind3 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.DPND'])) & \
(indicator_data['CountryCode'] == 'AR')]
argentina_df_ind4 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.EXP.GNFS.ZS'])) & \
(indicator_data['CountryCode'] == 'AR')]
argentina_df_ind5 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.CD'])) & \
(indicator_data['CountryCode'] == 'AR')]
argentina_df_ind6 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.KD.ZG'])) & \
(indicator_data['CountryCode'] == 'AR')]
argentina_df_ind7 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.GROW'])) & \
(indicator_data['CountryCode'] == 'AR')]
argentina_df_ind8 = indicator_data[(indicator_data['IndicatorCode'].isin(['FI.RES.TOTL.CD'])) & \
(indicator_data['CountryCode'] == 'AR')]
argentina_df_ind9 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.TRD.GNFS.ZS'])) & \
(indicator_data['CountryCode'] == 'AR')]
argentina_df_ind1['Value'] = argentina_df_ind1['Value'].interpolate()
argentina_df_ind1['Value'] = argentina_df_ind1['Value'].fillna(method='bfill', axis = 0)
argentina_df_ind2['Value'] = argentina_df_ind2['Value'].fillna(method='bfill', axis = 0)
argentina_df_ind5['Value'] = argentina_df_ind5['Value'].interpolate()
argentina_df_ind6['Value'] = argentina_df_ind6['Value'].interpolate()
# Combining all the Argentina Dataframes
Argentina_df = pd.concat([argentina_df_ind1, argentina_df_ind2, argentina_df_ind3, argentina_df_ind4, argentina_df_ind5,\
argentina_df_ind6, argentina_df_ind7, argentina_df_ind8, argentina_df_ind9])
print('Timeseries Wrangling completed for Argentina!!', '\n')
return Argentina_df
# # Handling Missing values for Brazil
def brazil():
    """Repair missing values in Brazil's ('BR') indicator time series.

    Extracts the nine World-Development indicators for Brazil from the raw
    data, fixes the gaps known to exist for this country (agricultural land
    and birth rate are back-filled, GDP growth is interpolated) and returns
    everything stacked into a single DataFrame.

    Returns:
        pandas.DataFrame: cleaned rows for all nine indicators, in the same
        concatenation order as before (one indicator after another).
    """
    indicator_data = readFile()
    # Indicator codes in the order the per-indicator frames were originally
    # concatenated; keep this order so the output row order is unchanged.
    indicator_codes = ['AG.LND.AGRI.ZS', 'SP.DYN.CBRT.IN', 'SP.POP.DPND',
                       'NE.EXP.GNFS.ZS', 'NY.GDP.MKTP.CD', 'NY.GDP.MKTP.KD.ZG',
                       'SP.POP.GROW', 'FI.RES.TOTL.CD', 'NE.TRD.GNFS.ZS']
    # Per-indicator gap repair: interpolate first (where listed), then
    # back-fill any leading NaNs (where listed) -- same order as before.
    interpolate_codes = {'AG.LND.AGRI.ZS', 'NY.GDP.MKTP.KD.ZG'}
    backfill_codes = {'AG.LND.AGRI.ZS', 'SP.DYN.CBRT.IN'}
    frames = []
    for code in indicator_codes:
        # .copy() avoids pandas' SettingWithCopyWarning when 'Value' is repaired.
        frame = indicator_data[(indicator_data['IndicatorCode'] == code) &
                               (indicator_data['CountryCode'] == 'BR')].copy()
        if code in interpolate_codes:
            frame['Value'] = frame['Value'].interpolate()
        if code in backfill_codes:
            frame['Value'] = frame['Value'].fillna(method='bfill', axis=0)
        frames.append(frame)
    # Combining all the Brazil Dataframes
    Brazil_df = pd.concat(frames)
    print('Timeseries Wrangling completed for Brazil!!', '\n')
    return Brazil_df
# # Handling Missing values for Ecuador
def ecuador():
    """Repair missing values in Ecuador's ('EC') indicator time series.

    Extracts the nine World-Development indicators for Ecuador, fixes the
    gaps known for this country (agricultural land and birth rate are
    back-filled, GDP growth is interpolated) and returns one stacked
    DataFrame with the same row order as the original per-indicator concat.
    """
    indicator_data = readFile()
    # Order matters: it determines the row order of the concatenated output.
    indicator_codes = ['AG.LND.AGRI.ZS', 'SP.DYN.CBRT.IN', 'SP.POP.DPND',
                       'NE.EXP.GNFS.ZS', 'NY.GDP.MKTP.CD', 'NY.GDP.MKTP.KD.ZG',
                       'SP.POP.GROW', 'FI.RES.TOTL.CD', 'NE.TRD.GNFS.ZS']
    # Interpolation runs before back-filling, exactly as in the original code.
    interpolate_codes = {'AG.LND.AGRI.ZS', 'NY.GDP.MKTP.KD.ZG'}
    backfill_codes = {'AG.LND.AGRI.ZS', 'SP.DYN.CBRT.IN'}
    frames = []
    for code in indicator_codes:
        # .copy() avoids pandas' SettingWithCopyWarning on the 'Value' repair.
        frame = indicator_data[(indicator_data['IndicatorCode'] == code) &
                               (indicator_data['CountryCode'] == 'EC')].copy()
        if code in interpolate_codes:
            frame['Value'] = frame['Value'].interpolate()
        if code in backfill_codes:
            frame['Value'] = frame['Value'].fillna(method='bfill', axis=0)
        frames.append(frame)
    # Combining all the Ecuador Dataframes
    Ecuador_df = pd.concat(frames)
    print('Timeseries Wrangling completed for Ecuador!!', '\n')
    return Ecuador_df
# # Handling Missing values for India
def india():
    """Repair missing values in India's ('IN') indicator time series.

    Extracts the nine World-Development indicators for India, fixes the
    country-specific gaps (agricultural land and birth rate back-filled,
    GDP growth interpolated) and returns one stacked DataFrame in the
    original concatenation order.
    """
    indicator_data = readFile()
    # Order matters: it determines the row order of the concatenated output.
    indicator_codes = ['AG.LND.AGRI.ZS', 'SP.DYN.CBRT.IN', 'SP.POP.DPND',
                       'NE.EXP.GNFS.ZS', 'NY.GDP.MKTP.CD', 'NY.GDP.MKTP.KD.ZG',
                       'SP.POP.GROW', 'FI.RES.TOTL.CD', 'NE.TRD.GNFS.ZS']
    # Interpolation runs before back-filling, exactly as in the original code.
    interpolate_codes = {'AG.LND.AGRI.ZS', 'NY.GDP.MKTP.KD.ZG'}
    backfill_codes = {'AG.LND.AGRI.ZS', 'SP.DYN.CBRT.IN'}
    frames = []
    for code in indicator_codes:
        # .copy() avoids pandas' SettingWithCopyWarning on the 'Value' repair.
        frame = indicator_data[(indicator_data['IndicatorCode'] == code) &
                               (indicator_data['CountryCode'] == 'IN')].copy()
        if code in interpolate_codes:
            frame['Value'] = frame['Value'].interpolate()
        if code in backfill_codes:
            frame['Value'] = frame['Value'].fillna(method='bfill', axis=0)
        frames.append(frame)
    # Combining all the India Dataframes
    India_df = pd.concat(frames)
    print('Timeseries Wrangling completed for India!!', '\n')
    return India_df
# # Handling Missing values for Libya
def libya():
    """Repair missing values in Libya's ('LY') indicator time series.

    Libya's data is the sparsest of the six countries: most indicators need
    interpolation followed by a back-fill of the leading NaNs, and reserves
    only need the back-fill. Returns one stacked DataFrame in the original
    concatenation order.
    """
    indicator_data = readFile()
    # Order matters: it determines the row order of the concatenated output.
    indicator_codes = ['AG.LND.AGRI.ZS', 'SP.DYN.CBRT.IN', 'SP.POP.DPND',
                       'NE.EXP.GNFS.ZS', 'NY.GDP.MKTP.CD', 'NY.GDP.MKTP.KD.ZG',
                       'SP.POP.GROW', 'FI.RES.TOTL.CD', 'NE.TRD.GNFS.ZS']
    # Interpolation runs before back-filling, exactly as in the original code.
    interpolate_codes = {'AG.LND.AGRI.ZS', 'NE.EXP.GNFS.ZS', 'NY.GDP.MKTP.CD',
                         'NY.GDP.MKTP.KD.ZG', 'NE.TRD.GNFS.ZS'}
    backfill_codes = {'AG.LND.AGRI.ZS', 'SP.DYN.CBRT.IN', 'NE.EXP.GNFS.ZS',
                      'NY.GDP.MKTP.CD', 'NY.GDP.MKTP.KD.ZG', 'FI.RES.TOTL.CD',
                      'NE.TRD.GNFS.ZS'}
    frames = []
    for code in indicator_codes:
        # .copy() avoids pandas' SettingWithCopyWarning on the 'Value' repair.
        frame = indicator_data[(indicator_data['IndicatorCode'] == code) &
                               (indicator_data['CountryCode'] == 'LY')].copy()
        if code in interpolate_codes:
            frame['Value'] = frame['Value'].interpolate()
        if code in backfill_codes:
            frame['Value'] = frame['Value'].fillna(method='bfill', axis=0)
        frames.append(frame)
    # Combining all the Libya Dataframes
    Libya_df = pd.concat(frames)
    print('Timeseries Wrangling completed for Libya!!', '\n')
    return Libya_df
# # Handling Missing values for South Africa
def south_Africa():
    """Repair missing values in South Africa's ('ZA') indicator time series.

    Extracts the nine World-Development indicators for South Africa, fixes
    the country-specific gaps (agricultural land and birth rate back-filled,
    GDP growth interpolated) and returns one stacked DataFrame in the
    original concatenation order.
    """
    indicator_data = readFile()
    # Order matters: it determines the row order of the concatenated output.
    indicator_codes = ['AG.LND.AGRI.ZS', 'SP.DYN.CBRT.IN', 'SP.POP.DPND',
                       'NE.EXP.GNFS.ZS', 'NY.GDP.MKTP.CD', 'NY.GDP.MKTP.KD.ZG',
                       'SP.POP.GROW', 'FI.RES.TOTL.CD', 'NE.TRD.GNFS.ZS']
    # Interpolation runs before back-filling, exactly as in the original code.
    interpolate_codes = {'AG.LND.AGRI.ZS', 'NY.GDP.MKTP.KD.ZG'}
    backfill_codes = {'AG.LND.AGRI.ZS', 'SP.DYN.CBRT.IN'}
    frames = []
    for code in indicator_codes:
        # .copy() avoids pandas' SettingWithCopyWarning on the 'Value' repair.
        frame = indicator_data[(indicator_data['IndicatorCode'] == code) &
                               (indicator_data['CountryCode'] == 'ZA')].copy()
        if code in interpolate_codes:
            frame['Value'] = frame['Value'].interpolate()
        if code in backfill_codes:
            frame['Value'] = frame['Value'].fillna(method='bfill', axis=0)
        frames.append(frame)
    # Combining all the South_Africa Dataframes
    South_Africa_df = pd.concat(frames)
    print('Timeseries Wrangling completed for South Africa!!', '\n')
    return South_Africa_df
def writeFile():
    """Run the per-country wrangling and persist the combined result to CSV.

    Calls each country's cleaning function in turn, stacks the results into
    one DataFrame and writes it (without the index) to the cleaned-data CSV.
    """
    # Gather the cleaned time series for every country of interest;
    # list order fixes the row order of the combined output.
    country_frames = [argentina(), brazil(), ecuador(), india(),
                      libya(), south_Africa()]
    # Combining all countries DataFrame
    final_df = pd.concat(country_frames)
    actual_filename = './Data/TimeSeries/Indicators_TimeSeries_Cleaned.csv'
    final_df.to_csv(actual_filename, index=False)
    print('Timeseries Wrangling completed and file created!!', '\n')
def fileUploadToS3(AWS_ACCESS_KEY, AWS_SECRET_KEY):
    """Upload every cleaned TimeSeries file to the project S3 bucket.

    Creates the 'worlddevelopmentindicators' bucket if it does not exist
    yet, then uploads each '*_Cleaned*' file found in ./Data/TimeSeries/
    under the 'TimeSeries/' key prefix.

    Args:
        AWS_ACCESS_KEY: AWS access key id for the boto3 client.
        AWS_SECRET_KEY: matching AWS secret access key.
    """
    conn = boto3.client('s3', aws_access_key_id=AWS_ACCESS_KEY,
                        aws_secret_access_key=AWS_SECRET_KEY)
    transfer = S3Transfer(conn)
    # Names of the buckets this account already owns.
    existent = [bucket['Name'] for bucket in conn.list_buckets()['Buckets']]
    bucket_name = 'worlddevelopmentindicators'
    target_dir = './Data/TimeSeries/'
    # Only ship the cleaned outputs, not the raw inputs.
    filenames = [file for file in os.listdir(target_dir) if '_Cleaned' in file]
    if bucket_name in existent:
        print('Bucket already exists!!', '\n')
    else:
        print('Bucket not present. Created bucket!!', '\n')
        conn.create_bucket(Bucket=bucket_name, ACL='public-read-write')
    # The upload itself is identical in both cases, so it is done once here
    # instead of being duplicated per branch (which also carried a
    # "CLeaned" typo in one copy of the log message).
    print('TimeSeries Cleaned File upload started to s3!!!!!', '\n')
    for files in filenames:
        upload_filename = 'TimeSeries/' + files
        transfer.upload_file(os.path.join(target_dir, files), bucket_name, upload_filename)
    print('TimeSeries Cleaned File uploaded to s3!!!!!', '\n')
def main():
    """Entry point: read AWS credentials from argv, wrangle, then upload.

    Usage: python TimeSeries_Wrangling.py <AWS_ACCESS_KEY> <AWS_SECRET_KEY>
    """
    user_input = sys.argv[1:]
    print("----Process Started----")
    # Both the access key and the secret key are required for the S3 upload.
    if len(user_input) < 2:
        print('No Input provided. Process is exiting!!')
        sys.exit(0)
    AWS_ACCESS_KEY = str(user_input[0])
    AWS_SECRET_KEY = str(user_input[1])
    # The standalone readFile() call was removed: its result was discarded,
    # and every country function invoked by writeFile() reads the file itself.
    writeFile()
    # Bug fix: the credentials supplied on the command line are now actually
    # used -- previously a hard-coded access key was passed here, which both
    # ignored the parsed arguments and leaked a credential in source control.
    fileUploadToS3(AWS_ACCESS_KEY, AWS_SECRET_KEY)
    print('Timeseries Wrangling Process completed!!', '\n')


if __name__ == '__main__':
    main()
| Development Indicators Project/Docker/TimeSeries_Wrangling.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# 
#
# ### ODPi Egeria Hands-On Lab
# # Welcome to the Understanding Platform Services Lab
# ## Introduction
#
# ODPi Egeria is an open source project that provides open standards and implementation libraries to connect tools,
# catalogues and platforms together so they can share information about data and technology (called metadata).
#
# The ODPi Egeria platform services provide APIs for understanding the operation of an OMAG Server Platform.
# This hands-on lab steps through each of the platform services operations, providing an explanation and the code to call it.
# ## The Scenario
#
# <NAME> is the IT Infrastructure leader at Coco Pharmaceuticals. He has set up a number of OMAG Server Platforms and
# is validating they are operating correctly.
#
# 
#
# In this hands-on lab Gary is issuing queries to the platform services. Gary's userId is `garygeeke`.
# +
import requests

# Gary's userId - sent as the caller on every platform-services REST call below.
adminUserId = "garygeeke"
# -
# In the **Metadata Server Configuration**, gary configured servers for the OMAG Server Platforms shown in Figure 1:
#
# 
# > **Figure 1:** Coco Pharmaceuticals' OMAG Server Platforms
#
# Below are the host name and port number where the core, data lake and development platforms will run.
# +
import os

# Base URLs of the three OMAG Server Platforms; each can be overridden
# through an environment variable of the same name (defaults match a
# local deployment).
corePlatformURL = os.getenv('corePlatformURL', 'http://localhost:8080')
dataLakePlatformURL = os.getenv('dataLakePlatformURL', 'http://localhost:8081')
devPlatformURL = os.getenv('devPlatformURL', 'http://localhost:8082')
# -

# The `platformURLroot` is the platform that will be called in this lab. You can change this value to call other platforms.

platformURLroot = corePlatformURL

# All of the platform services begin with the following URL root:

platformServicesURLRoot = (platformURLroot
                           + "/open-metadata/platform-services/users/"
                           + adminUserId
                           + "/server-platform")
# Whichever platform you choose, make sure it is running as you begin the exercises.

# ## Exercise 1 - Verifying the platform origin
#
# The OMAG Server Platform can return information about its origin and version using the following command.

# +
# The /origin endpoint answers with the platform's product name and
# version as plain text, so the raw response body is printed directly.
print(" ")
print("Retrieving the OMAG Server Platform origin ...")
url = platformServicesURLRoot + '/origin'
print(f"GET {url}")
response = requests.get(url)
print("Response: ")
print(response.text)
print(" ")
# -
# ## Exercise 2 - Understanding the registered services
#
# The OMAG Server Platform provides implementations of the open metadata and governance services plus additional services to support them.
#
# The following command returns a list of the services that can be enabled in any type of server:

# +
import pprint
import json

# Services that can be enabled in any type of server.
print(" ")
print("Retrieving the registered common services ...")
url = platformServicesURLRoot + '/registered-services/common-services'
print(f"GET {url}")
response = requests.get(url)
prettyResponse = json.dumps(response.json(), indent=4)
print("Response: ")
print(prettyResponse)
print(" ")
# -

# ----
# This next command returns the Open Metadata Access Services (OMASs). These services run in an open metadata server and provide specialized services for tools, platforms and engines.

# +
print(" ")
print("Retrieving the registered access services ...")
url = platformServicesURLRoot + '/registered-services/access-services'
print(f"GET {url}")
response = requests.get(url)
prettyResponse = json.dumps(response.json(), indent=4)
print("Response: ")
print(prettyResponse)
print(" ")
# -

# ----
# These are the services that are used in the governance servers.

# +
print(" ")
print("Retrieving the registered governance services ...")
url = platformServicesURLRoot + '/registered-services/governance-services'
print(f"GET {url}")
response = requests.get(url)
prettyResponse = json.dumps(response.json(), indent=4)
print("Response: ")
print(prettyResponse)
print(" ")
# -

# ----
# Finally it is possible to query all of the services together:

# +
print(" ")
print("Retrieving all of the registered services ...")
url = platformServicesURLRoot + '/registered-services'
print(f"GET {url}")
response = requests.get(url)
prettyResponse = json.dumps(response.json(), indent=4)
print("Response: ")
print(prettyResponse)
print(" ")
# -
# ## Exercise 3 - Querying servers on the platform
#
# The OMAG Server Platform keeps track of the servers it hosts. A **known server** is one that has run on the platform since the platform was started. An **active server** is one that is currently running.
#
# Try the following commands on a platform that has been running a variety of servers since it was started.
# +
# A "known" server has run at some point since the platform started up ...
print(" ")
print("Querying the known servers ...")
url = platformServicesURLRoot + '/servers'
print(f"GET {url}")
response = requests.get(url)
prettyResponse = json.dumps(response.json(), indent=4)
print("Response: ")
print(prettyResponse)
print(" ")

# ... whereas an "active" server is running right now.
print(" ")
print("Querying the active servers ...")
url = platformServicesURLRoot + '/servers/active'
print(f"GET {url}")
response = requests.get(url)
prettyResponse = json.dumps(response.json(), indent=4)
print("Response: ")
print(prettyResponse)
print(" ")
# -
# ----
# It is also possible to find out more information about an individual server.
# +
# The individual server whose state is inspected below.
serverName = "cocoMDS2"

print(" ")
print("Querying if a server is known ...")
url = platformServicesURLRoot + '/servers/' + serverName + '/is-known'
print(f"GET {url}")
response = requests.get(url)
prettyResponse = json.dumps(response.json(), indent=4)
print("Response: ")
print(prettyResponse)
print(" ")

print(" ")
print("Querying the status of a server ...")
url = platformServicesURLRoot + '/servers/' + serverName + '/status'
print(f"GET {url}")
response = requests.get(url)
prettyResponse = json.dumps(response.json(), indent=4)
print("Response: ")
print(prettyResponse)
print(" ")

print(" ")
print("Querying the services that are active a server ...")
url = platformServicesURLRoot + '/servers/' + serverName + '/services'
print(f"GET {url}")
response = requests.get(url)
prettyResponse = json.dumps(response.json(), indent=4)
print("Response: ")
print(prettyResponse)
print(" ")
# -
| open-metadata-resources/open-metadata-labs/administration/understanding-platform-services.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import chardet
import pandas as pd
import numpy as np
import pickle
import os
import json
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
# ### GET DATA

# +
# look at the first ten thousand bytes to guess the character encoding
with open('./all-data.csv', 'rb') as rawdata:
    result = chardet.detect(rawdata.read(10000))

# check what the character encoding might be
print(result)
# -

# chardet suggested a latin-style codec, hence ISO-8859-1 below.
df = pd.read_csv('./all-data.csv', header=None, sep=",", encoding='ISO-8859-1')

df.head()

# Column 1 holds the document text, column 0 the sentiment label string.
X, y_ = df[1].to_numpy(), df[0].to_numpy()

X.shape

np.unique(y_)

# One-hot encode the string labels; column order is the sorted label names.
y = pd.get_dummies(y_)
print(y)

y = y.to_numpy()

# ### ESTABLISHING A BASELINE

# ### Most frequent class in y

# Accuracy achievable by always predicting the majority class --
# any model should beat this number.
"Baseline for random guessing = %.2f%%" % (np.unique(y_, return_counts=True)[1].max() / len(y)*100)

# ## Using already existing sentiment regressor from Algorithmia

# #### Get a sentiment [-1, +1] from Algorithmia and use a decision tree for classification then

# https://algorithmia.com/algorithms/nlp/SentimentAnalysis

# +
# NOTE(review): this cell is intentionally commented out -- uncomment it to
# create the `algo` client used by get_sentiment() below. The embedded API
# key should be rotated and moved out of source control.
# import Algorithmia

# input = {
#     "document": "I really like Algorithmia!"
# }
# client = Algorithmia.client('sim1DmSFr2RBDovXpR+AfDip0iW1')
# algo = client.algo('nlp/SentimentAnalysis/1.0.5')
# algo.set_options(timeout=300) # optional
# print(algo.pipe(input).result)
# -
def get_sentiment(X):
    """Score each document in X with the Algorithmia sentiment API.

    Args:
        X: iterable of document strings.

    Returns:
        list of dicts as returned by the API, each carrying at least
        'document' and 'sentiment' keys.

    Raises:
        NameError: if the module-level `algo` client has not been created
            (its setup cell above is commented out by default).
    """
    if 'algo' not in globals():
        # Fail fast with a clear message instead of an opaque NameError from
        # deep inside the call below; the exception type is kept the same.
        raise NameError("name 'algo' is not defined - run the Algorithmia "
                        "client setup cell before calling get_sentiment()")
    print("API CALLING ...")
    resp = algo.pipe([{"document": x, "language": "auto"} for x in X]).result
    return resp
# +
# Cache the (slow, paid) API responses on disk so re-running the notebook
# does not re-call Algorithmia for the same sample size.
test_len = len(X) # how many samples should be analyzed
algorithmia_results_loc = f"intermediate_results/algorithmia_{test_len:04d}.pkl"
if os.path.exists(algorithmia_results_loc):
    with open(algorithmia_results_loc, "rb") as f:
        algorithmia_results = pickle.load(f)
    print("LOADED RESULTS")
else:
    algorithmia_results = get_sentiment(X[:test_len])
    with open(algorithmia_results_loc, "wb") as f:
        print("SAVING RESULTS ...", end="\r")
        pickle.dump(algorithmia_results, f)
    print("SAVED RESULTS \t\t\t")
# -

# ##### Check that all docs are assigned the right sentiment

# Every cached result must line up 1:1 with the input documents.
assert np.mean([algorithmia_results[i]['document'] == x for i, x in enumerate(X[:test_len])]) == 1

# #### Fit a classifier on the sentiment scores now (a decision tree, despite
# #### the original "logRegression" label)

# Single feature: the API's sentiment score in [-1, +1].
X_algorithmia = np.array([e['sentiment'] for e in algorithmia_results]).reshape(-1,1)
# Collapse the one-hot labels back to class indices 0/1/2.
y_algorithmia = np.argmax(y[:test_len],axis=1)

X_algorithmia_train, X_algorithmia_test, y_algorithmia_train, y_algorithmia_test = train_test_split(X_algorithmia, y_algorithmia, test_size=0.33, random_state=42)

model = DecisionTreeClassifier()
model.fit(X_algorithmia_train,y_algorithmia_train)

# Held-out accuracy when classifying from the sentiment score alone.
"Baseline for classifying Algorithmia sentiment = %.2f%%" % (model.score(X_algorithmia_test,y_algorithmia_test)*100)

# Predicted vs. true class distributions, for a quick sanity check.
np.unique(model.predict(X_algorithmia), return_counts=True), np.unique(y_algorithmia, return_counts=True)

# The score separates positive from negative somewhat, but the three class
# histograms overlap heavily, so accuracy stays modest.
plt.figure(figsize=(15,10))
plt.hist(X_algorithmia[y_algorithmia==1], color="gray", alpha=0.5,label="neutral", bins=30)
plt.hist(X_algorithmia[y_algorithmia==0], color="red", alpha=0.5,label="negative", bins=30)
plt.hist(X_algorithmia[y_algorithmia==2], color="green", alpha=0.5,label="positive", bins=30)
plt.legend()
plt.show()
| Track-1-financial-messages-sentiments/Algorithmia.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Variable magnitude
#
# ### Does the magnitude of the variable matter?
#
# In Linear Regression models, the scale of variables used to estimate the output matters. Linear models are of the type **y = w x + b**, where the regression coefficient w represents the expected change in y for a one unit change in x (the predictor). Thus, the magnitude of w is partly determined by the magnitude of the units being used for x. If x is a distance variable, just changing the scale from kilometers to miles will cause a change in the magnitude of the coefficient.
#
# In addition, in situations where we estimate the outcome y by contemplating multiple predictors x1, x2, ...xn, predictors with greater numeric ranges dominate over those with smaller numeric ranges.
#
# Gradient descent converges faster when all the predictors (x1 to xn) are within a similar scale, therefore having features in a similar scale is useful for Neural Networks as well.
#
# In Support Vector Machines, feature scaling can decrease the time to find the support vectors.
#
# Finally, methods using Euclidean distances or distances in general are also affected by the magnitude of the features, as Euclidean distance is sensitive to variations in the magnitude or scales of the predictors. Therefore feature scaling is required for methods that utilise distance calculations like k-nearest neighbours (KNN) and k-means clustering.
#
# In summary:
#
# #### Magnitude matters because:
#
# - The regression coefficient is directly influenced by the scale of the variable
# - Variables with bigger magnitude / value range dominate over the ones with smaller magnitude / value range
# - Gradient descent converges faster when features are on similar scales
# - Feature scaling helps decrease the time to find support vectors for SVMs
# - Euclidean distances are sensitive to feature magnitude.
#
# #### The machine learning models affected by the magnitude of the feature are:
#
# - Linear and Logistic Regression
# - Neural Networks
# - Support Vector Machines
# - KNN
# - K-means clustering
# - Linear Discriminant Analysis (LDA)
# - Principal Component Analysis (PCA)
#
# #### Machine learning models insensitive to feature magnitude are the ones based on Trees:
#
# - Classification and Regression Trees
# - Random Forests
# - Gradient Boosted Trees
# ===================================================================================================
#
# ## In this Demo
#
# We will study the effect of feature magnitude on the performance of different machine learning algorithms.
#
# We will use the Titanic dataset.
#
# - To download the dataset please refer to the **Datasets** lecture in **Section 1** of the course.
# +
import pandas as pd
import numpy as np
# import several machine learning algorithms
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
# to scale the features
from sklearn.preprocessing import MinMaxScaler
# to evaluate performance and separate into
# train and test set
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
# -
# ### Load data with numerical variables only
# +
# load numerical variables of the Titanic Dataset
# (pclass and age/fare are the predictors; survived is the target)
data = pd.read_csv('../titanic.csv',
                   usecols=['pclass', 'age', 'fare', 'survived'])

data.head()
# +
# let's have a look at the values of those variables
# to get an idea of the feature magnitudes
data.describe()
# -

# We can see that Fare varies between 0 and 512, Age between 0 and 80, and Class between 0 and 3. So the variables have different magnitude.

# +
# let's now calculate the range
# (max - min per predictor; 'survived' is the target, so it is excluded)
for col in ['pclass', 'age', 'fare']:
    print(col, 'range: ', data[col].max() - data[col].min())
# -
# The range of values that each variable can take are quite different.
# +
# let's separate into training and testing set
# the titanic dataset contains missing information
# so for this demo, I will fill those in with 0s
# NOTE(review): fillna(0) is a crude imputation - fine for this scaling
# demo, but not something to copy into a real model.
X_train, X_test, y_train, y_test = train_test_split(
    data[['pclass', 'age', 'fare']].fillna(0),
    data.survived,
    test_size=0.3,
    random_state=0)

# (n_rows, n_features) of each split, displayed by the notebook.
X_train.shape, X_test.shape
# -
# ### Feature Scaling
# For this demonstration, I will scale the features between 0 and 1, using the MinMaxScaler from scikit-learn. To learn more about this scaling visit the Scikit-Learn [website](http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.MinMaxScaler.html)
#
# The transformation is given by:
#
# X_rescaled = (X - X.min()) / (X.max() - X.min())
#
# And to transform the re-scaled features back to their original magnitude:
#
# X = X_rescaled * (max - min) + min
#
# **There is a dedicated section to feature scaling later in the course, where I will explain this and other scaling techniques in more detail**. For now, let's carry on with the demonstration.
# +
# scale the features between 0 and 1.

# set up the scaler
scaler = MinMaxScaler()

# learn each feature's min/max on the train set and rescale it in a single
# step; the test set is rescaled with the train-set statistics so no
# information leaks from test to train
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
# +
# let's have a look at the scaled training dataset:
# per-feature summary statistics after scaling
for label, stat in [('Mean: ', X_train_scaled.mean(axis=0)),
                    ('Standard Deviation: ', X_train_scaled.std(axis=0)),
                    ('Minimum value: ', X_train_scaled.min(axis=0)),
                    ('Maximum value: ', X_train_scaled.max(axis=0))]:
    print(label, stat)
# -
# Now, the maximum values for all the features is 1, and the minimum value is zero, as expected. So they are in a more similar scale.
# ### Logistic Regression
#
# Let's evaluate the effect of feature scaling in a Logistic Regression.
# +
# model build on unscaled variables

# instantiate the classifier; a very large C effectively switches
# regularization off, so the raw feature magnitudes drive the coefficients
logit = LogisticRegression(
    random_state=44,
    C=1000,  # c big to avoid regularization
    solver='lbfgs')

# fit on the raw (unscaled) features
logit.fit(X_train, y_train)

# evaluate performance on both splits
print('Train set')
pred = logit.predict_proba(X_train)
print(f'Logistic Regression roc-auc: {roc_auc_score(y_train, pred[:, 1])}')
print('Test set')
pred = logit.predict_proba(X_test)
print(f'Logistic Regression roc-auc: {roc_auc_score(y_test, pred[:, 1])}')
# -

# let's look at the coefficients
logit.coef_

# +
# model built on scaled variables

logit = LogisticRegression(
    random_state=44,
    C=1000,  # c big to avoid regularization
    solver='lbfgs')

# train the model using the re-scaled data
logit.fit(X_train_scaled, y_train)

# evaluate performance
print('Train set')
pred = logit.predict_proba(X_train_scaled)
print(f'Logistic Regression roc-auc: {roc_auc_score(y_train, pred[:, 1])}')
print('Test set')
pred = logit.predict_proba(X_test_scaled)
print(f'Logistic Regression roc-auc: {roc_auc_score(y_test, pred[:, 1])}')
# -

logit.coef_
# We observe that the performance of logistic regression did not change when using the datasets with the features scaled (compare roc-auc values for train and test set for models with and without feature scaling).
#
# However, when looking at the coefficients we do see a big difference in the values. This is because the magnitude of the variable was affecting the coefficients. After scaling, all 3 variables have relatively the same effect (coefficient) towards survival, whereas before scaling, we would be inclined to think that PClass was driving the Survival outcome.
# ### Support Vector Machines
# +
# model build on unscaled variables

# probability=True is required for predict_proba below; gamma='auto'
# pins the kernel coefficient to the historic sklearn default
SVM_model = SVC(random_state=44, probability=True, gamma='auto')

# fit on the raw (unscaled) features
SVM_model.fit(X_train, y_train)

# evaluate performance on both splits
print('Train set')
pred = SVM_model.predict_proba(X_train)
print(f'SVM roc-auc: {roc_auc_score(y_train, pred[:, 1])}')
print('Test set')
pred = SVM_model.predict_proba(X_test)
print(f'SVM roc-auc: {roc_auc_score(y_test, pred[:, 1])}')
# +
# model built on scaled variables

SVM_model = SVC(random_state=44, probability=True, gamma='auto')

# train the model on the re-scaled features
SVM_model.fit(X_train_scaled, y_train)

# evaluate performance
print('Train set')
pred = SVM_model.predict_proba(X_train_scaled)
print(f'SVM roc-auc: {roc_auc_score(y_train, pred[:, 1])}')
print('Test set')
pred = SVM_model.predict_proba(X_test_scaled)
print(f'SVM roc-auc: {roc_auc_score(y_test, pred[:, 1])}')
# -
# Feature scaling improved the performance of the support vector machine. After feature scaling the model is no longer over-fitting to the training set (compare the roc-auc of 0.881 for the model on unscaled features vs the roc-auc of 0.68). In addition, the roc-auc for the testing set increased as well (0.66 vs 0.68).
# ### K-Nearest Neighbours
# +
# K-nearest neighbours on the raw (unscaled) features; roc-auc reported from
# the positive-class probabilities for train and test splits.
KNN = KNeighborsClassifier(n_neighbors=5)
KNN.fit(X_train, y_train)

for split_name, features, target in (
    ('Train set', X_train, y_train),
    ('Test set', X_test, y_test),
):
    print(split_name)
    pred = KNN.predict_proba(features)
    print(f'KNN roc-auc: {roc_auc_score(target, pred[:, 1])}')
# +
# Same KNN configuration fitted on the scaled features — distance-based
# models are sensitive to feature magnitude, so scaling matters here.
KNN = KNeighborsClassifier(n_neighbors=5)
KNN.fit(X_train_scaled, y_train)

for split_name, features, target in (
    ('Train set', X_train_scaled, y_train),
    ('Test set', X_test_scaled, y_test),
):
    print(split_name)
    pred = KNN.predict_proba(features)
    print(f'KNN roc-auc: {roc_auc_score(target, pred[:, 1])}')
# -
# We observe for KNN as well that feature scaling improved the performance of the model. The model built on scaled features shows better generalisation, with a higher roc-auc for the testing set (0.72 vs 0.69 for the model built on unscaled features).
#
# Both KNN methods are over-fitting to the train set. Thus, we would need to change the parameters of the model or use less features to try and decrease over-fitting, which exceeds the purpose of this demonstration.
# ### Random Forests
# +
# Random forest on the raw (unscaled) features; tree-based models are
# insensitive to feature magnitude, so this is the control case.
rf = RandomForestClassifier(n_estimators=200, random_state=39)
rf.fit(X_train, y_train)

for split_name, features, target in (
    ('Train set', X_train, y_train),
    ('Test set', X_test, y_test),
):
    print(split_name)
    pred = rf.predict_proba(features)
    print(f'Random Forests roc-auc: {roc_auc_score(target, pred[:, 1])}')
# +
# Same random forest fitted on the scaled features — performance should
# match the unscaled run, since trees split on thresholds, not magnitudes.
rf = RandomForestClassifier(n_estimators=200, random_state=39)
rf.fit(X_train_scaled, y_train)

for split_name, features, target in (
    ('Train set', X_train_scaled, y_train),
    ('Test set', X_test_scaled, y_test),
):
    print(split_name)
    pred = rf.predict_proba(features)
    print(f'Random Forests roc-auc: {roc_auc_score(target, pred[:, 1])}')
# -
# As expected, Random Forests shows no change in performance regardless of whether it is trained on a dataset with scaled or unscaled features. This model in particular, is over-fitting to the training set. So we need to do some work to remove the over-fitting. That exceeds the scope of this demonstration.
# +
# AdaBoost on the raw (unscaled) features; like other tree ensembles it is
# insensitive to feature magnitude.
ada = AdaBoostClassifier(n_estimators=200, random_state=44)
ada.fit(X_train, y_train)

for split_name, features, target in (
    ('Train set', X_train, y_train),
    ('Test set', X_test, y_test),
):
    print(split_name)
    pred = ada.predict_proba(features)
    print(f'AdaBoost roc-auc: {roc_auc_score(target, pred[:, 1])}')
# +
# Same AdaBoost fitted on the scaled features for comparison with the
# unscaled run above.
ada = AdaBoostClassifier(n_estimators=200, random_state=44)
ada.fit(X_train_scaled, y_train)

for split_name, features, target in (
    ('Train set', X_train_scaled, y_train),
    ('Test set', X_test_scaled, y_test),
):
    print(split_name)
    pred = ada.predict_proba(features)
    print(f'AdaBoost roc-auc: {roc_auc_score(target, pred[:, 1])}')
# -
# As expected, AdaBoost shows no change in performance regardless of whether it is trained on a dataset with scaled or unscaled features
# **That is all for this demonstration. I hope you enjoyed the notebook, and see you in the next one.**
| FeatureEngineering/Section-03-Variable-Characteristics/03.7-Variable-magnitude.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Optimizing Code: Common Books
# Here's the code your coworker wrote to find the common book ids in `books_published_last_two_years.txt` and `all_coding_books.txt` to obtain a list of recent coding books.
import time
import pandas as pd
import numpy as np
# +
# Load both ID lists, one ID per line. Note split('\n') keeps a trailing
# empty entry if the file ends with a newline (matches original behaviour).
with open('books_published_last_two_years.txt') as infile:
    recent_books = infile.read().split('\n')
with open('all_coding_books.txt') as infile:
    coding_books = infile.read().split('\n')
# +
# Baseline approach: linear membership scan per book,
# O(len(recent_books) * len(coding_books)) overall.
start = time.time()
recent_coding_books = [book for book in recent_books if book in coding_books]
print(len(recent_coding_books))
print(f'Duration: {time.time() - start} seconds')
# -
# ### Tip #1: Use vector operations over loops when possible
#
# Use numpy's `intersect1d` method to get the intersection of the `recent_books` and `coding_books` arrays.
# Vectorized: numpy computes the intersection in compiled code.
start = time.time()
recent_coding_books = np.intersect1d(recent_books, coding_books)
print(len(recent_coding_books))
print(f'Duration: {time.time() - start} seconds')
# ### Tip #2: Know your data structures and which methods are faster
# Use the set's `intersection` method to get the common elements in `recent_books` and `coding_books`.
# Set intersection: hashing makes each membership check O(1) on average.
start = time.time()
recent_coding_books = set(recent_books).intersection(coding_books)
print(len(recent_coding_books))
print(f'Duration: {time.time() - start} seconds')
| Software Engineering Practices Part I/optimizing_code_common_books.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# ## Crypto Arbitrage
#
# In this Challenge, you'll take on the role of an analyst at a high-tech investment firm. The vice president (VP) of your department is considering arbitrage opportunities in Bitcoin and other cryptocurrencies. As Bitcoin trades on markets across the globe, can you capitalize on simultaneous price dislocations in those markets by using the powers of Pandas?
#
# For this assignment, you’ll sort through historical trade data for Bitcoin on two exchanges: Bitstamp and Coinbase. Your task is to apply the three phases of financial analysis to determine if any arbitrage opportunities exist for Bitcoin.
#
# This aspect of the Challenge will consist of 3 phases.
#
# 1. Collect the data.
#
# 2. Prepare the data.
#
# 3. Analyze the data.
#
#
# ### Import the required libraries and dependencies.
import pandas as pd
from pathlib import Path
# %matplotlib inline
# ## Collect the Data
#
# To collect the data that you’ll need, complete the following steps:
#
# Instructions.
#
# 1. Using the Pandas `read_csv` function and the `Path` module, import the data from `bitstamp.csv` file, and create a DataFrame called `bitstamp`. Set the DatetimeIndex as the Timestamp column, and be sure to parse and format the dates.
#
# 2. Use the `head` (and/or the `tail`) function to confirm that Pandas properly imported the data.
#
# 3. Repeat Steps 1 and 2 for `coinbase.csv` file.
# ### Step 1: Using the Pandas `read_csv` function and the `Path` module, import the data from `bitstamp.csv` file, and create a DataFrame called `bitstamp`. Set the DatetimeIndex as the Timestamp column, and be sure to parse and format the dates.
# +
#EH: Define function to read csv and import as DataFrame
def dataframe_import(csvfile):
    """Read a CSV file into a DataFrame indexed by its parsed ``Timestamp`` column.

    Parameters
    ----------
    csvfile : str or Path
        Path to a CSV file that contains a ``Timestamp`` column.

    Returns
    -------
    pd.DataFrame
        Frame whose DatetimeIndex is built from the ``Timestamp`` column.
    """
    # NOTE: the original passed infer_datetime_format=True; that keyword is
    # deprecated since pandas 2.0 (fast format inference is now the default)
    # and scheduled for removal, so it is dropped here.
    data = pd.read_csv(
        Path(csvfile),
        index_col="Timestamp",
        parse_dates=True,
    )
    return data
# +
# Read in the CSV file called "bitstamp.csv" using the Path module.
# The CSV file is located in the Resources folder.
# Set the index to the column "Timestamp" (the data has no "Date" column)
# Set the parse_dates and infer_datetime_format parameters
# bitstamp_csv=Path("Resources/bitstamp.csv")
#EH: import bitstamp from csv to Pandas DataFrame
bitstamp= dataframe_import("Resources/bitstamp.csv")
# -
# ### Step 2: Use the `head` (and/or the `tail`) function to confirm that Pandas properly imported the data.
# Use the head (and/or tail) function to confirm that the data was imported properly.
#EH: Display first and last 5 rows to confirm proper import.
display(bitstamp.head(), bitstamp.tail())
# ### Step 3: Repeat Steps 1 and 2 for `coinbase.csv` file.
# +
# Read in the CSV file called "coinbase.csv" using the Path module.
# The CSV file is located in the Resources folder.
# Set the index to the column "Timestamp"
# Set the parse_dates and infer_datetime_format parameters
#EH: import coinbase from csv to Pandas DataFrame via function-'dataframe_import'.
coinbase = dataframe_import("Resources/coinbase.csv")
# -
# Use the head (and/or tail) function to confirm that the data was imported properly.
#EH: Display first and last 5 rows to confirm proper import.
display(coinbase.head(),coinbase.tail())
# ## Prepare the Data
#
# To prepare and clean your data for analysis, complete the following steps:
#
# 1. For the bitstamp DataFrame, replace or drop all `NaN`, or missing, values in the DataFrame.
#
# 2. Use the `str.replace` function to remove the dollar signs ($) from the values in the Close column.
#
# 3. Convert the data type of the Close column to a `float`.
#
# 4. Review the data for duplicated values, and drop them if necessary.
#
# 5. Repeat Steps 1–4 for the coinbase DataFrame.
# ### Step 1: For the bitstamp DataFrame, replace or drop all `NaN`, or missing, values in the DataFrame.
# +
# For the bitstamp DataFrame, replace or drop all NaNs or missing values in the DataFrame
#EH: Drop NaN / missing values from bitstamp with dropna()
bitstamp=bitstamp.dropna()
#check any NaN or missing values
bitstamp.isnull().sum()
# -
#EH: check datatype of bitstamp dataframe
bitstamp.info()
# ### Step 2: Use the `str.replace` function to remove the dollar signs ($) from the values in the Close column.
#EH: Define function to remove $ and update data type as float in Close column, then confirm the datatype of Close
def update_dtype(data):
    """Strip dollar signs from ``data['Close']``, cast it to float, and return the dtype.

    Mutates ``data`` in place and returns ``data['Close'].dtype`` so callers
    can confirm the conversion.

    ``regex=False`` is passed explicitly: with the older pandas default
    (``regex=True``) the pattern ``"$"`` is an end-of-string anchor, so no
    dollar sign is removed and the subsequent float cast fails on values
    like ``"$10.5"``.
    """
    data['Close'] = data['Close'].str.replace("$", "", regex=False)
    data['Close'] = data['Close'].astype('float')
    return data['Close'].dtype
# +
# Use the str.replace function to remove the dollar sign, $
#EH: update bitstamp close value to dataype-float
update_dtype(bitstamp)
# -
# ### Step 3: Convert the data type of the Close column to a `float`.
#see above cell
# ### Step 4: Review the data for duplicated values, and drop them if necessary.
# +
# Review the data for duplicate values, and drop them if necessary
#EH: Check any duplicates data in bitstamp dataframe
bitstamp.duplicated().sum()
# -
# ### Step 5: Repeat Steps 1–4 for the coinbase DataFrame.
# +
# Repeat Steps 1–4 for the coinbase DataFrame
#EH: For coinbase DataFrame, Drop not a value or missing value.
coinbase=coinbase.dropna()
#check any NaN or missing values
coinbase.isnull().sum()
# -
#EH: For coinbase DataFrame, check datatype of 'Close" column
coinbase.info()
#EH: For coinbase DataFrame, Use the str.replace function to remove the dollar sign, $
#EH: Convert the Close data type to a float
#EH: For coinbase DataFrame, confirm type
update_dtype(coinbase)
#EH: For coinbase DataFrame, Review the data for duplicate values, and drop them if necessary
coinbase.duplicated().sum()
# ### Step 1: Choose columns of data on which to focus your analysis.
#
# Select the data you want to analyze. Use `loc` or `iloc` to select the following columns of data for both the bitstamp and coinbase DataFrames:
#
# * Timestamp (index)
#
# * Close
#
# +
# Use loc or iloc to select `Timestamp (the index)` and `Close` from bitstamp DataFrame
bitstamp_sliced = bitstamp.loc[:,'Close']
# Review the first five rows of the DataFrame
bitstamp_sliced.head()
# +
# Use loc or iloc to select `Timestamp (the index)` and `Close` from coinbase DataFrame
coinbase_sliced = coinbase.iloc[:,3]
# Review the first five rows of the DataFrame
coinbase_sliced.head()
# -
# ### Step 2: Get summary statistics and plot the data.
#
# Sort through the time series data associated with the bitstamp and coinbase DataFrames to identify potential arbitrage opportunities. To do so, complete the following steps:
#
# 1. Generate the summary statistics for each DataFrame by using the `describe` function.
#
# 2. For each DataFrame, create a line plot for the full period of time in the dataset. Be sure to tailor the figure size, title, and color to each visualization.
#
# 3. In one plot, overlay the visualizations that you created in Step 2 for bitstamp and coinbase. Be sure to adjust the legend and title for this new visualization.
#
# 4. Using the `loc` and `plot` functions, plot the price action of the assets on each exchange for different dates and times. Your goal is to evaluate how the spread between the two exchanges changed across the time period that the datasets define. Did the degree of spread change as time progressed?
# +
# Generate the summary statistics for the bitstamp DataFrame
#EH: display bitstamp stats
bitstamp.describe()
# + tags=[]
# Generate the summary statistics for the coinbase DataFrame
#EH: display coinbase stats
coinbase.describe()
# +
# Create a line plot for the bitstamp DataFrame for the full length of time in the dataset
# Be sure that the figure size, title, and color are tailored to each visualization
#EH: visualize bitstamp trend for all period
bitstamp_sliced.plot(figsize=(10,5),title="Bitstamp trend", color="orange",ylabel="USD$")
# -
# Create a line plot for the coinbase DataFrame for the full length of time in the dataset
# Be sure that the figure size, title, and color are tailored to each visualization
#EH: visualize coinbase trend for all period
coinbase_sliced.plot(figsize=(10,5),title="Coinbase trend", color="green",ylabel="USD$")
#EH: Overlay the Bitstamp and Coinbase close prices for a date window.
def overlay_plot(start, end):
    """Plot bitstamp_sliced and coinbase_sliced on one axis between *start* and *end*."""
    window = slice(start, end)
    bitstamp_sliced.loc[window].plot(
        legend=True,
        figsize=(10, 5),
        title=f"Bitstamp vs Coinbase trend {start} - {end}",
        color="orange",
        label="Bitstamp",
        ylabel="USD$",
    )
    coinbase_sliced.loc[window].plot(
        legend=True,
        figsize=(10, 5),
        color="green",
        label="Coinbase",
    )
# +
# Overlay the visualizations for the bitstamp and coinbase DataFrames in one plot
# The plot should visualize the prices over the full lenth of the dataset
# Be sure to include the parameters: legend, figure size, title, and color and label
#EH: visualize bitstamp and coinbase trend for all period.
overlay_plot("2018-01-01","2018-04-01")
# +
# Using the loc and plot functions, create an overlay plot that visualizes
# the price action of both DataFrames for a one month period early in the dataset
# Be sure to include the parameters: legend, figure size, title, and color and label
#EH: visualize bitstamp and coinbase trend for Jan-2018.
overlay_plot('2018-01-01','2018-01-31')
# +
# Using the loc and plot functions, create an overlay plot that visualizes
# the price action of both DataFrames for a one month period later in the dataset
# Be sure to include the parameters: legend, figure size, title, and color and label
#EH: visualize bitstamp and coinbase trend for Mar-2018.
overlay_plot('2018-03-01','2018-03-31')
# -
# **Question** Based on the visualizations of the different time periods, has the degree of spread change as time progressed?
#
# **Answer** There is slight spread between 1/27/2018-1/29/2018 between bitstamp and coinbase. As time progressed from Jan'18 to Mar'18, there wasn't much degree of spread change at all between bitstamp and coinbase.
# ### Step 3: Focus Your Analysis on Specific Dates
#
# Focus your analysis on specific dates by completing the following steps:
#
# 1. Select three dates to evaluate for arbitrage profitability. Choose one date that’s early in the dataset, one from the middle of the dataset, and one from the later part of the time period.
#
# 2. For each of the three dates, generate the summary statistics and then create a box plot. This big-picture view is meant to help you gain a better understanding of the data before you perform your arbitrage calculations. As you compare the data, what conclusions can you draw?
# +
# Create an overlay plot that visualizes the two dataframes over a period of one day early in the dataset.
# Be sure that the plots include the parameters `legend`, `figsize`, `title`, `color` and `label`
#EH: visualize bitstamp and coinbase trend for Jan-4,2018.
overlay_plot('2018-01-04','2018-01-04')
# -
#EH: Spread between the Coinbase and Bitstamp close prices over a date/time period.
def arbitrage_spread(period):
    """Print a header and return Coinbase close minus Bitstamp close for *period*."""
    print(f"Bitstamp vs Coinbase Arbitrage Spread {period}.")
    higher_priced = coinbase_sliced.loc[period]
    lower_priced = bitstamp_sliced.loc[period]
    return higher_priced - lower_priced
#EH: Visualize an arbitrage-spread series between Coinbase and Bitstamp.
def arbitrage_plot(data):
    """Line-plot *data* and return the resulting matplotlib Axes."""
    plot_kwargs = {
        "figsize": (10, 5),
        "title": "Bitstamp vs Coinbase Arbitrage Spread",
        "color": "pink",
        "ylabel": "USD$",
    }
    return data.plot(**plot_kwargs)
# + tags=[]
# Using the early date that you have selected, calculate the arbitrage spread
# by subtracting the bitstamp lower closing prices from the coinbase higher closing prices
#EH: find the arbitrage spread between coinbase and bitstamp on Jan 4-2018
arbitrage_spread_early = arbitrage_spread('2018-01-04')
# Generate summary statistics for the early DataFrame
#EH: generate arbitrate spread stats between coinbase and bitstamp on Jan 4-2018
arbitrage_spread_early.describe()
# +
# Visualize the arbitrage spread from early in the dataset in a box plot
#EH: Visulize arbitrage spread between coinbase and bitstamp for Jan 4-2018
arbitrage_plot(arbitrage_spread_early)
# +
# Create an overlay plot that visualizes the two dataframes over a period of one day from the middle of the dataset.
# Be sure that the plots include the parameters `legend`, `figsize`, `title`, `color` and `label`
#EH: visualize bitstamp and coinbase trend for Feb 15,2018.
overlay_plot('2018-02-15','2018-02-15')
# +
# Using the date in the middle that you have selected, calculate the arbitrage spread
# by subtracting the bitstamp lower closing prices from the coinbase higher closing prices
#EH: find the arbitrage spread between coinbase and bitstamp on Feb 15-2018
arbitrage_spread_middle = arbitrage_spread('2018-02-15')
# Generate summary statistics
#EH: generate arbitrate spread stats between coinbase and bitstamp on Feb 15-2018
arbitrage_spread_middle.describe()
# +
# Visualize the arbitrage spread from the middle of the dataset in a box plot
#EH: Visulize arbitrage spread between coinbase and bitstamp for Feb 15-2018
arbitrage_plot(arbitrage_spread_middle)
# +
# Create an overlay plot that visualizes the two dataframes over a period of one day from late in the dataset.
# Be sure that the plots include the parameters `legend`, `figsize`, `title`, `color` and `label`
#EH: visualize bitstamp and coinbase trend for Mar 30,2018.
overlay_plot('2018-03-30','2018-03-30')
# +
# Using the date from the late that you have selected, calculate the arbitrage spread
# by subtracting the bitstamp lower closing prices from the coinbase higher closing prices
#EH: find the arbitrage spread between coinbase and bitstamp on Mar 30,2018
arbitrage_spread_late = arbitrage_spread('2018-03-30')
# Generate summary statistics for the late DataFrame
#EH: generate arbitrate spread stats between coinbase and bitstamp on Mar 30,2018
arbitrage_spread_late.describe()
# +
# Visualize the arbitrage spread from late in the dataset in a box plot
#EH: Visulize arbitrage spread between coinbase and bitstamp on Mar 30,2018
arbitrage_plot(arbitrage_spread_late)
# -
# ### Step 4: Calculate the Arbitrage Profits
#
# Calculate the potential profits for each date that you selected in the previous section. Your goal is to determine whether arbitrage opportunities still exist in the Bitcoin market. Complete the following steps:
#
# 1. For each of the three dates, measure the arbitrage spread between the two exchanges by subtracting the lower-priced exchange from the higher-priced one. Then use a conditional statement to generate the summary statistics for each arbitrage_spread DataFrame, where the spread is greater than zero.
#
# 2. For each of the three dates, calculate the spread returns. To do so, divide the instances that have a positive arbitrage spread (that is, a spread greater than zero) by the price of Bitcoin from the exchange you’re buying on (that is, the lower-priced exchange). Review the resulting DataFrame.
#
# 3. For each of the three dates, narrow down your trading opportunities even further. To do so, determine the number of times your trades with positive returns exceed the 1% minimum threshold that you need to cover your costs.
#
# 4. Generate the summary statistics of your spread returns that are greater than 1%. How do the average returns compare among the three dates?
#
# 5. For each of the three dates, calculate the potential profit, in dollars, per trade. To do so, multiply the spread returns that were greater than 1% by the cost of what was purchased. Make sure to drop any missing values from the resulting DataFrame.
#
# 6. Generate the summary statistics, and plot the results for each of the three DataFrames.
#
# 7. Calculate the potential arbitrage profits that you can make on each day. To do so, sum the elements in the profit_per_trade DataFrame.
#
# 8. Using the `cumsum` function, plot the cumulative sum of each of the three DataFrames. Can you identify any patterns or trends in the profits across the three time periods?
#
# (NOTE: The starter code displays only one date. You'll want to do this analysis for two additional dates).
# #### 1. For each of the three dates, measure the arbitrage spread between the two exchanges by subtracting the lower-priced exchange from the higher-priced one. Then use a conditional statement to generate the summary statistics for each arbitrage_spread DataFrame, where the spread is greater than zero.
#
# *NOTE*: For illustration, only one of the three dates is shown in the starter code below.
#EH: Positive (profitable-direction) arbitrage spread for a given period.
def arbitrage_measure(period):
    """Return only the entries where the Coinbase-minus-Bitstamp spread is positive."""
    spread = coinbase_sliced.loc[period] - bitstamp_sliced.loc[period]
    positive_spread = spread[spread > 0]
    # Header printed before the caller displays the table.
    print(f"Positive Bitstamp vs Coinbase Arbitrage Spread for {period}.")
    return positive_spread
# +
# For the date early in the dataset, measure the arbitrage spread between the two exchanges
# by subtracting the lower-priced exchange from the higher-priced one
#EH: Measure aribitrage spread on Jan 4-2018 via function 'arbitrage_measure'
arbitrage_spread_early = arbitrage_measure('2018-01-04')
# Use a conditional statement to generate the summary statistics for each arbitrage_spread DataFrame
#EH: generate stats for aribitrage spread on Jan 4-2018
arbitrage_spread_early.describe()
# +
# For the date middle in the dataset, measure the arbitrage spread between the two exchanges
# by subtracting the lower-priced exchange from the higher-priced one
#EH: Measure aribitrage spread on Feb 15-2018 via function 'arbitrage_measure'
arbitrage_spread_middle = arbitrage_measure('2018-02-15')
# Use a conditional statement to generate the summary statistics for each arbitrage_spread DataFrame
#EH: generate stats for aribitrage spread on Feb 15-2018
arbitrage_spread_middle.describe()
# +
# For the date late in the dataset, measure the arbitrage spread between the two exchanges
# by subtracting the lower-priced exchange from the higher-priced one
#EH: Measure aribitrage spread on Mar-30-2018 via function 'arbitrage_measure'
arbitrage_spread_late = arbitrage_measure('2018-03-30')
# Use a conditional statement to generate the summary statistics for each arbitrage_spread DataFrame
#EH: generate stats for aribitrage spread on Mar-30-2018
arbitrage_spread_late.describe()
# -
# #### 2. For each of the three dates, calculate the spread returns. To do so, divide the instances that have a positive arbitrage spread (that is, a spread greater than zero) by the price of Bitcoin from the exchange you’re buying on (that is, the lower-priced exchange). Review the resulting DataFrame.
# +
#EH: Positive spread return (spread relative to the buy-side price) for a period.
def spread_return(period):
    """Return the positive arbitrage spread divided by the Bitstamp (buy) price."""
    returns = arbitrage_measure(period) / bitstamp_sliced.loc[period]
    # Keep only strictly positive returns; label the series for display.
    positive_returns = returns[returns > 0].rename("Close Return")
    return positive_returns
# +
# For the date early in the dataset, calculate the spread returns by dividing the instances when the arbitrage spread is positive (> 0)
# by the price of Bitcoin from the exchange you are buying on (the lower-priced exchange).
#EH: Find positive spread return on Jan 4, 2018.
spread_return_early= spread_return('2018-01-04')
# Review the spread return DataFrame
#EH: Review the spread return DataFrame
spread_return_early.head()
# +
# For the date middle in the dataset, calculate the spread returns by dividing the instances when the arbitrage spread is positive (> 0)
# by the price of Bitcoin from the exchange you are buying on (the lower-priced exchange).
#EH: Find positive spread return on Feb 15, 2018.
spread_return_middle= spread_return('2018-02-15')
# Review the spread return DataFrame
#EH: Review the spread return DataFrame
spread_return_middle.head()
# +
# For the date late in the dataset, calculate the spread returns by dividing the instances when the arbitrage spread is positive (> 0)
# by the price of Bitcoin from the exchange you are buying on (the lower-priced exchange).
#EH: Find positive spread return on Mar 30, 2018.
spread_return_late = spread_return('2018-03-30')
# Review the spread return DataFrame
#EH: Review the spread return DataFrame
spread_return_late.head()
# -
# #### 3. For each of the three dates, narrow down your trading opportunities even further. To do so, determine the number of times your trades with positive returns exceed the 1% minimum threshold that you need to cover your costs.
# +
#EH: Keep only the trades whose return clears the 1% cost threshold.
def profitable_trade(spread_return):
    """Return the entries of *spread_return* above 0.01, renamed for display."""
    above_threshold = spread_return[spread_return > 0.01]
    return above_threshold.rename('spread return > 0.01')
#EH: Report how many profitable opportunities a series contains.
def print_count(profitable_trade):
    """Print the number of non-null entries in *profitable_trade*."""
    print(f"There are {profitable_trade.count()} opportunities with positive returns exceed the 1% minimum threshold.")
# +
# For the date early in the dataset, determine the number of times your trades with positive returns
# exceed the 1% minimum threshold (.01) that you need to cover your costs
#EH: find number of positive return exceed the 1% minimum threshold for Jan 4, 2018
profitable_trades_early = profitable_trade(spread_return_early)
#EH: print the analysis date
print("For Jan 4, 2018,")
#EH: count profitable trade and print count statement via function-"print_count"
print_count(profitable_trades_early)
# +
# Review the first five profitable trades
#EH: Review the first five profitable trades
profitable_trades_early.head()
# +
# For the date middle in the dataset, determine the number of times your trades with positive returns
# exceed the 1% minimum threshold (.01) that you need to cover your costs
#EH: find number of positive return exceed the 1% minimum threshold for Feb 15, 2018
profitable_trades_middle = profitable_trade(spread_return_middle)
#EH: print the analysis date
print("For Feb 15, 2018,")
#EH: count profitable trade and print count statement via function-"print_count"
print_count(profitable_trades_middle)
# Review the first five profitable trades
#EH: Review the first five profitable trades
profitable_trades_middle.head()
# +
# For the date late in the dataset, determine the number of times your trades with positive returns
# exceed the 1% minimum threshold (.01) that you need to cover your costs
#EH: find number of positive return exceed the 1% minimum threshold for Mar 30, 2018
profitable_trades_late = profitable_trade(spread_return_late)
#EH: print the analysis date
print("For Mar 30, 2018,")
#EH: count profitable trade and print count statement via function-"print_count"
print_count(profitable_trades_late)
# Review the first five profitable trades
#EH: Review the first five profitable trade
profitable_trades_late.head()
# -
# #### 4. Generate the summary statistics of your spread returns that are greater than 1%. How do the average returns compare among the three dates?
# +
# For the date early in the dataset, generate the summary statistics for the profitable trades
# or you trades where the spread returns are are greater than 1%
#EH: print header of stats
print('spread return stats > 1% for Jan 4, 2018')
#EH: Generate stats of spread return > 1% for Jan 4, 2018
profitable_trades_early.describe()
# +
#EH: print header of stats
# Fixed: the header previously said "Feb 25, 2018", but profitable_trades_middle
# was computed for 2018-02-15; also aligned wording with the early/late headers.
print('spread return stats > 1% for Feb 15, 2018')
#EH: Generate stats of spread return > 1% for Feb 15, 2018
profitable_trades_middle.describe()
# +
#EH: print header of stats
print('spread return > 1% for Mar 30, 2018')
#EH: Generate stats of spread return > 1% for Mar 30, 2018
profitable_trades_late.describe()
# -
# #### 5. For each of the three dates, calculate the potential profit, in dollars, per trade. To do so, multiply the spread returns that were greater than 1% by the cost of what was purchased. Make sure to drop any missing values from the resulting DataFrame.
# +
# For the date early in the dataset, calculate the potential profit per trade in dollars
# Multiply the profitable trades by the cost of the Bitcoin that was purchased
#EH: Calculate the potential profitable trades for Jan 4, 2018 by multiplying spread return with correspondent bitstamp price on 1/4/2018.
# NOTE(review): `profitable_trades_early` and `bitstamp_sliced` are defined in
# earlier cells of this notebook; this cell only post-processes the 1/4/2018 trades.
profit_early = profitable_trades_early * bitstamp_sliced.loc['2018-01-04']
# Drop any missing values from the profit DataFrame
#EH: remove any missing value or not a number value from 1/4/2018 potential profitable trades.
profit_per_trade_early = profit_early.dropna()
#EH: rename the Series to potential profit
profit_per_trade_early=profit_per_trade_early.rename("potential profit")
# View the early profit DataFrame
#EH: Review potential profitable trades on 1/4/2018.
profit_per_trade_early.head()
# -
# #### 6. Generate the summary statistics, and plot the results for each of the three DataFrames.
# +
# Generate the summary statistics for the early profit per trade DataFrame
#EH: set header for stats
print(f' Jan 4, 2018 potential profit $ stats')
#EH: generate stats for 1/4/2018 potential profit trade.
profit_per_trade_early.describe()
# +
# Plot the results for the early profit per trade DataFrame
#EH: visualize the potential profitable trades in USD for 1/4/2018.
profit_per_trade_early.plot(figsize=(10,5),title="Potential Profit of Jan-4,2018", color="green",ylabel="USD$",xlabel="Timestamp MM-DD-HH")
# -
# #### 7. Calculate the potential arbitrage profits that you can make on each day. To do so, sum the elements in the profit_per_trade DataFrame.
# +
# Calculate the sum of the potential profits for the early profit per trade DataFrame
#EH: Sum up 1/4/2018 profitable trades
print(f'Sum of 1/4/2018 potential profits is {round(profit_per_trade_early.sum(),2):,}.')
# -
# #### 8. Using the `cumsum` function, plot the cumulative sum of each of the three DataFrames. Can you identify any patterns or trends in the profits across the three time periods?
# +
# Use the cumsum function to calculate the cumulative profits over time for the early profit per trade DataFrame
#EH: cumulate each potential profitable trade for 1/4/2018.
cumulative_profit_early = profit_per_trade_early.cumsum()
#EH: review cumulative 1/4/2018 trades
cumulative_profit_early.head()
# +
# Plot the cumulative sum of profits for the early profit per trade DataFrame
#EH: visualize the 1/4/2018 cumulative trades.
cumulative_profit_early.plot(figsize=(10,5),title="Cumulative Potential Profit of Jan-4,2018", color="green",ylabel="USD$",xlabel="Timestamp MM-DD-HH")
# -
# **Question:** After reviewing the profit information across each date from the different time periods, can you identify any patterns or trends?
#
# **Answer: Please see below Markdown report.
# # Title: Crypto Arbitrage Analysis - Bitstamp vs Coinbase Q1-2018
# + [markdown] tags=[]
# ## __Analysis Summary:__
# ## There weren't many profitable arbitrage trades between Bitstamp and Coinbase from Feb-2018 to Mar-2018; the profitable trades cluster at the beginning of January.
#
# + [markdown] tags=[]
# ### __Discoveries:__
# > 1. Price points: Bitstamp and Coinbase have similar price point from USD $6000$ to USD $17000$.
# > 1. Trades stats: Coinbase has slight higher trade count and mean of trade price than bitstamp.
#
# -
# > __Bitstamp Stats__
# > 
# + [markdown] tags=[]
# > __Coinbase Stats__
# -
# > 
# + [markdown] tags=[]
# > 3. Price trend: Bitstamp and Coinbase show a similar `downward` price trend from the beginning of Jan-2018 to the beginning of Feb-2018, and then similar fluctuation through Feb-2018 to Mar-2018. Please see below `Bitstamp vs Coinbase trend`.
# -
# 
# > 4. The profitable arbitrage trade with profit greater than 1% to cover the cost is identified only in Jan-2018.
# > `Jan 4, 2018 profitable trade stats`
# + [markdown] tags=[]
# > 
# -
# > `Feb 15, 2018 profitable trade stats`
# > 
# > `Mar 30, 2018 profitable trade stats`
# > 
# > 5. On Jan 4, 2018, the sum of the potential profits is $3747.68$. The profit USD range and stats is as below.
# > 
# > 6. On Jan4, 2018 the cumulative of potential profit chart is as below.
# > 
| arbitrage_check/crypto_arbitrage.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib
import time
# %pylab inline
# ## Simple Keras 92% 16s For Comparison
# +
# Baseline: a minimal Keras dense network on MNIST, used as a speed/accuracy
# reference for the hand-written NumPy implementations below.
import keras
from keras.datasets import mnist
from keras.layers import Dense, Dropout, Activation, Flatten, Convolution2D, MaxPooling2D
from keras.models import Sequential
(X_train, Y_train), (X_test, Y_test) = mnist.load_data()
# Add a trailing channel axis so images are shaped (28, 28, 1).
X_train = np.expand_dims(X_train, axis=3)
X_test = np.expand_dims(X_test, axis=3)
# NOTE(review): keras.utils.normalize performs L2 normalisation along axis=1,
# not the usual /255 scaling — confirm this is intended.
X_train = keras.utils.normalize(X_train, axis=1)
X_test = keras.utils.normalize(X_test, axis=1)
Y_train = keras.utils.to_categorical(Y_train)
Y_test = keras.utils.to_categorical(Y_test)
model = Sequential()
# Dense applies to the last axis, so this layer acts per pixel row.
model.add(Dense(28, name='dense_in', activation='relu', input_shape=(28,28,1)))
model.add(Flatten(name='flat'))
model.add(Dense(10, name='dense_last', activation='softmax'))
model.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy'])
model.fit(X_train, Y_train, batch_size=32, epochs=1, verbose=1)
results = model.evaluate(X_test, Y_test, batch_size=32)
print('test loss, test acc:', results)
# -
# ## Issues
# - Try Cross Entropy Loss
# - Find why algo so slow
# - ***Working*** - Batching
# - Dropout
# - Max Pooling
# - Fix code redundancy between batched and non batched
# - ***Working*** - Data Augmentation
# - Better learning decay algo
#
# Data Augmentation - Mostly it seems to add time if your goal is just 96% or so. But to get over 97% it might help. I'll try to run a test to confirm this.
# ## Data Augmentation
# +
def add_noise(x):
    """Overlay uniform random noise on an image and rescale the range.

    The divisor maps the maximum possible value (255 + noise ceiling)
    back to the original 0..255 ceiling.
    """
    noise_scale = 10
    noise = np.random.rand(x.shape[0], x.shape[1]) * noise_scale
    return (x + noise) / (1 + noise_scale / 255)
def rot(x):
    """Rotate an image by a random angle in [-15, 15] degrees.

    Nearest-neighbour inverse mapping: for each destination pixel the
    matching source pixel is looked up; destinations whose source falls
    outside the image stay zero. O(h*w) Python loop, so this is slow.
    """
    ang = np.random.rand()*30-15 # Degrees
    h, w = x.shape # Numpy puts out the image axes in the wrong order
    cx = w / 2
    cy = h / 2
    theta = ang * 3.14 / 180
    # Negate: we rotate destination coordinates back into the source frame.
    ang = -theta
    rotmat = np.array([[np.cos(ang), -np.sin(ang)],
                       [np.sin(ang), np.cos(ang)]])
    new_im = np.zeros((h, w))
    for i in range(w):
        for j in range(h):
            # Destination pixel in centred, y-up coordinates.
            newx = i - cx
            newy = (h-j) - cy
            vec = np.matmul(rotmat, np.array([[newx],[newy]]))
            # Back to array indices; round() picks the nearest source pixel.
            oldx = round(vec[0][0] + cx)
            oldy = round(cy-vec[1][0])
            if oldx >= 0 and oldx < w and oldy >= 0 and oldy < h:
                new_im[j][i] = x[oldy][oldx]
    return new_im
def shift_hor(x):
    """Shift an image one pixel horizontally, in place.

    NOTE(review): the left-shift branch fires with probability ~0.001, so
    the image is almost always shifted right — presumably the author
    disabled left shifts deliberately; confirm.
    """
    if np.random.rand() >= 0.999:
        # Shift left: walk columns left-to-right so sources stay untouched.
        for col in range(x.shape[1] - 1):
            x[:, col] = x[:, col + 1]
        return x
    # Shift right: walk column indices high-to-low, capped below 27.
    for raw in range(x.shape[1]):
        col = x.shape[0] - raw
        if col < 27:
            x[:, col + 1] = x[:, col]
    return x
def shift_vert(x):
    """Shift an image one pixel vertically, in place; direction is a coin flip."""
    rows = x.shape[0]
    if np.random.rand() >= 0.5:
        # Shift up: walk rows top-down so each source row is still original.
        for r in range(rows - 1):
            x[r] = x[r + 1]
        return x
    # Shift down: walk row indices high-to-low, capped below 27.
    for raw in range(rows):
        r = rows - raw
        if r < 27:
            x[r] = x[r - 1]
    return x
def smooth_blur(x):
    """Lightly blur an image in place with a 3x3 box filter.

    The last two rows/columns (where a full window does not fit) are
    left untouched. Because every 3x3 window only reaches forward
    (down/right), the sweep never reads a pixel it has already rewritten.
    """
    k = 3
    rows, cols = x.shape
    for r in range(rows - k + 1):
        for c in range(cols - k + 1):
            window_mean = np.sum(x[r:r + k, c:c + k] * np.ones((k, k))) / (k ** 2)
            x[r][c] = (x[r][c] + 0.25 * window_mean) / 1.25
    return x
def data_aug(x):
    '''
    Calls functions to augment data.

    Applies the augmentation pipeline to one square image (in-place shifts,
    then a fresh noisy copy). Note the `>= 100` thresholds can never be met,
    so smooth_blur and rot are deliberately disabled for speed; the RNG is
    still consumed once per branch to keep the draw sequence stable.
    '''
    if np.random.rand() >= 100: # Adds more compute time than it removes
        x = smooth_blur(x)
    if np.random.rand() >= 0.5:
        x = shift_vert(x)
    if np.random.rand() >= 0.5:
        x = shift_hor(x)
    if np.random.rand() >= 0.01: # This probably helps less than shifting since test data has no noise like this
        x = add_noise(x)
    if np.random.rand() >= 100: # Adds more compute time than it removes
        x = rot(x)
    return x
# Testing
# (manual visual check — uncomment to compare one digit with its augmented copy)
#(X_train, Y_train), (X_test, Y_test) = mnist.load_data()
#x = X_train[randint(0,10000)]
#matplotlib.pyplot.figure(0)
#matplotlib.pyplot.imshow(x)
#y = data_aug(x)
#print(y.max())
#print(y.min())
#matplotlib.pyplot.figure(1)
#matplotlib.pyplot.imshow(y)
print('Done')
# -
# ## NUMPY ATTEMPT OOP 94% 25s
# +
# Setup
start = time.time()  # wall-clock reference for the per-epoch timing prints
epochs = 1
lr = 0.01            # learning rate; halved at epochs 5, 8 and 10 below
batch = 1            # only the batch == 1 path is exercised in this cell
class Dense():
    """Fully connected layer with ReLU activation and manual backprop.

    forward() stores the pre-activation (`res`) and ReLU output (`output`);
    backward() stores the weight gradient (`dx`) and the error to hand to
    the previous layer (`passing_error`).
    """
    def __init__(self, rows, cols):
        # Xavier/Glorot scaling keeps activation variance stable at init.
        scale = np.sqrt(1 / (rows + cols))
        self.weights = np.random.randn(rows, cols) * scale
    def forward(self, x):
        """Compute pre-activation and ReLU output for input vector x."""
        self.res = self.weights @ x
        self.output = np.maximum(self.res, 0)
    def backward(self, error, x):
        """Accumulate the weight gradient and the upstream error.

        The (x > 0) mask is the ReLU derivative of this layer's input.
        """
        self.dx = np.outer(error, x)
        self.passing_error = (self.weights.T @ error) * (x > 0)
# Data
print('Importing MNIST Data')
(X_train, Y_train), (X_test, Y_test) = mnist.load_data()
# Training images stay 2-D so data_aug can work on squares; they are
# flattened and scaled per sample inside the training loop.
X_train = X_train #.reshape(-1, 784)/255
X_test = X_test.reshape(-1, 784)/255
from keras.utils.np_utils import to_categorical
Y_train = to_categorical(Y_train)
Y_test = to_categorical(Y_test)
# Layers
print('Setup')
# Network: 784 -> 32 -> 32 -> 32 -> 10 (softmax applied in for_back_pass).
D0 = Dense(32, 784)
D1 = Dense(32, 32)
D2 = Dense(32, 32)
out = Dense(10, 32)
model = [D0, D1, D2, out]
num_layers = len(model)
def shuffl3(x, y):
    '''
    Shuffle two parallel arrays with one shared permutation.

    x, y : indexable numpy arrays of equal length (images and labels).
    Returns the shuffled (x, y) pair; row pairing is preserved.
    '''
    assert len(x) == len(y)
    # Fix: use the `np` alias used everywhere else in this file — the bare
    # `numpy` name only exists here via the %pylab magic's namespace injection.
    ids = np.random.permutation(len(x))
    return x[ids], y[ids]
def for_back_pass(x, y, backpass=True):
    '''
    x is the incoming singular image (flattened, length 784)
    y is the label such as [0, 0, 1, 0, 0, 0, 0, 0, 0, 0]
    backpass is True by default. Set as true if you want to correct weights. False if you want to leave weights alone.

    Returns (guess, loss, correct). Reads the module-level `model` and
    `num_layers`; gradients are stored on each layer as `layer.dx`.
    '''
    # Forward pass: chain each layer's ReLU output into the next layer.
    forward_start = time.time()
    for i in range(num_layers):
        if i == 0:
            model[i].forward(x)
        else:
            model[i].forward(model[i-1].output)
    #https://www.youtube.com/watch?v=mlaLLQofmR8 softmax video
    # Numerically stable softmax (max subtracted before exponentiating).
    guess = np.exp(model[-1].output - model[-1].output.max()) / np.sum(np.exp(model[-1].output - model[-1].output.max()), axis=0) # Softmax eqn I found somewhere
    # NOTE(review): this "loss" is a mean absolute difference, not cross-entropy
    # (cross-entropy is still on the issues list above).
    loss = abs((guess - y)).mean(axis=0)
    correct = (np.argmax(y) == np.argmax(guess))
    error = (guess - y)
    # Backward Prop
    if backpass:
        # Scale the error by the elementwise output derivative guess*(1-guess).
        dd = guess*(1-guess)
        error = error * dd
        # Walk layers last-to-first, threading each layer's passing_error
        # into the previous one.
        for i in range(num_layers):
            if i == 0:
                model[-1].backward(error, model[-2].output)
            elif i == num_layers-1:
                model[0].backward(model[1].passing_error, x)
            else:
                model[num_layers-i-1].backward(model[num_layers-i].passing_error, model[num_layers-i-2].output)
    else:
        # Evaluation-only call: zero out gradients so nothing stale is applied.
        for layer in model:
            layer.dx = 0
    return guess, loss, correct
# Loop
loss_list = []
print('Running {} epochs'.format(epochs))
# Per-layer history of the last two weight updates (for the momentum rule).
old_dxs = []
for i in range(num_layers):
    old_dxs.append([0,0])
backpass = True
validate = True
for epoch in range(epochs):
    # Crude step-decay schedule: halve the learning rate at epochs 5, 8, 10.
    if epoch == 5:
        lr = lr / 2
    if epoch == 8:
        lr = lr / 2
    if epoch == 10:
        lr = lr / 2
    temp_loss = []
    correct = []
    solver = 'my_momentum_v2'
    if batch == 1:
        X = X_train
        Y = Y_train
        X, Y = shuffl3(X, Y)
        for x, y in zip(X, Y):
            #x = data_aug(x) # This doesn't help much for accuracy 95-97% and it slows things down a bit. Use >97%
            x = x.reshape(-1, 784)/255 # It will be faster to do this before the epochs but data_aug easier with square img
            x = x[0]
            guess, loss, correcti = for_back_pass(x, y, backpass=backpass)
            if backpass:
                if solver == 'my_momentum_v2':
                    # Trying Momentum
                    # Update = current grad plus decaying contributions of the
                    # previous two gradients (hand-rolled momentum).
                    for i, layer in enumerate(model):
                        layer.weights = layer.weights - lr*layer.dx - 0.5*lr*old_dxs[i][0] - 0.25*lr*old_dxs[i][1]
                        old_dxs[i][1] = old_dxs[i][0]
                        old_dxs[i][0] = layer.dx
                elif solver == 'adam':
                    pass
            correct.append(correcti)
    else: # batching will require more epochs
        # NOTE(review): this branch still uses names from the non-OOP version
        # (old_dx_out/vold_dx_out, w0, w1, and `out` as a raw array); with the
        # OOP `out` being a Dense instance it will fail if batch != 1 — needs
        # porting before use (see the code-redundancy issue above).
        ids = [randint(0, X_train.shape[0]) for i in range(batch)]
        X = X_train[ids]
        Y = Y_train[ids]
        dx_out_l = np.zeros_like(out.weights)
        dx_w0_l = np.zeros_like(D0.weights)
        dx_w1_l = np.zeros_like(D1.weights)
        loss_l = []
        correcti_l = []
        for x, y in zip(X, Y):
            # TODO Update to generic like batch == 1 above
            guess, loss, correcti = for_back_pass(x, y, backpass=backpass)
            dx_out_l += out.dx
            dx_w0_l += D0.dx
            dx_w1_l += D1.dx
            loss_l.append(loss)
            correcti_l.append(correcti)
        dx_out = dx_out_l / batch
        dx_w0 = dx_w0_l / batch
        dx_w1 = dx_w1_l / batch
        loss = sum(loss_l) / batch
        correcti = sum(correcti_l) / batch
        if backpass:
            if solver == 'my_momentum_v2':
                out = out - lr*dx_out - 0.5*lr*old_dx_out - 0.25*lr*vold_dx_out
                w0 = w0 - lr*dx_w0 - 0.5*lr*old_dx_w0 - 0.25*lr*vold_dx_w0
                w1 = w1 - lr*dx_w1 - 0.5*lr*old_dx_w1 - 0.25*lr*vold_dx_w1
                # Trying Momentum
                vold_dx_out = old_dx_out
                vold_dx_w0 = old_dx_w0
                vold_dx_w1 = old_dx_w1
                old_dx_out = dx_out
                old_dx_w0 = dx_w0
                old_dx_w1 = dx_w1
            elif solver == 'adam':
                pass
        correct.append(correcti)
    correct_percent = sum(correct) / len(correct)
    loss_list.append(loss)
    # Throttle progress printing so very long runs do not flood the output.
    if epochs > 10000:
        if epoch % 10000 == 0:
            print('Epoch{} Time = {}s loss={} accuracy = {}'.format(epoch, time.time() - start, loss, correct_percent))
    elif epochs > 1000:
        if epoch % 1000 == 0:
            print('Epoch{} Time = {}s loss={} accuracy = {}'.format(epoch, time.time() - start, loss, correct_percent))
    elif epochs > 100:
        if epoch % 100 == 0:
            print('Epoch{} Time = {}s loss={} accuracy = {}'.format(epoch, time.time() - start, loss, correct_percent))
    elif epochs < 100:
        print('Epoch{} Time = {}s loss={} accuracy = {}'.format(epoch, time.time() - start, loss, correct_percent))
print('Final Epoch Result')
print('Epoch{} Time = {}s loss={} accuracy = {}'.format(epoch, time.time() - start, loss, correct_percent))
if validate:
    print()
    print('Validating...')
    X = X_test
    Y = Y_test
    X, Y = shuffl3(X, Y)
    correct_l = []
    for x, y in zip(X, Y):
        guess, loss, correcti = for_back_pass(x, y, backpass=False)
        correct_l.append(correcti)
    correct_percent = sum(correct_l) / len(correct_l)
    print()
    print()
    print()
    print('######################################')
    print('VALIDATION CORRECT = {}'.format(correct_percent))
    print('######################################')
    print()
    print()
# -
# ## NUMPY ATTEMPT 1 90% 45s
# +
# Setup (non-OOP version: the weights are plain arrays w0, w1, out)
start = time.time()  # wall-clock reference for timing prints
epochs = 1
lr = 0.01
batch = 1
# Data
print('Importing MNIST Data')
(X_train, Y_train), (X_test, Y_test) = mnist.load_data()
# Training images stay 2-D for data_aug; flattened per sample in the loop.
X_train = X_train #.reshape(-1, 784)/255
X_test = X_test.reshape(-1, 784)/255
from keras.utils.np_utils import to_categorical
Y_train = to_categorical(Y_train)
Y_test = to_categorical(Y_test)
# Layers
print('Setup')
# Network: 784 -> 64 -> 32 -> 10.
w0 = np.random.randn(64, 784)*np.sqrt(1/(64+784)) # Xavier Initialization
w1 = np.random.randn(32, 64)*np.sqrt(1/(32+64))
out = np.random.randn(10, 32)*np.sqrt(1/(10+32))
def shuffl3(x, y):
    '''
    Shuffle two parallel arrays with one shared permutation.

    x, y : indexable numpy arrays of equal length (images and labels).
    Returns the shuffled (x, y) pair; row pairing is preserved.
    '''
    assert len(x) == len(y)
    # Fix: use the `np` alias used everywhere else in this file — the bare
    # `numpy` name only exists here via the %pylab magic's namespace injection.
    ids = np.random.permutation(len(x))
    return x[ids], y[ids]
def for_back_pass(x, y, backpass=True):
    '''
    x is the incoming singular image (flattened, length 784)
    y is the label such as [0, 0, 1, 0, 0, 0, 0, 0, 0, 0]
    backpass is True by default. Set as true if you want to correct weights. False if you want to leave weights alone.

    Reads the module-level weight arrays w0, w1, out.
    Returns (dx_out, dx_w0, dx_w1, guess, loss, correct); the dx_* values
    are zeros when backpass is False.
    '''
    # Forward pass: two ReLU hidden layers, then a linear output layer.
    forward_start = time.time()
    res_w0 = np.dot(w0, x)
    res_rel0 = np.maximum(res_w0, 0)
    res_w1 = np.dot(w1, res_rel0)
    res_rel1 = np.maximum(res_w1, 0)
    res_out = np.dot(out, res_rel1)
    #https://www.youtube.com/watch?v=mlaLLQofmR8 softmax video
    # Numerically stable softmax (max subtracted before exponentiating).
    guess = np.exp(res_out - res_out.max()) / np.sum(np.exp(res_out - res_out.max()), axis=0) # Softmax eqn I found somewhere
    # NOTE(review): mean absolute difference, not cross-entropy.
    loss = abs((guess - y)).mean(axis=0)
    correct = (np.argmax(y) == np.argmax(guess))
    error = (guess - y)
    # Backward Prop: chain the error through out -> w1 -> w0, masking with
    # the ReLU derivative (activation > 0) at each step.
    if backpass:
        dd = guess*(1-guess)
        error = error * dd
        dx_out = np.outer(error, res_rel1)
        error = np.dot(out.T, error) * (res_rel1 > 0)
        dx_w1 = np.outer(error, res_rel0)
        error = np.dot(w1.T, error) * (res_rel0 > 0)
        dx_w0 = np.outer(error, x)
    else:
        dx_out, dx_w0, dx_w1 = 0, 0, 0
    return dx_out, dx_w0, dx_w1, guess, loss, correct
# Loop
loss_list = []
print('Running {} epochs'.format(epochs))
# Momentum history: old_* is the previous update, vold_* the one before it.
vold_dx_out = 0
vold_dx_w0 = 0
vold_dx_w1 = 0
old_dx_out = 0
old_dx_w0 = 0
old_dx_w1 = 0
backpass = True
validate = True
for epoch in range(epochs):
    # Step decay: halve the learning rate at epochs 5, 8 and 10.
    if epoch == 5:
        lr = lr / 2
    if epoch == 8:
        lr = lr / 2
    if epoch == 10:
        lr = lr / 2
    temp_loss = []
    correct = []
    solver = 'my_momentum_v2'
    if batch == 1:
        X = X_train
        Y = Y_train
        X, Y = shuffl3(X, Y)
        for x, y in zip(X, Y):
            #x = data_aug(x) # This doesn't help much for accuracy 95-97% and it slows things down a bit. Use >97%
            x = x.reshape(-1, 784)/255 # It will be faster to do this before the epochs but data_aug easier with square img
            x = x[0]
            dx_out, dx_w0, dx_w1, guess, loss, correcti = for_back_pass(x, y, backpass=backpass)
            if backpass:
                if solver == 'my_momentum_v2':
                    # Update = current grad plus decaying contributions of the
                    # previous two gradients (hand-rolled momentum).
                    out = out - lr*dx_out - 0.5*lr*old_dx_out - 0.25*lr*vold_dx_out
                    w0 = w0 - lr*dx_w0 - 0.5*lr*old_dx_w0 - 0.25*lr*vold_dx_w0
                    w1 = w1 - lr*dx_w1 - 0.5*lr*old_dx_w1 - 0.25*lr*vold_dx_w1
                    # Trying Momentum
                    vold_dx_out = old_dx_out
                    vold_dx_w0 = old_dx_w0
                    vold_dx_w1 = old_dx_w1
                    old_dx_out = dx_out
                    old_dx_w0 = dx_w0
                    old_dx_w1 = dx_w1
                elif solver == 'adam':
                    pass
            correct.append(correcti)
    else: # batching will require more epochs
        # Mini-batch path: accumulate gradients over `batch` random samples,
        # then apply one averaged momentum update.
        ids = [randint(0, X_train.shape[0]) for i in range(batch)]
        X = X_train[ids]
        Y = Y_train[ids]
        dx_out_l = np.zeros_like(out)
        dx_w0_l = np.zeros_like(w0)
        dx_w1_l = np.zeros_like(w1)
        loss_l = []
        correcti_l = []
        for x, y in zip(X, Y):
            dx_out, dx_w0, dx_w1, guess, loss, correcti = for_back_pass(x, y, backpass=backpass)
            dx_out_l += dx_out
            dx_w0_l += dx_w0
            dx_w1_l += dx_w1
            loss_l.append(loss)
            correcti_l.append(correcti)
        dx_out = dx_out_l / batch
        dx_w0 = dx_w0_l /batch
        dx_w1 = dx_w1_l /batch
        loss = sum(loss_l)/batch
        correcti = sum(correcti_l)/batch
        if backpass:
            if solver == 'my_momentum_v2':
                out = out - lr*dx_out - 0.5*lr*old_dx_out - 0.25*lr*vold_dx_out
                w0 = w0 - lr*dx_w0 - 0.5*lr*old_dx_w0 - 0.25*lr*vold_dx_w0
                w1 = w1 - lr*dx_w1 - 0.5*lr*old_dx_w1 - 0.25*lr*vold_dx_w1
                # Trying Momentum
                vold_dx_out = old_dx_out
                vold_dx_w0 = old_dx_w0
                vold_dx_w1 = old_dx_w1
                old_dx_out = dx_out
                old_dx_w0 = dx_w0
                old_dx_w1 = dx_w1
            elif solver == 'adam':
                pass
        correct.append(correcti)
    correct_percent = sum(correct) / len(correct)
    loss_list.append(loss)
    # Throttle progress printing so very long runs do not flood the output.
    if epochs > 10000:
        if epoch % 10000 == 0:
            print('Epoch{} Time = {}s loss={} accuracy = {}'.format(epoch, time.time() - start, loss, correct_percent))
    elif epochs > 1000:
        if epoch % 1000 == 0:
            print('Epoch{} Time = {}s loss={} accuracy = {}'.format(epoch, time.time() - start, loss, correct_percent))
    elif epochs > 100:
        if epoch % 100 == 0:
            print('Epoch{} Time = {}s loss={} accuracy = {}'.format(epoch, time.time() - start, loss, correct_percent))
    elif epochs < 100:
        print('Epoch{} Time = {}s loss={} accuracy = {}'.format(epoch, time.time() - start, loss, correct_percent))
print('Final Epoch Result')
print('Epoch{} Time = {}s loss={} accuracy = {}'.format(epoch, time.time() - start, loss, correct_percent))
if validate:
    print()
    print('Validating...')
    X = X_test
    Y = Y_test
    X, Y = shuffl3(X, Y)
    correct_l = []
    for x, y in zip(X, Y):
        dx_out, dx_w0, dx_w1, guess, loss, correcti = for_back_pass(x, y, backpass=False)
        correct_l.append(correcti)
    correct_percent = sum(correct_l) / len(correct_l)
    print()
    print()
    print()
    print('######################################')
    print('VALIDATION CORRECT = {}'.format(correct_percent))
    print('######################################')
    print()
    print()
# -
# ## Extra Spot to Validate if Model Interrupted
# 98% @ 5min 20 epochs 794-64-32-10 Data Aug lr decay
# 98.4% @ 37min 20 epochs 794-128-64-10 Data Aug lr decay
# (NOTE(review): "794" above is presumably a typo for 784 — confirm.)
# Re-runs validation using the current global weights; useful after a
# keyboard interrupt of the training cell above.
if validate:
    print()
    print('Validating...')
    X = X_test
    Y = Y_test
    X, Y = shuffl3(X, Y)
    correct_l = []
    for x, y in zip(X, Y):
        dx_out, dx_w0, dx_w1, guess, loss, correcti = for_back_pass(x, y, backpass=False)
        correct_l.append(correcti)
    correct_percent = sum(correct_l) / len(correct_l)
    print()
    print()
    print()
    print('######################################')
    print('VALIDATION CORRECT = {}'.format(correct_percent))
    print('######################################')
    print()
    print()
# ## Testing Area Getting Gradients Working
# +
# Sanity-check of the softmax and gradient terms on a tiny hand-made vector.
actual = np.array([[0, 1, 0, 0, 0]])
print('actual = {}'.format(actual))
res_out = np.array([[0, .8, .5, .25, .7]])
res_out = res_out[0]
print('res_out = {}'.format(res_out))
#guess = 1/(1+np.exp(-res_out))
guess = np.exp(res_out - res_out.max()) / np.sum(np.exp(res_out - res_out.max()), axis=0)
print('guess = {}'.format(guess))
# NOTE(review): sign is (actual - guess) here but (guess - y) in the
# training code above — confirm which convention is intended.
error = (actual-guess)
print('error = {}'.format(error))
dx_guess = guess*(1-guess)
print('dx_guess = {}'.format(dx_guess))
print((np.argmax(actual) == np.argmax(guess)))
# -
| cv/mnist-from-scratch/mnist.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/MedicalImageAnalysisTutorials/ImageRegistrationTutorial/blob/master/m2p_image_registration_example_mse_translation_GradientDescent.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="Qty1BmnmPQty" colab_type="text"
# # **Image Registration using Mean Squared Error Metric, 2D Translation Transform, and Gradient Descent**
#
# This is a complete image registration example for beginners. It uses MSE metric, to find parameters of a 2D translation transform using gradient descent optimiser. I first provide an example using ITK tool then using a simple python implmentation.
#
#
# References:
# * [Insight Into Images book](https://dl.acm.org/doi/book/10.5555/1024225).
# * [ITK Software Guide Book 2](https://itk.org/ITKSoftwareGuide/html/Book2/ITKSoftwareGuide-Book2ch3.html).
# * A related [video lecture](http://biglab.ri.cmu.edu/galeotti/methods_course/video_of_lectures_2012/Class19-720p-1Mbps.mp4) by <NAME>.
#
#
# ### TODOs:
#
# * Implement interpolation
# * Implement transformation
#
# I believe learning is two-ways communications. If you have a question, correction, or feedback, you are more than welcome to open an issue and I will try to answer.
#
# For questions, I suggest ask in popular platform e.g. stack exchange or [ITK official forum](https://discourse.itk.org/) to get answers from more experts, you may link this notebook in your question.
#
#
# <br/>
# This tutorial is prepared by: [<NAME>](mailto:<EMAIL>).
#
# ----
#
#
# + [markdown] id="VG2K0RFjPqbj" colab_type="text"
# ## Installation
#
#
# + id="HEAHuCmpWvp7" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 800} outputId="c609cc70-5928-44ac-80ab-a23c49f297d8"
#========================================
# Setup
#========================================
# !pip install itk
# !pip install vtk
# !pip install SimpleITK
# !pip install Pillow
#clone data files from github
# ! rm -r PapersExplained
# !git clone https://github.com/MedicalImageAnalysisTutorials/PapersExplained.git
print("Python version : ")
# !python --version
# - compatibility with Python 2
from __future__ import print_function # print('me') instead of print 'me'
from __future__ import division # 1/2 = 0.5, not 0
# - import common modules
import os, time, random, IPython, datetime, math, itk, PIL, urllib.request
import numpy as np, scipy.ndimage as snd, pandas as pd,SimpleITK as sitk
import plotly.offline as pyo, plotly.graph_objs as go, cufflinks as cf
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.pyplot as plt
from sys import argv
from scipy import ndimage, misc, signal, stats
from PIL import Image, ImageDraw
from datetime import date
from plotly import __version__
from plotly.offline import iplot
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
from ipywidgets import *
from IPython.html.widgets import *
from numpy import array, linalg, matrix
# from scipy.misc import comb as nOk
# the Python plotting package
# #%matplotlib notebook
# %matplotlib inline
# - set gray colormap and nearest neighbor interpolation by default
plt.rcParams['image.cmap'] = 'gray'
plt.rcParams['image.interpolation'] = 'nearest'
myDataPath = "PapersExplained/Data"  # local path to the cloned example data
# + [markdown] id="BGJve-kXw5kq" colab_type="text"
# ## Input images and parameters
#
# An image from ITK examples is downloaded then we create a moving image by shifting the fixed image in x and y directions
# + id="K3mRrDrOJbQM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 500} outputId="6153d618-2bba-497d-d620-bfa3e193ed8e"
#---------------------------------------------------------------
# general parameters:
#---------------------------------------------------------------
swImage = 0 # switch moving and fixed
#Optimiser parameters
numIterations = 100
stepSize = 15          # gradient-descent learning rate
accuracy = 0.00001     # metric value at which optimisation stops early
# translation transform parameters
# Ground-truth shift used to synthesise the moving image; the optimiser
# should recover approximately these values.
tx = 13;
ty = 17;
mu = [tx,ty]
#---------------------------------------------------------------
# Input data
#---------------------------------------------------------------
print("downloading data")
fixedFilename = "BrainProtonDensitySliceBorder20.png"
fixedLink = "https://github.com/InsightSoftwareConsortium/ITK/raw/master/Examples/Data/BrainProtonDensitySliceBorder20.png"
urllib.request.urlretrieve (fixedLink, fixedFilename)
def imTransform(imgA, tPars):
    """Translate a 2-D image array by `tPars` (pixels) via SimpleITK resampling.

    Returns the resampled image as a NumPy array (linear interpolation,
    zero default pixel value). The image origin is preserved.
    """
    image = sitk.GetImageFromArray(imgA)
    saved_origin = image.GetOrigin()
    # Work in a zero-origin frame so the translation is expressed in pixels.
    image.SetOrigin((0, 0, 0))
    translation = sitk.TranslationTransform(2)
    translation.SetParameters(tPars)
    resampled = sitk.Resample(image, image.GetSize(), translation, sitk.sitkLinear,
                              [0, 0, 0], image.GetSpacing(), image.GetDirection())
    # Restore the original origin before converting back to an array.
    resampled.SetOrigin(saved_origin)
    return sitk.GetArrayFromImage(resampled)
def getMovingImage(fixed, mu, isMSE):
    """Build a synthetic moving image by translating `fixed` by `mu`.

    When isMSE is falsy the intensities are inverted to mimic a second
    modality (useful for mutual-information experiments).
    """
    moved = imTransform(itk.GetArrayFromImage(fixed), mu)
    if not isMSE:
        # Flip intensities so MSE no longer matches but MI still can.
        moved = np.max(moved) - moved
    result = itk.GetImageFromArray(moved)
    # Carry over spacing/origin/direction metadata from the fixed image.
    result.CopyInformation(fixed)
    return result
# Reading the image using ITK
# define data types
dimension = 2
PixelType = itk.F
FixedImageType = itk.Image[PixelType, dimension]
MovingImageType = itk.Image[PixelType, dimension]
fixedImageReader = itk.ImageFileReader[FixedImageType].New()
fixedImageReader.SetFileName(fixedFilename); fixedImageReader.Update()
fixedImage = fixedImageReader.GetOutput()
# fixedImageRegion = fixedImage.GetBufferedRegion()
# generate a moving image
# to test the effect of multi-modality we can invert the color by replacing 1 with 0
movingImage = getMovingImage(fixedImage,mu,1)
# Show both images and their difference side by side.
u = itk.GetArrayFromImage(fixedImage)
v = itk.GetArrayFromImage(movingImage)
print(u.shape)
print(v.shape)
f,axs = plt.subplots(1, 3);
f.set_figheight(15);f.set_figwidth(15)
axs[0].set_title('fixed image')
axs[0].imshow(u)
axs[1].set_title('moving image')
axs[1].imshow(v)
axs[2].set_title('difference: fixed - moving')
axs[2].imshow(u-v)
# Print the image metadata for a quick sanity check.
print("fixed image size : ", fixedImage.GetLargestPossibleRegion().GetSize())
print("fixed image spacing : ", fixedImage.GetSpacing())
print("fixed image origin : ", fixedImage.GetOrigin())
print("moving image size : ", movingImage.GetLargestPossibleRegion().GetSize())
print("moving image spacing : ", movingImage.GetSpacing())
print("moving image origin : ", movingImage.GetOrigin())
# + [markdown] id="XIj-IvMcGTRQ" colab_type="text"
# ## Image registration
#
# Image registration is the problem of finding parameters $\mu$ of a transformation $T(P,\mu)$ that aligns an image called the moving image $I_M(P)$ to another image called the fixed image $I_F(P)$.
#
# Finding these parameters is challenging. Optimisation, e.g. a gradient descent optimiser, is used to find these parameters. The gradient descent tries to minimise a similarity metric $S(I_F,I_M)$ by updating the transformation parameters.
#
# $$\tag{1}
# \mu = \mathrm{arg\,min}\quad S(I_F(P),I_M(T(P,\mu_{init}))
# $$
#
# where $\mu_{init}$ is the initial parameters.
#
# To update the parameters one can use an optimiser such as gradient descent. The update rule is:
#
# $$\tag{1.1}
# \mu_{k} = \mu _{k-1}-\lambda \frac{\partial S(I_F(P),I_M(T(P,\mu_{k-1}))}{\partial \mu}
# $$
#
# where $k$ is the current optimisation iteration and $\lambda$ is the step size or the learning rate. When $k=0$ we use the initial parameters. These initial parameters could be random values or zeros, but it is better if they are close to our solution, which can be achieved if we know what our problem looks like.
#
# All image registration methods use a variation of the above equation, so it is important to understand it, which is the goal of this tutorial.
#
#
# In this example the similarity metric is the mean squared error $S_{MSE}(I_F,I_M)$ which is defined as:
#
# $$\tag{2}
# S_{MSE}(I_F(P),I_M(T(P,\mu)) =\frac{1}{n}\sum^{n-1}_{i=0}(I_F(p_i)-I_M(T(p_i,\mu)))^2
# $$
# where $n$ is the number of pixels in the image.
#
#
# The derivative of this metric is:
#
# $$\tag{3}
# \frac{\partial S_{MSE}(I_F(P),I_M(T(P,\mu))}{\partial \mu}=\frac{2}{n}\sum^{n-1}_{i=0}[\left(I_M(T(p_i,\mu)- I_F(p_i))\right)\left(\frac{-\partial I_M(T(p,\mu))}{\partial \mu}\right)]
# $$
#
# Using chain rule:
#
# $$\tag{4}
# \frac{\partial S_{MSE}}{\partial \mu}=\frac{2}{n}\sum^{n-1}_{i=0}[\left( I_M(T(p_i,\mu)-I_F(p_i))\right)\frac{\partial I_M(T(\tilde p,\mu))}{\partial \tilde p}\frac{\partial (T(p_i,\mu))}{\partial \mu}]
# $$
#
# where:
#
# * $\left( I_M(T(p_i,\mu)-I_F(p_i))\right)$ is constant during the optimisation
# * $\frac{\partial I_M(T(\tilde p,\mu))}{\partial \tilde p}$ is also constant during the optimisation. We compute the gradient of the moving image $dI_M=\frac{\partial I_M(p)}{\partial p}$ (one time operation before the optimisation). During the optiisation we get a transform point $\tilde p_i=T(p_i,\mu))$, then we get the intensity value $v$ from the moving image gradient at a transformed point location $v=dI_M(\tilde p)$.
# * $\frac{\partial (T(p_i,\mu))}{\partial \mu}$ is a row from $J_\mu$ that represent the current point. $J_\mu$ is the Jacobian of the transformation with repect to its parameters. Note that $J_\mu$ is computed during each iteration of the optimisation:
#
# $$\tag{5}
# J_\mu = \frac{\partial T(P,\mu)}{\partial \mu}=\begin{pmatrix}
# \frac{\partial T(p_1,\mu_1)}{\partial \mu_{1}}& \frac{\partial T(p_1,\mu_2)}{\partial \mu_{2}} & \frac{\partial T(p_1,\mu_3)}{\partial \mu_{3}} & . & . & . & \frac{\partial T(p_1,\mu_m)}{\partial \mu_{m}} \\
# \frac{\partial T(p_2,\mu_1)}{\partial \mu_{1}} & \frac{\partial T(p_2,\mu_2)}{\partial \mu_{2}} & \frac{\partial T(p_2,\mu_3)}{\partial \mu_{3}} & . & . & . & \frac{\partial x_{2}}{\partial \mu_{m}} \\
# & & .& & & & \\
# & & .& & & & \\
# & & .& & & & \\
# \frac{\partial T(p_n,\mu_1)}{\partial \mu_{1}} & \frac{\partial T(p_n,\mu_2)}{\partial \mu_{2}} & \frac{\partial T(p_n,\mu_3)}{\partial \mu_{3}} & . & . & . & \frac{\partial T(p_n,\mu_m)}{\partial \mu_{m}} \\
# \end{pmatrix}
# $$
#
# Note that $J_\mu=\frac{\partial T(P,\mu)}{\partial \mu}$ is differnt from the Jacobian of the transformation $J=\frac{\partial T(P,\mu)}{\partial P}$
#
# **Math Explaination:**
#
# $\frac{\partial T(p_i,\mu_j)}{\partial \mu_{1}}$ means the derivative of the transformation $T$ at point location $p$ with repect to the transform parameter $\mu_j$. Here is an example, assuming:
#
# * a 2D point $p = (x,y)=(2,3)$
# * a 2D translation transform $T(p,\mu) = T((x,y),(\mu_0,\mu_1)) = (x+\mu0,y+\mu_1)$
# * we have :
#
# $$
# \frac{\partial T(p,\mu)}{\partial \mu}=\frac{\partial T((x,y),(\mu_0,\mu_1))}{\partial (\mu_0,\mu_1) }= \\
# \begin{pmatrix}
# \frac{\partial (x+\mu0,y+\mu_1)}{\partial \mu_0} \\
# \frac{\partial (x+\mu0,y+\mu_1)}{\partial \mu_1} \\
# \end{pmatrix}
# \begin{pmatrix}
# \frac{\partial (x+\mu0)}{\partial \mu_0} & \frac{\partial (y+\mu_1)}{\partial \mu_0}\\
# \frac{\partial (x+\mu0)}{\partial \mu_1} & \frac{\partial (y+\mu_1)}{\partial \mu_1}\\
# \end{pmatrix} = \begin{pmatrix}
# 1 & 0 \\
# 0 & 1 \\
# \end{pmatrix}
# $$
#
# In this example we have :
# $$
# \frac{\partial T((2,3),\mu_0)}{\partial \mu_{0}}=(1,0) \\
# \frac{\partial T((2,3),\mu_1)}{\partial \mu_{1}}=(0,1)
# $$
#
# which means the term:
#
# $$
# \underbrace{(I_F(p_i)-I_M(T(p_i,\mu)))}_\text{scalar} \underbrace{\frac{\partial I_M(T(\tilde p,\mu))}{\partial \tilde p}}_\text{[dx,dy]}\underbrace{\frac{\partial (T(p_i,\mu))}{\partial \mu}}_\text{[[1,0],[0,1]]}=[d\mu0,d\mu_1] = [scalar * (dx *1 +dy*0) , scalar * (dx *0 +dy*1) ] =[scalar*dx,scalar*dy]
# $$
#
# So it seems to update the parameters we just multiply the gradient of the transformed point with twice the error value then divided by the number of pixels.
#
#
# + [markdown] id="aPcUhWApGGJQ" colab_type="text"
# ## ITK example
#
# Here I am using an ITK example to align the input fixed and moving images.
# + id="Dvk68KWLhmcF" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="b157c9dc-3632-442a-cf7a-3f6dda2a52cb"
# This is an image registration code using ITK
InternalImageType = itk.Image[itk.F, dimension]
FixedNormalizeFilterType = itk.NormalizeImageFilter[FixedImageType, InternalImageType]
MovingNormalizeFilterType = itk.NormalizeImageFilter[MovingImageType, InternalImageType]
GaussianFilterType = itk.DiscreteGaussianImageFilter[InternalImageType, InternalImageType]
OptimizerType = itk.GradientDescentOptimizer
RegistrationType = itk.ImageRegistrationMethod[InternalImageType, InternalImageType]
InterpolatorType = itk.LinearInterpolateImageFunction[InternalImageType, itk.D]
TransformType = itk.TranslationTransform[itk.D, dimension]
miMetricType = itk.MutualInformationImageToImageMetric[InternalImageType, InternalImageType]
mseMetricType = itk.MeanSquaresImageToImageMetric[InternalImageType, InternalImageType]
#Instantiate the classes
fixedSmoother = GaussianFilterType.New()
movingSmoother = GaussianFilterType.New()
fixedNormalizer = FixedNormalizeFilterType.New()
movingNormalizer = MovingNormalizeFilterType.New()
transform = TransformType.New()
optimizer = OptimizerType.New()
interpolator = InterpolatorType.New()
registration = RegistrationType.New()
metricMI = miMetricType.New()
metricMSE = mseMetricType.New()
fixedSmoother = GaussianFilterType.New()
movingSmoother = GaussianFilterType.New()
#preprocessing
# normalization is important
fixedNormalizer.SetInput(fixedImage)
movingNormalizer.SetInput(movingImage)
fixedNormalizer.Update()
metric = metricMSE
metric.SetUseAllPixels(True)
initialParameters = transform.GetParameters()
initialParameters[0] = 0.0 # Initial offset in mm along X
initialParameters[1] = 0.0 # Initial offset in mm along Y
# it seems smoothing is not important for mutual information
fixedSmoother.SetVariance(2.0)
movingSmoother.SetVariance(2.0)
fixedSmoother.SetInput(fixedNormalizer.GetOutput())
movingSmoother.SetInput(movingNormalizer.GetOutput())
# fixedImage = fixedNormalizer.GetOutput()
# movingImage = movingNormalizer.GetOutput()
fixedImage = fixedSmoother.GetOutput()
movingImage = movingSmoother.GetOutput()
u = itk.GetArrayFromImage(fixedImage)
v = itk.GetArrayFromImage(movingImage)
#image registration
registration.SetOptimizer(optimizer)
registration.SetTransform(transform)
registration.SetInterpolator(interpolator)
registration.SetMetric(metric)
registration.SetFixedImage(fixedImage)
registration.SetMovingImage(movingImage)
# registration.SetFixedImage(fixedImage)
# registration.SetMovingImage(movingImage)
registration.SetInitialTransformParameters(initialParameters)
optimizer.SetLearningRate(stepSize)
optimizer.SetNumberOfIterations(numIterations)
optimizer.SetMinimize(True)
# Create the Command observer and register it with the optimizer.
def iterationUpdate():
    """Optimizer observer callback: log progress and stop early on convergence.

    Reads the notebook-level ``registration``, ``optimizer`` and ``accuracy``
    objects via closure; registered below with ``optimizer.AddObserver``.
    """
    # Current transform parameters (X/Y translation) of the running registration.
    currentParameter = registration.GetOutput().Get().GetParameters()
    print( "%d : M: %f P: %f %f " % (optimizer.GetCurrentIteration(), optimizer.GetValue(), currentParameter.GetElement(0), currentParameter.GetElement(1)))
    # Early stopping: halt once the metric value drops below `accuracy`.
    if optimizer.GetValue()<accuracy:
        optimizer.StopOptimization()
observer = itk.PyCommand.New()
observer.SetCommandCallable(iterationUpdate)
optimizer.AddObserver(itk.IterationEvent(), observer)
registration.Update()
finalParameters = registration.GetLastTransformParameters()
TranslationAlongX = finalParameters[0]
TranslationAlongY = finalParameters[1]
numberOfIterations = optimizer.GetCurrentIteration()
bestValue = optimizer.GetValue()
# Print out results
print("Result = ")
print(" Translation X = " + str(TranslationAlongX))
print(" Translation Y = " + str(TranslationAlongY))
print(" Iterations = " + str(numberOfIterations))
print(" Metric value = " + str(bestValue))
ResampleFilterType = itk.ResampleImageFilter[MovingImageType, FixedImageType]
finalTransform = TransformType.New()
finalTransform.SetParameters(finalParameters)
finalTransform.SetFixedParameters(transform.GetFixedParameters())
resample = ResampleFilterType.New()
resample.SetTransform(finalTransform)
resample.SetInput(movingImage)
resample.SetDefaultPixelValue(100)
resample.SetSize(fixedImage.GetLargestPossibleRegion().GetSize())
resample.SetOutputOrigin(fixedImage.GetOrigin())
resample.SetOutputSpacing(fixedImage.GetSpacing())
resample.SetOutputDirection(fixedImage.GetDirection())
registeredImage = resample.GetOutput()
r0 = itk.GetArrayFromImage(registeredImage)
# visualize the result
print(u.shape)
print(v.shape)
f,axs = plt.subplots(1, 4);
f.set_figheight(15);f.set_figwidth(15)
axs[0].set_title('fixed image')
axs[0].imshow(u)
axs[1].set_title('moving image')
axs[1].imshow(v)
axs[2].set_title('difference before: fixed - moving')
axs[2].imshow(u-v)
axs[3].set_title('difference after: fixed - registered')
axs[3].imshow(u-r0)
print("all tasks are done!.............")
# + [markdown] id="1rrGnIpTFj0p" colab_type="text"
# ## Simple Implementation
# + id="S34ybyL9PolU" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="dff21a23-f30d-4607-eee8-a4fa6b48c729"
# Note, we will use the same preprocessed input images as in ITK
# TODO: use our implemnetation of:
# image gradient
# image interpolation
def mbNormalize(x):
    """Standardize *x* so the result has zero mean and unit standard deviation."""
    # Center on the mean, then rescale by the standard deviation.
    return (x - np.mean(x)) / np.std(x)
#mse metric
def mseMetric(u, v):
    """Return the mean squared error between two same-shaped arrays.

    Parameters
    ----------
    u, v : array_like
        Images (or arbitrary arrays) of identical shape.

    Returns
    -------
    float
        sum((u - v)**2) divided by the total number of elements.
    """
    u = np.asarray(u, dtype=float)
    v = np.asarray(v, dtype=float)
    # Single vectorized reduction; replaces the original per-row Python
    # loop (zip + list comprehension) which built a throwaway list.
    return float(np.sum((u - v) ** 2) / u.size)
def mbConvolution(F, g):
    """Convolve function F with filter g.

    TODO: implementation pending; currently returns the placeholder value 0.
    """
    placeholder = 0
    return placeholder
def mbImageGradient(F):
    """Return the 2-D Sobel gradient of image F.

    The result has shape ``F.shape + (2,)``: channel 0 is the Sobel-X
    response, channel 1 the Sobel-Y response. Magnitude and direction can
    be derived by the caller as sqrt(dx**2 + dy**2) and atan(dy/dx).
    """
    # Sobel kernels for the horizontal and vertical derivative estimates.
    sobel_x = np.array([[1, 0, -1],
                        [2, 0, -2],
                        [1, 0, -1]])
    sobel_y = np.array([[1, 2, 1],
                        [0, 0, 0],
                        [-1, -2, -1]])
    # Borders are zero-padded (mode='constant' with cval=0).
    grad_x = ndimage.convolve(F, sobel_x, mode='constant', cval=0.0)
    grad_y = ndimage.convolve(F, sobel_y, mode='constant', cval=0.0)
    return np.dstack((grad_x, grad_y))
# get image gradient using ITK
def imGradient(img):
    """Compute the image gradient of a NumPy image using ITK's gradient filter."""
    itk_image = itk.GetImageFromArray(img)
    gradient_image = itk.gradient_image_filter(itk_image)
    return itk.GetArrayFromImage(gradient_image)
#mse derivative
def mbdMSE(u,v,dT):
# dMSE the derivative of mse with repect to the transform parameters mu
# dM = d M(y)/ dy # gradient of the transformed moving image
# dT = D T(x,mu) / dmu # gradient of the transformation with respect to its parameters mu
# dMdT = d M(y)/ dy * D T(x,mu) / dmu
dMSE = 0.0
# compute the gradient of the transformed image
#dM = imGradient(v)
dM = mbImageGradient(v)
# the first term in eq.4, a scalar
n = 1
for i in range(len(u.shape)):
n = n * u.shape[i]
dmse = (2/float(n))* (v-u)
# second and third terms in eq.4
dMdT = np.array([0.0,0.0])
for i in range(dM.shape[0]):
for j in range(dM.shape[1]):
dx = dM[i,j,0]
dy = dM[i,j,1]
dxy0 = np.dot([dx,dy],dT[:,0])
dxy1 = np.dot([dx,dy],dT[:,1])
dMdT[0] = dMdT[0] + dmse[i,j]*dxy0
dMdT[1] = dMdT[1] + dmse[i,j]*dxy1
# we can also use the gradient directly as dT is just identity matrix
# dMdT[0] = dMdT[0] + dmse[i,j]*dx
# dMdT[1] = dMdT[1] + dmse[i,j]*dy
dMSE = np.array([dMdT[0], dMdT[1]])
return dMSE # this should be a vector of length d dimension
# image registration using gradient descent
def gdOptimiser(u,v,numIterations,stepSize):
    """Translation-only registration of v onto u via gradient descent on MSE.

    NOTE(review): relies on a notebook-level ``imTransform`` helper (defined
    elsewhere in the notebook) to apply the translation parameters.

    Parameters
    ----------
    u : 2-D array, the fixed image.
    v : 2-D array, the moving image.
    numIterations : number of gradient-descent steps to run.
    stepSize : learning rate applied to the metric gradient.

    Returns
    -------
    Length-2 array with the final translation parameters.
    """
    # initial parameters
    nMu = [0,0]
    # gradient of a translation transform w.r.t. its parameters: identity
    dT = np.array([[1,0],[0,1]])
    # start the optimisation
    for i in range(numIterations):
        # transform the moving image using nMu
        v_t = imTransform(v,nMu)
        # debug flag: set to 1 to display the transformed image each step
        tMshow = 0
        if tMshow:
            f = plt.figure();f.set_figheight(15);f.set_figwidth(15)
            f.add_subplot(1,2, 1)
            plt.imshow(v_t)
        #compute the metric value
        mv = mseMetric(u,v_t)
        # compute the metric derivative
        dMu = mbdMSE(u,v_t,dT)
        # update the parameters (gradient-descent step)
        nMu = np.array(nMu) - ( stepSize * np.array(dMu ))
        print(i,": ",mv," , ", nMu)
    return nMu
print("numIterations : ", numIterations)
print("stepSize : ", stepSize)
# numIterations = 119
# stepSize = 15
fMu = gdOptimiser(u,v,numIterations,stepSize)
r0=imTransform(v,fMu)
f,axs = plt.subplots(1, 4);
f.set_figheight(15);f.set_figwidth(15)
axs[0].set_title('fixed image')
axs[0].imshow(u)
axs[1].set_title('moving image')
axs[1].imshow(v)
axs[2].set_title('difference before: fixed - moving')
axs[2].imshow(u-v)
axs[3].set_title('difference after: fixed - registered')
axs[3].imshow(u-r0)
| m2p_image_registration_example_mse_translation_GradientDescent.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from sklearn.datasets import fetch_openml
import matplotlib.pyplot as plt
import matplotlib
# %matplotlib inline
import numpy as np
import pandas as pd
# +
### Fetching the MNIST Dataset
mnist = fetch_openml('mnist_784', version=1)
mnist.keys()
# +
### 'data' is an array of data with 1 row per instance and one column per feature
### 'target' is the label
x, y = mnist['data'], mnist['target']
x.shape
# -
y.shape
# +
### There are 70,000 images of digits and each image has 784 features as it is a 28 x 28 pixels image.
### Whereas each number determines one pixel intensity of black. (white 0 to black 255).
some_number = x[7]
some_number
# -
# some_number_image = some_number.reshape(28,28)
#
# plt.imshow(some_number_image, cmap='binary')
y[7]
# +
### Indeed it is a 2. But note the arrays of y are stored as a string. So lets convert it to an int.
y = y.astype(np.uint8)
# -
y[7]
# ## Training and Test Set
# +
### The MNIST dataset has already split into training and testing set for us, which it is already shuffled and the first 60,000
### instances should be used as a training set and the rest as a testing set.
x_train, x_test, y_train, y_test = x[:60000],x[60000:],y[:60000],y[60000:]
# -
# ## Training a Binary Classifier
# +
### With this we can train a model to identify whether it is that certain digit or not. For instance, whether it is a 3 or not.
### It then returns a true or false boolean.
y_train_3 = (y_train == 3)
y_test_3 = (y_test == 3)
### This returns an array of True for all 3s, False for all other digit
# -
# ### Using Stochastic Gradient Descent (SGD) classifier
# +
## Note: SGD is similar to GD. It computes gradients, intercepts and a regression however can be used as a classifier as well, as most regression do.
## SGD is more computationally efficient than gradient descent when you have much larger datasets. SGD is also really good when
## you have a lot of attributes or parameters.
## for more information, watch a YouTube video: https://www.youtube.com/watch?v=vMh0zPT0tLI
from sklearn.linear_model import SGDClassifier
sgd_clf = SGDClassifier(random_state=47)
sgd_clf.fit(x_train, y_train_3)
# -
sgd_clf.predict([some_number])
y[7]
sgd_clf.predict([x[7]])
y[7]
# ## Performance Measure
# ### - Measuring Accuracy using Cross-Validation
# +
## For more info abt cross validation: https://www.youtube.com/watch?v=fSytzGwwBVw&t=3s
from sklearn.model_selection import cross_val_score
cross_val_score(sgd_clf, x_train, y_train_3, cv=3, scoring="accuracy")
# +
### To show that whether this performance measure is credible or not, we can map all of the x_train label as false or 0. Then
## see how accurate it would get. If it is a high percentage, then that means the labels of 'true' is much much less than the
## labels of 'false'. Essentially, you have a high percentage of getting the answer right although, you are justing saying all of them
## are false.
from sklearn.base import BaseEstimator
class AlwaysNever3Classifier(BaseEstimator):
    """Dummy classifier that predicts "not a 3" for every input sample."""

    def fit(self, X, y=None):
        # Nothing to learn; return self to conform to the estimator API.
        return self

    def predict(self, X):
        # One all-False boolean column: every sample classified as "not a 3".
        n_samples = len(X)
        return np.full((n_samples, 1), False)
never_3_clf = AlwaysNever3Classifier()
cross_val_score(never_3_clf, x_train, y_train_3, cv=3, scoring="accuracy")
# +
### So essentially 89% to 90% of this whole dataset consists of non-3s. So measuring accuracy is generally not the preferred
### performance measure for classifiers, especially when dealing with skewed datasets (ie: when some classes are much more frequent than others).
# -
# ### - Confusion Matrix
# +
### Before computing the confusion matrix, we need a set of predictions so it can be compared with the actual labels.
# +
from sklearn.model_selection import cross_val_predict
y_train_pred = cross_val_predict(sgd_clf, x_train, y_train_3, cv=3)
# +
### The cross_val_predict() method performs K-fold cross-validation, but instead of returning the evaluation scores, it returns prediction
### made on each test fold. This means that you can get a clean prediction for each instance in the training set.
### Note: Clean = prediction made by the model that never saw the data during training.
# +
from sklearn.metrics import confusion_matrix
confusion_matrix(y_train_3, y_train_pred)
# +
### To understand what is being displayed, check 'Confusion-matrix-for-binary-classification' in the media and notes folder.
# +
### We have 2 measures: Precision & Recall.
### Check in the media and notes folder.
# -
# ### - Precision and Recall
# +
from sklearn.metrics import precision_score, recall_score
precision_score(y_train_3, y_train_pred) # == 5114 / (1442 + 5114)
# -
recall_score(y_train_3, y_train_pred) # == 5114 / (1017 + 5114)
# +
## - Precision is the score representing out of all the time when the model claims the digit as 3, 78% of it is correct.
## - Recall is the score representing out all the 3s, it detects 83% of them.
## - F1 score is a score combining both measures above. Formula is in the M&N folder. It is a harmonic mean. Rather than treating
## the values equally, it gives much more weight to low values. So it can only have a bigger value when BOTH measures is high.
# -
from sklearn.metrics import f1_score
f1_score(y_train_3, y_train_pred)
# +
## - F1 score favors classifiers that have similar precision and recall. This is not always the case: sometimes you would care
## more about the precision and sometimes more about the recall.
## - Eg.1: You would prefer a higher precision model if you are using the model for detecting safe videos for kids.
## - Eg.2: You would prefer a higher recall model if you are using to detect shop-lifting (sure they can get more false-alarm but
## But it is better than missing an actual shop-lifting activity.)
## You can't have both at high. This is called the precision/recall trade-off.
# -
# ## Precision and Recall Trade-off
y_scores = sgd_clf.decision_function([some_number])
y_scores
# +
## the decision funtion returns a score for each instance, then use it to compare with the threshold to predict whether positive
## or negative
# +
## if the threshold is at 0, then the prediction would say the some_number is a 3. (which is true)
threshold = 0
y_some_number_pred = (y_scores > threshold)
y_some_number_pred
# +
## if we increase the threshold to 8000, then the prediction would say some_number isn't a 3.
threshold = 8000
y_some_number_pred = (y_scores > threshold)
y_some_number_pred
# +
## This show us that when we increase the threshold, the recall score decreases (ie: higher false negative rate).
## So how do we know which threshold to use?
# +
## first use cross_val_predict
y_scores = cross_val_predict(sgd_clf, x_train, y_train_3, cv=3, method='decision_function')
y_scores
# +
from sklearn.metrics import precision_recall_curve
precisions, recalls, thresholds = precision_recall_curve(y_train_3, y_scores)
# +
def plot_precision_recall_vs_threshold(precisions, recalls, thresholds):
    """Plot precision and recall as functions of the decision threshold."""
    # precision_recall_curve returns one more precision/recall entry than
    # thresholds, so drop the last point to align the arrays.
    for values, fmt, label in ((precisions, "b--", "Precision"),
                               (recalls, "r-", "Recalls")):
        plt.plot(thresholds, values[:-1], fmt, label=label)
    plt.legend()
    plt.xlabel('Threshold')
    plt.grid()
plot_precision_recall_vs_threshold(precisions, recalls, thresholds)
plt.show()
# -
plt.plot(recalls[:-1], precisions[:-1], "b-")
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.grid()
precisions
thresholds
recalls
# +
## Suppose we want the threshold at which recall first drops to 90% or below: np.argmax() applied to the
## boolean array (recalls <= 0.90) returns the index of the first maximum value — i.e. the first True entry —
## and that index is then used to look up the corresponding threshold.
threshold_90_recall = thresholds[np.argmax(recalls <= 0.90)]
threshold_90_recall
# +
## Instead of running the predict() function for now, we can return a boolean like this instead
y_train_pred_90 = (y_scores >= threshold_90_recall)
y_train_pred_90
# -
precision_score(y_train_3, y_train_pred_90)
recall_score(y_train_3, y_train_pred_90)
# ## The ROC Curve (Reciever Operating Characteristic)
# +
## Similar to the precision/recall curve, but instead of plotting precision against recall, we plot true positive rate (same thing
## as recall) against the false positive rate (ratio of actual negative instances that are incorrectly classified as positive).
## Vocab: Recall is also sometimes called as Sensitivity
## True Negative Rate (TNR) is the opposite of false positive rate so it is equal to 1 - FPR and is also known as specificity
## FPR = 1 - TNR or 1 - specificity
## for more information: read P.97 and watch https://www.youtube.com/watch?v=4jRBRDbJemM&t=114s
# +
from sklearn.metrics import roc_curve
fpr, tpr, thresholds = roc_curve(y_train_3, y_scores)
# -
plt.plot(fpr, tpr, linewidth=2)
plt.plot([0,1],[0,1], 'k--')
plt.xlabel('False Positive Rate (1 - Specificity)')
plt.ylabel('True Positive Rate (Sensitivity / Recall)')
plt.grid()
plt.show()
# +
## This graph shows us the higher the positive rate or recall (which is a good thing), beyond a point, there will be a large
## increase in the False Positive Rate (which is a bad thing). As you can see, there is a trade off.
## Ideally, we want the ROC curve to be curved up like a right triangle towards the top left corner to minimise the effect of
## an increase of FPR.
# +
## One way to compare classifiers is to measure the Area Under the Curve (AUC). A perfect one will have AUC = 1. So the higher
## the better.
from sklearn.metrics import roc_auc_score
roc_auc_score(y_train_3, y_scores)
# +
## Generally, If you care about the precision more, use the precision-recall graph instead. If you care more about the recall,
## look at the ROC curve.
# -
# ## Trying a different algorithm, Random Forest Classifier
# +
## RFC doesn't have a decision_function() method, instead, has a predict_proba() which returns an array containing a row per
## instances and a column per class, each containing the probability that the given instance belongs to the given class (eg. 50%
## chance of that image represent a 3)
# +
from sklearn.ensemble import RandomForestClassifier
forest_clf = RandomForestClassifier(random_state=47)
y_probas_forest = cross_val_predict(forest_clf, x_train, y_train_3, cv=3, method="predict_proba")
y_probas_forest
# +
## The 1st column shows the probability of a negative instances while the second column shows the probability of a positive instance
y_pos_scores_forest = y_probas_forest[:,1]
fpr_forest, tpr_forest, thresholds_forest = roc_curve(y_train_3, y_pos_scores_forest)
# -
plt.plot(fpr, tpr, 'b--', linewidth=2, label="SGD")
plt.plot(fpr_forest, tpr_forest, 'r-', linewidth=2, label="R. Forest")
plt.plot([0,1],[0,1], 'k--')
plt.xlabel('False Positive Rate (1 - Specificity)')
plt.ylabel('True Positive Rate (Sensitivity / Recall)')
plt.grid()
plt.legend(loc='lower right')
plt.show()
# +
## This shows R.Forest has a higher AUC! Hence, it is better if you want a model with good recall.
roc_auc_score(y_train_3, y_pos_scores_forest)
# -
# ### Finding Precision and recall curve for random forest
y_pred_forest = cross_val_predict(forest_clf, x_train, y_train_3, cv=3)
precision_score(y_train_3, y_pred_forest)
recall_score(y_train_3, y_pred_forest)
# ## Multiclass Classification
# +
## Multiclass classifier aka Multinomial Classifier, can distinguised between more than 2 classes.
### Notes: Logistic Regressions are strickly binary classifier but to perform a multiclass classifier, it can take various strategy.
## First, One-versus-the-rest (OvR). To classify 10 classes, we can train 10 binary classifier. For example, one for each digit.
## Like (O-detector, 1-detector, 2-detector and etc). Then you get the score from each classifier and select the highest one.
# Second, One-versus-One (OvO). To classify 10 classes, we train a binary classifier for every pair of digits. So we need to train
# N x (N-1) / 2 amount of classifers. In this case of digits, 45! Main advantage is that each classifier only need to be trained
# on the part of the training set for 2 classes that it must distinguish. So smaller training sets. It better (generally) for
# SVM as it scale poorly with size of training set. But for others, OvR is generally perferred.
# -
from sklearn.svm import SVC
svm_clf = SVC()
svm_clf.fit(x_train, y_train)
svm_clf.predict([some_number])
# +
## when you call the decision_function() method, you will get 10 scores per instances. That's one score PER CLASS. So column is
## the class index.
some_num_scores = svm_clf.decision_function([some_number])
some_num_scores
# -
np.argmax(some_num_scores)
svm_clf.classes_
svm_clf.classes_[3]
# +
## Normally, you don't always get a matching index to the value. This is just a conviency and coincidence of the dataset.
# +
## If you want to force Scikit-Learn to use either OvR or OvO, you can use OneVsOneClassifier or OneVsRestClassifier classes.
## For example:
# -
from sklearn.multiclass import OneVsOneClassifier
ovo_clf = OneVsOneClassifier(SVC())
ovo_clf.fit(x_train, y_train)
ovo_clf.predict([some_number])
len(ovo_clf.estimators_)
# ### Looking back to SGD
sgd_clf.fit(x_train, y_train)
sgd_clf.predict([some_number])
sgd_clf.decision_function([some_number])
cross_val_score(sgd_clf, x_train, y_train, cv=3, scoring="accuracy")
# +
### maybe we can try to improve the accuracy by simply scaling the input
# -
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
x_train_scaled = scaler.fit_transform(x_train.astype(np.float64))
cross_val_score(sgd_clf, x_train_scaled, y_train, cv=3, scoring="accuracy")
# +
## Yes It did slightly improve.
# -
# ## Error Analysis
y_train_pred = cross_val_predict(sgd_clf, x_train_scaled, y_train, cv=3)
conf_matrix = confusion_matrix(y_train, y_train_pred)
conf_matrix
import seaborn as sns
plt.figure(figsize = (20,12))
sns.heatmap(conf_matrix, annot=True)
plt.ylabel('Truth')
plt.xlabel('Predicted')
plt.show()
# +
## That looks pretty good. But lets focus more on the error and normalise it.
row_sums = conf_matrix.sum(axis=1, keepdims=True)
norm_conf_matrix = conf_matrix / row_sums
# +
np.fill_diagonal(norm_conf_matrix, 0)
plt.figure(figsize = (20,12))
sns.heatmap(norm_conf_matrix, annot=True)
plt.ylabel('Truth')
plt.xlabel('Predicted')
plt.show()
# +
## As you can see, the cases and errors are similar to the book as well. The model seems to be misclassifying images as 8s with
## bright spots. So approximately 8.6% of 5s are misclassified as 8s.
## Also there is seem to be confusion in the model trying to distinguished between 3 and 5 in both directions.
# +
## We can try analysing individual errors but it would be a bit more time consuming and difficult.
# -
cl_a, cl_b = 3, 5
x_aa = x_train[(y_train == cl_a) & (y_train_pred == cl_a)]
x_ab = x_train[(y_train == cl_a) & (y_train_pred == cl_b)]
x_ba = x_train[(y_train == cl_b) & (y_train_pred == cl_a)]
x_bb = x_train[(y_train == cl_b) & (y_train_pred == cl_b)]
def plot_digits(instances, images_per_row=10, **options):
    """Render flattened 28x28 digit images as a single grid with matplotlib."""
    size = 28
    images_per_row = min(len(instances), images_per_row)
    # Reshape each flat 784-vector back into a 28x28 image.
    images = [instance.reshape(size, size) for instance in instances]
    # Number of grid rows needed (ceiling division).
    n_rows = (len(instances) - 1) // images_per_row + 1
    # Pad the final row with one blank strip so every row has equal width.
    n_empty = n_rows * images_per_row - len(instances)
    images.append(np.zeros((size, size * n_empty)))
    row_images = [
        np.concatenate(images[r * images_per_row:(r + 1) * images_per_row], axis=1)
        for r in range(n_rows)
    ]
    image = np.concatenate(row_images, axis=0)
    plt.imshow(image, cmap = matplotlib.cm.binary, **options)
    plt.axis("off")
plt.figure(figsize=(8,8))
plt.subplot(221); plot_digits(x_aa[:25], images_per_row=5)
plt.subplot(222); plot_digits(x_ab[:25], images_per_row=5)
plt.subplot(223); plot_digits(x_ba[:25], images_per_row=5)
plt.subplot(224); plot_digits(x_bb[:25], images_per_row=5)
plt.show()
# +
## There are some digits which looks to be horrendously written even a human can't really classify them. However, most are kinda
## obvious mistakes. It is maybe due to the model being a linear model, hence, it would put the weights equally on each pixel.
## Which then just sums up all the weight.
# -
# ## Multilabel Classification
# +
from sklearn.neighbors import KNeighborsClassifier
y_train_large = (y_train >= 7)
y_train_odd = (y_train % 2 == 1)
y_multilabel = np.c_[y_train_large, y_train_odd]
knn_clf = KNeighborsClassifier()
knn_clf.fit(x_train, y_multilabel)
# -
knn_clf.predict([some_number]) ## 3 is not large and is odd.
# +
## There are many ways to evaluate a multilabel classifer.
y_train_knn_pred = cross_val_predict(knn_clf, x_train, y_multilabel, cv=3)
f1_score(y_multilabel, y_train_knn_pred, average="macro")
# -
# ## Multioutput Classification
# +
## This is a generalisation of multilabel classification where each label can be a multiclass (ie: have more than 2 values.)
## To illustrate, we're going to add some noise to a clean images then make a model to remove the noise.
noise = np.random.randint(0, 100, (len(x_train), 784))
x_train_mod = x_train + noise
noise = np.random.randint(0, 100, (len(x_test), 784))
x_test_mod = x_test + noise
y_train_mod = x_train
y_test_mod = x_test
# -
knn_clf.fit(x_train_mod, y_train_mod)
# BUG FIX: `some_number` holds an image array (x[7]), not an integer index,
# so `x_test_mod[some_number]` fancy-indexed with 784 values instead of
# selecting one test image; and `plot_digit` was never defined in this
# notebook. Select a test image by integer index and reuse the
# plot_digits() helper defined above.
clean_digit = knn_clf.predict([x_test_mod[0]])
plot_digits(clean_digit)
| ch3 - classifications.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:mfanalysis3]
# language: python
# name: conda-env-mfanalysis3-py
# ---
# # Demo
# The `pymultifracs` package allows us to perform fractal and multifractal analysis of signals
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
sns.set(style="whitegrid")
sns.set_context('notebook')
# ## Loading a real signal
# First load a signal from a real, preprocessed MEG recording
with open('signal.npy', 'rb') as f:
X = np.load(f)
plt.plot(X)
sns.despine()
plt.ylabel('X')
plt.xlabel('t')
plt.show()
# ## PSD
# Display the Power Spectral Density of the signal, computed using wavelet and Fourier based estimators
# +
import pymultifracs.psd as psd
fs = 2000
# -
psd.plot_psd(X, fs, n_fft=1024)
plt.xlim((-4, 9))
plt.ylim((-85, -70))
plt.show()
# Here we can clearly see the signal is scale-free in the infra-slow range ($\log_2 f < 3$)
#
# We will therefore proceed with the analysis in this range of frequencies: $f < 2^2$
#
# We need then to determine the scale range to use for the analysis
# ### scale-frequency relationship
# There is a direct inverse relation between scale and frequency, which can be used with the `scale2freq` and `freq2scale` functions.
# The relationship is dependant on the sampling frequency, here 2000Hz
from pymultifracs.utils import scale2freq, freq2scale
fs = 2000
# We're looking for a scale that corresponds to a frequency below 4Hz
freq2scale(4, fs)
# Here since the output is not an int, we need to round it up to the next higher integer, giving us a scale of 9 which will be our lower bound for the scale range.
#
# We can check to what frequency a scale of 9 corresponds
scale2freq(9, fs)
j1 = 9
# The upper bound for our scale range can be determined using the rule of thumb: $j_2 = \log_2 N - 3$.
# The aim is to avoid relying on the highest scales, since for these scales there are not many wavelet coefficients computed -- meaning that the variance of the estimates at these scales will be high.
j2 = np.floor(np.log2(len(X)) - 3)
# Note: When analysing multiple signals at once, it should be done using the same frequency range for all. Pay special attention when working with signals that have different sampling frequencies, as either they need to be resampled to the same frequency, or the scale ranges will have to be different for each frequency.
# Note: When performing analysis on signals of different length, the shortest one should be used for determining the highest usable scale.
# ## Wavelet Analysis
# We first perform the wavelet filtering in order to have an estimate of `hmin`, which indicates us what value to set for `gamint`
from pymultifracs.estimation import estimate_hmin
from pymultifracs.wavelet import wavelet_analysis
WT = wavelet_analysis(X, j1=j1, j2=j2, gamint=0.0)
hmin, _ = estimate_hmin(WT.wt_coefs, 10, WT.j2_eff, weighted=True)
hmin
# Here since `hmin` is negative, this means that in order for the estimation of the multifractal parameters to be correct, we need to set a higer value of `gamint`.
#
# `gamint` is the fractional integration parameter, and increases in this parameters will raise `hmin` by the same amount.
# It is recommended to use multiples of 0.5. In this case, setting `gamint` to 0.5 is enough.
#
# Note that in the case of MEG signals, the value will most often be higher than 1.
# In case the value of `j2` chosen was too high, an automatic correction will take place and the `WT.j2_eff` variable contains the effective j2 value used in the analysis.
gamint = 0.5
# **Using p-leaders**
# ## Multifractal analysis
import pymultifracs.mfa as mfa
from pymultifracs.utils import build_q_log
dwt, lwt = mfa.mf_analysis_full(X,
j1=j1, j2=WT.j2_eff,
q = build_q_log(1, 10, 20),
n_cumul=2,
p_exp=2,
gamint=0.5,
weighted=False
)
# We can check that our correction was good by checking the `hmin` value of the signal that was analysed. As long as it is strictly positive it fine, otherwise `gamint` would need to be increased
dwt.hmin
# The `dwt` variable contains the output of the multifractal analysis of the wavelet coefficients, and `lwt` of the wavelet leaders (or p-leaders if `p_exp` is an int, as is the case here)
# #### structure functions
# We use the wavelet coef-based results to get the $H$ estimate
dwt.structure.H
# We can also plot the structure functions in order to check the scaling properties of the signal for multiple moments $q$.
#
# We're making sure that overall the behavior of the structure functions is scale-free in our scale range
dwt.structure.plot(nrow=5)
# #### cumulants
# Plotting the cumulant functions is done as below:
lwt.cumulants.plot()
# This plot is used to make sure the estimation of the slopes (called log-cumulants $c_1$ and $c_2$) is done correctly.
# We can also directly access the values of the slopes:
lwt.cumulants.c1, lwt.cumulants.c2
# The $M = -c2$ estimate is obtained as follows:
lwt.cumulants.M
# #### multifractal spectrum
# Drawing the multifractal spectrum allows us to visualize the $h$ distribution.
#
# The mode of the spectrum is $c_1$, and the spread is $c_2$
lwt.spectrum.plot()
sns.despine()
| examples/Demo-real.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/ParalelaUnsaac/2020-2/blob/main/tercera_parcial.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/"} id="GF1ufs7z2K-E" outputId="767d767d-2972-4032-d0ce-39fa924f0a96"
# ! pip install mpi4py
# + colab={"base_uri": "https://localhost:8080/"} id="MA38x0z62L11" outputId="fb50e358-0d10-43c1-d2ea-f3b196bfd3fe"
# %%writefile 01masterWorker.py
from mpi4py import MPI
def main():
    """MPI master/worker demo: every worker sends one message to rank 0.

    Rank 0 (master) receives and prints a message from each other rank;
    every other rank sends `sendValue` to rank 0. Written to a script via
    %%writefile and launched with mpirun.
    """
    comm = MPI.COMM_WORLD
    id = comm.Get_rank() #number of the process running the code
    numProcesses = comm.Get_size() #total number of processes running
    myHostName = MPI.Get_processor_name() #machine name running the code
    sendValue = "'ya se acerca las vacaciones'"
    if id == 0:
        # Master: collect one message from every worker rank, in rank order.
        for i in range(1,numProcesses):
            stat = MPI.Status()
            receivedValue = comm.recv(source=i)
            print("Recibiendo mensaje {} desde procesador {}"\
                .format(receivedValue, i))
    else:
        # Worker: send the fixed message to the master (rank 0).
        comm.send(sendValue, dest=0)
        print("Enviando mensaje {} desde procesador {} "\
            .format(sendValue, id))
########## Run the main function
main()
# + colab={"base_uri": "https://localhost:8080/"} id="qE2kv1lO2TBv" outputId="c39d462a-93c6-4345-f12f-7c59d6e78984"
# ! mpirun --allow-run-as-root -np 3 python 01masterWorker.py
| tercera_parcial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Portfolio Optimization via Regression
# ## FINM 36700 Discussion
# ### <NAME>
# ### Autumn 2021
# +
import pandas as pd
import numpy as np
import statsmodels.api as sm
from statsmodels.regression.quantile_regression import QuantReg
from sklearn.linear_model import LinearRegression, Lasso, Ridge
from sklearn.linear_model import QuantileRegressor
from sklearn.decomposition import PCA
from scipy.optimize import lsq_linear
import matplotlib.pyplot as plt
import yfinance as yf
import sys
sys.path.insert(0, '../cmds/')
from portfolio import *
# -
# ## Get Data
#
# - Try using the data from HW#1, related to the Harvard Endowment.
# - Or try a handful of equities downloaded from Yahoo.
# +
filepath_data = '../data/multi_asset_etf_data.xlsx'
info = pd.read_excel(filepath_data,sheet_name='descriptions')
info.rename(columns={'Unnamed: 0':'Symbol'},inplace=True)
info.set_index('Symbol',inplace=True)
rets = pd.read_excel(filepath_data,sheet_name='total returns')
rets.set_index('Date',inplace=True)
rets.drop(columns=['SHV'])
retsx = pd.read_excel(filepath_data,sheet_name='excess returns')
retsx.set_index('Date',inplace=True)
# sort columns by order of descriptions in Info tab
symbol_list = info.index.drop('SHV')
rets = rets[symbol_list]
retsx = retsx[symbol_list]
rets_Harvard, retsx_Harvard = rets, retsx
# +
DATAPATH = '../data/midterm_2_data_pricing.xlsx'
SHEET = 'assets (excess returns)'
retsx = pd.read_excel(DATAPATH,sheet_name=SHEET)
retsx.set_index('Date',inplace=True)
rets = retsx + .01/12
rets_Commodities, retsx_Commodities = rets, retsx
# +
symbols = ['SPY']
# add multi-asset ETF symbols at cost of losing 15+ years of data
# symbols += ['IYR','EFA','EEM','IEF', 'TIP', 'DBC','PSP','QAI','BWX','HYG','SHV']
symbols += ['AAPL','MSFT','JNJ','MMM','GE','DIS','DHR','HON', 'JPM', 'BAM', 'MCD', 'F', 'CVX', 'HD','LOW', 'CAT','NUE','DHI','XOM', 'BA','FDX','BAC','WMT','NYT','TM']
start_date = '1990-12-31'
end_date = '2021-11-30'
df = yf.download(symbols,start_date,end_date)['Adj Close']
rets = df.resample('M').last().pct_change()
rets.dropna(axis=0,inplace=True)
if 'SHV' in rets.columns:
retsx = rets.subtract(rets['SHV'],axis=0).drop(columns=['SHV'])
else:
rf = .01/12
retsx = rets - rf
rets_Equities, retsx_Equities = rets, retsx
# +
# Choose which of the three datasets (or their inner-joined union) the rest
# of the notebook analyzes. Edit USE_DATA to switch.
#USE_DATA = 'Harvard'
#USE_DATA = 'Commodities'
#USE_DATA = 'Equities'
USE_DATA = 'ALL'
if USE_DATA == 'Harvard':
    rets, retsx = rets_Harvard, retsx_Harvard
elif USE_DATA == 'Commodities':
    rets, retsx = rets_Commodities, retsx_Commodities
elif USE_DATA == 'Equities':
    rets, retsx = rets_Equities, retsx_Equities
else:
    # SPY appears in both the Harvard and Equities sets; drop one copy to avoid
    # duplicate columns. join='inner' keeps only dates common to all datasets.
    rets = pd.concat([rets_Harvard.drop(columns=['SPY']), rets_Commodities, rets_Equities],axis=1,join='inner')
    retsx = pd.concat([retsx_Harvard.drop(columns=['SPY']), retsx_Commodities, retsx_Equities],axis=1,join='inner')
# -
# Report sample dimensions used throughout the rest of the analysis.
Ntime, Nassets = retsx.shape
print(f'Number of assets: {Nassets:.0f}')
print(f'Number of periods: {Ntime:.0f}')
display(retsx.head())
display_correlation(retsx)
# ## Figure of Mean-Variance Optimization
# Plot the mean-variance frontier with the tangency (TAN) and global
# minimum-variance (GMV) portfolios, using helpers from the dev extras module.
import os
import sys
if os.path.isfile('../dev/extras.py'):
    sys.path.insert(0, '../dev')
# NOTE(review): this import runs unconditionally; if ../dev/extras.py is
# absent (and extras is not otherwise importable) it raises ImportError.
from extras import MVweights, plotMV
figrets = rets
label = 'GMV'
wstar = pd.DataFrame(MVweights(figrets,target=label),index=figrets.columns,columns=[label])
label = 'TAN'
# isexcess=False because figrets holds total (not excess) returns -- confirm
wstar[label] = MVweights(figrets,target=label,isexcess=False)
wts_a = wstar['TAN']
wts_b = wstar['GMV']
fig = plotMV(wts_a,wts_b,figrets.mean(),figrets.cov(),labels=['TAN','GMV'],annualize=12)
# ### Description of Individual Asset Sharpe Ratios
# Summary statistics of the (monthly, non-annualized) Sharpe ratios per asset.
(retsx.mean()/retsx.std()).to_frame().describe().rename({0:'Sharpe Ratio Summary'},axis=1).drop(index=['count']).style.format('{:.2%}'.format)
# # Mean-Variance Optimization is OLS
#
# ## OLS when Projecting a Constant
#
# The OLS estimator of regressing $y$ on $X$ (no intercept) is:
# $$\boldsymbol{\hat{\beta}_{y|x}} = \left(\boldsymbol{X}'\boldsymbol{X}\right)^{-1}\boldsymbol{X}'\boldsymbol{y}$$
# Though it may seem unusual we could regress a constant on regressors:
# $$1 = \beta x_t + \epsilon_t$$
# _Obviously, if we included an intercept, the regression would be degenerate with $\alpha=1, \beta=0, \epsilon_t=0$._
#
# Regress the constant, 1, on returns. So $X=R$ and $y=1$.
# $$\boldsymbol{\hat{\beta}_{1|R}} = \left(\boldsymbol{R}'\boldsymbol{R}\right)^{-1}\boldsymbol{R}'\boldsymbol{1_n}$$
#
# ### The OLS solution as sample moments
# $$\boldsymbol{\hat{\beta}_{1|R}} = \boldsymbol{\hat{\Sigma}_0^{-1}\hat{\mu}}$$
#
# **Scaling**
#
# The OLS betas will not sum to one, but we can include a scaling factor to ensure this, and we can refer to this as a weight vector, $\boldsymbol{w}_{ols}$:
# $$\boldsymbol{w}_{ols} = \boldsymbol{\hat{\Sigma}_0^{-1}\hat{\mu}}\ \hat{c}_{ols}$$
#
# $$\hat{c}_{ols} = \frac{1}{\boldsymbol{1}'_k\boldsymbol{\hat{\Sigma}}_0^{-1}\boldsymbol{\hat{\mu}}}$$
#
# ## Mean-Variance Solution
#
# Using sample estimates for the moments above, we have:
# $$\boldsymbol{\hat{w}}_{tan} = \boldsymbol{\hat{\Sigma}^{-1}\hat{\mu}}\hat{c}_{tan}$$
#
# $$\hat{c}_{\text{tan}} = \frac{1}{\boldsymbol{1_k'\hat{\Sigma}^{-1}\hat{\mu}}}$$
#
# where $\hat{c}_{\text{mv}}$ is a constant that ensures $\boldsymbol{\hat{w}}_{tan}$ sums to 1.
# ## Equality
#
# If we go through the tedious linear algebra, we find
# $$\boldsymbol{\hat{w}}_{tan} =
# \boldsymbol{\hat{\Sigma}}^{-1}\boldsymbol{\hat{\mu}}\hat{c}_{tan} =
# \boldsymbol{\hat{\Sigma}}_0^{-1}\boldsymbol{\hat{\mu}}\hat{c}_{ols} = \boldsymbol{\hat{w}}_{ols}$$
#
#
# ## Scaling of the constant used as the dependent variable
# - We are using the constant $1$ on the left-hand-side as the dependent variable.
# - For OLS, the scaling of this constant simply changes the sum of the weights. Thus, it impacts the exact scaling constant, $\hat{c}_{ols}$, which enforces the weights to sum to 1.
#
# ### Going beyond MV, the scaling may matter!
# - For more complex optimization, the solution weights do not scale proportionally with the target mean, as they do for the excess-return Mean-Variance frontier.
# - In those cases, we may need to rescale the regressand constant to trace out the frontier.
#
# ## Conclusion
# **Mean Variance Optimization is equivalent to OLS of a constant on the returns.**
#
# This means...
# - We can get statistical significance of the MV weights.
# - We can restrict the MV solution in ways we commonly restrict OLS. This includes Non-Negative Least Squares.
# - We can restrict the number of positions in the MV solution. (LASSO).
# - We can restrict the position sizes in the MV solution via a penalty parameter instead of $2n$ boundary constraints. (Ridge).
# +
wts = tangency_weights(retsx).rename({0:'MV'},axis=1)
# for OLS, doesn't matter what scaling we give y, just use y=1
# but note that below this scaling may matter
y = np.ones((Ntime,1))
X = retsx
beta = LinearRegression(fit_intercept=False).fit(X,y).coef_.transpose()
# rescale OLS beta to sum to 1
beta /= beta.sum()
wts['OLS'] = beta
wts.style.format('{:.2%}'.format)
# -
# ### Confirmation
# - They are the same weights!
# - So we drop the redundant `OLS` column.
#
# ### Statistical Significance (in-sample) of these weights
# Get them from the usual OLS t-stats!
# In-sample significance of the MV weights via the OLS t-stats of the
# regression of y=1 on returns; show only |t| > 2, largest first.
tstats = pd.DataFrame(sm.OLS(y, X).fit().tvalues,columns=['t-stat'])
display(tstats.loc[tstats['t-stat'].abs()>2].sort_values('t-stat',ascending=False).style.format('{:.2f}'.format))
# # No Short Positions
#
# Implement via Non-Negative Least Squares (NNLS)
# - Do this instead of using Constrained Optimization with $k$ boundary constraints.
# - NNLS is doing the Linear Programming with inequalities the same as we would do with Constrained Optimization, but it saves us some work in implementation.
# +
# for NNLS, scaling of y does not matter
y = np.ones((Ntime,1))
X = retsx
# positive=True turns the regression into Non-Negative Least Squares,
# i.e. the no-short-positions MV solution
beta = LinearRegression(fit_intercept=False, positive=True).fit(X,y).coef_.transpose()
beta /= beta.sum()
# bare expression below is a leftover notebook echo of the weights
beta
wts['NNLS'] = beta
# show only the strictly positive (held) positions, largest first
wts.loc[wts['NNLS']>0, ['NNLS']].sort_values('NNLS',ascending=False)
# -
# # Regularized Regressions are Useful
#
# The OLS interpretation of MV makes clear that due to multicolinearity, the optimal in-sample weights can be extreme.
#
# Instead, we may want to use regularized regression to deal with the following constraints.
# **Constraints**
# 1. restrict gross leverage, $\sum_{i}^n |w^i| \le L$
# 2. limit the total number of positions, $\sum_{i}^n\boldsymbol{1}_{>0}\left(w^i\right) \le K$
#
# This can be done somewhat clumsily with the traditional constrained optimization.
#
# But other challenges are hard to address with traditional techniques
#
# **Challenges**
# 1. Limit positions from being too large, without specifying security-specific boundaries.
# 1. Put more emphasis on out-of-sample performance
# 1. Implement a Bayesian approach to Mean-Variance optimization
# ## Ridge Estimation
#
# - Ridge estimation may help with the challenges above.
# - Except it will NOT limit the total number of positions.
#
# The Ridge estimator is the optimized solution for a regularized regression with an L2 penalty.
#
# Recall that the Ridge estimator is
# $$\hat{\boldsymbol{\beta}}^{\text{ridge}} = \left(\boldsymbol{X}'\boldsymbol{X} + \lambda\mathcal{I}_n\right)^{-1}\left(\boldsymbol{X}\boldsymbol{y}\right)$$
# where
# - $\mathcal{I}_n$ is the $n\times n$ identity matrix.
# - $\lambda$ is a hyperparameter ("tuning" parameter) related to the L2 penalty.
#
# Note that this is the exact same as OLS, except we have a modified second-moment matrix. In our application of regressing 1 on returns without an intercept, the point is that instead of the OLS calculation,
# $$\boldsymbol{\hat{\Sigma}}_0^{-1}\boldsymbol{\hat{\mu}}$$
# we use
# $$\left(\boldsymbol{\hat{\Sigma}}_0 + \lambda\mathcal{I}_n\right)^{-1}\boldsymbol{\hat{\mu}}$$
# - For $\lambda=0$, we simply have OLS.
# - For large $\lambda$, we are diagonalizing the second-moment matrix. (Since we do not regress on an intercept, this is the uncentered second-moment matrix, not quite the covariance matrix.)
#
# ### Conclusion
# - The Ridge estimator is diagonalizing the second-moment matrix, which makes it more stable for inversion.
# - This reduces its sensitivity to small changes in the data, and allows it to perform more consistently out-of-sample, though less optimally in-sample.
# - Conceptually, this means that it constructs less extreme long-short weights given that it diminishes the magnitudes of the correlations relative to the main diagonal.
# - Statistically, the extra term on the second-moment matrix is reducing the impact the multicolinearity of the asset returns have on the estimate.
# ## LASSO Estimation
#
# - LASSO estimation helps with the challenges above.
# - Additionally, LASSO can reduce the number of positions, (dimension reduction.)
#
# Unlike Ridge, there is no closed-form solution for the LASSO estimator.
# ## Bayesian Interpretation
#
# **Ridge**
# - The Ridge estimator is a Bayesian posterior assuming a Normally distributed prior on the betas, updated via normally distributed sample data.
#
# **LASSO**
# - The LASSO estimator is a Bayesian posterior assuming a Laplace-distributed prior on the betas, updated via normally distributed sample data.
#
# This does not mean Ridge requires us to believe the data is normally distributed. That is an assumption to interpret it as the Bayesian solution.
#
# Figure 6.2 from *Introduction to Statistical Learning* (James, Witten, Hastie, and Tibshirani) gives a useful visualization of these priors, and why they lead to Ridge having muted, but non-zero betas, whereas LASSO has (potentially many) betas equal to zero and others that are not muted as much as in Ridge.
# <img src="../refs/regularized_bayesian_priors.png" width="400"/>
#
# *If figure does not display, make sure you have the `refs` directory in the repo, at the expected relative path.*
#
def penalized_reg_limit_gross(func, X, y, limit=2, penalty=1e-6, fit_intercept=True, max_iter=10_000):
    """Tune a regularization penalty until portfolio gross leverage meets a limit.

    Repeatedly fits the penalized regression `func` (e.g. sklearn `Ridge` or
    `Lasso`), growing the penalty by 25% each iteration, until the normalized
    weights (betas rescaled to sum to 1) have gross leverage sum(|w_i|) <= limit.

    Parameters
    ----------
    func : callable
        Estimator class with the sklearn API, accepting `alpha` and
        `fit_intercept` and exposing `coef_` after `.fit(X, y)`.
    X : array-like, shape (n_samples, n_assets)
        Asset (excess) returns used as regressors.
    y : array-like, shape (n_samples,) or (n_samples, 1)
        Regressand; a constant vector in the MV-as-OLS formulation.
    limit : float, default 2
        Maximum allowed gross leverage of the normalized weights.
    penalty : float, default 1e-6
        Initial penalty; grown geometrically until the limit is satisfied.
    fit_intercept : bool, default True
        Passed through to the estimator.
    max_iter : int, default 10_000
        Safety cap on iterations (new, backward-compatible safeguard).

    Returns
    -------
    (wts, penalty) : tuple
        Normalized weights summing to 1, and the penalty that achieved them.

    Raises
    ------
    RuntimeError
        If the limit is not reached within `max_iter` iterations, or the
        coefficients sum to ~0 so the weights become non-finite. (The original
        code silently returned NaN weights in the latter case.)
    """
    # sentinel weights guarantee the search loop runs at least once
    wts = np.ones(X.shape[1]) * 100
    n_iter = 0
    while np.abs(wts).sum() > limit:
        if n_iter >= max_iter:
            raise RuntimeError(f'gross-leverage limit {limit} not reached within {max_iter} iterations')
        n_iter += 1
        penalty *= 1.25
        model = func(alpha=penalty, fit_intercept=fit_intercept).fit(X, y)
        # rescale betas into portfolio weights that sum to 1
        wts = model.coef_ / model.coef_.sum()
        if not np.all(np.isfinite(wts)):
            raise RuntimeError('weights became non-finite (coefficients summed to ~0); '
                               'no penalty satisfies the gross-leverage limit')
    return wts, penalty
# +
# Compute Ridge and Lasso portfolio weights, tuning each penalty until the
# weights' gross leverage is within GROSS_LIMIT.
GROSS_LIMIT = 2
# scaling of y will impact the solution if penalty held constant
# here, we adjust the penalty to ensure the scaling, so initial scaling of y is less important
# NOTE(review): these calls use total returns `rets`, whereas the MV/OLS cells
# above used excess returns `retsx` -- confirm this is intentional.
betas, penalty_ridge = penalized_reg_limit_gross(Ridge, rets, y, limit=GROSS_LIMIT, fit_intercept=False)
wts['Ridge'] = betas.transpose()
betas, penalty_lasso = penalized_reg_limit_gross(Lasso, rets, y, limit=GROSS_LIMIT, fit_intercept=False)
wts['Lasso'] = betas.transpose()
print(f'Penalty for Ridge: {penalty_ridge : .2e}.\nPenalty for LASSO: {penalty_lasso : .2e}.')
# -
# ## Diagonalization and Shrinkage
#
# ### Diagonalization
# - Diagonalize the covariance matrix (set all off-diagonal terms to 0).
# - This was popular long before Ridge and continues to be.
#
# ### Shrinkage Estimators
# - "Shrink" the covariance matrix going into MV estimation by mixing a diagonalized version of the matrix with the full matrix, according to some mixing parameter.
# - The mixing parameter may change over time, depending on the data.
# - This is equivalent to Ridge for certain specification of the mixing parameter.
#
# So Ridge is another lens for a popular approach to MV optimization.
# MV weights using only the diagonal of the covariance matrix (all
# off-diagonal correlations zeroed out), normalized to sum to 1.
covDiag = np.diag(np.diag(retsx.cov()))
temp = np.linalg.solve(covDiag,retsx.mean())
wts['Diagonal'] = temp / temp.sum()
# # Performance
# +
# Assemble the portfolio comparison: add an equal-weight benchmark, drop the
# redundant OLS column (identical to MV), and evaluate all strategies.
if 'Equal' not in wts.columns:
    # np.ones_like(Nassets) is a 0-d int array of 1; dividing by Nassets yields
    # the scalar 1/Nassets, broadcast down the column (np.ones(Nassets)/Nassets
    # would be the more conventional spelling)
    wts.insert(0,'Equal',np.ones_like(Nassets)/Nassets)
if 'OLS' in wts.columns:
    wts.drop(columns=['OLS'],inplace=True)
# portfolio excess returns: (T x N) returns times (N x K) weight matrix
retsx_ports = retsx @ wts
# -
display(performanceMetrics(retsx_ports, annualization=12))
display(tailMetrics(retsx_ports))
display(get_ols_metrics(retsx['SPY'], retsx_ports,annualization=12))
# cumulative growth of 1 unit invested in each strategy
(1+retsx_ports).cumprod().plot();
display_correlation(retsx_ports)
wts.abs().sum().plot.bar(title='Gross Leverage');
wts.style.format('{:.2%}'.format).background_gradient(cmap='Blues')
# ***
| discussions/Optimal Portfolios via Regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Aggregations, Sorting, and Basic Calculations
# © Explore Data Science Academy
# ## Learning Objectives
#
# In this train we will explore:
# - Built-in functions
# - Grouping data
# - Sorting data
#
# ## Outline
#
# - Built-in functions
# - Counting
# - Basic Arithmetic
# - Sum and AVG
# - max and min
# - ORDER BY
# - GROUP BY
# - HAVING
# ### Loading the database
# Load SQL magic commands
# %load_ext sql
# Load Chinook SQLite database
# + language="sql"
#
# sqlite:///chinook.db
# -
# Chinook database ER diagram:
#
# <img src="https://github.com/Explore-AI/Pictures/blob/master/sqlite-sample-database-color.jpg?raw=true" width=70%/>
#
# _[Image source](https://www.sqlitetutorial.net/sqlite-sample-database/)_
# ## [Built-in functions](https://db.apache.org/derby/docs/10.13/ref/rrefsqlj29026.html)
#
# In this section, we demonstrate some of the useful SQL built-in functions for data analysis.
#
# ### 1. Counting
# One of the most common built-in functions you'll encounter is the `count()` function. As the name implies, this function will count the number of rows of the column specified in its arguments. In the simplest case, it can be used as follows:
#
# SELECT count(column_name)
# FROM table_name
#
# where the column whose rows we want to count is supplied as an argument into the count function.
#
# Great, let's do some examples:
#
# ### 1.1. Counting rows of a given column
#
# Let's write a query that counts the number of albums and the number of artists in the albums table. In English:
#
# return a count of the AlbumId column and a count of the ArtistId column from the albums table
#
# In SQL:
# + language="sql"
#
# SELECT count(AlbumId) AS "Number of Albums", count(ArtistId) AS "Number of Artists"
# FROM albums;
# -
# Notice that we've added aliases for the counts for better readability. Try removing them and see what happens.
#
# The above query seems to be successful right? However, those with a keen eye may be wondering why we have the same number of artists as albums, surely the number of albums should be greater than the number of artists. We'll discuss how to resolve this in the next example, in the meantime, do a `SELECT *` query on the albums table to see if you can understand where the problem is.
# + language="sql"
#
# -- your code here
# -
# ### 1.2. Counting unique values in a given column
# If you take a closer look at the albums table (i.e. in your SELECT query above), you will notice that some of the ArtistIds have been duplicated. This makes sense because some Artists will have written multiple albums. Now the question is, how do we reflect this in SQL?
#
# This brings us to the `DISTINCT` keyword, which eliminates duplicates in a column. We can use this keyword together with the count function to only consider distinct values.
#
# Following from the previous example, write a query that counts the number of albums and the number of unique artists in the albums table. In English:
#
# return a count of the AlbumId column and a count of the unique rows in the ArtistId column from the albums table
#
# In SQL:
# + language="sql"
#
# SELECT count(AlbumId) AS "Number of Albums", count(DISTINCT ArtistId) AS "Number of Unique Artists"
# FROM albums;
# -
# This looks more realistic, to achieve this we used the `DISTINCT` keyword in the argument of the count function. In other words, we supplied the count function with a new column (i.e. `DISTINCT ArtistId`) which only contains unique ArtistIds.
# ### 2. Max and Min
#
# SQL has built-in functions for calculating the maximum value of column (i.e. `max()`) and the minimum value of column (i.e. `min()`). These functions can be applied on all numerical type columns and DATETIME columns.
#
# Let's do some examples.
# ### 2.1. Calculating the minimum of a column
# Write a query that returns the name of the shortest song in the database. In English:
#
# return data from the Name column and the minimum of the Milliseconds column from the tracks table
#
# In SQL:
# + language="sql"
#
# SELECT Name, min(Milliseconds) AS "Length in Millisecs"
# FROM tracks;
# -
# Easy peasy, lemon squeezy. However, aside from the fact that this song is in Portuguese, nothing else makes sense. For example, how long is this song in minutes?
#
# Before we answer this question, let's do another example.
# ### 2.2. Calculating the maximum of a column
# Write a query that returns the name of the most recently hired employee. In English:
#
# return the FirstName column,LastName column and maximum of the HireDate column from the employees table
#
# In SQL:
# + language="sql"
#
# SELECT FirstName, LastName, max(Hiredate) AS "Hire Date"
# FROM employees;
# -
# As you can see, we can find the max (later) or min (earlier) dates in the same way as we do with numerical columns.
# ### 3. Basic arithmetic
#
# We can perform basic calculations in SQL using standard math operators:
# - `+` - addition
# - `-` - subtraction
# - `*` - multiplication
# - `/` - division
#
# Let's see some examples (since all these operators work in the same way, we will only demonstrate multiplication and division here):
#
# ### 3.1. Multiplication
# Suppose chinook hired a South African accountant and needed to send them all company invoices. As such, each invoice total has to be converted from dollars to rands. Write a query that will achieve this (assumming that 1 Canadian dollar = 13.18 rands). In English:
#
# return data in all columns, the Total column multiplied by 13.18 from the invoices table
#
# In SQL:
# + jupyter={"outputs_hidden": true} language="sql"
#
# SELECT *, Total*13.18 AS "Total in Rands"
# FROM invoices
# LIMIT 10;
# -
# ### 3.2. Division
# To follow up on the query in subsection 2.1. Write a query that returns the name of the longest song in the database but converts the song length from Milliseconds to Minutes. In English:
#
# return data from the Name column and the maximum of the Milliseconds column divided by 60000 from the tracks table
#
# In SQL:
# + language="sql"
#
# SELECT Name, max(Milliseconds)/60000 AS "Length in Minutes"
# FROM tracks;
# -
# Although our query does its job, we have stumbled upon a suspicious entry. An 88 minute song?
# Not exactly, before we jump to conclusions, notice that there is a MediaTypeId in the tracks table. If we trace that back to the media_types table:
# + language="sql"
#
# SELECT *
# FROM media_types
# -
# We can see that the database (particularly the tracks table) also stores videos :) so false alarm. That said, the `max()` and `min()` functions are also an ok way to find outliers in the data.
# ### 4. Summing and Averaging
# Next to `max` and `min`, the `sum` and `avg` functions are the most common aggregator functions you will need in your SQL queries. It is worth noting that all these aggregator functions will ignore NULL values (i.e. missing entries in a column).
#
# Let's do some examples.
#
# ### 4.1 Calculating the sum of a column
# Write a query that calculates how long (in hours) it would take to go through all chinook media (i.e. music, videos, etc). In English:
#
# return the sum of the Milliseconds column divided by 3600000 from the tracks table
#
# In SQL:
# + language="sql"
#
# SELECT sum(Milliseconds)/3600000 AS "Time in Hours"
# FROM tracks;
# -
# ### 4.2 Calculating the average of a column
#
# Write a query that returns the average purchase total per invoice. In English:
#
# return the average of the Total column from the invoices table
#
# In SQL:
# + language="sql"
#
# SELECT round(avg(Total),2) AS "Average purchase in dollars"
# FROM invoices;
# -
# Note that we have also used the `round` function in this query to round off our answer to 2 decimal places.
# ## Grouping and Ordering data
#
# In this section we cover how to group and order data, we also explore how these operations can be combined with aggregator functions to produce more insightful queries.
# ### 1. The ORDER BY clause
# The ORDER BY clause is used to sort SQL query results by a given column. This clause can be applied to a standard query as follows:
#
# ```sql
# SELECT columns
# FROM tables
# WHERE boolean expression
# ORDER BY column DESC
# ```
#
# We can also specify whether we want the rows to be sorted in ascending order (default) or descending order (`DESC`) using `DESC` keyword.
#
# Let's do some examples
#
# ### 1.1. Sorting in ascending order
# Write a query that returns the top 10 shortest media (in seconds) items in the database. In English:
#
# return the Name, Milliseconds column divided by 1000 from the tracks table, sort by the Milliseconds column in ascending order, and only output the first 10 rows
#
# In SQL:
# + jupyter={"outputs_hidden": true} language="sql"
#
# SELECT Name, round(Milliseconds/1000.0,2) AS "Length in Seconds"
# FROM tracks
# ORDER BY Milliseconds
# LIMIT 10;
# -
# In this query, we divided by 1000.0 (i.e. adding the `.0`) to avoid integer division (which ignores the fractional parts).
# ### 1.2. Sorting in descending order
# Write a query that returns the Names of top 10 best customers and how much they spend on chinook media. In English:
#
# return the first and last name from the customers table, the Total column from the invoices table, align both tables by the customerId and sort the rows by the invoice Total column in descending order, and limit results to 10 rows
#
# In SQL:
# + language="sql"
#
# SELECT c.FirstName, c.LastName, i.Total AS "Purchase Total"
# FROM customers c, invoices i
# WHERE c.CustomerId = i.CustomerId
# ORDER BY i.Total DESC
# LIMIT 10;
# -
# ### 2. The GROUP BY clause
#
# The GROUP BY clause is used to group rows together which share the same value. In SQL we typically group rows by columns, which creates a group for each unique value in that column(s). As such, we typically use the GROUP BY clause with an aggregator function which will then operate on each group in the result set. A standard GROUP BY clause will take on the following format:
#
# ```
# SELECT columns
# FROM tables
# WHERE boolean expression
# GROUP BY columns
# ```
#
# The GROUP BY clause should always follow a WHERE clause (if it is present) and precede an ORDER BY clause (if it is present).
#
# Let's do some examples:
# ### 2.1. Grouping by a single column
#
# To start off, let's revisit the query from section 1.2 above. We tried to sort customers by their purchase totals. However, we forgot one crucial thing, customers generally make more than one purchase. As such, the best customers will be the ones with the highest sum of individual invoice totals.
#
# Let's massage our query to reflect this, again write a query that returns the Names of top 10 best customers and how much they spend on chinook media. In English:
#
# return the first and last name from the customers table, the summed Total column from the invoices table, align both tables by the customerId and sort the rows by the invoice Total column in descending order, and group results by the CustomerId from the invoices table, and limit results to 10 rows
# In SQL:
# + jupyter={"outputs_hidden": true} language="sql"
#
# SELECT c.FirstName, c.LastName, round(sum(i.Total),2) AS "Total Spend"
# FROM customers c, invoices i
# WHERE c.CustomerId = i.CustomerId
# GROUP BY i.CustomerId
# ORDER BY [Total Spend] DESC
# LIMIT 10; -- Remove this line to see the full output
# -
# Now note that some of the customers that we initially thought were great customers (e.g. `<NAME>`) are no longer there.
# ### 2.2. Grouping by multiple columns
# Write a query that returns the average unit price of media items in each genre. In English:
#
# return the genre Name column and an average of the unitprice column from the genre and tracks tables, where the genreId in the genre table matches the genreId in the tracks table, and the trackId in the tracks table matches the trackId in the invoice_items table, then group the results first by the track's genreId, and then group the results by the invoice_items unitprice
#
# In SQL:
# + jupyter={"outputs_hidden": true} language="sql"
#
# SELECT g.Name, round(avg(i.UnitPrice),2) AS "AVG Price per unit"
# FROM genres g, tracks t, invoice_items i
# WHERE g.GenreId = t.GenreId
# AND t.TrackId = i.TrackId
# GROUP BY t.GenreId, i.UnitPrice
# LIMIT 10; -- Remove this line to see the full output
# -
# In this query, the information we need is spread across 3 tables. As such, we have used the WHERE clause to connect and align the information between them. When the GROUP BY clause is supplied with multiple columns, it will group by each column consecutively starting with the leftmost column to the right.
# ### 3. The HAVING Clause
#
# Until now, we've been using the WHERE clause to return only the rows that meet a set of conditions. However, what we haven't been able to do is to specify conditions on aggregated rows (particularly rows that result from a GROUP BY clause), this is where the HAVING clause comes in. It works exactly like the WHERE clause but will operate on aggregated rows. As such, the HAVING clause always has to follow the GROUP BY clause, not the other way round. A standard HAVING clause will take on the following format:
#
# ```
# SELECT columns
# FROM tables
# WHERE boolean expression
# GROUP BY columns
# HAVING boolean expression
# ```
#
# Let's see some examples:
# ### 3.1. Filtering the results of the group by clause
#
# Write a query that returns the countries with 5 or more customers. In English:
#
# return a count of the CustomerId column, the country column from the customers table, group the results by the country name, and display only the countries with a count greater than or equal to 5
#
# In SQL:
# + language="sql"
#
# SELECT count(CustomerId), Country
# FROM customers
# GROUP BY Country
# HAVING COUNT(CustomerId) >= 5;
# -
# ## Conclusion
#
# In this train we have learned how to perform basic calculations within our SQL queries, how to sort query results in ascending or descending order, how to group data by the unique values of one or more columns, and lastly, how to place conditions on grouped rows of data.
# ## Additional Links
# - [Built-in Functions](https://db.apache.org/derby/docs/10.13/ref/rrefsqlj29026.html)
# - [The GROUP BY clause](https://db.apache.org/derby/docs/10.13/ref/rrefsqlj32654.html)
# - [The ORDER BY clause](https://db.apache.org/derby/docs/10.13/ref/rrefsqlj13658.html)
# - [The HAVING clause](https://db.apache.org/derby/docs/10.13/ref/rrefsqlj14854.html)
| SQL/Aggregations_sorting_and_basic_calculations-data_and_notebook-a-1962/5_Aggregations_sorting_and_calculations.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Creates a dataset version for cough classification learning tasks.
# %load_ext autoreload
# %autoreload 2
# +
from os import makedirs, symlink, rmdir
from os.path import join, dirname, exists, isdir, basename, splitext
from shutil import rmtree
import math
from collections import defaultdict
import pandas as pd
import numpy as np
from glob import glob
from tqdm import tqdm
from librosa import get_duration
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from termcolor import colored
from cac.utils.io import save_yml
from cac.utils.pandas import apply_filters
# -
# Fix the RNG seed so the patient sampling and splits below are reproducible.
random_state = 0
np.random.seed(random_state)
# +
# directory where the data resides
data_root = '/data/wiai-facility/'
save_root = join(data_root, 'processed')
version_dir = join(save_root, 'versions')
makedirs(version_dir, exist_ok=True)
save_audio_dir = join(save_root, 'audio')
# -
# Load per-recording attributes and annotations produced by preprocessing.
attributes = pd.read_csv(join(save_root, 'attributes.csv'))
annotation = pd.read_csv(join(save_root, 'annotation.csv'))
annotation.shape, attributes.shape
type(annotation['classification'][0])
# SECURITY FIX: parse the stringified label lists with ast.literal_eval, which
# only accepts Python literals -- unlike eval(), which would execute arbitrary
# code embedded in the CSV.
from ast import literal_eval
annotation['classification'] = annotation['classification'].apply(literal_eval)
type(annotation['classification'][0])
annotation['classification'][0]
# keep only the cough recordings (three cough takes per patient)
annotation = apply_filters(annotation, {'audio_type': ['cough_1', 'cough_2', 'cough_3']})
annotation.shape
# #### Split patients in training and validation sets
# Select a subset of patients and split them (at the patient level, to avoid
# the same patient's recordings leaking across splits) into train/val/test.
all_patients = list(annotation['id'].unique())
NUM_PATIENTS_TO_SELECT = 100
# BUG FIX: sample WITHOUT replacement -- the previous default (replace=True)
# could select the same patient multiple times, placing one patient in both
# train and test; also use the NUM_PATIENTS_TO_SELECT constant instead of a
# hard-coded 100.
all_patients = np.random.choice(all_patients, NUM_PATIENTS_TO_SELECT, replace=False)
len(all_patients)
# 80% train, then split the remaining 20% evenly into val and test
train_ids, val_test_ids = train_test_split(all_patients, test_size=0.2, random_state=random_state)
val_ids, test_ids = train_test_split(val_test_ids, test_size=0.5, random_state=random_state)
len(train_ids), len(val_ids), len(test_ids)
def _prepare_split(annotation_df, patient_ids):
    """Filter annotations to the given patients and shape them for training.

    Drops bookkeeping columns and wraps each classification label in the
    {'classification': ...} dict format expected downstream.
    """
    df = apply_filters(annotation_df, {'id': patient_ids}, reset_index=True)
    df = df.drop(columns=['unsupervised', 'users', 'audio_type', 'id'])
    df.rename({'classification': 'label'}, axis=1, inplace=True)
    df['label'] = df['label'].apply(lambda x: {'classification': x})
    return df

# one frame per split, plus one over all selected patients (previously four
# copy-pasted variants of the same four statements)
df_train = _prepare_split(annotation, train_ids)
df_val = _prepare_split(annotation, val_ids)
df_test = _prepare_split(annotation, test_ids)
df_all = _prepare_split(annotation, all_patients)
df_train.shape, df_val.shape, df_test.shape, df_all.shape
# Serialize the dataset version (file lists + labels per split) to a YAML
# file under the versions directory.
version = 'default-clf'
save_path = join(save_root, 'versions', '{}.yml'.format(version))
# +
description = dict()
description['description'] = 'version for COVID vs non COVID task(s) on cough with random split'
# one entry per split: parallel lists of audio file paths and label dicts
for name, _df in zip(['all', 'train', 'val', 'test'], [df_all, df_train, df_val, df_test]):
    description[name] = {
        'file': _df['file'].values.tolist(),
        'label': _df['label'].values.tolist()
    }
# -
# save description
makedirs(dirname(save_path), exist_ok=True)
save_yml(description, save_path)
| datasets/versioning/cough-classification/wiai-facility/default-clf.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Build Neural network from scratch
#
# In this project, I'll build a neural network and use it to predict student admissions.
#
#
# # Predicting Student Admissions with Neural Networks
# In this notebook, we predict student admissions to graduate school at UCLA based on three pieces of data:
# - GRE Scores (Test)
# - GPA Scores (Grades)
# - Class rank (1-4)
#
# The dataset originally came from here: http://www.ats.ucla.edu/
#
# ## Loading the data
# To load the data and format it nicely, we will use two very useful packages called Pandas and Numpy. You can read on the documentation here:
# - https://pandas.pydata.org/pandas-docs/stable/
# - https://docs.scipy.org/
# +
# %matplotlib inline
# %load_ext autoreload
# %autoreload 2
# %config InlineBackend.figure_format = 'retina'
# Importing pandas matplotlib and numpy
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# +
# Reading the csv file into a pandas DataFrame
data = pd.read_csv('data/student_data.csv')
# Printing out the first rows of our data (head() defaults to 5 rows)
data.head()
# -
# ## Plotting the data
#
# First let's make a plot of our data to see how it looks. In order to have a 2D plot, let's ignore the rank.
# +
# # %matplotlib inline
import matplotlib.pyplot as plt
# Function to help us plot
def plot_points(data):
    """Scatter-plot applicants in GRE/GPA space: rejected red, admitted cyan."""
    X = np.array(data[["gre", "gpa", "rank"]])
    y = np.array(data["admit"])
    # np.argwhere keeps an extra axis, hence the double indexing below.
    admitted = X[np.argwhere(y == 1)]
    rejected = X[np.argwhere(y == 0)]
    # Draw rejected first so admitted points sit on top.
    for pts, colour in ((rejected, 'red'), (admitted, 'cyan')):
        plt.scatter([p[0][0] for p in pts], [p[0][1] for p in pts],
                    s=25, color=colour, edgecolor='k')
    plt.xlabel('Test (GRE)')
    plt.ylabel('Grades (GPA)')
# Plotting the points
# Plot every applicant at once (rank ignored in this 2D view).
plot_points(data)
plt.show()
# -
# Roughly, it looks like the students with high scores in the grades and test passed, while the ones with low scores didn't, but the data is not as nicely separable as we hoped it would. Maybe it would help to take the rank into account? Let's make 4 plots, each one for each rank.
# +
# Separating the ranks
# One sub-frame per undergraduate institution rank (1-4).
data_rank1 = data[data["rank"]==1]
data_rank2 = data[data["rank"]==2]
data_rank3 = data[data["rank"]==3]
data_rank4 = data[data["rank"]==4]
# Plotting the graphs
# One GRE/GPA scatter per rank, to see how admission varies with rank.
plot_points(data_rank1)
plt.title("Rank 1")
plt.show()
plot_points(data_rank2)
plt.title("Rank 2")
plt.show()
plot_points(data_rank3)
plt.title("Rank 3")
plt.show()
plot_points(data_rank4)
plt.title("Rank 4")
plt.show()
# -
# This looks more promising, as it seems that the lower the rank, the higher the acceptance rate. Let's use the rank as one of our inputs. In order to do this, we should one-hot encode it.
#
# ## TODO: One-hot encoding the rank
# Use the `get_dummies` function in pandas in order to one-hot encode the data.
#
# Hint: To drop a column, it's suggested that you use `one_hot_data`[.drop( )](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.drop.html).
# +
## One solution
# Make dummy variables for rank
one_hot_data = pd.concat([data, pd.get_dummies(data['rank'], prefix='rank')], axis=1)
# Drop the previous rank column
one_hot_data = one_hot_data.drop('rank', axis=1)
# Print the first 10 rows of our data
one_hot_data[:10]
## Alternative solution ##
# if you're using an up-to-date version of pandas,
# you can also use selection by columns
# an equally valid solution
# NOTE: this line overwrites the result above; both approaches yield the
# same columns, so only this second result is used downstream.
one_hot_data = pd.get_dummies(data, columns=['rank'])
one_hot_data[:10]
# -
# ## TODO: Scaling the data
# The next step is to scale the data. We notice that the range for grades is 1.0-4.0, whereas the range for test scores is roughly 200-800, which is much larger. This means our data is skewed, and that makes it hard for a neural network to handle. Let's fit our two features into a range of 0-1, by dividing the grades by 4.0, and the test score by 800.
# +
# Copying our data
# Use an explicit copy() (rather than `one_hot_data[:]`) so that the column
# assignments below cannot raise pandas' SettingWithCopyWarning or write
# back into the original frame.
processed_data = one_hot_data.copy()
# Scaling the columns into [0, 1]: GRE scores span roughly 200-800 and
# GPA spans 1.0-4.0, so divide by each feature's maximum.
processed_data['gre'] = processed_data['gre']/800
processed_data['gpa'] = processed_data['gpa']/4.0
processed_data[:10]
# -
# ## Splitting the data into Training and Testing
# In order to test our algorithm, we'll split the data into a Training and a Testing set. The size of the testing set will be 10% of the total data.
# +
# Randomly pick 90% of the index labels for training.
sample = np.random.choice(processed_data.index, size=int(len(processed_data)*0.9), replace=False)
# `sample` holds index *labels* (drawn from processed_data.index), so select
# with label-based .loc rather than position-based .iloc; the two only
# coincide for a default RangeIndex.  The remaining rows form the test set.
train_data, test_data = processed_data.loc[sample], processed_data.drop(sample)
print("Number of training samples is", len(train_data))
print("Number of testing samples is", len(test_data))
train_data.head()
# -
test_data.head()
# ## Splitting the data into features and targets (labels)
# Now, as a final step before the training, we'll split the data into features (X) and targets (y).
# +
# Split each set into inputs (everything but 'admit') and the binary target.
features = train_data.drop('admit', axis=1)
targets = train_data['admit']
features_test = test_data.drop('admit', axis=1)
targets_test = test_data['admit']
features.head()
# -
targets.head()
# ## Time to build the network
#
# Below we'll build your network. we'll implement both the forward pass and backwards pass through the network. we'll also set the hyperparameters: the learning rate, and the number of training passes.
#
# <img src="assets/neural_network.png" width=300px>
#
# the activation function is $f(x)=x$. A function that takes the input signal and generates an output signal, but takes into account the threshold, is called an activation function. We work through each layer of our network calculating the outputs for each neuron. All of the outputs from one layer become inputs to the neurons on the next layer. This process is called *forward propagation*.
#
# We use the weights to propagate signals forward from the input to the output layers in a neural network. We use the weights to also propagate error backwards from the output back into the network to update our weights. This is called *backpropagation*.
#
# > **Hint:** we'll need the derivative of the output activation function ($f(x) = x$) for the backpropagation implementation. If you aren't familiar with calculus, this function is equivalent to the equation $y = x$. What is the slope of that equation? That is the derivative of $f(x)$.
#
# Below, we have these tasks:
# 1. Implement the sigmoid function to use as the activation function.
# 2. Implement the forward pass .
# 3. Implement the backpropagation algorithm, including calculating the output error.
# 4. Implement the forward pass.
# +
# Activation (sigmoid) function
def sigmoid(x):
    """Logistic activation: maps any real input into (0, 1)."""
    z = np.exp(-x)
    return 1 / (1 + z)
def sigmoid_prime(x):
    """Derivative of the sigmoid: s(x) * (1 - s(x))."""
    s = sigmoid(x)
    return s * (1 - s)
def error_formula(y, output):
    """Binary cross-entropy of prediction `output` against label `y`."""
    return -(y * np.log(output) + (1 - y) * np.log(1 - output))
# -
# # TODO: Backpropagate the error
# Now it's your turn to shine. Write the error term. Remember that this is given by the equation $$ (y-\hat{y})x $$ for binary cross entropy loss function and
# $$ (y-\hat{y})\sigma'(x)x $$ for mean square error.
def error_term_formula(x, y, output):
    """Per-record gradient term for binary cross-entropy loss: (y - y_hat) * x."""
    residual = y - output
    return residual * x
    # For mean square error one would use instead:
    # return (y - output) * sigmoid_prime(x) * x
# +
# Neural Network hyperparameters
epochs = 20000
# Very small learning rate: the update below is not averaged over records,
# so the rate absorbs the missing 1/n_records factor.
learnrate = 0.000001
# Training function
def train_nn(features, targets, epochs, learnrate):
    """Train a single sigmoid unit with batch gradient descent.

    `features` is a DataFrame of scaled inputs, `targets` the 0/1 labels.
    Returns the learned weight vector (one weight per feature column).
    """
    # Fixed seed keeps the random weight initialisation reproducible.
    np.random.seed(42)
    n_records, n_features = features.shape
    last_loss = None
    # Small random weights, scaled by 1/sqrt(n_features).
    weights = np.random.normal(scale=1 / n_features**.5, size=n_features)

    for epoch in range(epochs):
        weight_step = np.zeros(weights.shape)
        # Accumulate the gradient contribution of every training record.
        for record, target in zip(features.values, targets):
            prediction = sigmoid(np.dot(record, weights))
            weight_step += error_term_formula(record, target, prediction)
        # One batch update per epoch; the tiny learning rate stands in for
        # dividing by n_records.
        weights += learnrate * weight_step

        # Report the training loss ten times over the whole run.
        if epoch % (epochs / 10) == 0:
            out = sigmoid(np.dot(features, weights))
            loss = np.mean(error_formula(targets, out))
            print("Epoch:", epoch)
            if last_loss and last_loss < loss:
                print("Train loss: ", loss, " WARNING - Loss Increasing")
            else:
                print("Train loss: ", loss)
            last_loss = loss
            print("=========")
    print("Finished training!")
    return weights
# Train on the training split and keep the learned weights.
weights = train_nn(features, targets, epochs, learnrate)
# -
# Calculate accuracy on test data
# Threshold the sigmoid output at 0.5 to get hard 0/1 predictions.
test_out = sigmoid(np.dot(features_test, weights))
predictions = test_out > 0.5
accuracy = np.mean(predictions == targets_test)
print("Prediction accuracy: {:.3f}".format(accuracy))
| PredictingStudentAdmissionsNN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] tags=[]
# # Data, Information, Knowledge and Wisdom
#
#
# + hide_input=true init_cell=true slideshow={"slide_type": "skip"} tags=["Hide"]
# This code cell starts the necessary setup for Hour of CI lesson notebooks.
# First, it enables users to hide and unhide code by producing a 'Toggle raw code' button below.
# Second, it imports the hourofci package, which is necessary for lessons and interactive Jupyter Widgets.
# Third, it helps hide/control other aspects of Jupyter Notebooks to improve the user experience
# This is an initialization cell
# It is not displayed because the Slide Type is 'Skip'
from IPython.display import HTML, IFrame, Javascript, display
from ipywidgets import interactive
import ipywidgets as widgets
from ipywidgets import Layout
import getpass # This library allows us to get the username (User agent string)
# import package for hourofci project
import sys
sys.path.append('../../supplementary') # relative path (may change depending on the location of the lesson notebook)
# sys.path.append('supplementary')
import hourofci
# Move into the supplementary/ directory if it exists; the bare except
# deliberately ignores the failure when the notebook is already there.
try:
    import os
    os.chdir('supplementary')
except:
    pass
# load javascript to initialize/hide cells, get user agent string, and hide output indicator
# hide code by introducing a toggle button "Toggle raw code"
HTML('''
<script type="text/javascript" src=\"../../supplementary/js/custom.js\"></script>
<style>
.output_prompt{opacity:0;}
</style>
<input id="toggle_code" type="button" value="Toggle raw code">
''')
# -
# ## Reminder
# <a href="#/slide-2-0" class="navigate-right" style="background-color:blue;color:white;padding:8px;margin:2px;font-weight:bold;">Continue with the lesson</a>
#
# <br>
# </br>
# <font size="+1">
#
# By continuing with this lesson you are granting your permission to take part in this research study for the Hour of Cyberinfrastructure: Developing Cyber Literacy for GIScience project. In this study, you will be learning about cyberinfrastructure and related concepts using a web-based platform that will take approximately one hour per lesson. Participation in this study is voluntary.
#
# Participants in this research must be 18 years or older. If you are under the age of 18 then please exit this webpage or navigate to another website such as the Hour of Code at https://hourofcode.com, which is designed for K-12 students.
#
# If you are not interested in participating please exit the browser or navigate to this website: http://www.umn.edu. Your participation is voluntary and you are free to stop the lesson at any time.
#
# For the full description please navigate to this website: <a href="../../gateway-lesson/gateway/gateway-1.ipynb">Gateway Lesson Research Study Permission</a>.
#
# </font>
# ## Do you believe in Global Warming???
# What if I ask you this question and throw some numbers at you
from ipywidgets import Button, HBox, VBox,widgets,Layout
from IPython.display import display
import pandas as pd
# Load the three climate CSVs and shuffle the rows (sample(frac=1)) so the
# data first appears unordered -- the point of this demo is raw data.
table1 = pd.read_csv('databases/antartica_mass.csv').sample(frac = 1)
table1['0'] = pd.to_datetime(table1['0'])
table2 = pd.read_csv('databases/global_temperature.csv').sample(frac = 1)
table2['0'] = pd.to_datetime(table2['0'],format='%Y')
table3 = pd.read_csv('databases/carbon_dioxide.csv').sample(frac = 1)
table3['2'] = pd.to_datetime(table3['2'])
# Output widgets let each DataFrame render inside its own box.
table1_disp = widgets.Output()
table2_disp = widgets.Output()
table3_disp = widgets.Output()
with table1_disp:
    display(table1)
with table2_disp:
    display(table2)
with table3_disp:
    display(table3)
# Lay the three tables out side by side, with a 100px right margin between them.
out=HBox([VBox([table1_disp],layout = Layout(margin='0 100px 0 0')),VBox([table2_disp],layout = Layout(margin='0 100px 0 0')),VBox([table3_disp])])
out
# These are just symbols and numbers (of course we can identify the date as we have seen that pattern before) and they don't convey anything.
#
# This is what we essentially call **Data (or raw Data)**.
# ## What is Data?
# >**Data is a collection of facts** in a **raw or unorganized form** such as **numbers or characters**.
#
# Without **context** data has no value!!
#
# Now what if we are provided with the **information** about **what** these symbols represent, **who** collected the data, **where** is this data collected from and **when** was the data collected.
# ## What is Information (Data+Context)?
# >**Information** is a **collection of data** that is **arranged and ordered in a consistent way**. Data in the form of information becomes **more useful because storage and retrieval are easy**.
#
# For our sample datasets, what if we know about the **"what, who, where, and when"** questions. For example, if we are provided with the information that these datasets represent the change in Antartic Ice mass in giga tonnes, the temperature anomaly across the globe in celsius, and the carbon dioxide content in the atmosphere as parts per million, we can try to deduce patterns from the data.
# Attach meaningful column names (the "context" step of the DIKW story).
# NOTE: the spelling "Antartic" is kept as-is because later cells reference
# this exact column name.
table1.columns = ["Time", "Antartic_Mass(Gt)"]
table2.columns = ["Time", "Temperature_Anomaly(C)"]
table3.columns = ["Time", "Carbon_Dioxide(PPM)"]
# Re-render the (still shuffled) tables, now with labelled columns.
table1_disp = widgets.Output()
table2_disp = widgets.Output()
table3_disp = widgets.Output()
with table1_disp:
    display(table1)
with table2_disp:
    display(table2)
with table3_disp:
    display(table3)
out=HBox([VBox([table1_disp],layout = Layout(margin='0 100px 0 0')),VBox([table2_disp],layout = Layout(margin='0 100px 0 0')),VBox([table3_disp])])
out
# We can do more **processing** on the data and can convert it into structured forms. For example we can **sort** these datasets to look for temporal changes
# Render the same tables again, this time sorted chronologically so the
# temporal trends become visible.
table1_disp = widgets.Output()
table2_disp = widgets.Output()
table3_disp = widgets.Output()
with table1_disp:
    display(table1.sort_values(by='Time'))
with table2_disp:
    display(table2.sort_values(by='Time'))
with table3_disp:
    display(table3.sort_values(by='Time'))
out=HBox([VBox([table1_disp],layout = Layout(margin='0 100px 0 0')),VBox([table2_disp],layout = Layout(margin='0 100px 0 0')),VBox([table3_disp])])
out
# After sorting we can see that, with time, there is a depletion in Antarctic mass and an increase in both the temperature anomaly and the carbon dioxide content of the atmosphere.
#
# We can also **visualize** these datasets (as a picture is worth 1000 words!!) to bolster our arguments
import matplotlib.pyplot as plt
# Three side-by-side line charts: ice mass, temperature anomaly and CO2
# over time (each table sorted chronologically before plotting).
fig, axes = plt.subplots(1,3)
fig.set_figheight(8)
fig.set_figwidth(20)
table1.sort_values(by='Time').plot(x='Time',y='Antartic_Mass(Gt)',ax=axes[0]);
table2.sort_values(by='Time').plot(x='Time',y='Temperature_Anomaly(C)',ax=axes[1]);
table3.sort_values(by='Time').plot(x='Time',y='Carbon_Dioxide(PPM)',ax=axes[2]);
plt.show()
# By asking relevant questions about ‘who’, ‘what’, ‘when’, ‘where’, etc., we can derive valuable information from the data and make it more useful for us.
# ## What is Knowledge? (Patterns from Information)
# >**Knowledge** is the **appropriate collection of information**, such that its **intent is to be useful**.
#
# Knowledge deals with the question of **"How"**.
#
# **"How"** is the **information, derived from the collected data, relevant to our goals?**
#
# **"How"** are the **pieces of this information connected to other pieces** to add more meaning and value?
#
# So how do we find this connection between our pieces of information. For example now we have the information that with time there is a decrease in Antartic Ice mass and a corresponding increase in Temperature Anomaly and the Carbon Dioxide content in the atmosphere. Can we prove that there is a relationship? This is where the simulation and model building skills come into play. Machine Learning (which has been a buzz word for long time) is used to answer such questions from large sets of data.
#
# ## What is Wisdom?(Acting up on Knowledge)
# >**Wisdom** is the **ability to select the best way to reach the desired outcome based on knowledge**.
#
# So it's a very subjective concept. In our example, we now have the knowledge that (from developing climatic models) increases in atmospheric carbon dioxide are responsible for about two-thirds of the total energy imbalance that is causing earth's temperature to rise, and we also know that a rise in temperature leads to melting of ice mass, which is a big threat to earth's bio-diversity. So what are we going to do about that? What is the **best way** to do it? So **Wisdom** here is **acting upon this knowledge** regarding carbon emissions and finding ways to reduce it.
# ## The DIKW pyramid
# We can essentially represent these concepts in a Pyramid with Data at the bottom and Wisdom at the top.
# 
#
# + [markdown] tags=[]
# So where does a **Database** fit into this Pyramid (or model)?
#
# We are going to look at that in the upcoming chapters
# -
# #### Resources
# https://www.ontotext.com/knowledgehub/fundamentals/dikw-pyramid/
# https://www.systems-thinking.org/dikw/dikw.htm
# https://www.csestack.org/dikw-pyramid-model-difference-between-data-information/
# https://developer.ibm.com/articles/ba-data-becomes-knowledge-1/
# https://www.certguidance.com/explaining-dikw-hierarchy/
# https://www.spreadingscience.com/our-approach/diffusion-of-innovations-in-a-community/1-the-dikw-model-of-innovation/
# https://climate.nasa.gov/vital-signs/ice-sheets/
# https://climate.nasa.gov/vital-signs/global-temperature/
# https://climate.nasa.gov/vital-signs/carbon-dioxide/
# + [markdown] slideshow={"slide_type": "slide"}
# ### Click on the link below to move on!
# <br>
#
# <font size="+1"><a style="background-color:blue;color:white;padding:12px;margin:10px;font-weight:bold;" href="dbms-6.ipynb">Click here to go to the next notebook.</a></font>
| intermediate-lessons/geospatial-data/dbms-5.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Load Necessary Libraries
#
from bs4 import BeautifulSoup as bs
import pandas as pd
import requests
# ### Loading page content
# Fetch the Speedtest global index page and parse it with BeautifulSoup.
page=requests.get('https://www.speedtest.net/global-index#mobile')
cont=page.content
# 200 indicates the page was retrieved successfully.
print(page.status_code)
soupobj=bs(cont,'html.parser')
#print(soupobj.prettify()) #printing out soup object
# ### Data sorting
# ### 1]
# +
#adding all country names in empty list-empli for further data frame
countr=soupobj.find_all(class_='country')
# A list comprehension replaces the manual append loop (same result,
# more idiomatic and slightly faster).
empli = [li.get_text() for li in countr]
#print(empli)
# -
# ### 2]
# +
#adding all internet speed rankings in empty list-empli2 for further data frame
speed=soupobj.find_all(class_='speed')
# List comprehension instead of the manual append loop (same result).
empli2 = [s.get_text() for s in speed]
#print(empli2)
# -
# ### Removing Duplicates
# Remove all newline characters from every item of empli
# (get_text() keeps the '\n' that surrounded the text in the markup).
list3 = [x.replace('\n', '') for x in empli]
#print(list3)
#making datfm-DataFrame
datfm=pd.DataFrame({'Countries':list3,
                    'Speed in Mbps':empli2})
# +
#Converting dataframe into csv
datfm.to_csv('Sub.csv',index=False)
# -
# ### Data cleaning
newcsv=pd.read_csv('Sub.csv')
newcsv
# NOTE(review): the slice boundaries below assume the page layout -- rows
# 1-138 are mobile results, 140+ broadband; verify against the scraped page.
Mobiletest=newcsv[1:139] #csv for Mobile-conducted speedtest's across globe
Broadbandtest=newcsv[140:]#csv for Broadband-conducted speedtest's across globe
Mobiletest
Broadbandtest
Mobiletest.to_csv('Mobiletest.csv',index=False)
Broadbandtest.to_csv('Broadbandtest.csv',index=False)
# Round-trip through CSV, then rewrite with the index column included.
c1=pd.read_csv('Mobiletest.csv')
c2=pd.read_csv('Broadbandtest.csv')
# ### Final csv's
c1.to_csv('Mobiletest.csv',index=True)
c2.to_csv('Broadbandtest.csv',index=True)
| Source.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Project: Train a Quadcopter How to Fly
#
# Design an agent to fly a quadcopter, and then train it using a reinforcement learning algorithm of your choice!
#
# Try to apply the techniques you have learnt, but also feel free to come up with innovative ideas and test them.
# ## Instructions
#
# Take a look at the files in the directory to better understand the structure of the project.
#
# - `task.py`: Define your task (environment) in this file.
# - `agents/`: Folder containing reinforcement learning agents.
# - `policy_search.py`: A sample agent has been provided here.
# - `agent.py`: Develop your agent here.
# - `physics_sim.py`: This file contains the simulator for the quadcopter. **DO NOT MODIFY THIS FILE**.
#
# For this project, you will define your own task in `task.py`. Although we have provided a example task to get you started, you are encouraged to change it. Later in this notebook, you will learn more about how to amend this file.
#
# You will also design a reinforcement learning agent in `agent.py` to complete your chosen task.
#
# You are welcome to create any additional files to help you to organize your code. For instance, you may find it useful to define a `model.py` file defining any needed neural network architectures.
#
# ## Controlling the Quadcopter
#
# We provide a sample agent in the code cell below to show you how to use the sim to control the quadcopter. This agent is even simpler than the sample agent that you'll examine (in `agents/policy_search.py`) later in this notebook!
#
# The agent controls the quadcopter by setting the revolutions per second on each of its four rotors. The provided agent in the `Basic_Agent` class below always selects a random action for each of the four rotors. These four speeds are returned by the `act` method as a list of four floating-point numbers.
#
# For this project, the agent that you will implement in `agents/agent.py` will have a far more intelligent method for selecting actions!
# +
import random
class Basic_Agent():
    """Baseline agent: ignores state and returns four random rotor speeds."""

    def __init__(self, task):
        # Stored only for interface compatibility; act() never reads it.
        self.task = task

    def act(self):
        # Draw one shared thrust level, then jitter each of the 4 rotors slightly.
        base_thrust = random.gauss(450., 25.)
        return [base_thrust + random.gauss(0., 1.) for _ in range(4)]
# -
# Run the code cell below to have the agent select actions to control the quadcopter.
#
# Feel free to change the provided values of `runtime`, `init_pose`, `init_velocities`, and `init_angle_velocities` below to change the starting conditions of the quadcopter.
#
# The `labels` list below annotates statistics that are saved while running the simulation. All of this information is saved in a text file `data.txt` and stored in the dictionary `results`.
# +
# %load_ext autoreload
# %autoreload 2
import csv
import numpy as np
from task import Task
# Modify the values below to give the quadcopter a different starting position.
runtime = 5.                                     # time limit of the episode
init_pose = np.array([0., 0., 10., 0., 0., 0.])  # initial pose
init_velocities = np.array([0., 0., 0.])         # initial velocities
init_angle_velocities = np.array([0., 0., 0.])   # initial angle velocities
file_output = 'data.txt'                         # file name for saved results
# Setup
task = Task(init_pose, init_velocities, init_angle_velocities, runtime)
agent = Basic_Agent(task)
done = False  # immediately overwritten inside the loop; kept for clarity
labels = ['time', 'x', 'y', 'z', 'phi', 'theta', 'psi', 'x_velocity',
          'y_velocity', 'z_velocity', 'phi_velocity', 'theta_velocity',
          'psi_velocity', 'rotor_speed1', 'rotor_speed2', 'rotor_speed3', 'rotor_speed4']
results = {x : [] for x in labels}
# Run the simulation, and save the results.
# Each timestep is appended both to the in-memory `results` dict (for
# plotting below) and to the CSV on disk.
with open(file_output, 'w') as csvfile:
    writer = csv.writer(csvfile)
    writer.writerow(labels)
    while True:
        rotor_speeds = agent.act()
        _, _, done = task.step(rotor_speeds)
        to_write = [task.sim.time] + list(task.sim.pose) + list(task.sim.v) + list(task.sim.angular_v) + list(rotor_speeds)
        for ii in range(len(labels)):
            results[labels[ii]].append(to_write[ii])
        writer.writerow(to_write)
        if done:
            break
# -
# Run the code cell below to visualize how the position of the quadcopter evolved during the simulation.
# +
import matplotlib.pyplot as plt
# %matplotlib inline
# Position of the quadcopter over time.
plt.plot(results['time'], results['x'], label='x')
plt.plot(results['time'], results['y'], label='y')
plt.plot(results['time'], results['z'], label='z')
plt.legend()
_ = plt.ylim()
# -
# The next code cell visualizes the velocity of the quadcopter.
plt.plot(results['time'], results['x_velocity'], label='x_hat')
plt.plot(results['time'], results['y_velocity'], label='y_hat')
plt.plot(results['time'], results['z_velocity'], label='z_hat')
plt.legend()
_ = plt.ylim()
# Next, you can plot the Euler angles (the rotation of the quadcopter over the $x$-, $y$-, and $z$-axes),
plt.plot(results['time'], results['phi'], label='phi')
plt.plot(results['time'], results['theta'], label='theta')
plt.plot(results['time'], results['psi'], label='psi')
plt.legend()
_ = plt.ylim()
# before plotting the velocities (in radians per second) corresponding to each of the Euler angles.
plt.plot(results['time'], results['phi_velocity'], label='phi_velocity')
plt.plot(results['time'], results['theta_velocity'], label='theta_velocity')
plt.plot(results['time'], results['psi_velocity'], label='psi_velocity')
plt.legend()
_ = plt.ylim()
# Finally, you can use the code cell below to print the agent's choice of actions.
plt.plot(results['time'], results['rotor_speed1'], label='Rotor 1 revolutions / second')
plt.plot(results['time'], results['rotor_speed2'], label='Rotor 2 revolutions / second')
plt.plot(results['time'], results['rotor_speed3'], label='Rotor 3 revolutions / second')
plt.plot(results['time'], results['rotor_speed4'], label='Rotor 4 revolutions / second')
plt.legend()
_ = plt.ylim()
# When specifying a task, you will derive the environment state from the simulator. Run the code cell below to print the values of the following variables at the end of the simulation:
# - `task.sim.pose` (the position of the quadcopter in ($x,y,z$) dimensions and the Euler angles),
# - `task.sim.v` (the velocity of the quadcopter in ($x,y,z$) dimensions), and
# - `task.sim.angular_v` (radians/second for each of the three Euler angles).
# the pose, velocity, and angular velocity of the quadcopter at the end of the episode
print(task.sim.pose)
print(task.sim.v)
print(task.sim.angular_v)
# In the sample task in `task.py`, we use the 6-dimensional pose of the quadcopter to construct the state of the environment at each timestep. However, when amending the task for your purposes, you are welcome to expand the size of the state vector by including the velocity information. You can use any combination of the pose, velocity, and angular velocity - feel free to tinker here, and construct the state to suit your task.
#
# ## The Task
#
# A sample task has been provided for you in `task.py`. Open this file in a new window now.
#
# The `__init__()` method is used to initialize several variables that are needed to specify the task.
# - The simulator is initialized as an instance of the `PhysicsSim` class (from `physics_sim.py`).
# - Inspired by the methodology in the original DDPG paper, we make use of action repeats. For each timestep of the agent, we step the simulation `action_repeats` timesteps. If you are not familiar with action repeats, please read the **Results** section in [the DDPG paper](https://arxiv.org/abs/1509.02971).
# - We set the number of elements in the state vector. For the sample task, we only work with the 6-dimensional pose information. To set the size of the state (`state_size`), we must take action repeats into account.
# - The environment will always have a 4-dimensional action space, with one entry for each rotor (`action_size=4`). You can set the minimum (`action_low`) and maximum (`action_high`) values of each entry here.
# - The sample task in this provided file is for the agent to reach a target position. We specify that target position as a variable.
#
# The `reset()` method resets the simulator. The agent should call this method every time the episode ends. You can see an example of this in the code cell below.
#
# The `step()` method is perhaps the most important. It accepts the agent's choice of action `rotor_speeds`, which is used to prepare the next state to pass on to the agent. Then, the reward is computed from `get_reward()`. The episode is considered done if the time limit has been exceeded, or the quadcopter has travelled outside of the bounds of the simulation.
#
# In the next section, you will learn how to test the performance of an agent on this task.
# ## The Agent
#
# The sample agent given in `agents/policy_search.py` uses a very simplistic linear policy to directly compute the action vector as a dot product of the state vector and a matrix of weights. Then, it randomly perturbs the parameters by adding some Gaussian noise, to produce a different policy. Based on the average reward obtained in each episode (`score`), it keeps track of the best set of parameters found so far, how the score is changing, and accordingly tweaks a scaling factor to widen or tighten the noise.
#
# Run the code cell below to see how the agent performs on the sample task.
# +
import sys
import pandas as pd
from agents.policy_search import PolicySearch_Agent
from task import Task
# Run the sample linear-policy agent on the sample task for 1000 episodes.
num_episodes = 1000
target_pos = np.array([0., 0., 10.])
task = Task(target_pos=target_pos)
agent = PolicySearch_Agent(task)
for i_episode in range(1, num_episodes+1):
    state = agent.reset_episode() # start a new episode
    while True:
        action = agent.act(state)
        next_state, reward, done = task.step(action)
        agent.step(reward, done)
        state = next_state
        if done:
            # '\r' keeps the progress line updating in place.
            print("\rEpisode = {:4d}, score = {:7.3f} (best = {:7.3f}), noise_scale = {}".format(
                i_episode, agent.score, agent.best_score, agent.noise_scale), end="")  # [debug]
            break
    sys.stdout.flush()
# -
# This agent should perform very poorly on this task. And that's where you come in!
# ## Define the Task, Design the Agent, and Train Your Agent!
#
# Amend `task.py` to specify a task of your choosing. If you're unsure what kind of task to specify, you may like to teach your quadcopter to takeoff, hover in place, land softly, or reach a target pose.
#
# After specifying your task, use the sample agent in `agents/policy_search.py` as a template to define your own agent in `agents/agent.py`. You can borrow whatever you need from the sample agent, including ideas on how you might modularize your code (using helper methods like `act()`, `learn()`, `reset_episode()`, etc.).
#
# Note that it is **highly unlikely** that the first agent and task that you specify will learn well. You will likely have to tweak various hyperparameters and the reward function for your task until you arrive at reasonably good behavior.
#
# As you develop your agent, it's important to keep an eye on how it's performing. Use the code above as inspiration to build in a mechanism to log/save the total rewards obtained in each episode to file. If the episode rewards are gradually increasing, this is an indication that your agent is learning.
# +
## TODO: Train your agent here.
# Train the DDPG agent on a takeoff task (reach z = 100 within 5 s) and
# record per-episode score, final position and final angular velocities.
# NOTE(review): `np` comes from an earlier notebook cell.
import sys
import pandas as pd
from agents.agent import DDPG
from task import Task
import datetime
init_pose = np.array([0., 0., 0., 0., 0., 0.])
init_velocities = np.array([0., 0., 0.])
init_angle_velocities = np.array([0., 0., 0.])
runtime = 5
target_pos = np.array([0., 0., 100.])
task = Task(init_pose=init_pose, init_velocities=init_velocities,
            init_angle_velocities=init_angle_velocities, runtime=runtime,
            target_pos=target_pos)
agent = DDPG(task)
episode_numbers = 100
# Per-episode history; consumed by the plotting cells further down.
all_returns = {
    "messages": [],
    "scores": [],
    "position": {
        "X": [],
        "Y": [],
        "Z": [],
    },
    "angle": {
        "X": [],
        "Y": [],
        "Z": []
    }
}
for i_episode in range(1, episode_numbers + 1):
    state = agent.reset_episode_vars()
    while True:
        action = agent.act(state)
        next_state, reward, done, final_pose, final_angle_velocities = task.new_step(action)
        agent.step(action, reward, next_state, done)
        state = next_state
        if done:
            # "\r" + end="" keeps the progress report on one console line.
            message = "\rEpisode = {:4d}, score = {:7.3f} (best = {:7.3f}), position = {:7.3f}, angle = {:7.3f}".format(
                i_episode, agent.score, agent.best_score, final_pose[2], final_angle_velocities.sum())
            print(message, end="")
            all_returns["messages"].append(message)
            all_returns["scores"].append(agent.score)
            all_returns["position"]["X"].append(final_pose[0])
            all_returns["position"]["Y"].append(final_pose[1])
            all_returns["position"]["Z"].append(final_pose[2])
            all_returns["angle"]["X"].append(final_angle_velocities[0])
            all_returns["angle"]["Y"].append(final_angle_velocities[1])
            all_returns["angle"]["Z"].append(final_angle_velocities[2])
            break
    sys.stdout.flush()
# Append this run's per-episode messages to a timestamped log file.
# Fix: use a context manager so the file is closed even if a write raises
# (the original used open()/close() with no try/finally).
log_file = "log.txt"
date_time_now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
with open(log_file, "a+") as log:
    log.write(date_time_now + "\n\r")
    for message_log in all_returns["messages"]:
        log.write("    " + message_log + "\n\r")
    log.write("\n\r----\n\r\n\r")
# -
# ## Plot the Rewards
#
# Once you are satisfied with your performance, plot the episode rewards, either from a single run, or averaged over multiple runs.
# ### Score
#
# Scoring chart during the learning of the quadricopter.
#
# ### Pontuação
#
# Gráfico com pontuação durante o aprendizado do quadricóptero.
# +
## TODO: Plot the rewards.
# Episode scores over the whole training run (`all_returns` and `plt` come
# from earlier notebook cells).
plt.plot(all_returns["scores"], label='Score')
# Assigning to _ suppresses the notebook's echo of the ylim tuple.
_ = plt.ylim()
# -
# ### Score X Position
#
# Graph with the score and the position in the X, Y and Z axes of the quadricopter during the learning.
#
# ### Pontuação X Posição
#
# Gráfico com a pontuação e a posição nos eixos X, Y e Z do quadricóptero durante o aprendizado.
# Score together with the quadcopter's final X/Y/Z position per episode.
plt.plot(all_returns["scores"], label='Score')
plt.plot(all_returns["position"]["X"], label="Position X")
plt.plot(all_returns["position"]["Y"], label="Position Y")
plt.plot(all_returns["position"]["Z"], label="Position Z")
plt.legend()
# Assigning to _ suppresses the notebook's echo of the ylim tuple.
_ = plt.ylim()
# ### Score X Angle/Velocity
#
# Graph with the score and angle/velocity on the X, Y and Z axes of the quadricopter during learning.
#
# ### Pontuação X Angulo/Velocidade
#
# Gráfico com a pontuação e o angulo/velocidade nos eixos X, Y e Z do quadricóptero durante o aprendizado.
# Score together with the final angular velocities about X/Y/Z per episode.
plt.plot(all_returns["scores"], label='Score')
plt.plot(all_returns["angle"]["X"], label="Angle X")
plt.plot(all_returns["angle"]["Y"], label="Angle Y")
plt.plot(all_returns["angle"]["Z"], label="Angle Z")
plt.legend()
# Assigning to _ suppresses the notebook's echo of the ylim tuple.
_ = plt.ylim()
# ### Score X Position X Angle/Velocity
#
# Graph with the score, position in the X, Y and Z axes and the angle/velocity in the X, Y and Z axes of the quadricopter during the learning.
#
# ### Pontuação X Posição X Angulo/Velocidade
#
# Gráfico com a pontuação, posição nos eixos X, Y e Z e o angulo/velocidade nos eixos X, Y e Z do quadricóptero nos durante o aprendizado.
# Combined view: score, final position and final angular velocities.
plt.plot(all_returns["scores"], label='Score')
plt.plot(all_returns["position"]["X"], label="Position X")
plt.plot(all_returns["position"]["Y"], label="Position Y")
plt.plot(all_returns["position"]["Z"], label="Position Z")
plt.plot(all_returns["angle"]["X"], label="Angle X")
plt.plot(all_returns["angle"]["Y"], label="Angle Y")
plt.plot(all_returns["angle"]["Z"], label="Angle Z")
plt.legend()
# Assigning to _ suppresses the notebook's echo of the ylim tuple.
_ = plt.ylim()
# ## Reflections
#
# **Question 1**: Describe the task that you specified in `task.py`. How did you design the reward function?
#
# **Answer**:
#
# O método de recompensa que utilizei foi validando duas informações do quadricóptero, que são o *angulo/velocidade* e *posição* atual do quadricóptero nos eixos X, Y e Z.
#
# Foi criado um método dentro do arquivo **task.py** chamado **calcule**, dentro dele é calculado o valor de recompensa que o quadricóptero ira receber com base no *angulo/velocidade* e *posição* nos eixos X, Y e Z.
#
# Este método necessita de três parâmetro, que são, o valor atual da *posição* ou *angulo/velocidade* do quadricóptero, representado pelo parâmetro *current*, o valor da meta, o melhor valor possível para a *posição* ou *angulo/velocidade* do quadricóptero, representado pelo parâmetro *goal* e o intervalo, um valor numérico para checar e calcular o máximo de intervalo dos valores *current* e *goal*, representado pelo parâmetro *interval*.
#
# Primeiro ele realiza uma checagem com o parâmetro *current* e o parâmetro *goal*, para saber se os dois valores são iguais, se forem, encerra o método retornando o valor do *interval* multiplicado por dois, o dobro da pontuação máxima que o método calcula se os valores fossem diferente. Isso significa que a *posição* ou *angulo/velocidade* do quadricóptero esta em seu melhor resultado, dando sua recompensa máxima a ele.
#
# Depois ele ira realizar uma checagem que se o valor de *current* for menor que o resultado de *goal* menos *inteval*, ou *current* for maior que o resultado de *goal* mais *interval*, encerra o método retornando zero. Isso significa que o valor atual *current* é menor ou maior que o intervalo e a meta, então ele esta muito longe da *posição* ou *angulo/velocidade* recomendado, resultando em nenhuma recompensa.
#
# Por ultimo, ele checa o valor de *goal*, se ele for igual a zero, significa que ele vai usar o calculo abaixo referente ao *if* para o *angulo/velocidade*, retornando o calculo de *interval* menos *current* se acaso *current* for maior que *goal*, senão retorna o resultado de *current* mais *interval*. Se acaso *goal* for diferente de zero, ele entrará no *else*, e seguindo a mesma logica, se *current* for maior que *goal* retorna o calculo de *interval* mais *goal*, menos *current*, senão retorna simplesmente o valor de *current*. Esse cálculos acima são para calcular a recompensa em comparação a meta, quanto mais perto da meta, maior é a recompensa, quanto mais longe da meta, menor é a recompensa.
#
# Este método calcula a recompensa utilizando a proximidade do valor atual estiver da meta, quanto mais perto, maior é a recompensa, que é recebida através do valor do intervalo, que é usado para checar o intervalo máximo entre o valor atual e a meta, e também calcular a sua recompensa para aquele momento.
#
# Esse método é usado seis vezes, calculando a recompensa para o *angulo/velocidade* nos eixos X, Y e Z e a *posição* nos eixos X, Y e Z do quadricóptero, com uma multiplicação da recompensa da *posição* no eixo Z por três, significando que esta parte da recompensa é muito importante, após isso soma todos os valores, e esse valor é a recompensa do quadricóptero para aquele momento.
# **Question 2**: Discuss your agent briefly, using the following questions as a guide:
#
# - What learning algorithm(s) did you try? What worked best for you?
# - What was your final choice of hyperparameters (such as $\alpha$, $\gamma$, $\epsilon$, etc.)?
# - What neural network architecture did you use (if any)? Specify layers, sizes, activation functions, etc.
#
# **Answer**:
#
# - O algorítimo de aprendizado utilizado foi o *Deep Deterministic Policy Gradients*, ele foi disponibilizado pela Udacity como uma escolha popular, e funcionou muito bem. Foram feitas poucas modificações nele para deixar o melhor possível para a aprendizagem do quadricóptero.
#
#
# - Poucos valores foram alterados no algorítimo de aprendizado, os valores de $\alpha$, $\gamma$, $\epsilon$ e muitos outros foram mantidos em seu valor *default*, foram alterados o valor de **buffer_size**, que estava com um valor de 100000, e alterado para 1000000, o que foi um grande diferencial para a aprendizagem, quando o valor ainda estava 100000, a aprendizagem era muito pequena, mas quando alterou para 1000000, ele começou a aprender muito mais rápido e melhor.
#
#
# - Não foi alterado nenhuma opção como **layers**, **sizes** ou **activation functions**, mas foi adicionado o retorno do **score**, **best_score** e **reward** no algorítimo de aprendizado, esses valores são usados para mostrar o desempenho do algorítimo durante a aprendizagem, para salvar no log e mostrar nos gráficos acima o resultado final da aprendizagem do quadricóptero.
# **Question 3**: Using the episode rewards plot, discuss how the agent learned over time.
#
# - Was it an easy task to learn or hard?
# - Was there a gradual learning curve, or an aha moment?
# - How good was the final performance of the agent? (e.g. mean rewards over the last 10 episodes)
#
# **Answer**:
#
# - Em minha concepção, foi uma tarefa nem fácil e nem difícil, mas sim média, um projeto trabalhoso pela quantidade de teste realizados. No começo foi criado vários métodos de recompensa, e testando um por um ate chegar ao melhor método. Depois de algumas analises, reparei que o aprendizado não estava aumentando muito, e simplesmente ele começava a ganhar cada vez menos recompensa depois de um tempo, o que levou a um novo método de recompensa, o descrito na questão 1, e também alterado o algorítimo de aprendizado, na parte da memoria, o **buffer_size**, aumentando seu valor, apos isso o quadricóptero começou a aprender melhor.
#
#
# - Existe uma curva de aprendizado apos os primeiros episódios, que apos conseguir uma boa taxa de aprendizado, se mantêm ate o final de todos os episódios, com algumas variações na taxa de recompensa. Percebemos que o objetivo do quadricóptero que é decolar e chegar ate a posição 100 no eixo Z, é comprido, pois analisando os gráficos da *posição*, percebe que o quadricóptero se mantêm relativamente perto da posição 100 no eixo Z, e nos demais eixos se mantêm mais próximos ao zero, e também o angulo/velocidade se mantêm muito próximo do zero, como foi definido no método de recompensa.
#
#
# - Analisando o gráfico abaixo, e a media da recompensa dos 10 últimos episódios, que foi de **1207.7206694394608**, percebemos algumas variações em sua taxa de recompensa, mas que rapidamente volta para o todo do gráfico, repetindo os demais episódios, com uma ótima taxa de aprendizado. A media das recompensas foi relativamente boa, mas poderia ser melhor se não tivesse as variações, que é o principal motivo da media não estar como as recompensas mais altas.
# +
# Mean score over the final 10 episodes (the number quoted in the answer above).
last_10_scores = all_returns["scores"][-10:]
average = sum(last_10_scores) / len(last_10_scores)
print(average)
plt.plot(last_10_scores, label='Score')
plt.legend()
# Assigning to _ suppresses the notebook's echo of the ylim tuple.
_ = plt.ylim()
# -
# **Question 4**: Briefly summarize your experience working on this project. You can use the following prompts for ideas.
#
# - What was the hardest part of the project? (e.g. getting started, plotting, specifying the task, etc.)
# - Did you find anything interesting in how the quadcopter or your agent behaved?
#
# **Answer**:
#
# - A parte mais difícil do projeto foi definir a **task**, o método de recompensa para cada episodio de aprendizado, mas a dificuldade não foi tão grande, mas sim trabalhosa, pois a cada nova mudança nesse método, era preciso iniciar o treinamento novamente. Testar todas as possíveis formas de recompensas que implementei foram ótimas para chegar nesse modelo final.
#
#
# - Achei este projeto muito bom, foi excelente ver a cada novo episodio, a cada novo método de aprendizado, a cada novo teste, ver o progresso que o quadricóptero teve, com base nos valores e no gráfico que foram apresentados.
| quadcopter.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/AlexGerwer/DS-Unit-1-Sprint-1-Dealing-With-Data/blob/master/Copy_of_LS_DS_113_Making_Data_backed_Assertions_Assignment.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="Okfr_uhwhS1X" colab_type="text"
# # Lambda School Data Science - Making Data-backed Assertions
#
# This is, for many, the main point of data science - to create and support reasoned arguments based on evidence. It's not a topic to master in a day, but it is worth some focused time thinking about and structuring your approach to it.
# + [markdown] id="lOqaPds9huME" colab_type="text"
# ## Assignment - what's going on here?
#
# Consider the data in `persons.csv` (already prepared for you, in the repo for the week). It has four columns - a unique id, followed by age (in years), weight (in lbs), and exercise time (in minutes/week) of 1200 (hypothetical) people.
#
# Try to figure out which variables are possibly related to each other, and which may be confounding relationships.
#
# Try and isolate the main relationships and then communicate them using crosstabs and graphs. Share any cool graphs that you make with the rest of the class in Slack!
# + id="TGUS79cOhPWj" colab_type="code" outputId="88f33990-b4eb-40ec-8248-90e98742d186" colab={"base_uri": "https://localhost:8080/", "height": 746}
# Step 1 - find the actual file to download
# On github, navigate to the persons.csv data file and then click the "raw" button to display the file itself.
# Use the URL at the top of the page in read_csv()
persons_data_url = 'https://raw.githubusercontent.com/ryanleeallred/DS-Unit-1-Sprint-1-Dealing-With-Data/master/module3-databackedassertions/persons.csv'
# The dataset was reviewed on github: https://github.com/ryanleeallred/DS-Unit-1-Sprint-1-Dealing-With-Data/blob/master/module3-databackedassertions/persons.csv
# All of the data seems to be int64
# There does not seem to be any missing data
# There does not seem to be an apparent relationship between the data, although it would appear that weight would be the dependent variable
# Although there could be a relationship between weight and age, or weight and exercise time, one can envision many confounding variables, such as diet, the general state of health, etc.
# Step 2 - load the data
import pandas as pd
persons_data = pd.read_csv(persons_data_url)
print ('\n')
# Step 3 - inspect the loaded file
# Show all columns instead of pandas' default truncated display.
pd.set_option('display.max_columns', None)
print (persons_data.head(20), '\n')
# Step 4 - assess the loaded file for characteristics and missing (NaN) data
persons_data_shape = persons_data.shape
print ('Persons Data Shape')
print (persons_data_shape, '\n')
# count() gives non-null rows per column; isna().sum() gives nulls per column.
print (persons_data.count(), '\n')
print (persons_data.isna().sum(), '\n')
# + id="v5BnMAdOoDN2" colab_type="code" outputId="44f2ba6a-91e9-40de-88e9-05a8990f9a4e" colab={"base_uri": "https://localhost:8080/", "height": 851}
# Crosstabulation is used to further examine the data
# In order for the data to be viewed in a meaningful way, it will need to be binned
# Histograms are used to determine meaningful binning granularity
import matplotlib.pyplot as plt
# Histogram for weight
persons_data['weight'].hist(bins=8)
plt.xlabel('weight')
plt.ylabel('counts')
plt.title('weight vs counts')
plt.grid(True)
plt.show();
# Histogram for age
persons_data['age'].hist(bins=9)
plt.xlabel('age')
plt.ylabel('counts')
plt.title('age vs counts')
plt.grid(True)
plt.show();
# Histogram for exercise time
persons_data['exercise_time'].hist(bins=6)
plt.xlabel('exercise time')
plt.ylabel('counts')
plt.title('exercise time vs counts')
plt.grid(True)
plt.show();
# + id="TjqCt1wjFxYk" colab_type="code" outputId="d18dc5f9-c6de-4c5c-8ab6-3f28aa7eed43" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# # !pip freeze
# + id="qCB2qoE_GWBb" colab_type="code" outputId="cc3de973-5cef-43f8-dcdf-f04ebf449dcb" colab={"base_uri": "https://localhost:8080/", "height": 371}
# # !pip install pandas==0.23.4
# + id="HGjlUpsqGZJU" colab_type="code" outputId="23178861-b134-4fc2-9aa5-bfdbbae9fa46" colab={"base_uri": "https://localhost:8080/", "height": 36}
# import pandas as pd
# Confirm which pandas version is active after the pip install above.
print(pd.__version__)
# + id="t9uOM5FZFnV4" colab_type="code" outputId="7ed4e8dc-b3ba-4483-fe1b-89d7333a6374" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# Now we will print some crosstabulated data
# pd.cut produces equal-WIDTH bins (not equal-count); bin counts chosen from
# the histograms above.
weight_bins = pd.cut(persons_data['weight'], 8) # 8 equal-width bins
age_bins = pd.cut(persons_data['age'], 9) # 9 equal-width bins
exercise_time_bins = pd.cut(persons_data['exercise_time'], 6) # 6 equal-width bins
# normalize='columns' turns counts into per-column proportions.
crosstab_age = pd.crosstab(weight_bins, age_bins, normalize = 'columns')
print (crosstab_age)
print('\n')
crosstab_exercise_time = pd.crosstab(weight_bins, exercise_time_bins, normalize = 'columns')
print(crosstab_exercise_time)
print('\n')
crosstab_age_exercise = pd.crosstab(age_bins, exercise_time_bins, normalize = 'columns')
print(crosstab_age_exercise)
print('\n')
# + id="7xUmTIS7KLZJ" colab_type="code" outputId="cadcca92-47d2-4a6a-ee4c-61c584740642" colab={"base_uri": "https://localhost:8080/", "height": 953}
# Now the cross tabs will be plotted
# Line, grouped-bar and stacked-bar views of the weight-by-age proportions.
crosstab_age.plot();
crosstab_age.plot(kind = 'bar');
crosstab_age.plot(kind = 'bar', stacked = True);
# + id="7_NH4zJFLFVW" colab_type="code" outputId="fa92e498-70c9-4bd5-b407-dff639a13dd5" colab={"base_uri": "https://localhost:8080/", "height": 953}
# Line, grouped-bar and stacked-bar views of weight-by-exercise-time proportions.
crosstab_exercise_time.plot();
crosstab_exercise_time.plot(kind = 'bar');
crosstab_exercise_time.plot(kind = 'bar', stacked = True);
# + id="uJe6qlpbLS_U" colab_type="code" outputId="3f85ee55-7259-4f5a-f635-7be4f485f865" colab={"base_uri": "https://localhost:8080/", "height": 953}
# Line, grouped-bar and stacked-bar views of age-by-exercise-time proportions.
crosstab_age_exercise.plot();
crosstab_age_exercise.plot(kind = 'bar');
crosstab_age_exercise.plot(kind = 'bar', stacked = True);
# + id="EMcF9DRmutvZ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 248} outputId="ca55d023-6e2d-4b05-b6d1-0ffe37a702ea"
# The data is plotted as a 3D scatter plot
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# ax.scatter(persons_data['age'], persons_data['exercise_time'], persons_data['weight'], c='r', marker='.')
# Axes: x = exercise time, y = age, z = weight.
ax.scatter(persons_data['exercise_time'], persons_data['age'], persons_data['weight'], c='r', marker='.')
ax.set_xlabel('exercise time')
ax.set_xlim(0, 300)
ax.set_ylabel('age')
ax.set_ylim(0, 80)
# Fix: the z axis is weight; the original left the placeholder label 'Z'.
ax.set_zlabel('weight')
ax.set_zlim(0, 250)
plt.show()
# + id="Qxy4JOJ11V3k" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 248} outputId="bdd14b2e-364b-4ea3-fffc-c8644dcaff12"
# The data is plotted as 3D surface with a triangular mesh
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
# Make the plot: x = exercise time, y = age, z = weight.
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot_trisurf(persons_data['exercise_time'], persons_data['age'], persons_data['weight'], cmap=plt.cm.viridis, linewidth=0.2)
plt.show()
# to Add a color bar which maps values to colors.
surf=ax.plot_trisurf(persons_data['exercise_time'], persons_data['age'], persons_data['weight'], cmap=plt.cm.viridis, linewidth=0.2)
fig.colorbar( surf, shrink=0.5, aspect=5)
plt.show()
# Rotate it
ax.view_init(30, 45)
plt.show()
# Other palette
ax.plot_trisurf(persons_data['exercise_time'], persons_data['age'], persons_data['weight'], cmap=plt.cm.jet, linewidth=0.01)
# Fix: the surface plots exercise_time on x (range ~0-300) and age on y
# (range ~0-80); the original had the two labels and limits swapped, and the
# z axis (weight) carried the placeholder label 'Z'.
ax.set_xlabel('exercise time')
ax.set_xlim(0, 300)
ax.set_ylabel('age')
ax.set_ylim(0, 80)
ax.set_zlabel('weight')
ax.set_zlim(0, 250)
plt.show()
# + [markdown] id="BT9gdS7viJZa" colab_type="text"
# ### Assignment questions
#
# After you've worked on some code, answer the following questions in this text block:
#
# 1. What are the variable types in the data?
# 2. What are the relationships between the variables?
# 3. Which relationships are "real", and which spurious?
#
# + id="H_2RbmcZOglR" colab_type="code" colab={}
# All three variables in this data set are integers
# There does not seem to be any relationship among the variables
# This is not surprising because many factors determine such things as exercise time and weight
# It seems like there was no control for confounding variables
# + [markdown] id="_XXg2crAipwP" colab_type="text"
# ## Stretch goals and resources
#
# Following are *optional* things for you to take a look at. Focus on the above assignment first, and make sure to commit and push your changes to GitHub.
#
# - [Spurious Correlations](http://tylervigen.com/spurious-correlations)
# - [NIH on controlling for confounding variables](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4017459/)
#
# Stretch goals:
#
# - Produce your own plot inspired by the Spurious Correlation visualizations (and consider writing a blog post about it - both the content and how you made it)
# - Pick one of the techniques that NIH highlights for confounding variables - we'll be going into many of them later, but see if you can find which Python modules may help (hint - check scikit-learn)
| Copy_of_LS_DS_113_Making_Data_backed_Assertions_Assignment.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Data Sources
# <ul>
# <li> new_clean_data : Cleaned data with dynamic cleaning
# <li> new_clean_data_v2 : Every df has the same feature set
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import csv
# Read the list of per-company CSV filenames, one per line.
# Fix: use a context manager so the handle is closed (the original called
# open(...).read() and never closed the file).
with open('filenames.txt', 'r') as name_file:
    filenames = name_file.read()
# NOTE(review): [:-1] assumes the file ends with a trailing newline; if it
# does not, the last real filename is silently dropped.
filenames = filenames.split('\n')[:-1]
file_count = len(filenames)
print(file_count)
# -
def get_x_y_from_df(df):
    """Split a quarterly fundamentals frame into features X and target Y.

    Drops the 'Quarter end' column, removes rows containing inf/NaN, then
    pairs each quarter's feature row with the *next* quarter's price:
    X keeps rows 0..n-2 (all remaining columns, including 'Price') and
    Y holds 'Price' for rows 1..n-1.
    """
    cleaned = (df.drop(columns=['Quarter end'])
                 .replace([np.inf, -np.inf], np.nan)
                 .dropna())
    # Target variable: the following quarter's price.
    Y = cleaned['Price'][1:]
    # Features: every quarter except the last (it has no "next" price).
    X = cleaned[:-1]
    return X, Y
# Load the first company's cleaned data; s records whether the read worked
# (1 = success, 2 = failure).
s = 1
try:
    df = pd.read_csv(".All_Data/new_clean_data/" + str(filenames[0]), delimiter=',')
    df.set_index("Unnamed: 0", inplace=True)
except Exception:
    # Fix: catch Exception instead of a bare `except:`, which would also
    # swallow KeyboardInterrupt and SystemExit.
    s = 2
# NOTE(review): if the read failed, df is undefined and the next line raises
# NameError — preexisting behavior, kept as-is.
x, y = get_x_y_from_df(df)
# # Feature Importance
#
# Feature importance gives you a score for each feature of your data, the higher the score more important or relevant is the feature towards your output variable
# +
from sklearn.ensemble import ExtraTreesRegressor
# Fit a tree ensemble purely to score each feature's importance.
model = ExtraTreesRegressor()
model.fit(x,y)
print(model.feature_importances_) #use inbuilt class feature_importances of tree based classifiers
#plot graph of feature importances for better visualization
feat_importances = pd.Series(model.feature_importances_, index=x.columns)
feat_importances.nlargest(15).plot(kind='barh')
plt.show()
# -
# Display the 15 most important feature names.
feat_importances.nlargest(15).index
# +
'''
NOW ONLY TAKING THE TOP 15 FEATURES
'''
# Restrict the feature frame to the top-15 columns by importance.
x = x[feat_importances.nlargest(15).index]
np.shape(x)
# -
# ## correlation studies
# +
import seaborn as sns
# Create correlation matrix
corr_matrix = x.corr().abs()
# Select upper triangle of correlation matrix (k=1 excludes the diagonal).
# Fix: the np.bool alias was removed in NumPy 1.24 (deprecated since 1.20);
# use the builtin bool.
upper = corr_matrix.where(np.triu(np.ones(corr_matrix.shape), k=1).astype(bool))
top_corr_features = corr_matrix.index
plt.figure(figsize=(20,20))
#plot heat map
g=sns.heatmap(x[top_corr_features].corr(),annot=True,cmap="RdYlGn")
# -
# Columns whose absolute correlation with an earlier column exceeds 0.90
# are redundant and will be removed.
to_drop = [column for column in upper.columns if any(upper[column] > 0.90)]
to_drop
# Drop features
# Fix: drop by column labels; the original passed the sub-frame x[to_drop]
# as the labels argument instead of the label list itself.
x = x.drop(columns=to_drop)
x
np.shape(x)
# # Making the Feature Selection Function
# +
from sklearn.ensemble import ExtraTreesRegressor

def feature_selection(df):
    '''
    input: dataframe

    Feature selection steps
    1. Feature Importance
        Fit an ExtraTreesRegressor and keep the 15 features with the
        highest importances.
    2. Correlation
        Among the survivors, drop any column whose absolute correlation
        with an earlier column exceeds 0.90.

    Returns the reduced feature frame X and the target series Y (next
    quarter's price), as produced by get_x_y_from_df.
    '''
    x, y = get_x_y_from_df(df)
    model = ExtraTreesRegressor()
    model.fit(x, y)
    feat_importances = pd.Series(model.feature_importances_, index=x.columns)
    # Keep only the 15 most important features.
    x = x[feat_importances.nlargest(15).index]
    corr_matrix = x.corr().abs()
    # Upper triangle of the correlation matrix (k=1 excludes the diagonal).
    # Fix: the np.bool alias was removed in NumPy 1.24; use the builtin bool.
    upper = corr_matrix.where(np.triu(np.ones(corr_matrix.shape), k=1).astype(bool))
    to_drop = [column for column in upper.columns if any(upper[column] > 0.90)]
    # Fix: drop by column labels (the original passed the sub-frame
    # x[to_drop] as the labels argument).
    x = x.drop(columns=to_drop)
    return x, y
# -
b
| 002_b_Feature_Selection.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Introducing the IPython Notebook
# + [markdown] slideshow={"slide_type": "skip"}
# ### <NAME> (US Army ERDC) and <NAME> (KAUST)
# + [markdown] slideshow={"slide_type": "skip"}
# ### Teaching Numerical Methods with IPython Notebooks, SciPy 2014
# + [markdown] slideshow={"slide_type": "notes"}
# <a rel="license" href="http://creativecommons.org/licenses/by/4.0/"><img alt="Creative Commons License" style="border-width:0" src="https://i.creativecommons.org/l/by/4.0/80x15.png" /></a><br><span xmlns:dct="http://purl.org/dc/terms/" property="dct:title">This lecture</span> by <a xmlns:cc="http://creativecommons.org/ns#" property="cc:attributionName" rel="cc:attributionURL"><NAME> and <NAME></a> is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution 4.0 International License</a>. All code examples are also licensed under the [MIT license](http://opensource.org/licenses/MIT).
#
# **NOTE**: Some changes have been applied to make this notebook compliant with **Python 3**
# + [markdown] slideshow={"slide_type": "slide"}
# ## What is this?
# + [markdown] slideshow={"slide_type": "fragment"}
# This is a gentle introduction to the IPython Notebook aimed at lecturers who wish to incorporate it in their teaching, written in an IPython Notebook. This presentation adapts material from the [IPython official documentation](http://nbviewer.ipython.org/github/ipython/ipython/blob/2.x/examples/Notebook).
# + [markdown] slideshow={"slide_type": "slide"}
# ## What is an IPython Notebook?
# + [markdown] slideshow={"slide_type": "fragment"}
# An IPython Notebook is a:
#
# **[A]** Interactive environment for writing and running code
# **[B]** Weave of code, data, prose, equations, analysis, and visualization
# **[C]** Tool for prototyping new code and analysis
# **[D]** Reproducible workflow for scientific research
#
# + [markdown] slideshow={"slide_type": "fragment"}
# **[E]** **All of the above**
# + [markdown] slideshow={"slide_type": "slide"}
# ### Writing and Running Code
# + [markdown] slideshow={"slide_type": "fragment"}
# The IPython Notebook consists of an ordered list of cells.
#
# There are four important cell types:
#
# * **Code**
# * **Markdown**
# * **Heading**
# * **Raw**
# + [markdown] slideshow={"slide_type": "fragment"}
# We briefly introduce how Code Cells work here. We will return to the other three cell types later.
# + [markdown] slideshow={"slide_type": "slide"}
# ### Code Cells
# + slideshow={"slide_type": "fragment"}
# This is a code cell made up of Python comments
# We can execute it by clicking on it with the mouse
# then clicking the "Run Cell" button
# + slideshow={"slide_type": "fragment"}
# A comment is a pretty boring piece of code
# This code cell generates "Hello, World" when executed
# (run it with the "Run Cell" button or Shift-Enter).
print("Hello, World")
# + slideshow={"slide_type": "subslide"}
# Code cells can also generate graphical output
# %matplotlib inline
# Fix: plain `import matplotlib` does not load the pyplot submodule, so
# matplotlib.pyplot.hist would raise AttributeError outside IPython's
# %matplotlib machinery; import the submodule explicitly.
import matplotlib.pyplot
matplotlib.pyplot.hist([0, 1, 2, 2, 3, 3, 3, 4, 4, 4, 10]);
# + [markdown] slideshow={"slide_type": "slide"}
# ## Modal editor
# -
# Starting with IPython 2.0, the IPython Notebook has a modal user interface. This means that the keyboard does different things depending on which mode the Notebook is in. There are two modes: edit mode and command mode.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Edit mode
# -
# Edit mode is indicated by a green cell border and a prompt showing in the editor area:
#
# <img src="./files/images/edit_mode.png">
#
# When a cell is in edit mode, you can type into the cell, like a normal text editor.
# + [markdown] slideshow={"slide_type": "fragment"}
# <div class="alert alert-success" style="margin: 10px">
# Enter edit mode by pressing `enter` or using the mouse to click on a cell's editor area.
# </div>
# + [markdown] slideshow={"slide_type": "fragment"}
# <div class="alert alert-success" style="margin: 10px">
# While in edit mode, tab-completion works for variables the kernel knows about from executing previous cells.
# </div>
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Command mode
# -
# Command mode is indicated by a grey cell border:
#
# <img src="./files/images/command_mode.png">
#
# When you are in command mode, you are able to edit the notebook as a whole, but not type into individual cells. Most importantly, in command mode, the keyboard is mapped to a set of shortcuts that let you perform notebook and cell actions efficiently. For example, if you are in command mode and you press `c`, you will copy the current cell - no modifier is needed.
# + [markdown] slideshow={"slide_type": "fragment"}
# <div class="alert alert-error" style="margin: 10px">
# Don't try to type into a cell in command mode; unexpected things will happen!
# </div>
# + [markdown] slideshow={"slide_type": "fragment"}
# <div class="alert alert-success" style="margin: 10px">
# Enter command mode by pressing `esc` or using the mouse to click *outside* a cell's editor area.
# </div>
# + [markdown] slideshow={"slide_type": "slide"}
# ## Mouse navigation
# -
# All navigation and actions in the Notebook are available using the mouse through the menubar and toolbar, which are both above the main Notebook area:
#
# <img src="./files/images/menubar_toolbar.png">
# + [markdown] slideshow={"slide_type": "notes"}
# The first idea of mouse based navigation is that **cells can be selected by clicking on them.** The currently selected cell gets a grey or green border depending on whether the notebook is in edit or command mode. If you click inside a cell's editor area, you will enter edit mode. If you click on the prompt or output area of a cell you will enter command mode.
#
# If you are running this notebook in a live session (not on http://nbviewer.ipython.org) try selecting different cells and going between edit and command mode. Try typing into a cell.
# + [markdown] slideshow={"slide_type": "notes"}
# The second idea of mouse based navigation is that **cell actions usually apply to the currently selected cell**. Thus if you want to run the code in a cell, you would select it and click the "Play" button in the toolbar or the "Cell:Run" menu item. Similarly, to copy a cell you would select it and click the "Copy" button in the toolbar or the "Edit:Copy" menu item. With this simple pattern, you should be able to do most everything you need with the mouse.
#
# Markdown and heading cells have one other state that can be modified with the mouse. These cells can either be rendered or unrendered. When they are rendered, you will see a nice formatted representation of the cell's contents. When they are unrendered, you will see the raw text source of the cell. To render the selected cell with the mouse, click the "Play" button in the toolbar or the "Cell:Run" menu item. To unrender the selected cell, double click on the cell.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Keyboard Navigation
# + [markdown] slideshow={"slide_type": "fragment"}
# The modal user interface of the IPython Notebook has been optimized for efficient keyboard usage. This is made possible by having two different sets of keyboard shortcuts: one set that is active in edit mode and another in command mode.
# + [markdown] slideshow={"slide_type": "fragment"}
# The most important keyboard shortcuts are `enter`, which enters edit mode, and `esc`, which enters command mode.
#
# In edit mode, most of the keyboard is dedicated to typing into the cell's editor. Thus, in edit mode there are relatively few shortcuts:
# -
# In command mode, the entire keyboard is available for shortcuts:
# + [markdown] slideshow={"slide_type": "slide"}
# Here the rough order in which the IPython Developers recommend learning the command mode **shortcuts**:
#
# 1. Basic navigation: `enter`, `shift-enter`, `up/k`, `down/j`
# 2. Saving the notebook: `s`
# 2. Cell types: `y`, `m`, `1-6`, `t`
# 3. Cell creation and movement: `a`, `b`, `ctrl+k`, `ctrl+j`
# 4. Cell editing: `x`, `c`, `v`, `d`, `z`, `shift+=`
# 5. Kernel operations: `i`, `0`
# + [markdown] slideshow={"slide_type": "fragment"}
# I personally (& humbly) suggest learning `h` first!
# + [markdown] slideshow={"slide_type": "slide"}
# ## The IPython Notebook Architecture
# + [markdown] slideshow={"slide_type": "notes"}
# So far, we have learned the basics of using IPython Notebooks.
#
# For simple demonstrations, the typical user doesn't need to understand how the computations are being handled, but to successfully write and present computational notebooks, **you** will need to understand how the notebook architecture works.
# + [markdown] slideshow={"slide_type": "notes"}
# A *live* notebook is composed of an interactive web page (the front end), a running IPython session (the kernel or back end), and a web server responsible for handling communication between the two (the, err..., middle-end)
# + [markdown] slideshow={"slide_type": "notes"}
# A *static* notebook, as for example seen on NBViewer, is a static view of the notebook's content. The default format is HTML, but a notebook can also be output in PDF or other formats.
# -
# The centerpiece of an IPython Notebook is the "kernel", the IPython instance responsible for executing all code. Your IPython kernel maintains its state between executed cells.
# + slideshow={"slide_type": "subslide"}
# Demonstrate that the kernel keeps state between cells.
x = 0
print(x)
# + slideshow={"slide_type": "fragment"}
# `x` is still bound from the previous cell; this prints 1.
x += 1
print(x)
# + [markdown] slideshow={"slide_type": "notes"}
# There are two important actions for interacting with the kernel. The first is to interrupt it. This is the same as sending a Control-C from the command line. The second is to restart it. This completely terminates the kernel and starts it anew. None of the kernel state is saved across a restart.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Markdown cells
# -
# Text can be added to IPython Notebooks using Markdown cells. Markdown is a popular markup language that is a superset of HTML. Its specification can be found here:
#
# <http://daringfireball.net/projects/markdown/>
# + [markdown] slideshow={"slide_type": "slide"}
# ## Markdown basics
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Text formatting
# + [markdown] slideshow={"slide_type": "fragment"}
# You can make text *italic* or **bold** or `monospace`
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Itemized Lists
# -
# * One
# - Sublist
# - This
# - Sublist
# - That
# - The other thing
# * Two
# - Sublist
# * Three
# - Sublist
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Enumerated Lists
# -
# 1. Here we go
# 1. Sublist
# 2. Sublist
# 2. There we go
# 3. Now this
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Horizontal Rules
# -
# ---
#
# ---
#
# ---
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Blockquotes
# -
# > To me programming is more than an important practical art. It is also a gigantic undertaking in the foundations of knowledge. -- <NAME>
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Links
# -
# [IPython's website](http://ipython.org)
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Code
# -
# This is a code snippet:
#
# ```Python
# def f(x):
# """a docstring"""
# return x**2
# ```
#
# This is an example of a **Python** function
# + [markdown] slideshow={"slide_type": "fragment"}
# You can also use triple-backticks to denote code blocks.
# This also allows you to choose the appropriate syntax highlighter.
#
# ```C
# for (i=0; i<n; i++) {
# printf("hello %d\n", i);
# x += 4;
# }
# ```
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Tables
# -
# Time (s) | Audience Interest
# ---------|------------------
# 0 | High
# 1 | Medium
# 5 | Facebook
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Images
# -
# 
# + [markdown] slideshow={"slide_type": "slide"}
# ### YouTube
# + slideshow={"slide_type": "-"}
from IPython.display import YouTubeVideo
YouTubeVideo('vW_DRAJ0dtc')
# + [markdown] slideshow={"slide_type": "slide"}
# ### Other HTML
# -
# <strong> Be Bold! </strong>
# + [markdown] slideshow={"slide_type": "slide"}
# ## Mathematical Equations
# + [markdown] slideshow={"slide_type": "fragment"}
# Courtesy of MathJax, you can beautifully render mathematical expressions, both inline:
# $e^{i\pi} + 1 = 0$, and displayed:
#
# $$e^x=\sum_{i=0}^\infty \frac{1}{i!}x^i$$
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Equation Environments
# -
# You can also use a number of equation environments, such as `align`:
#
# \begin{align}
# x &= 4 \\
# y+z &= x
# \end{align}
#
# [A full list of available TeX and LaTeX commands is maintained by Dr. <NAME>.](http://www.onemathematicalcat.org/MathJaxDocumentation/TeXSyntax.htm)
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Other Useful MathJax Notes
# -
# * inline math is demarcated by `$ $`, or `\( \)`
# * displayed math is demarcated by `$$ $$` or `\[ \]`
# * displayed math environments can also be directly demarcated by `\begin` and `\end`
# * `\newcommand` and `\def` are supported, *within* areas MathJax processes (such as in a `\[ \]` block)
# * equation numbering is not officially supported, but it can be indirectly enabled
# + [markdown] slideshow={"slide_type": "slide"}
# ## A Note about Notebook Security
# -
# By default, a notebook downloaded to a new computer is *untrusted*
#
# * HTML and Javascript in Markdown cells is now *never* executed
# * HTML and Javascript code outputs must be explicitly *re-executed*
# * Some of these restrictions can be mitigrated through shared accounts (Sage MathCloud) and secrets
# + [markdown] slideshow={"slide_type": "notes"}
# More information on notebook security is in the [IPython Notebook documentation](http://ipython.org/ipython-doc/stable/notebook/security.html)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Magics
# + [markdown] slideshow={"slide_type": "-"}
# IPython kernels execute a superset of the Python language. The extension functions, commonly referred to as *magics*, come in two variants.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Line Magics
# + [markdown] slideshow={"slide_type": "-"}
# * A *line magic* looks like a command line call. The most important of these is `%matplotlib inline`, which embeds all matplotlib plot output as images in the notebook itself.
# + slideshow={"slide_type": "fragment"}
# %matplotlib inline
# + slideshow={"slide_type": "subslide"}
# %whos
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Cell Magics
# + [markdown] slideshow={"slide_type": "-"}
# * A *cell magic* takes its entire cell as an argument. Although there are a number of useful cell magics, you may find `%%timeit` to be useful for exploring code performance.
# + slideshow={"slide_type": "fragment"}
# %%timeit
import numpy as np
np.sum(np.random.rand(1000))
# -
# ### Execute Code as Python 2
# + language="python2"
#
# i = 10**60
# print type(i)
# + [markdown] slideshow={"slide_type": "slide"}
# ### Interacting with the Command Line
# + [markdown] slideshow={"slide_type": "subslide"}
# IPython supports one final trick, the ability to interact directly with your shell by using the `!` operator.
# + slideshow={"slide_type": "fragment"}
# !ls
# + slideshow={"slide_type": "fragment"}
# x = !ls
# + slideshow={"slide_type": "fragment"}
print(x)
# + [markdown] slideshow={"slide_type": "slide"}
# ## A Note about Notebook Version Control
# + [markdown] slideshow={"slide_type": "-"}
# The IPython Notebook is stored using canonicalized JSON for ease of use with version control systems.
#
# There are two things to be aware of:
#
# * By default, IPython embeds all content and saves kernel execution numbers. You may want to get in the habit of clearing all cells before committing.
#
# * As of IPython 2.0, all notebooks are signed on save. This increases the chances of a commit collision during merge, forcing a manual resolution. Either signature can be safely deleted in this situation.
| 01 Introducing the IPython Notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.5 64-bit (''base'': conda)'
# name: python385jvsc74a57bd0a7209f30d626d7ca378e87ab9e54d86230903770f9cef6e8158b3fe61b437bae
# ---
# %matplotlib inline
from IPython.display import display,Math
from sympy import *
init_session()
# Build the 33-symbol cipher alphabet: space, A-Z, and six punctuation marks.
mykeys = [ i for i in range(33)]
myvalues = [" "]+[chr(i) for i in range(65, 65+26)]+[".",",","'",'"',"(",")"]
# Index -> symbol lookup table used by the Caesar-shift widget.
mydict = dict(zip(mykeys,myvalues))
dlen = len(myvalues)  # alphabet size (33), the modulus for the shift
from IPython.display import HTML
from ipywidgets import interact
from ipywidgets import interact,Dropdown,IntSlider
@interact
def _(ptext="THIS IS A PEN.", key="0"):
    """Caesar-cipher widget: shift each symbol of *ptext* by *key* places.

    Each character is looked up in the module-level 33-symbol alphabet
    ``myvalues`` and replaced by the symbol ``key`` positions later
    (modulo ``dlen``) via the module-level ``mydict`` table.  Both the
    plaintext and the ciphertext are printed.
    """
    print("平文 :{}".format(ptext))
    try:
        key = int(key)
    except (ValueError, TypeError):
        # Non-numeric widget input: fall back to a zero shift.
        # (Was a bare `except:`, which silently swallowed every exception.)
        key = 0
    etext = ""
    for ch in ptext:
        # NOTE(review): characters outside the alphabet (e.g. lowercase
        # letters) raise ValueError from .index() — confirm intended.
        etext += mydict[(myvalues.index(ch) + key) % dlen]
    print("暗号文:{}".format(etext))
| 21jk1-0519.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import tensorflow as tf
import numpy as np
import gzip
import pickle
# +
# TF1-style demo: slice the first two columns out of an (N, 3) int tensor.
ph = tf.placeholder(shape=[None,3], dtype=tf.int32)
# look the -1 in the first position
# A size of -1 means "take all remaining rows"; [0, 0] is the start offset.
x = tf.slice(ph, [0, 0], [-1, 2])
input_ = np.array([[1,2,3],
                   [3,4,5],
                   [5,6,7]])
with tf.Session() as sess:
    # NOTE(review): tf.initialize_all_variables() is the deprecated TF1
    # spelling of tf.global_variables_initializer(); there are no variables
    # here, so the call is a no-op either way.
    sess.run(tf.initialize_all_variables())
    print(sess.run(x, feed_dict={ph: input_}))
# -
input_ = np.array([[1,2,0,0,0,0,0,0,0,0],[3,4,0,0,0,0,0,0,0,0]])
input_
input_.shape
# Same slice demo on (None, 10) zero-padded rows: keep the first two
# columns of every row.
ph = tf.placeholder(shape=[None,10],dtype=tf.int32)
x = tf.slice(ph,[0,0],[-1,2])
x
with tf.Session() as sess:
    # No variables exist, so the initializer run is a no-op.
    sess.run(tf.global_variables_initializer())
    print(sess.run(x, feed_dict={ph: input_}))
| tf_exp.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <NAME>
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
import mglearn
mglearn.plots.plot_knn_classification(n_neighbors=1)
# Jetzt laden wir den Datensatz
# +
from sklearn.model_selection import train_test_split
X, y = mglearn.datasets.make_forge()
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# -
# Erzeugen des Classifier Object
# Build a 3-nearest-neighbours classifier.
from sklearn.neighbors import KNeighborsClassifier
clf = KNeighborsClassifier(n_neighbors=3)
# Train the model on the training split.
clf.fit(X_train, y_train)
# Predict labels for the held-out test split.
pred = clf.predict(X_test)
print("Test set predictions: {}".format(pred))
# Betrachten die accuracy — look at the accuracy on the test set.
# (This line was a bare markdown fragment, which is a SyntaxError when
# the notebook is executed as Python; it is now a comment.)
sco = clf.score(X_test, y_test)
print("Test set accuracy: {:.2f}".format(sco))
np.shape(X_test)
# Seven random points in [0, 10)^2 — deliberately meaningless inputs.
X_new = np.random.rand(7,2) * 10
np.shape(X_new)
print(X_new)
# The classifier assigns labels to them anyway (garbage in, garbage out).
pred = clf.predict(X_new)
print("Garbage prediction with dummy values: {}".format(pred))
| tobi.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Homework 6 - Berkeley STAT 157
#
# **Your name: XX, SID YY, teammates A,B,C (Please add your name, SID and teammates to ease Ryan and Rachel to grade.)**
#
# Handout 3/5/2019, due 3/12/2019 by 4pm. Please submit through gradescope.
#
# In this homework, we will train a CNN model on CIFAR-10 and submit the results into [Kaggle](https://www.kaggle.com/c/cifar-10). The rule is similar to homework 4:
#
# - work as a team
# - submit your results into Kaggle
# - take a screen shot of your best score and insert it below
# - the top 3 teams/individuals will be awarded with 500 dollar AWS credits
#
# The rest of this notebook contains a baseline ResNet-15 model to train on CIFAR-10. Please use it as a starting point. The end of this notebooks has several hints to improve your results.
#
# First, import the packages or modules required for the competition.
# + attributes={"classes": [], "id": "", "n": "1"}
import d2l
from mxnet import autograd, gluon, init
from mxnet.gluon import data as gdata, loss as gloss, nn
import os
import pandas as pd
import shutil
import time
# -
# ## Obtain and Organize the Data Sets
#
# The competition data is divided into a training set and testing set. The training set contains 50,000 images. The testing set contains 300,000 images, of which 10,000 images are used for scoring, while the other 290,000 non-scoring images are included to prevent the manual labeling of the testing set and the submission of labeling results. The image formats in both data sets are PNG, with heights and widths of 32 pixels and three color channels (RGB). The images cover 10 categories: planes, cars, birds, cats, deer, dogs, frogs, horses, boats, and trucks. The upper-left corner of Figure 9.16 shows some images of planes, cars, and birds in the data set.
#
# ### Download the Data Set
#
# After logging in to Kaggle, we can click on the "Data" tab on the CIFAR-10 image classification competition webpage shown in Figure 9.16 and download the training data set "train.7z", the testing data set "test.7z", and the training data set labels "trainlabels.csv".
#
#
# ### Unzip the Data Set
#
# The training data set "train.7z" and the test data set "test.7z" need to be unzipped after downloading. After unzipping the data sets, store the training data set, test data set, and training data set labels in the following respective paths:
#
# * ../data/kaggle_cifar10/train/[1-50000].png
# * ../data/kaggle_cifar10/test/[1-300000].png
# * ../data/kaggle_cifar10/trainLabels.csv
#
# To make it easier to get started, we provide a small-scale sample of the data set mentioned above. "train_tiny.zip" contains 100 training examples, while "test_tiny.zip" contains only one test example. Their unzipped folder names are "train_tiny" and "test_tiny", respectively. In addition, unzip the zip file of the training data set labels to obtain the file "trainlabels.csv". If you are going to use the full data set of the Kaggle competition, you will also need to change the following `demo` variable to `False`.
# + attributes={"classes": [], "id": "", "n": "2"}
demo = True # You need to change demo to False for this homework.
if demo:
    import zipfile
    # Unpack the tiny sample archives (100 train images / 1 test image);
    # the full Kaggle data set is assumed to be pre-extracted on disk.
    for f in ['train_tiny.zip', 'test_tiny.zip', 'trainLabels.csv.zip']:
        with zipfile.ZipFile('../data/kaggle_cifar10/' + f, 'r') as z:
            z.extractall('../data/kaggle_cifar10/')
# -
# ### Organize the Data Set
#
# We need to organize data sets to facilitate model training and testing. The following `read_label_file` function will be used to read the label file for the training data set. The parameter `valid_ratio` in this function is the ratio of the number of examples in the validation set to the number of examples in the original training set.
# + attributes={"classes": [], "id": "", "n": "3"}
def read_label_file(data_dir, label_file, train_dir, valid_ratio):
    """Parse the training-label CSV and size the per-class training split.

    Returns ``(n_train_per_label, idx_label)``: ``idx_label`` maps each
    integer image index to its class-name string, and
    ``n_train_per_label`` is how many images of each class belong to the
    training (as opposed to validation) portion, given *valid_ratio*.
    """
    with open(os.path.join(data_dir, label_file), 'r') as f:
        rows = f.readlines()[1:]  # drop the CSV header row
    idx_label = {}
    for row in rows:
        idx, label = row.rstrip().split(',')
        idx_label[int(idx)] = label
    labels = set(idx_label.values())
    n_train_valid = len(os.listdir(os.path.join(data_dir, train_dir)))
    n_train = int(n_train_valid * (1 - valid_ratio))
    assert 0 < n_train < n_train_valid
    return n_train // len(labels), idx_label
# -
# Below we define a helper function to create a path only if the path does not already exist.
# + attributes={"classes": [], "id": "", "n": "4"}
def mkdir_if_not_exist(path):
    """Create the directory ``os.path.join(*path)`` if it does not exist.

    *path* is a sequence of path components.  Using ``exist_ok=True``
    replaces the original exists-then-create pattern, which computed the
    join twice and could raise if another process created the directory
    between the check and the ``makedirs`` call.
    """
    os.makedirs(os.path.join(*path), exist_ok=True)
# -
# Next, we define the `reorg_train_valid` function to segment the validation set from the original training set. Here, we use `valid_ratio=0.1` as an example. Since the original training set has 50,000 images, there will be 45,000 images used for training and stored in the path “`input_dir/train`” when tuning hyper-parameters, while the other 5,000 images will be stored as validation set in the path “`input_dir/valid`”. After organizing the data, images of the same type will be placed under the same folder so that we can read them later.
# + attributes={"classes": [], "id": "", "n": "5"}
def reorg_train_valid(data_dir, train_dir, input_dir, n_train_per_label,
                      idx_label):
    """Split the original training images into per-label train/valid folders.

    Every image is copied into ``input_dir/train_valid/<label>`` (used for
    the final retraining on all labelled data); additionally the first
    *n_train_per_label* images of each label go to ``input_dir/train`` and
    the rest to ``input_dir/valid``.
    """
    label_count = {}
    for fname in os.listdir(os.path.join(data_dir, train_dir)):
        label = idx_label[int(fname.split('.')[0])]
        src = os.path.join(data_dir, train_dir, fname)
        mkdir_if_not_exist([data_dir, input_dir, 'train_valid', label])
        shutil.copy(src, os.path.join(data_dir, input_dir, 'train_valid',
                                      label))
        seen = label_count.get(label, 0)
        dest = 'train' if seen < n_train_per_label else 'valid'
        mkdir_if_not_exist([data_dir, input_dir, dest, label])
        shutil.copy(src, os.path.join(data_dir, input_dir, dest, label))
        label_count[label] = seen + 1
# -
# The `reorg_test` function below is used to organize the testing set to facilitate the reading during prediction.
# + attributes={"classes": [], "id": "", "n": "6"}
def reorg_test(data_dir, test_dir, input_dir):
    """Copy every test image into ``input_dir/test/unknown``.

    The single 'unknown' pseudo-label lets ImageFolderDataset read an
    unlabelled test set later on.
    """
    dest = os.path.join(data_dir, input_dir, 'test', 'unknown')
    mkdir_if_not_exist([data_dir, input_dir, 'test', 'unknown'])
    for fname in os.listdir(os.path.join(data_dir, test_dir)):
        shutil.copy(os.path.join(data_dir, test_dir, fname), dest)
# -
# Finally, we use a function to call the previously defined `read_label_file`, `reorg_train_valid`, and `reorg_test` functions.
# + attributes={"classes": [], "id": "", "n": "7"}
def reorg_cifar10_data(data_dir, label_file, train_dir, test_dir, input_dir,
                       valid_ratio):
    """Drive the full data re-organisation.

    Reads the label file, splits the training images into train/valid
    folders, and stages the test images under a single pseudo-label.
    """
    n_per_label, idx_label = read_label_file(data_dir, label_file,
                                             train_dir, valid_ratio)
    reorg_train_valid(data_dir, train_dir, input_dir, n_per_label, idx_label)
    reorg_test(data_dir, test_dir, input_dir)
# -
# We use only 100 training example and one test example here. The folder names for the training and testing data sets are "train_tiny" and "test_tiny", respectively. Accordingly, we only set the batch size to 1. During actual training and testing, the complete data set of the Kaggle competition should be used and `batch_size` should be set to a larger integer, such as 128. We use 10% of the training examples as the validation set for tuning hyper-parameters.
# + attributes={"classes": [], "id": "", "n": "8"}
if demo:
# Note: Here, we use small training sets and small testing sets and the
# batch size should be set smaller. When using the complete data set for
# the Kaggle competition, the batch size can be set to a large integer
train_dir, test_dir, batch_size = 'train_tiny', 'test_tiny', 1
else:
train_dir, test_dir, batch_size = 'train', 'test', 128
data_dir, label_file = '../data/kaggle_cifar10', 'trainLabels.csv'
input_dir, valid_ratio = 'train_valid_test', 0.1
reorg_cifar10_data(data_dir, label_file, train_dir, test_dir, input_dir,
valid_ratio)
# -
# ## Image Augmentation
#
# (We will cover image augmentation next week, you can ignore it for this homework.)
#
# To cope with overfitting, we use image augmentation. For example, by adding `transforms.RandomFlipLeftRight()`, the images can be flipped at random. We can also perform normalization for the three RGB channels of color images using `transforms.Normalize()`. Below, we list some of these operations that you can choose to use or modify depending on requirements.
# + attributes={"classes": [], "id": "", "n": "9"}
transform_train = gdata.vision.transforms.Compose([
# Magnify the image to a square of 40 pixels in both height and width
gdata.vision.transforms.Resize(40),
# Randomly crop a square image of 40 pixels in both height and width to
# produce a small square of 0.64 to 1 times the area of the original
# image, and then shrink it to a square of 32 pixels in both height and
# width
gdata.vision.transforms.RandomResizedCrop(32, scale=(0.64, 1.0),
ratio=(1.0, 1.0)),
gdata.vision.transforms.RandomFlipLeftRight(),
gdata.vision.transforms.ToTensor(),
# Normalize each channel of the image
gdata.vision.transforms.Normalize([0.4914, 0.4822, 0.4465],
[0.2023, 0.1994, 0.2010])])
# -
# In order to ensure the certainty of the output during testing, we only perform normalization on the image.
transform_test = gdata.vision.transforms.Compose([
gdata.vision.transforms.ToTensor(),
gdata.vision.transforms.Normalize([0.4914, 0.4822, 0.4465],
[0.2023, 0.1994, 0.2010])])
# ## Read the Data Set
#
# Next, we can create the `ImageFolderDataset` instance to read the organized data set containing the original image files, where each data instance includes the image and label.
# + attributes={"classes": [], "id": "", "n": "10"}
# Read the original image file. Flag=1 indicates that the input image has
# three channels (color)
train_ds = gdata.vision.ImageFolderDataset(
os.path.join(data_dir, input_dir, 'train'), flag=1)
valid_ds = gdata.vision.ImageFolderDataset(
os.path.join(data_dir, input_dir, 'valid'), flag=1)
train_valid_ds = gdata.vision.ImageFolderDataset(
os.path.join(data_dir, input_dir, 'train_valid'), flag=1)
test_ds = gdata.vision.ImageFolderDataset(
os.path.join(data_dir, input_dir, 'test'), flag=1)
# -
# We specify the defined image augmentation operation in `DataLoader`. During training, we only use the validation set to evaluate the model, so we need to ensure the certainty of the output. During prediction, we will train the model on the combined training set and validation set to make full use of all labelled data.
train_iter = gdata.DataLoader(train_ds.transform_first(transform_train),
batch_size, shuffle=True, last_batch='keep')
valid_iter = gdata.DataLoader(valid_ds.transform_first(transform_test),
batch_size, shuffle=True, last_batch='keep')
train_valid_iter = gdata.DataLoader(train_valid_ds.transform_first(
transform_train), batch_size, shuffle=True, last_batch='keep')
test_iter = gdata.DataLoader(test_ds.transform_first(transform_test),
batch_size, shuffle=False, last_batch='keep')
# ## Define the Model
#
# (We will cover hybridize next week. It often makes your model run faster, but you can ignore what it means for this homework.)
#
# Here, we build the residual blocks based on the HybridBlock class, which is slightly different than the implementation described in the [“Residual networks (ResNet)”](http://d2l.ai/chapter_convolutional-neural-networks/resnet.html) section. This is done to improve execution efficiency.
# + attributes={"classes": [], "id": "", "n": "11"}
class Residual(nn.HybridBlock):
    """Residual block for ResNet: two 3x3 conv + batch-norm layers.

    When *use_1x1conv* is set, a strided 1x1 convolution projects the
    shortcut path so its shape matches the main path's output.
    """

    def __init__(self, num_channels, use_1x1conv=False, strides=1, **kwargs):
        super(Residual, self).__init__(**kwargs)
        # Child blocks are created in the same order as the original code
        # (mxnet derives auto-generated parameter names from this order).
        self.conv1 = nn.Conv2D(num_channels, kernel_size=3, padding=1,
                               strides=strides)
        self.conv2 = nn.Conv2D(num_channels, kernel_size=3, padding=1)
        self.conv3 = (nn.Conv2D(num_channels, kernel_size=1, strides=strides)
                      if use_1x1conv else None)
        self.bn1 = nn.BatchNorm()
        self.bn2 = nn.BatchNorm()

    def hybrid_forward(self, F, X):
        out = F.relu(self.bn1(self.conv1(X)))
        out = self.bn2(self.conv2(out))
        shortcut = self.conv3(X) if self.conv3 else X
        return F.relu(out + shortcut)
# -
# Next, we define the ResNet-18 model.
def resnet18(num_classes):
    """Build a ResNet-18 for small (CIFAR-sized) inputs.

    Uses a 3x3 stem without max-pooling, four stages of two residual
    blocks each (64/128/256/512 channels), global average pooling and a
    *num_classes*-way dense output.
    """
    def stage(num_channels, num_residuals, first_block=False):
        # One stage = `num_residuals` residual blocks; every stage except
        # the first halves the spatial size via a strided 1x1 shortcut.
        blk = nn.HybridSequential()
        for i in range(num_residuals):
            if i == 0 and not first_block:
                blk.add(Residual(num_channels, use_1x1conv=True, strides=2))
            else:
                blk.add(Residual(num_channels))
        return blk

    net = nn.HybridSequential()
    net.add(nn.Conv2D(64, kernel_size=3, strides=1, padding=1),
            nn.BatchNorm(), nn.Activation('relu'))
    net.add(stage(64, 2, first_block=True),
            stage(128, 2),
            stage(256, 2),
            stage(512, 2))
    net.add(nn.GlobalAvgPool2D(), nn.Dense(num_classes))
    return net
# The CIFAR-10 image classification challenge uses 10 categories. We will perform Xavier random initialization on the model before training begins.
# +
def get_net(ctx):
    """Return a freshly Xavier-initialised 10-class (CIFAR-10) ResNet-18
    allocated on *ctx*."""
    model = resnet18(10)
    model.initialize(ctx=ctx, init=init.Xavier())
    return model
loss = gloss.SoftmaxCrossEntropyLoss()
# -
# ## Define the Training Functions
#
# We will select the model and tune hyper-parameters according to the model's performance on the validation set. Next, we define the model training function `train`. We record the training time of each epoch, which helps us compare the time costs of different models.
# + attributes={"classes": [], "id": "", "n": "12"}
def train(net, train_iter, valid_iter, num_epochs, lr, wd, ctx, lr_period,
          lr_decay):
    """Train *net* with SGD + momentum, printing per-epoch metrics.

    Every *lr_period* epochs the learning rate is multiplied by
    *lr_decay*.  When *valid_iter* is None (final retraining on the full
    labelled set), validation accuracy is skipped.  Uses the module-level
    `loss`, `batch_size` and `d2l` objects.
    """
    trainer = gluon.Trainer(net.collect_params(), 'sgd',
                            {'learning_rate': lr, 'momentum': 0.9, 'wd': wd})
    for epoch in range(num_epochs):
        train_l_sum, train_acc_sum, n, start = 0.0, 0.0, 0, time.time()
        # Geometric learning-rate decay every `lr_period` epochs.
        if epoch > 0 and epoch % lr_period == 0:
            trainer.set_learning_rate(trainer.learning_rate * lr_decay)
        for X, y in train_iter:
            y = y.astype('float32').as_in_context(ctx)
            with autograd.record():
                y_hat = net(X.as_in_context(ctx))
                l = loss(y_hat, y).sum()
            l.backward()
            # NOTE(review): normalises by the global `batch_size`, not
            # X.shape[0]; with last_batch='keep' the final, smaller batch
            # gets a slightly down-scaled update — confirm intended.
            trainer.step(batch_size)
            train_l_sum += l.asscalar()
            train_acc_sum += (y_hat.argmax(axis=1) == y).sum().asscalar()
            n += y.size
        time_s = "time %.2f sec" % (time.time() - start)
        if valid_iter is not None:
            valid_acc = d2l.evaluate_accuracy(valid_iter, net, ctx)
            epoch_s = ("epoch %d, loss %f, train acc %f, valid acc %f, "
                       % (epoch + 1, train_l_sum / n, train_acc_sum / n,
                          valid_acc))
        else:
            epoch_s = ("epoch %d, loss %f, train acc %f, " %
                       (epoch + 1, train_l_sum / n, train_acc_sum / n))
        print(epoch_s + time_s + ', lr ' + str(trainer.learning_rate))
# -
# ## Train and Validate the Model
#
# Now, we can train and validate the model. The following hyper-parameters can be tuned. For example, we can increase the number of epochs. Because `lr_period` and `lr_decay` are set to 80 and 0.1 respectively, the learning rate of the optimization algorithm will be multiplied by 0.1 after every 80 epochs. For simplicity, we only train one epoch here.
# + attributes={"classes": [], "id": "", "n": "13"}
ctx, num_epochs, lr, wd = d2l.try_gpu(), 1, 0.1, 5e-4
lr_period, lr_decay, net = 80, 0.1, get_net(ctx)
net.hybridize()
train(net, train_iter, valid_iter, num_epochs, lr, wd, ctx, lr_period,
lr_decay)
# -
# ## Classify the Testing Set and Submit Results on Kaggle
#
# After obtaining a satisfactory model design and hyper-parameters, we use all training data sets (including validation sets) to retrain the model and classify the testing set.
# + attributes={"classes": [], "id": "", "n": "14"}
net, preds = get_net(ctx), []
net.hybridize()
# Retrain on train+valid combined (no held-out set) before predicting.
train(net, train_valid_iter, None, num_epochs, lr, wd, ctx, lr_period,
      lr_decay)
for X, _ in test_iter:
    y_hat = net(X.as_in_context(ctx))
    preds.extend(y_hat.argmax(axis=1).astype(int).asnumpy())
sorted_ids = list(range(1, len(test_ds) + 1))
# ImageFolderDataset lists files lexicographically ("1", "10", "100", ...,
# "2", ...), so sort the numeric ids the same way to line predictions up.
sorted_ids.sort(key=lambda x: str(x))
df = pd.DataFrame({'id': sorted_ids, 'label': preds})
# Map integer class indices back to their class-name strings.
df['label'] = df['label'].apply(lambda x: train_valid_ds.synsets[x])
df.to_csv('submission.csv', index=False)
# -
# After executing the above code, we will get a "submission.csv" file. The format of this file is consistent with the Kaggle competition requirements.
#
# ## Hints to Improve Your Results
#
# * You should use the complete CIFAR-10 dataset to get meaningful results.
# * You'd better use a GPU machine to run it, otherwise it'll be quite slow. (Please DON'T FORGET to stop or terminate your instance if you are not using it, otherwise AWS will charge you)
# * Change the `batch_size` and number of epochs `num_epochs` to 128 and 100, respectively. (It will take a while to run.)
# * Change to another network, such as ResNet-34 or Inception
| homeworks/homework6.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Import the Necessary Packages
# +
import cProfile
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import math
import random
import os
import math
import json
import scipy
import time
import sys
import multiprocessing as mp
# this inserts the continuous file into the path
sys.path.insert(0, '/Users/tinkertanker/Documents/Entropica/annealing/master/quantum-inspired-optimisation/')
#from Anneal_cont import Annealer
# %matplotlib inline
pd.options.display.float_format = '{:,.1f}'.format
import operator
from operator import itemgetter
from cost_function import *
import solver as slv
import solver_utils as su
import optimiser as opt
import visualisation
# +
func, mesh = levy()
i1, i2 = mesh[0], mesh[1]

def f():
    '''
    input: none (reads module-level `func`, `i1`, `i2`)
    Pre-computes the cost (energy) of every grid point (x1, x2) on the mesh.
    output: dict mapping (x1, x2) -> [energy, walker_count], with the count
    initialised to 0 so later code can track how many walkers visit each point.
    '''
    exploration_space = [(i,j) for i in i1 for j in i2]
    # storing this huge dictionary in memory may not be a great idea..
    super_energies = {(i,j):[func(*[i,j]),0] for i,j in exploration_space}
    return super_energies

super_energies = f()
# +
# this function is not inside the package yet and outside, but the code can be optimized to take advantage of the structure
# anyway
myCostFn = CostFunction(func, mesh, "x1, x2")
print(myCostFn.dimensionality)
print(myCostFn.function)
print(myCostFn.vars_names)
# +
def move_func(config):
    """Propose a symmetric random move: shift config by +0.5 or -0.5 with equal probability."""
    return config + 0.5*(-1)**(np.random.randint(2))

# Set up the Markov Chain Monte Carlo rule
mcmcRule = su.mcmc_rule(move_func,myCostFn)

# Define the cooling schedule - here a simple geometric one, with constant 0.99
# We choose the initial temperature to be 10 times the maximum value of the cost function
# NOTE(review): `cost_val` is not defined anywhere in this file — presumably the
# evaluated cost values over the mesh; confirm against the cost_function module.
init_beta = 1/(10*max(cost_val))
cooling = su.cooling_schedule(['geometric',0.99,1000], init_beta = init_beta)

# BUG FIX: the original line was missing the comma between `walkers=101` and
# `error_thres=10e-2`, which made this cell a SyntaxError.
# NOTE(review): the `population_annealing` class defined below only accepts
# (cost_func, mcmc_rule, cooling_schedule); the extra keyword arguments match
# the `RandomAnneal` signature instead — confirm which class is intended here.
pa = population_annealing(myCostFn, mcmcRule, cooling, walkers=101, error_thres=10e-2, choice='multinomial')
# -
maxiters = 500
# NOTE(review): `sa` is undefined at this point — likely meant to be `pa`; verify.
sa_optimiser = opt.optimiser(sa, maxiters)
class population_annealing:
    """
    Thin annealing-solver shell: wires together a cost function, an MCMC
    move/accept rule and a cooling schedule, and selects a static or dynamic
    per-step iteration routine depending on whether the schedule exposes a
    `_dynamic_func`.

    NOTE(review): `self.method` is set to 'Simulated Annealing' even though the
    class is named population_annealing — confirm which label is intended.
    """

    def __init__(self, cost_func,
                 mcmc_rule,
                 cooling_schedule):
        # Bind the callables we need from the collaborating objects; only
        # their interfaces (evaluate_cost, _move_func, _metropolis_update)
        # are used by this class.
        self._evaluate = cost_func.evaluate_cost
        self._move_func = mcmc_rule._move_func
        self._metropolis_update = mcmc_rule._metropolis_update
        self.method = 'Simulated Annealing'
        self.cooling_schedule = cooling_schedule
        # Dispatch: schedules carrying a `_dynamic_func` get the dynamic
        # iteration, where the temperature reacts to acceptance outcomes.
        dynamic = hasattr(cooling_schedule, '_dynamic_func')
        if dynamic:
            self._do_iteration = self._dynamic_iteration
        else:
            self._do_iteration = self._basic_iteration

    def _basic_iteration(self, current_config, beta):
        """
        Simple, static MCMC update rule (only the accept/reject criteria depends on the state of the system)
        Returns (possibly-updated config, its cost).
        """
        proposed_config = self._move_func(current_config)
        accept = self._metropolis_update(current_config,proposed_config,beta)
        if accept:
            current_config = proposed_config
        return current_config, self._evaluate(current_config)

    def _dynamic_iteration(self, current_config, beta, extra_dict):
        """
        Dynamic MCMC update that depends on the current state of the system, and requires extra input information,
        and must return extra information (compared to the static case above)

        This works by taking in optional additional function arguments, which are executed at specific times to produce the desired result.
        Two types of functions like this may be:
        (1) 'moves': A function that allows the generated moves to be dependent on current conditions,
            executed at the beginning of each step ----->> NOT SUPPORTED YET
        (2) 'decisions': A function that is executed after the acceptance/rejectance outcome of each step,
            before moving on to the next step. Should be of the form (accept_func, reject_func), specifying what
            is to be done in each case. ---->> DESCRIPTION NEEDS UPDATING

        Examples:
        - Using (2) allows the temperature updates to be dependent on the current conditions (e.g. in Lam's method)

        Note that these additional functions cannot themselves be dynamic!

        Returns (config, cost, updated beta, dynamic-schedule outputs).
        """
        proposed_config = self._move_func(current_config)
        accept = self._metropolis_update(current_config,proposed_config,beta)
        if accept:
            current_config = proposed_config
        # Execute the dynamic function
        extra_vals = [i for i in extra_dict.values()]
        beta, dyn_out_vars = self.cooling_schedule._dynamic_func(beta, accept, *extra_vals)
        return current_config, self._evaluate(current_config), beta, dyn_out_vars
# testing the non assigned taus
class RandomAnneal:
    '''
    Population-annealing experiment over a 2-D mesh: maintains a population of
    walkers, resamples them by statistical weight between temperature steps,
    then lets each walker explore locally with a Metropolis rule.

    Pass the max steps you want to take to the annealer function.

    NOTE(review): this class reads module-level globals `func`, `mesh`,
    `super_energies`, `maxsteps` and `explore`; `maxsteps`/`explore` are not
    defined in this file (only `maxiters` is) — NameError unless defined in an
    earlier cell. Also, `anneal()` references `self.lams` and `self.multiplier`
    which are never set in this __init__ — AttributeError at runtime. Confirm
    whether the second RandomAnneal definition below supersedes this one.
    '''
    def __init__(
        self,
        cost_func,
        mcmc_rule,
        cooling_schedule,
        walkers=10,
        error_thres=10e-2,
        choice='multinomial'
    ):
        '''
        inputs: cost function / MCMC rule / cooling schedule objects, the
        population size, the tolerance for counting an answer as correct, and
        the resampling distribution ('multinomial' or 'poisson').
        Initialize parameters.
        output: none
        '''
        self._evaluate = cost_func.evaluate_cost
        self._move_func = mcmc_rule._move_func
        self._metropolis_update = mcmc_rule._metropolis_update
        self.method = 'Population Annealing'
        self.cooling_schedule = cooling_schedule
        # Dynamic schedules (those with a _dynamic_func) use the dynamic
        # per-step iteration; otherwise the static one.
        dynamic = hasattr(cooling_schedule, '_dynamic_func')
        if dynamic:
            self._do_iteration = self._dynamic_iteration
        else:
            self._do_iteration = self._basic_iteration
        self.func, self.mesh = func, mesh
        # NOTE(review): `maxsteps` and `explore` are globals, not parameters.
        self.Tmax, self.exploration_space = maxsteps, explore
        self.i1, self.i2 = self.mesh[0], self.mesh[1]
        self.all_energies = super_energies.copy()
        # correct_answer: lowest energy on the pre-computed grid (values are
        # [energy, count] lists, so min compares by energy first).
        self.correct_answer, self.error_threshold, self.cumulative_correct = super_energies[min(self.all_energies.keys(), key=(lambda k: self.all_energies[k]))][0], error_thres, 0.0
        self.choice = choice
        # walkers_t1/walkers_t2: population size at the previous/current step;
        # initial: the fixed target population size.
        self.walkers_t1, self.walkers_t2, self.initial = walkers, walkers, walkers
        self.stat_weight_ratio = dict()
        self.partition_function = 0
        self.energy_landscape = dict()
        # e_diff is a lambda function used to calculate the ratio of statistical weight
        # between inverse temperatures x[0] and x[1] for energy y.
        self.e_diff = lambda x, y: np.exp(-(x[1] - x[0]) * y)
        self.get_min = lambda some_dict: min(some_dict.keys(), key=(lambda n: some_dict[n]))
        self.get_max = lambda some_dict: max(some_dict.keys(), key=(lambda n: some_dict[n]))

    def _basic_iteration(self, current_config, beta):
        """
        Simple, static MCMC update rule (only the accept/reject criteria depends on the state of the system)
        """
        proposed_config = self._move_func(current_config)
        accept = self._metropolis_update(current_config,proposed_config,beta)
        if accept:
            current_config = proposed_config
        return current_config, self._evaluate(current_config)

    def _dynamic_iteration(self, current_config, beta, extra_dict):
        """
        Dynamic MCMC update that depends on the current state of the system, and requires extra input information,
        and must return extra information (compared to the static case above)

        This works by taking in optional additional function arguments, which are executed at specific times to produce the desired result.
        Two types of functions like this may be:
        (1) 'moves': A function that allows the generated moves to be dependent on current conditions,
            executed at the beginning of each step ----->> NOT SUPPORTED YET
        (2) 'decisions': A function that is executed after the acceptance/rejectance outcome of each step,
            before moving on to the next step. Should be of the form (accept_func, reject_func), specifying what
            is to be done in each case. ---->> DESCRIPTION NEEDS UPDATING

        Examples:
        - Using (2) allows the temperature updates to be dependent on the current conditions (e.g. in Lam's method)

        Note that these additional functions cannot themselves be dynamic!
        """
        proposed_config = self._move_func(current_config)
        accept = self._metropolis_update(current_config,proposed_config,beta)
        if accept:
            current_config = proposed_config
        # Execute the dynamic function
        extra_vals = [i for i in extra_dict.values()]
        beta, dyn_out_vars = self.cooling_schedule._dynamic_func(beta, accept, *extra_vals)
        return current_config, self._evaluate(current_config), beta, dyn_out_vars

    def resample_population(self, walker_pos, mean_val, stat_weight_ratio, Q, tau, choice='multinomial'):
        '''
        input: the walker population, the mean historical population size, the
        statistical weight ratios, the normalisation Q, and per-walker taus.
        Randomly resample the population: each walker gets a Poisson count (with
        mean proportional to its tau) or the population is drawn from a single
        multinomial over normalised taus.
        output: (dict walker -> resample count, the raw counts).
        '''
        rv = dict()
        if choice == "poisson":
            # current number of replicas over the previous number of replicas
            tau = {k:(self.initial / mean_val * v) for k,v in tau.items()}
            # generate a list of poisson values based on the array
            rv = {k:np.random.poisson(v) for k,v in tau.items()}
            nums = [v for k,v in rv.items()]
        else:
            taus = np.array(list(tau.values()))
            normalized_taus = taus / np.sum(taus)
            nums = np.random.multinomial(self.initial, normalized_taus)
            rv = {k:nums[k] for k in range(len(walker_pos))} # this is not self.initial, this is something else.
        return rv, nums

    def partition_calc(self, walker_pos, t0, t1, mean_val):
        '''
        input: walker population, previous/current inverse temperatures, mean
        historical population size.
        Calculate the statistical weight of each walker between t0 and t1.
        output: partition function and statistical weight ratios for each walker,
        plus the normalisation Q and per-walker taus (swr / Q).
        '''
        stat_weight_ratio = dict()
        walk_energies = list()
        # 1 iteration
        for k,v in walker_pos.items():
            energy = walker_pos[k][1]
            #self.walker_pos[k][1] = energy # append the cost function the walker's position
            swr = self.e_diff([t0, t1], energy)
            # potential problem here in when we need to reinstantiate
            if k not in stat_weight_ratio.keys():
                stat_weight_ratio[k] = 0.0
            stat_weight_ratio[k] = swr
            walk_energies.append(swr)
        partition_function = np.sum([np.exp(-(t1) * i[1]) for i in list(walker_pos.values())])
        Q = np.sum(walk_energies) / mean_val
        tau = {k:stat_weight_ratio[k]/Q for k,v in walker_pos.items()}
        return stat_weight_ratio, partition_function, Q, tau

    def random_neighbour(self):
        """
        input: none (reads the mesh axes self.i1, self.i2)
        draw a uniformly random grid point from the entire space of x1 and x2
        output: [new_x, new_y]
        """
        new_x = np.random.choice(self.i1)
        new_y = np.random.choice(self.i2)
        return [new_x, new_y]

    def acceptance_probability(
        self,
        cost,
        new_cost,
        temperature,
    ):
        '''
        inputs: old cost, new cost, current temperature
        calculate probability of acceptance and return it using the metropolis algorithm
        output: probability (0 to 1; may exceed 1 for downhill moves)
        '''
        return np.exp(-(new_cost - cost) / temperature)

    def check_correct(self, energy):
        # Count how many walker energies fall within error_threshold of (or
        # below) the known global minimum, accumulated across the whole run.
        self.cumulative_correct += np.sum([1 if (i-self.correct_answer)<=self.error_threshold or i<self.correct_answer else 0 for i in energy])

    def max_key(self, walker_pos):
        '''
        inputs: walker population dict
        finds the minimum cost value in the dictionary of walkers
        outputs: key of the lowest (best) cost value in the entire dictionary of walkers
        (NOTE(review): name says "max" but this returns the minimum-cost key.)
        '''
        return min(walker_pos.keys(), key=(lambda k: walker_pos[k][1]))

    def calculate_covariance(self, resampled_B, resampled_B_prime):
        '''
        inputs: resampled_B, resampled_B_prime both containing the position and number of walkers at that position for a B and B prime
        produces a numpy covariance matrix containing the inter and intra covariance between the number of walkers resampled
        output: covariance matrix of (self.walkers_t1 x self.walkers_t1) dimension
        NOTE(review): unfinished — references undefined `resampled_walkers`
        (NameError if called) and currently just returns concatenated keys.
        '''
        #distance = lambda x, y: np.sqrt((x[0] - y[0]) ** 2 + (x[1]
        #        - y[1]) ** 2)
        # can be generalized to take in the parameters for the poisson distribution but better not to compute the cov for that at this time
        # do something here
        # calculate all the euclidean distances from the max keys
        euclid_distances = list(resampled_B.keys()) + list(resampled_walkers.keys())
        print(euclid_distances)
        #for k,v in resampled_B_prime.items():
        return euclid_distances

    def get_all_affinity(self, swr_affinity, resample_walker):
        # For each surviving walker, collect affinity entries from walkers that
        # were not resampled. (Commented-out variant kept for reference.)
        # affinity_keys = {i:[[k for m in v if m[0] == i][0] for k,v in swr_affinity.items() if i != k and len(v)>0] for i,j in resample_walker.items()}
        affinity_vals = {i:{k:[m for m in v if m[1] == i][0] for k,v in swr_affinity.items() if i != k and len(v)>0 and k not in list(resample_walker.keys())} for i,j in resample_walker.items()}
        return affinity_vals

    def respawn_walker(self, walker_pos, resampled_walker, tau):
        # Keep walkers that drew a positive resample count, then refill the
        # population back to size via assign_walkers.
        calculate_swr = lambda x,y: x/y
        # gets those with positive number of resampled walkers
        resample_walker = {k:1 for k,v in resampled_walker.items() if v > 0}
        # takes the walkers with positive resamplings back into the walker_pos
        new_walker_pos = {k:walker_pos[k][:] for k,v in resample_walker.items()}
        # this is the slow part!!
        new_walker_pos = self.assign_walkers(tau, new_walker_pos, walker_pos)
        return new_walker_pos

    def get_max_tau(self, taus : dict, new_walker_pos : dict, walker_pos : dict):
        # takes in a dictionary and returns the key of a walker which has not
        # been assigned yet (chosen by tau), plus a donor walker accepted by a
        # Monte-Carlo-style draw.
        # NOTE(review): despite the name, this uses self.get_min on the
        # filtered taus — confirm whether min or max is intended.
        assigned_keys = list(new_walker_pos.keys())
        filtered_tau = {k:v for k,v in taus.items() if k not in assigned_keys}
        max_tau = self.get_min(filtered_tau)
        # max_tau represents the walker we choose
        # generates a random number for each of the candidate donors
        accepted = 0
        while accepted == 0:
            # it's somehow stuck here
            # here we iterate through the dictionary and do a monte carlo style check if the current walker is better than the walkers to be chosen
            # if in some case that everything is rejected, we will run a while loop and try again.
            generate_rn = sorted([(i,np.random.uniform(0, 1)) for i,j in new_walker_pos.items()], reverse=True)
            for i,j in enumerate(generate_rn):
                if random.uniform(0,1) > j[1]:
                    accepted = j[0]
        return max_tau, accepted

    def unassigned(self, walker_pos, new_walker_pos):
        # Walkers in the old population not yet copied into the new one.
        return {k:v for k,v in walker_pos.items() if k not in list(new_walker_pos.keys())}

    def assign_walkers(self, taus, new_walker_pos, walker_pos):
        # Recursively fill every unassigned slot by copying an accepted donor's
        # state; terminates when all old keys are present in the new dict.
        not_assigned = self.unassigned(walker_pos, new_walker_pos)
        if len(not_assigned) > 0:
            max_tau, accepted = self.get_max_tau(taus, new_walker_pos, walker_pos)
            new_walker_pos[max_tau] = walker_pos[accepted][:]
            return self.assign_walkers(taus, new_walker_pos, walker_pos)
        return new_walker_pos

    def anneal(self):
        '''
        inputs: none
        function performs annealing and calls random start to kickstart the annealing process. iteratively
        calculates the new cost.
        output: energy landscape, average costs, cumulative correct count, free
        energies, best-cost history, best values, population sizes, temperature
        list, partition-function trace, resample counts, and walker configs over time
        '''
        T_list = [1]
        # metrics we want to keep track of
        populations = list()
        free_energy = dict()
        average_cost = list()
        best = list()
        walker_z = list()
        walker_pos, new_walker_pos = dict(), dict()
        # keeps track of the number of resamples assigned to each walker
        resample_time = [[1 for _ in range(self.initial)]]
        # keeps track of where walkers go over time
        config_time = list()
        #taus_over_time = {i:0 for i in range(self.walkers_t1)}
        # generate a state of random walkers with their costs, need to change such that we are generating tours instead of other stuff.
        # generate a state of random walkers with their costs
        walker_pos = {i:[[np.random.choice(self.i1),
                     np.random.choice(self.i2)]] for i in range(self.walkers_t1)}
        # NOTE(review): `confgs_time` (typo) is assigned but never used;
        # `config_time` starts empty instead — confirm intent.
        confgs_time = [[v[0] for k,v in walker_pos.items()]]
        # append the cost of each state
        for k,v in walker_pos.items():
            walker_pos[k].append(self.all_energies[tuple(v[0])][0])
            # increase the number of walkers at all_energies
            self.all_energies[tuple(v[0])][1] += 1
        # gets the key of the best (minimum-cost) walker
        max_key = self.max_key(walker_pos)
        best_cost = [[1, walker_pos[max_key][0], walker_pos[max_key][1]]]
        for temp_step in range(2, self.Tmax+2):
            # calculate the temperature from temp step 2 onward
            fraction = 1/temp_step
            # NOTE(review): nesting reconstructed from a flattened source — the
            # `else` is read as belonging to `temp_step > 2` so that T is
            # defined on the first pass; confirm against the original notebook.
            # NOTE(review): `self.lams` and `self.multiplier` are never set in
            # __init__ here — AttributeError when this branch runs.
            if temp_step > 2:
                if self.lams == 0:
                    T = self.multiplier * fraction if self.multiplier < 1 else fraction
            else:
                T = fraction
            T_list.append(int(np.round(1/T)))
            populations.append(self.walkers_t1)
            # (previous inverse T, current inverse T, mean population size)
            params = (T_list[-2], T_list[-1], np.mean(populations))
            stat_weight_ratio, partition_function, Q, tau = self.partition_calc(walker_pos, *params)
            new_params = [walker_pos] + [params[-1]] + [stat_weight_ratio, Q, tau, self.choice]
            resampled_walker, num_resampled = self.resample_population(*new_params)
            new_walker_pos = self.respawn_walker(walker_pos, resampled_walker, tau)
            resample_time.append(num_resampled)
            config_time.append([v for k,v in new_walker_pos.items()])
            # explore a new city configuration for each walker (the annealing step)
            for k,v in new_walker_pos.items():
                costs = round(new_walker_pos[k][1], 2)
                states = new_walker_pos[k][0]
                # histogram of visited (rounded) energies
                if costs not in self.energy_landscape.keys():
                    self.energy_landscape[costs] = 1
                else:
                    self.energy_landscape[costs] = self.energy_landscape[costs] + 1
                for step in range(self.exploration_space):
                    new_state = self.random_neighbour()
                    new_cost = self.func(*new_state)
                    if new_cost < costs or self.acceptance_probability(costs,
                            new_cost, T) >= random.uniform(0, 1):
                        states, costs = new_state, new_cost
                new_walker_pos[k][0], new_walker_pos[k][1] = states, costs
                # reassign to best cost if better than the current best cost
                if costs < best_cost[-1][2]:
                    best_cost.append([temp_step, states, costs/self.initial]) # should i be putting the state or the walker? none of them are meaningful anyway...
            best.append(best_cost[-1][2])
            all_costs = np.array([walker_pos[k][1] for k,v in walker_pos.items()])
            average_cost.append(np.mean(all_costs))
            free_energy[temp_step] = math.log(Q) + math.log(self.walkers_t1)
            self.check_correct(all_costs/self.initial)
            # only after you are done calculating the covariance, then you reassign the previous to the current one
            walker_pos = new_walker_pos.copy()
            self.walkers_t1 = self.walkers_t2
            self.walkers_t2 = len(walker_pos)
            new_walker_pos = dict()
        return (
            self.energy_landscape,
            average_cost,
            self.cumulative_correct,
            free_energy,
            best_cost,
            best, # best refers to all the best costs at a given temperature
            populations,
            T_list,
            walker_z,
            resample_time,
            config_time
        )
# # Annealing Code
# Calculate the partition function Z(beta) for a sweep of temperatures
# T = 1..5000 (beta = 1/T) over the pre-computed grid energies.
# BUG FIX: the original cell was a SyntaxError (`[ for k,v in ...]`), applied
# np.array to a dict, wrote into an undefined `self.partition`, and appended
# to an uninitialised `energy` list. Rewritten as a self-contained loop.
base_energies = np.array([v[0] for v in super_energies.values()])
partition = dict()
energy = list()
for i in range(1, 5000+1):
    Beta = 1/i
    # Boltzmann exponents -beta * E for every grid point
    exponents = -Beta * base_energies
    partition[i] = np.sum(np.exp(exponents))
    energy.append(exponents)
# +
# testing the non assigned taus
class RandomAnneal:
    '''
    Population-annealing experiment over a 2-D mesh, second revision: same
    resample-then-explore loop as above, but with an explicit maxsteps/explore
    signature and optional Lam's adaptive acceptance-rate schedule (lams=1).

    Pass the max steps you want to take to the annealer function.

    NOTE(review): this re-definition shadows the earlier RandomAnneal class.
    It reads module-level globals `func`, `mesh`, `super_energies`.
    '''
    def __init__(
        self,
        maxsteps=500,
        explore=30,
        walkers=10,
        error_thres=10e-2,
        multiplier=1, # by default the multipler is 1
        acceptrate=0.5,
        lams=0, # by default lams is turned off
        choice='multinomial',
        #accs = [500, 1, 1, 0.5, 0, round((Ncity.n)**0.5), 30]
    ):
        '''
        inputs: total number of steps to try, local-exploration steps per
        walker, population size, correctness tolerance, geometric multiplier
        for the annealing schedule, initial acceptance rate, Lam's-schedule
        flag, and the resampling distribution.
        Initialize parameters.
        output: none
        '''
        self.func, self.mesh = func, mesh
        self.lams, self.acceptrate, self.multiplier = lams, acceptrate, multiplier
        self.Tmax, self.exploration_space = maxsteps, explore
        self.i1, self.i2 = self.mesh[0], self.mesh[1]
        self.all_energies = super_energies.copy()
        # correct_answer: lowest energy on the pre-computed grid.
        self.correct_answer, self.error_threshold, self.cumulative_correct = super_energies[min(self.all_energies.keys(), key=(lambda k: self.all_energies[k]))][0], error_thres, 0.0
        self.choice = choice
        # walkers_t1/walkers_t2: population at previous/current step; initial:
        # the fixed target population size.
        self.walkers_t1, self.walkers_t2, self.initial = walkers, walkers, walkers
        self.stat_weight_ratio = dict()
        self.partition_function = 0
        self.energy_landscape = dict()
        # e_diff is a lambda function used to calculate the ratio of statistical weight
        # between inverse temperatures x[0] and x[1] for energy y.
        self.e_diff = lambda x, y: np.exp(-(x[1] - x[0]) * y)
        self.get_min = lambda some_dict: min(some_dict.keys(), key=(lambda n: some_dict[n]))
        self.get_max = lambda some_dict: max(some_dict.keys(), key=(lambda n: some_dict[n]))

    def resample_population(self, walker_pos, mean_val, stat_weight_ratio, Q, tau, choice='multinomial'):
        '''
        input: the walker population, mean historical population size,
        statistical weight ratios, normalisation Q, and per-walker taus.
        Randomly resample the population: Poisson per walker, or one
        multinomial draw over the normalised taus.
        output: (dict walker -> resample count, the raw counts).
        '''
        rv = dict()
        if choice == "poisson":
            # current number of replicas over the previous number of replicas
            tau = {k:(self.initial / mean_val * v) for k,v in tau.items()}
            # generate a list of poisson values based on the array
            rv = {k:np.random.poisson(v) for k,v in tau.items()}
            nums = [v for k,v in rv.items()]
        else:
            taus = np.array(list(tau.values()))
            normalized_taus = taus / np.sum(taus)
            nums = np.random.multinomial(self.initial, normalized_taus)
            rv = {k:nums[k] for k in range(len(walker_pos))} # this is not self.initial, this is something else.
        return rv, nums

    def partition_calc(self, walker_pos, t0, t1, mean_val):
        '''
        input: walker population, previous/current inverse temperatures, mean
        historical population size.
        Calculate the statistical weight of each walker between t0 and t1.
        output: partition function and statistical weight ratios for each
        walker, plus the normalisation Q and per-walker taus (swr / Q).
        '''
        stat_weight_ratio = dict()
        walk_energies = list()
        # 1 iteration
        for k,v in walker_pos.items():
            energy = walker_pos[k][1]
            #self.walker_pos[k][1] = energy # append the cost function the walker's position
            swr = self.e_diff([t0, t1], energy)
            # potential problem here in when we need to reinstantiate
            if k not in stat_weight_ratio.keys():
                stat_weight_ratio[k] = 0.0
            stat_weight_ratio[k] = swr
            walk_energies.append(swr)
        partition_function = np.sum([np.exp(-(t1) * i[1]) for i in list(walker_pos.values())])
        Q = np.sum(walk_energies) / mean_val
        tau = {k:stat_weight_ratio[k]/Q for k,v in walker_pos.items()}
        return stat_weight_ratio, partition_function, Q, tau

    def random_neighbour(self):
        """
        input: none (reads the mesh axes self.i1, self.i2)
        draw a uniformly random grid point from the entire space of x1 and x2
        output: [new_x, new_y]
        """
        new_x = np.random.choice(self.i1)
        new_y = np.random.choice(self.i2)
        return [new_x, new_y]

    def f(self):
        '''
        input: none (reads self.func and the mesh axes)
        Pre-computes the cost of every (x1, x2) grid point.
        output: dict mapping (x1, x2) -> [energy, walker_count (init 0)]
        '''
        exploration_space = [(i,j) for i in self.i1 for j in self.i2]
        # storing this huge dictionary in memory may not be a great idea..
        super_energies = {(i,j):[self.func(*[i,j]),0] for i,j in exploration_space}
        return super_energies

    def acceptance_probability(
        self,
        cost,
        new_cost,
        temperature,
    ):
        '''
        inputs: old cost, new cost, current temperature
        calculate probability of acceptance and return it using the metropolis algorithm
        output: probability (0 to 1; may exceed 1 for downhill moves)
        '''
        return np.exp(-(new_cost - cost) / temperature)

    def check_correct(self, energy):
        # Count how many walker energies fall within error_threshold of (or
        # below) the known global minimum, accumulated across the whole run.
        self.cumulative_correct += np.sum([1 if (i-self.correct_answer)<=self.error_threshold or i<self.correct_answer else 0 for i in energy])

    def max_key(self, walker_pos):
        '''
        inputs: walker population dict
        finds the minimum cost value in the dictionary of walkers
        outputs: key of the lowest (best) cost value in the entire dictionary of walkers
        (NOTE(review): name says "max" but this returns the minimum-cost key.)
        '''
        return min(walker_pos.keys(), key=(lambda k: walker_pos[k][1]))

    def calculate_covariance(self, resampled_B, resampled_B_prime):
        '''
        inputs: resampled_B, resampled_B_prime both containing the position and number of walkers at that position for a B and B prime
        produces a numpy covariance matrix containing the inter and intra covariance between the number of walkers resampled
        output: covariance matrix of (self.walkers_t1 x self.walkers_t1) dimension
        NOTE(review): unfinished — references undefined `resampled_walkers`
        (NameError if called) and currently just returns concatenated keys.
        '''
        #distance = lambda x, y: np.sqrt((x[0] - y[0]) ** 2 + (x[1]
        #        - y[1]) ** 2)
        # can be generalized to take in the parameters for the poisson distribution but better not to compute the cov for that at this time
        # do something here
        # calculate all the euclidean distances from the max keys
        euclid_distances = list(resampled_B.keys()) + list(resampled_walkers.keys())
        print(euclid_distances)
        #for k,v in resampled_B_prime.items():
        return euclid_distances

    def get_all_affinity(self, swr_affinity, resample_walker):
        # For each surviving walker, collect affinity entries from walkers that
        # were not resampled. (Commented-out variant kept for reference.)
        # affinity_keys = {i:[[k for m in v if m[0] == i][0] for k,v in swr_affinity.items() if i != k and len(v)>0] for i,j in resample_walker.items()}
        affinity_vals = {i:{k:[m for m in v if m[1] == i][0] for k,v in swr_affinity.items() if i != k and len(v)>0 and k not in list(resample_walker.keys())} for i,j in resample_walker.items()}
        return affinity_vals

    def respawn_walker(self, walker_pos, resampled_walker, tau):
        # Keep walkers with a positive resample count, then refill the
        # population back to size via assign_walkers.
        calculate_swr = lambda x,y: x/y
        # gets those with positive number of resampled walkers
        resample_walker = {k:1 for k,v in resampled_walker.items() if v > 0}
        # takes the walkers with positive resamplings back into the walker_pos
        new_walker_pos = {k:walker_pos[k][:] for k,v in resample_walker.items()}
        # this is the slow part!!
        new_walker_pos = self.assign_walkers(tau, new_walker_pos, walker_pos)
        return new_walker_pos

    def get_max_tau(self, taus : dict, new_walker_pos : dict, walker_pos : dict):
        # takes in a dictionary and returns the key of a walker which has not
        # been assigned yet (chosen by tau), plus a donor walker accepted by a
        # Monte-Carlo-style draw.
        # NOTE(review): despite the name, this uses self.get_min on the
        # filtered taus — confirm whether min or max is intended.
        assigned_keys = list(new_walker_pos.keys())
        filtered_tau = {k:v for k,v in taus.items() if k not in assigned_keys}
        max_tau = self.get_min(filtered_tau)
        # max_tau represents the walker we choose
        # generates a random number for each of the candidate donors
        accepted = 0
        while accepted == 0:
            # it's somehow stuck here
            # here we iterate through the dictionary and do a monte carlo style check if the current walker is better than the walkers to be chosen
            # if in some case that everything is rejected, we will run a while loop and try again.
            generate_rn = sorted([(i,np.random.uniform(0, 1)) for i,j in new_walker_pos.items()], reverse=True)
            for i,j in enumerate(generate_rn):
                if random.uniform(0,1) > j[1]:
                    accepted = j[0]
        return max_tau, accepted

    def unassigned(self, walker_pos, new_walker_pos):
        # Walkers in the old population not yet copied into the new one.
        return {k:v for k,v in walker_pos.items() if k not in list(new_walker_pos.keys())}

    def assign_walkers(self, taus, new_walker_pos, walker_pos):
        # Recursively fill every unassigned slot by copying an accepted donor's
        # state; terminates when all old keys are present in the new dict.
        not_assigned = self.unassigned(walker_pos, new_walker_pos)
        if len(not_assigned) > 0:
            max_tau, accepted = self.get_max_tau(taus, new_walker_pos, walker_pos)
            new_walker_pos[max_tau] = walker_pos[accepted][:]
            return self.assign_walkers(taus, new_walker_pos, walker_pos)
        return new_walker_pos

    def anneal(self):
        '''
        inputs: none
        function performs annealing and calls random start to kickstart the annealing process. iteratively
        calculates the new cost.
        output: energy landscape, average costs, cumulative correct count, free
        energies, best-cost history, best values, population sizes, temperature
        list, partition-function trace, resample counts, and walker configs over time
        '''
        T_list = [1]
        # metrics we want to keep track of
        populations = list()
        free_energy = dict()
        average_cost = list()
        best = list()
        walker_z = list()
        walker_pos, new_walker_pos = dict(), dict()
        # keeps track of the number of resamples assigned to each walker
        resample_time = [[1 for _ in range(self.initial)]]
        # keeps track of where walkers go over time
        config_time = list()
        #taus_over_time = {i:0 for i in range(self.walkers_t1)}
        # generate a state of random walkers with their costs, need to change such that we are generating tours instead of other stuff.
        # generate a state of random walkers with their costs
        walker_pos = {i:[[np.random.choice(self.i1),
                     np.random.choice(self.i2)]] for i in range(self.walkers_t1)}
        # NOTE(review): `confgs_time` (typo) is assigned but never used;
        # `config_time` starts empty instead — confirm intent.
        confgs_time = [[v[0] for k,v in walker_pos.items()]]
        # append the cost of each state
        for k,v in walker_pos.items():
            walker_pos[k].append(self.all_energies[tuple(v[0])][0])
            # increase the number of walkers at all_energies
            self.all_energies[tuple(v[0])][1] += 1
        # gets the key of the best (minimum-cost) walker
        max_key = self.max_key(walker_pos)
        best_cost = [[1, walker_pos[max_key][0], walker_pos[max_key][1]]]
        for temp_step in range(2, self.Tmax+2):
            # calculate the temperature from temp step 2 onward
            fraction = 1/temp_step
            # NOTE(review): nesting reconstructed from a flattened source — the
            # `else` is read as belonging to `temp_step > 2` so that T is
            # defined on the first pass; confirm against the original notebook.
            if temp_step > 2:
                if self.lams == 0:
                    T = self.multiplier * fraction if self.multiplier < 1 else fraction
            else:
                T = fraction
            T_list.append(int(np.round(1/T)))
            populations.append(self.walkers_t1)
            # (previous inverse T, current inverse T, mean population size)
            params = (T_list[-2], T_list[-1], np.mean(populations))
            stat_weight_ratio, partition_function, Q, tau = self.partition_calc(walker_pos, *params)
            new_params = [walker_pos] + [params[-1]] + [stat_weight_ratio, Q, tau, self.choice]
            # print("why")
            resampled_walker, num_resampled = self.resample_population(*new_params)
            # print("sigh")
            new_walker_pos = self.respawn_walker(walker_pos, resampled_walker, tau)
            # print("hi")
            resample_time.append(num_resampled)
            config_time.append([v for k,v in new_walker_pos.items()])
            # explore a new city configuration for each walker (the annealing step)
            for k,v in new_walker_pos.items():
                costs = round(new_walker_pos[k][1], 2)
                states = new_walker_pos[k][0]
                # histogram of visited (rounded) energies
                if costs not in self.energy_landscape.keys():
                    self.energy_landscape[costs] = 1
                else:
                    self.energy_landscape[costs] = self.energy_landscape[costs] + 1
                # walker_pos_check = walker_pos.copy()
                for step in range(self.exploration_space):
                    new_state = self.random_neighbour()
                    new_cost = self.func(*new_state)
                    # walker_pos_check[k][1] = new_cost
                    # new_stat_weight_ratio, new_partition_function, new_Q, new_tau = self.partition_calc(walker_pos_check, *params)
                    # walker_z.append([temp_step, step, k, new_partition_function])
                    if new_cost < costs or self.acceptance_probability(costs,
                            new_cost, T) >= random.uniform(0, 1):
                        states, costs = new_state, new_cost
                        # Lam's schedule: smooth the running acceptance rate.
                        # NOTE(review): `self.acceprate` is a typo — it creates
                        # a new attribute instead of updating self.acceptrate.
                        if self.lams == 1:
                            self.acceprate = 1 / 500 * (499 * self.acceptrate + 1)
                    else:
                        if self.lams == 1:
                            self.acceptrate = 1 / 500 * (499 * self.acceptrate)
                    # check conditions for Lam's target acceptance rate and
                    # nudge the temperature toward it.
                    # NOTE(review): reconstructed as running every exploration
                    # step regardless of self.lams — confirm whether this block
                    # should be guarded by `if self.lams == 1`.
                    if fraction < 0.15:
                        LamRate = 0.44 + 0.56 * 560 ** (-temp_step
                                / (self.Tmax * 0.15))
                    elif fraction < 0.65:
                        LamRate = 0.44
                    else:
                        LamRate = 0.44 * 440 ** ((-fraction - 0.65) / 0.35)
                    if LamRate < self.acceptrate:
                        T *= 0.99
                    else:
                        T *= 1 / 0.999
                new_walker_pos[k][0], new_walker_pos[k][1] = states, costs
                # reassign to best cost if better than the current best cost
                if costs < best_cost[-1][2]:
                    best_cost.append([temp_step, states, costs/self.initial]) # should i be putting the state or the walker? none of them are meaningful anyway...
            # print("died")
            # this is where we can calculate the covariance matrix between two different temperatures for different positions
            #resampled_B_prime = {tuple(walker_pos[k][0]):v for k,v in resampled_walker.items()}
            #covariance = self.calculate_covariance(resampled_B, resampled_B_prime)
            best.append(best_cost[-1][2])
            all_costs = np.array([walker_pos[k][1] for k,v in walker_pos.items()])
            average_cost.append(np.mean(all_costs))
            free_energy[temp_step] = math.log(Q) + math.log(self.walkers_t1)
            self.check_correct(all_costs/self.initial)
            # only after you are done calculating the covariance, then you reassign the previous to the current one
            walker_pos = new_walker_pos.copy()
            self.walkers_t1 = self.walkers_t2
            self.walkers_t2 = len(walker_pos)
            new_walker_pos = dict()
        return (
            self.energy_landscape,
            average_cost,
            self.cumulative_correct,
            free_energy,
            best_cost,
            best, # best refers to all the best costs at a given temperature
            populations,
            T_list,
            walker_z,
            resample_time,
            config_time
        )
# +
class TauAnneal:
'''
Pass the max steps you want to take to the annealer function
'''
def __init__(
self,
maxsteps=500,
explore=30,
walkers=10,
error_thres=10e-2,
multiplier=1, # by default the multipler is 1
acceptrate=0.5,
lams=0, # by default lams is turned off
choice='multinomial',
#accs = [500, 1, 1, 0.5, 0, round((Ncity.n)**0.5), 30]
):
'''
inputs: total number of steps to try, geometric multiplier for annealing schedule
Initialize parameters
output: none
'''
self.func, self.mesh = func, mesh
self.lams, self.acceptrate, self.multiplier = lams, acceptrate, multiplier
self.Tmax, self.exploration_space = maxsteps, explore
self.i1, self.i2 = self.mesh[0], self.mesh[1]
self.all_energies = super_energies.copy()
self.correct_answer, self.error_threshold, self.cumulative_correct = super_energies[min(self.all_energies.keys(), key=(lambda k: self.all_energies[k]))][0], error_thres, 0.0
self.choice = choice
self.walkers_t1, self.walkers_t2, self.initial = walkers, walkers, walkers
self.stat_weight_ratio = dict()
self.partition_function = 0
self.energy_landscape = dict()
# e_diff is a lambda function used to calculate the ratio of statistical weight
self.e_diff = lambda x, y: np.exp(-(x[1] - x[0]) * y)
def resample_population(self, walker_pos, mean_val, stat_weight_ratio, Q, tau, choice='multinomial'):
'''
input: a walker point
randomly resample the population N times for each replica, where N is a poisson random variable
output: either a list of samples or None.
'''
rv = dict()
if choice == "poisson":
# current number of replicas over the previous number of replicas
tau = {k:(self.initial / mean_val * v) for k,v in tau.items()}
# generate a list of poisson values based on the array
rv = {k:np.random.poisson(v) for k,v in tau.items()}
nums = [v for k,v in rv.items()]
else:
taus = np.array(list(tau.values()))
normalized_taus = taus / np.sum(taus)
nums = np.random.multinomial(self.initial, normalized_taus)
rv = {k:nums[k] for k in range(len(walker_pos))} # this is not self.initial, this is something else.
return rv, nums
def partition_calc(self, walker_pos, t0, t1, mean_val):
'''
input: None
calculate the statistical weight of a single walker, and also
output: parition function and statisticla weight ratios for each walker
'''
stat_weight_ratio = dict()
walk_energies = list()
# 1 iteration
for k,v in walker_pos.items():
energy = walker_pos[k][1]
#self.walker_pos[k][1] = energy # append the cost function the walker's position
swr = self.e_diff([t0, t1], energy)
# potential problem here in when we need to reinstantiate
if k not in stat_weight_ratio.keys():
stat_weight_ratio[k] = 0.0
stat_weight_ratio[k] = swr
walk_energies.append(swr)
partition_function = np.sum([np.exp(-(t1) * i[1]) for i in list(walker_pos.values())])
Q = np.sum(walk_energies) / mean_val
tau = {k:stat_weight_ratio[k]/Q for k,v in walker_pos.items()}
return stat_weight_ratio, partition_function, Q, tau
def random_neighbour(self):
"""
input: x (a 2D array)
draw from the entire array space of x1 and x2
output: (newx, newy)
"""
new_x = np.random.choice(self.i1)
new_y = np.random.choice(self.i2)
return [new_x, new_y]
def f(self):
'''
input: tour (list)
Function that evaluates the cost of a given x1, x2 (euclidean distance)
output: single cost
'''
exploration_space = [(i,j) for i in self.i1 for j in self.i2]
# storing this huge dictionary in memory may not be a great idea..
super_energies = {(i,j):[self.func(*[i,j]),0] for i,j in exploration_space}
return super_energies
def acceptance_probability(
self,
cost,
new_cost,
temperature,
):
'''
inputs: old cost, new cost, current temperature
calculate probability of acceptance and return it using the metropolis algorithm
output: probability (0 to 1)
'''
return np.exp(-(new_cost - cost) / temperature)
def check_correct(self, energy):
self.cumulative_correct += np.sum([1 if (i-self.correct_answer)<=self.error_threshold or i<self.correct_answer else 0 for i in energy])
def max_key(self, walker_pos):
'''
inputs: none
finds the minimum value in the dictionary of walkers
outputs: key of the lowest (best) cost value in the entire dictionary of walkers
'''
return min(walker_pos.keys(), key=(lambda k: walker_pos[k][1]))
def calculate_covariance(self, resampled_B, resampled_B_prime):
'''
inputs: resampled_B, resampled_B_prime both containing the position and number of walkers at that position for a B and B prime
produces a numpy covariance matrix containing the inter and intra covariance between the number of walkers resampled
output: covariance matrix of (self.walkers_t1 x self.walkers_t1) dimension
'''
#distance = lambda x, y: np.sqrt((x[0] - y[0]) ** 2 + (x[1]
# - y[1]) ** 2)
# can be generalized to take in the parameters for the poisson distribution but better not to compute the cov for that at this time
# do something here
# calculate all the euclidean distances from the max keys
euclid_distances = list(resampled_B.keys()) + list(resampled_walkers.keys())
print(euclid_distances)
#for k,v in resampled_B_prime.items():
return euclid_distances
def get_all_affinity(self, swr_affinity, resample_walker):
# affinity_keys = {i:[[k for m in v if m[0] == i][0] for k,v in swr_affinity.items() if i != k and len(v)>0] for i,j in resample_walker.items()}
affinity_vals = {i:{k:[m for m in v if m[1] == i][0] for k,v in swr_affinity.items() if i != k and len(v)>0 and k not in list(resample_walker.keys())} for i,j in resample_walker.items()}
return affinity_vals
    def respawn_walker(self, walker_pos, resampled_walker, tau):
        '''
        inputs: walker_pos (key -> [position, cost]), resampled_walker (key -> resample
                count), tau (key -> statistical weight)
        Rebuilds the population: walkers with a positive resample count survive, and
        each extra copy they earned is taken by another walker chosen by the largest
        tau-ratio affinity toward the survivor; the chosen walker copies the
        survivor's position and cost.
        output: the new walker dictionary (same shape as walker_pos)
        '''
        # ratio of one walker's tau to a survivor's tau
        calculate_swr = lambda x,y: x/y
        # gets those with positive number of resampled walkers
        resample_walker = {k:1 for k,v in resampled_walker.items() if v > 0}
        # takes the walkers with positive resamplings back into the walker_pos
        new_walker_pos = {k:walker_pos[k][:] for k,v in resample_walker.items()}
        deleted_items = list()
        # affinity of every walker toward each survivor: (tau_k / tau_survivor, survivor)
        swr_affinity = {k:[(calculate_swr(v,tau[i]), i) for i,j in resample_walker.items() if k != i] for k,v in tau.items()}
        resample_affinity_vals = self.get_all_affinity(swr_affinity, resample_walker)
        for k,v in resample_affinity_vals.items():
            # affinities of walkers not already reassigned in an earlier iteration
            value = [n for m,n in v.items() if m not in deleted_items]
            # the top (resampled count - already assigned) affinities win k's extra copies
            sorted_dict = sorted(value, reverse=True)[0:(resampled_walker[k] - resample_walker[k])]
            res_items = [m for m,n in v.items() if n in sorted_dict]
            # NOTE(review): rejects/rejected are computed but never used — looks like leftover
            # debugging; confirm before deleting.
            rejects = len(sorted_dict) - resample_walker[k]
            rejected = sorted_dict[rejects:]
            for i in res_items:
                # walker i respawns with a copy of k's [position, cost]
                new_walker_pos[i] = walker_pos[k][:]
                resample_walker[k] += 1
                deleted_items.append(i)
        return new_walker_pos
    def anneal(self):
        '''
        inputs: none
        function performs annealing and calls random start to kickstart the annealing process. iteratively
        calculates the new cost.
        output: final cost, final state (list of x1 and x2), all costs (list of costs at every timestep)
        '''
        # inverse-temperature history; seeded with 1 so params below can read T_list[-2]
        T_list = [1]
        # metrics we want to keep track of
        populations = list()
        free_energy = dict()
        average_cost = list()
        best = list()
        walker_z = list()
        walker_pos, new_walker_pos = dict(), dict()
        # keeps track of the number of resamples assigned to each walker
        resample_time = [[1 for _ in range(self.initial)]]
        # keeps track of where walkers go ove rtime
        config_time = list()
        #taus_over_time = {i:0 for i in range(self.walkers_t1)}
        # generate a state of random walkers with their costs, need to change such that we are generating tours instead of other stuff.
        # generate a state of random walkers with their costs
        walker_pos = {i:[[np.random.choice(self.i1),
                          np.random.choice(self.i2)]] for i in range(self.walkers_t1)}
        # NOTE(review): 'confgs_time' is written once and never read again — looks like a
        # typo for config_time; confirm.
        confgs_time = [[v[0] for k,v in walker_pos.items()]]
        # append the cost of each state
        for k,v in walker_pos.items():
            walker_pos[k].append(self.all_energies[tuple(v[0])][0])
            # increase the number of walkers at all_energies
            self.all_energies[tuple(v[0])][1] += 1
        # gets the maximum value of the key
        max_key = self.max_key(walker_pos)
        best_cost = [[1, walker_pos[max_key][0], walker_pos[max_key][1]]]
        for temp_step in range(2, self.Tmax+2):
            # calculate the temperature from temp step 2 onward
            fraction = 1/temp_step
            # NOTE(review): T is only assigned when temp_step > 2, but it is read below on
            # the very first iteration (temp_step == 2) — suspected NameError; confirm how
            # T is meant to be initialized.
            if temp_step > 2:
                if self.lams == 0:
                    T = self.multiplier * fraction if self.multiplier < 1 else fraction
                else:
                    T = fraction
            T_list.append(int(np.round(1/T)))
            populations.append(self.walkers_t1)
            # params: (previous inverse temperature, current inverse temperature, mean population)
            params = (T_list[-2], T_list[-1], np.mean(populations))
            stat_weight_ratio, partition_function, Q, tau = self.partition_calc(walker_pos, *params)
            new_params = [walker_pos] + [params[-1]] + [stat_weight_ratio, Q, tau, self.choice]
            resampled_walker, num_resampled = self.resample_population(*new_params)
            new_walker_pos = self.respawn_walker(walker_pos, resampled_walker, tau)
            resample_time.append(num_resampled)
            config_time.append([v for k,v in new_walker_pos.items()])
            # explore a new city configuration for each walker (the annealing step)
            for k,v in new_walker_pos.items():
                costs = round(new_walker_pos[k][1], 2)
                states = new_walker_pos[k][0]
                # histogram of visited (rounded) energies
                if costs not in self.energy_landscape.keys():
                    self.energy_landscape[costs] = 1
                else:
                    self.energy_landscape[costs] = self.energy_landscape[costs] + 1
                # walker_pos_check = walker_pos.copy()
                for step in range(self.exploration_space):
                    new_state = self.random_neighbour()
                    new_cost = self.func(*new_state)
                    # walker_pos_check[k][1] = new_cost
                    # new_stat_weight_ratio, new_partition_function, new_Q, new_tau = self.partition_calc(walker_pos_check, *params)
                    # walker_z.append([temp_step, step, k, new_partition_function])
                    # metropolis accept: always take downhill moves, uphill with Boltzmann probability
                    if new_cost < costs or self.acceptance_probability(costs,
                            new_cost, T) >= random.uniform(0, 1):
                        states, costs = new_state, new_cost
                        if self.lams == 1:
                            # NOTE(review): 'acceprate' looks like a typo for 'acceptrate' —
                            # this update lands on a dead attribute and is lost; confirm.
                            self.acceprate = 1 / 500 * (499 * self.acceptrate + 1)
                    else:
                        if self.lams == 1:
                            self.acceptrate = 1 / 500 * (499 * self.acceptrate)
                    # check conditions
                    # presumably Lam's adaptive schedule target acceptance rates — TODO confirm constants
                    if fraction < 0.15:
                        LamRate = 0.44 + 0.56 * 560 ** (-temp_step
                                / (self.Tmax * 0.15))
                    elif fraction < 0.65:
                        LamRate = 0.44
                    else:
                        LamRate = 0.44 * 440 ** ((-fraction - 0.65) / 0.35)
                    # nudge the temperature toward the target acceptance rate
                    if LamRate < self.acceptrate:
                        T *= 0.99
                    else:
                        T *= 1 / 0.999
                new_walker_pos[k][0], new_walker_pos[k][1] = states, costs
                # reassign to best cost if greater than the current best cost
                # NOTE(review): appended entries store costs/self.initial while the seed entry
                # stores the raw cost — the comparison mixes two scales; confirm intended.
                if costs < best_cost[-1][2]:
                    best_cost.append([temp_step, states, costs/self.initial]) # should i be putting the state or the walker? none of them are meaningful anyway...
            # this is where we can calculate the covariance matrix between two different temperatuers for different positions
            #resampled_B_prime = {tuple(walker_pos[k][0]):v for k,v in resampled_walker.items()}
            #covariance = self.calculate_covariance(resampled_B, resampled_B_prime)
            best.append(best_cost[-1][2])
            all_costs = np.array([walker_pos[k][1] for k,v in walker_pos.items()])
            average_cost.append(np.mean(all_costs))
            # free-energy estimate at this temperature step
            free_energy[temp_step] = math.log(Q) + math.log(self.walkers_t1)
            self.check_correct(all_costs/self.initial)
            # only after you are done calculating the covariance, then you reassign the previous to the currxent one
            walker_pos = new_walker_pos.copy()
            self.walkers_t1 = self.walkers_t2
            self.walkers_t2 = len(walker_pos)
            new_walker_pos = dict()
        return (
            self.energy_landscape,
            average_cost,
            self.cumulative_correct,
            free_energy,
            best_cost,
            best, # best refers to all the best costs at a given temperature
            populations,
            T_list,
            walker_z,
            resample_time,
            config_time
        )
# +
taus = {0: 4.979481459771921e-19, 1: 8.906010064543437e-104, 2: 2.9010633729504283e-50, 3: 2.909187059354772e-56, 4: 7.945645518559665e-09, 5: 8.660848262223333e-66, 6: 3.5271852989438855e-69, 7: 3.744975658012282e-22, 8: 1.3732738459480639e-65, 9: 2.316272193606615e-19, 10: 1.0649791359042534e-53, 11: 3.495811224642755e-36, 12: 8.122153210145312e-42, 13: 8.396737416695002e-86, 14: 7.080194234845007e-99, 15: 1.9013658268336554e-37, 16: 4.1363855485409126e-51, 17: 6.794105686579909e-83, 18: 2.8852876373243087e-49, 19: 4.4253055046909023e-23, 20: 5.517441600479428e-51, 21: 2.4340543560469156e-27, 22: 2.7696764341527948e-77, 23: 9.376072147534471e-110, 24: 1.513499538709749e-76, 25: 6.435449012887977e-88, 26: 4.1223880823846603e-81, 27: 1.2878218181936917e-46, 28: 5.465955887553644e-50, 29: 3.765333408982003e-79, 30: 6.736207538673744e-08, 31: 2.673402786589717e-12, 32: 5.204636539175555e-19, 33: 7.992444259775237e-46, 34: 1.3521088886371985e-150, 35: 1.5097206308787477e-12, 36: 3.1627657432689353e-65, 37: 4.733640167815339e-08, 38: 5.855853620694806e-22, 39: 7.663579852636736e-69, 40: 8.053214052957187e-92, 41: 2.994698183457118e-37, 42: 4.36152257428427e-08, 43: 2.7924061726193672e-18, 44: 86.21101390558884, 45: 3.003387459898819e-29, 46: 3.869857232885313e-42, 47: 9.084419036572294e-69, 48: 9.698573039984019e-114, 49: 6.295915663894739e-48, 50: 3.39696973093878e-39, 51: 2.0585228416143442e-25, 52: 1.8006256694664823e-103, 53: 3.4316691031387315e-84, 54: 5.3355204366997785e-48, 55: 3.704726716015464e-65, 56: 3.2420644244489503e-41, 57: 1.217892073805362e-84, 58: 4.532257543205371e-51, 59: 1.6170087778778006e-11, 60: 2.2296603899319717e-47, 61: 2.7345506899612595e-109, 62: 4.950018408047005e-20, 63: 4.987717022938297e-79, 64: 1.749705897792825e-10, 65: 1.7826231702865334e-20, 66: 5.557289016266856e-77, 67: 3.557499894258665e-83, 68: 5.899915018195347e-80, 69: 1.2017379381904272e-49, 70: 1.227152203069028e-71, 71: 3.299364156602039e-05, 72: 7.227891434981451e-21, 73: 
3.8934721269661133e-26, 74: 1.845361652632113e-24, 75: 8.669908722234362e-14, 76: 4.26233589882733e-109, 77: 0.00010348102976916315, 78: 4.1826746358028446e-61, 79: 5.9812632889018665e-46, 80: 1.9160911989439013e-59, 81: 1.210448675833256e-75, 82: 1.0615992665130984e-25, 83: 7.730073964061945e-09, 84: 3.266748079028032e-57, 85: 1.7421892301642964e-55, 86: 3.687446574739879e-36, 87: 0.05480419949587991, 88: 2.748641311769773e-16, 89: 3.4767167784401154e-08, 90: 2.795957006635664e-77, 91: 9.35893598284094e-56, 92: 1.1746636974837712e-56, 93: 9.108655418895802, 94: 4.633748104190869e-43, 95: 3.542168916575115e-62, 96: 1.857356950810916e-67, 97: 1.1773123316102888e-42, 98: 4.625389792396117, 99: 7.058873800468946e-62}
walker_pos = {0: [[6.756756756756758, -2.992992992992993], 49.822956574294814], 1: [[-8.578578578578579, 9.6996996996997], 244.96128890287554], 2: [[0.7707707707707705, -10.0], 121.7433428680723], 3: [[-7.977977977977978, -4.374374374374375], 135.55605709503757], 4: [[2.9929929929929937, -2.6526526526526526], 26.329807360334858], 5: [[-6.276276276276276, -4.774774774774775], 157.49096903655112], 6: [[7.837837837837839, 9.61961961961962], 165.29703679792675], 7: [[-4.014014014014014, -3.353353353353353], 57.015622497263536], 8: [[6.896896896896898, -6.876876876876877], 157.02999905539443], 9: [[6.236236236236238, 2.1121121121121114], 50.588323255097315], 10: [[-7.4374374374374375, 2.5525525525525516], 129.65322028889454], 11: [[9.6996996996997, 3.9739739739739743], 89.32066345745747], 12: [[-0.39039039039039025, 8.198198198198199], 102.29314418154482], 13: [[9.1991991991992, 7.177177177177178], 203.57364033834034], 14: [[-9.91991991991992, 7.7177177177177185], 233.67778843326045], 15: [[6.756756756756758, -5.075075075075075], 92.23224152511182], 16: [[-8.038038038038039, -1.7317317317317311], 123.69118296058176], 17: [[9.15915915915916, 9.91991991991992], 196.8776728614979], 18: [[-9.53953953953954, 2.3123123123123115], 119.4462105292184], 19: [[2.4124124124124116, 8.058058058058059], 59.15128339065762], 20: [[-8.8988988988989, -1.2912912912912908], 123.40309103649993], 21: [[-3.9739739739739743, -2.8728728728728727], 68.95940475301951], 22: [[-4.634634634634635, -7.177177177177177], 183.95948722641972], 23: [[-5.395395395395395, -9.27927927927928], 258.72536487010314], 24: [[-6.856856856856857, -7.4174174174174174], 182.26120809133104], 25: [[-9.45945945945946, -6.376376376376376], 208.44483213666902], 26: [[-7.717717717717718, 5.8358358358358355], 192.77212547391042], 27: [[-5.595595595595595, 7.337337337337338], 113.3451275680049], 28: [[0.6706706706706704, -10.0], 121.10988120104248], 29: [[6.376376376376378, -8.318318318318319], 188.25755150283538], 30: 
[[4.114114114114114, 2.8728728728728736], 24.19234922559317], 31: [[4.994994994994995, -0.1701701701701701], 34.326834571514], 32: [[0.5705705705705704, -5.995995995995996], 49.77873246490645], 33: [[9.57957957957958, 5.655655655655655], 111.51958321834422], 34: [[-9.93993993993994, 9.15915915915916], 352.7652640042757], 35: [[5.175175175175175, -3.013013013013013], 34.89826206336113], 36: [[-2.5125125125125125, 9.23923923923924], 156.19574973307684], 37: [[-3.213213213213213, 3.0330330330330337], 24.545151814407298], 38: [[-3.8338338338338342, 4.454454454454455], 56.56859583446784], 39: [[8.63863863863864, 6.1761761761761775], 164.52105776680082], 40: [[-8.438438438438439, 9.31931931931932], 217.43092285094775], 41: [[-7.4574574574574575, 4.334334334334335], 91.77797055623972], 42: [[0.4904904904904903, 5.555555555555555], 24.627025101746536], 43: [[2.5525525525525516, 6.916916916916918], 48.09879359075367], 44: [[-0.2702702702702702, 1.9519519519519513], 3.222367627445266], 45: [[-3.9939939939939944, -2.8528528528528527], 73.3543924603531], 46: [[-7.697697697697698, 4.634634634634635], 103.0345218585954], 47: [[9.8998998998999, 4.2142142142142145], 164.35097623280808], 48: [[-9.61961961961962, 7.497497497497498], 267.9018874043929], 49: [[9.93993993993994, -4.994994994994995], 116.36334891635644], 50: [[4.674674674674675, -8.018018018018019], 96.25710041663571], 51: [[5.315315315315315, 6.0960960960960975], 64.52180423492217], 52: [[-5.7557557557557555, -8.238238238238239], 244.25729594821277], 53: [[-7.157157157157157, -8.93893893893894], 199.8632666181711], 54: [[7.897897897897899, -4.2742742742742745], 116.52886360080129], 55: [[-5.7757757757757755, -6.116116116116116], 156.0375871179243], 56: [[-4.934934934934935, -4.494494494494495], 100.90894408629659], 57: [[7.217217217217218, 9.85985985985986], 200.89919182397276], 58: [[1.2712712712712708, -8.598598598598599], 123.59978514190936], 59: [[2.7127127127127117, 5.135135135135135], 32.52702358233779], 60: 
[[4.1741741741741745, -8.458458458458459], 115.09881565683536], 61: [[10.0, -6.856856856856857], 257.6549735640019], 62: [[7.457457457457458, -1.3313313313313309], 52.131476132989235], 63: [[-10.0, 1.9119119119119112], 187.9764096203686], 64: [[1.8118118118118112, -3.9339339339339343], 30.14556878301191], 65: [[7.837837837837839, 0.6306306306306304], 53.152781457701266], 66: [[-8.458458458458459, 8.338338338338339], 183.26310732666997], 67: [[-8.618618618618619, 8.398398398398399], 197.5246702657268], 68: [[-7.957957957957958, 8.418418418418419], 190.1110350608908], 69: [[-2.7127127127127126, -6.176176176176176], 120.32206633431693], 70: [[8.67867867867868, -6.536536536536536], 170.95801096813975], 71: [[2.6726726726726717, 4.454454454454455], 17.99836126335988], 72: [[-5.555555555555555, -1.3513513513513509], 54.0555051688006], 73: [[0.41041041041041026, -5.895895895895896], 66.1870766491785], 74: [[-6.796796796796797, 1.0110110110110107], 62.32853252436616], 75: [[-1.9719719719719713, -3.473473473473473], 37.75549860765964], 76: [[-8.358358358358359, -6.756756756756756], 257.21112336177066], 77: [[-1.3313313313313309, -1.551551551551551], 16.855287817791854], 78: [[7.397397397397398, -6.116116116116116], 146.70590533418124], 79: [[-5.7957957957957955, 5.895895895895896], 111.80944804822884], 80: [[5.995995995995996, 8.178178178178179], 142.88139877809846], 81: [[-5.555555555555555, -7.897897897897898], 180.18205644568212], 82: [[8.87887887887888, 0.3503503503503502], 65.18401638022077], 83: [[-0.4504504504504503, 5.535535535535535], 26.357312974429554], 84: [[6.936936936936938, 8.118118118118119], 137.74272085029068], 85: [[6.636636636636638, -5.195195195195195], 133.7662031825131], 86: [[-5.215215215215215, -2.1321321321321323], 89.26729468291018], 87: [[-0.41041041041041026, -1.451451451451451], 10.583154023307221], 88: [[-3.093093093093093, 5.975975975975976], 43.50942033524502], 89: [[4.494494494494495, 1.7917917917917912], 24.853757918449887], 90: 
[[-6.956956956956957, 6.796796796796798], 183.9500432815073], 91: [[6.636636636636638, -8.97897897897898], 134.38759916916507], 92: [[-9.1991991991992, -3.9739739739739743], 136.46294888442455], 93: [[2.7327327327327318, 1.2312312312312308], 5.469940462074692], 94: [[5.035035035035035, 9.47947947947948], 105.15695850087127], 95: [[6.0560560560560575, -9.97997997997998], 149.17470210628332], 96: [[-8.118118118118119, -3.433433433433433], 161.3332123161127], 97: [[-4.734734734734735, 7.577577577577578], 104.22450531869791], 98: [[-0.1501501501501501, 2.5925925925925917], 6.147604921629306], 99: [[-7.5575575575575575, 2.5125125125125116], 148.48515581367664]}
resampled_walker = {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 0, 9: 0, 10: 0, 11: 0, 12: 0, 13: 0, 14: 0, 15: 0, 16: 0, 17: 0, 18: 0, 19: 0, 20: 0, 21: 0, 22: 0, 23: 0, 24: 0, 25: 0, 26: 0, 27: 0, 28: 0, 29: 0, 30: 0, 31: 0, 32: 0, 33: 0, 34: 0, 35: 0, 36: 0, 37: 0, 38: 0, 39: 0, 40: 0, 41: 0, 42: 0, 43: 0, 44: 85, 45: 0, 46: 0, 47: 0, 48: 0, 49: 0, 50: 0, 51: 0, 52: 0, 53: 0, 54: 0, 55: 0, 56: 0, 57: 0, 58: 0, 59: 0, 60: 0, 61: 0, 62: 0, 63: 0, 64: 0, 65: 0, 66: 0, 67: 0, 68: 0, 69: 0, 70: 0, 71: 0, 72: 0, 73: 0, 74: 0, 75: 0, 76: 0, 77: 0, 78: 0, 79: 0, 80: 0, 81: 0, 82: 0, 83: 0, 84: 0, 85: 0, 86: 0, 87: 1, 88: 0, 89: 0, 90: 0, 91: 0, 92: 0, 93: 8, 94: 0, 95: 0, 96: 0, 97: 0, 98: 6, 99: 0}
# Prototype cell: exercises the respawn/affinity logic on the fixture dicts
# (taus, walker_pos, resampled_walker) defined in the data lines above.
print(walker_pos.keys())
# walker_pos = {1:[[5,3], 2], 2:[[3,4], 1], 3:[[1,1], 6], 4:[[2,4], 7]}
# need to append to time config after you are done with the steps below!
time_config = {k:v for k,v in walker_pos.items()}
# ratio of one walker's tau to another's
calculate_swr = lambda x,y: x/y
# resampled_walker = {1:2, 2:2, 3:-4, 4:0}
# gets those with positive number of resampled walkers
resample_walker = {k:1 for k,v in resampled_walker.items() if v > 0}
print(resample_walker)
# takes the walkers with positive resamplings back into the walker_pos
new_walker_pos = {k:walker_pos[k][:] for k,v in resample_walker.items()}
# taus = {1:0.4, 2:0.3, 3:0.154, 4:0.146}
# rule 1: don't even need to consider if it's the same walker
# affinity of every walker toward each surviving walker: (tau_k / tau_survivor, survivor)
swr_affinity = {k:[(calculate_swr(v,taus[i]), i) for i,j in resample_walker.items() if k != i] for k,v in taus.items()}
def get_all_affinity(swr_affinity):
    # For each surviving walker, pick from every other non-surviving walker the affinity
    # tuple that targets it. Reads resample_walker from the enclosing cell scope.
    # affinity_keys = {i:[[k for m in v if m[0] == i][0] for k,v in swr_affinity.items() if i != k and len(v)>0] for i,j in resample_walker.items()}
    affinity_vals = {i:{k:[m for m in v if m[1] == i][0] for k,v in swr_affinity.items() if i != k and len(v)>0 and k not in list(resample_walker.keys())} for i,j in resample_walker.items()}
    return affinity_vals
resample_affinity_vals = get_all_affinity(swr_affinity)
# rejected = {k:[] for k,v in swr_affinity.items() if len(v) > 0}
deleted_items = list()
for k,v in resample_affinity_vals.items():
    # rejected = {i:list() for i,j in swr_affinity.items() if len(j) > 0}
    # affinities of walkers that have not already been reassigned
    value = [n for m,n in v.items() if m not in deleted_items]
    # print(value)
    # key = [m for m,n in v.items() if m not in deleted_items]
    # print(key)
    # the strongest (resampled count - already assigned) affinities win k's extra copies
    sorted_dict = sorted(value, reverse=True)[0:(resampled_walker[k] - resample_walker[k])]
    res_items = [m for m,n in v.items() if n in sorted_dict]
    print(str(res_items) + '\n')
    rejects = len(sorted_dict) - resample_walker[k]
    rejected = sorted_dict[rejects:]
    for i in res_items:
        # walker i respawns with a copy of k's [position, cost]
        new_walker_pos[i] = walker_pos[k][:]
        resample_walker[k] += 1
        deleted_items.append(i)
    # for k,v in resample_affinity_vals.items():
    #     del resample_affinity_vals[k][i]
print(deleted_items)
print(new_walker_pos)
print(len(new_walker_pos))
# +
taus = {0: 4.979481459771921e-19, 1: 8.906010064543437e-104, 2: 2.9010633729504283e-50, 3: 2.909187059354772e-56, 4: 7.945645518559665e-09, 5: 8.660848262223333e-66, 6: 3.5271852989438855e-69, 7: 3.744975658012282e-22, 8: 1.3732738459480639e-65, 9: 2.316272193606615e-19, 10: 1.0649791359042534e-53, 11: 3.495811224642755e-36, 12: 8.122153210145312e-42, 13: 8.396737416695002e-86, 14: 7.080194234845007e-99, 15: 1.9013658268336554e-37, 16: 4.1363855485409126e-51, 17: 6.794105686579909e-83, 18: 2.8852876373243087e-49, 19: 4.4253055046909023e-23, 20: 5.517441600479428e-51, 21: 2.4340543560469156e-27, 22: 2.7696764341527948e-77, 23: 9.376072147534471e-110, 24: 1.513499538709749e-76, 25: 6.435449012887977e-88, 26: 4.1223880823846603e-81, 27: 1.2878218181936917e-46, 28: 5.465955887553644e-50, 29: 3.765333408982003e-79, 30: 6.736207538673744e-08, 31: 2.673402786589717e-12, 32: 5.204636539175555e-19, 33: 7.992444259775237e-46, 34: 1.3521088886371985e-150, 35: 1.5097206308787477e-12, 36: 3.1627657432689353e-65, 37: 4.733640167815339e-08, 38: 5.855853620694806e-22, 39: 7.663579852636736e-69, 40: 8.053214052957187e-92, 41: 2.994698183457118e-37, 42: 4.36152257428427e-08, 43: 2.7924061726193672e-18, 44: 86.21101390558884, 45: 3.003387459898819e-29, 46: 3.869857232885313e-42, 47: 9.084419036572294e-69, 48: 9.698573039984019e-114, 49: 6.295915663894739e-48, 50: 3.39696973093878e-39, 51: 2.0585228416143442e-25, 52: 1.8006256694664823e-103, 53: 3.4316691031387315e-84, 54: 5.3355204366997785e-48, 55: 3.704726716015464e-65, 56: 3.2420644244489503e-41, 57: 1.217892073805362e-84, 58: 4.532257543205371e-51, 59: 1.6170087778778006e-11, 60: 2.2296603899319717e-47, 61: 2.7345506899612595e-109, 62: 4.950018408047005e-20, 63: 4.987717022938297e-79, 64: 1.749705897792825e-10, 65: 1.7826231702865334e-20, 66: 5.557289016266856e-77, 67: 3.557499894258665e-83, 68: 5.899915018195347e-80, 69: 1.2017379381904272e-49, 70: 1.227152203069028e-71, 71: 3.299364156602039e-05, 72: 7.227891434981451e-21, 73: 
3.8934721269661133e-26, 74: 1.845361652632113e-24, 75: 8.669908722234362e-14, 76: 4.26233589882733e-109, 77: 0.00010348102976916315, 78: 4.1826746358028446e-61, 79: 5.9812632889018665e-46, 80: 1.9160911989439013e-59, 81: 1.210448675833256e-75, 82: 1.0615992665130984e-25, 83: 7.730073964061945e-09, 84: 3.266748079028032e-57, 85: 1.7421892301642964e-55, 86: 3.687446574739879e-36, 87: 0.05480419949587991, 88: 2.748641311769773e-16, 89: 3.4767167784401154e-08, 90: 2.795957006635664e-77, 91: 9.35893598284094e-56, 92: 1.1746636974837712e-56, 93: 9.108655418895802, 94: 4.633748104190869e-43, 95: 3.542168916575115e-62, 96: 1.857356950810916e-67, 97: 1.1773123316102888e-42, 98: 4.625389792396117, 99: 7.058873800468946e-62}
walker_pos = {0: [[6.756756756756758, -2.992992992992993], 49.822956574294814], 1: [[-8.578578578578579, 9.6996996996997], 244.96128890287554], 2: [[0.7707707707707705, -10.0], 121.7433428680723], 3: [[-7.977977977977978, -4.374374374374375], 135.55605709503757], 4: [[2.9929929929929937, -2.6526526526526526], 26.329807360334858], 5: [[-6.276276276276276, -4.774774774774775], 157.49096903655112], 6: [[7.837837837837839, 9.61961961961962], 165.29703679792675], 7: [[-4.014014014014014, -3.353353353353353], 57.015622497263536], 8: [[6.896896896896898, -6.876876876876877], 157.02999905539443], 9: [[6.236236236236238, 2.1121121121121114], 50.588323255097315], 10: [[-7.4374374374374375, 2.5525525525525516], 129.65322028889454], 11: [[9.6996996996997, 3.9739739739739743], 89.32066345745747], 12: [[-0.39039039039039025, 8.198198198198199], 102.29314418154482], 13: [[9.1991991991992, 7.177177177177178], 203.57364033834034], 14: [[-9.91991991991992, 7.7177177177177185], 233.67778843326045], 15: [[6.756756756756758, -5.075075075075075], 92.23224152511182], 16: [[-8.038038038038039, -1.7317317317317311], 123.69118296058176], 17: [[9.15915915915916, 9.91991991991992], 196.8776728614979], 18: [[-9.53953953953954, 2.3123123123123115], 119.4462105292184], 19: [[2.4124124124124116, 8.058058058058059], 59.15128339065762], 20: [[-8.8988988988989, -1.2912912912912908], 123.40309103649993], 21: [[-3.9739739739739743, -2.8728728728728727], 68.95940475301951], 22: [[-4.634634634634635, -7.177177177177177], 183.95948722641972], 23: [[-5.395395395395395, -9.27927927927928], 258.72536487010314], 24: [[-6.856856856856857, -7.4174174174174174], 182.26120809133104], 25: [[-9.45945945945946, -6.376376376376376], 208.44483213666902], 26: [[-7.717717717717718, 5.8358358358358355], 192.77212547391042], 27: [[-5.595595595595595, 7.337337337337338], 113.3451275680049], 28: [[0.6706706706706704, -10.0], 121.10988120104248], 29: [[6.376376376376378, -8.318318318318319], 188.25755150283538], 30: 
[[4.114114114114114, 2.8728728728728736], 24.19234922559317], 31: [[4.994994994994995, -0.1701701701701701], 34.326834571514], 32: [[0.5705705705705704, -5.995995995995996], 49.77873246490645], 33: [[9.57957957957958, 5.655655655655655], 111.51958321834422], 34: [[-9.93993993993994, 9.15915915915916], 352.7652640042757], 35: [[5.175175175175175, -3.013013013013013], 34.89826206336113], 36: [[-2.5125125125125125, 9.23923923923924], 156.19574973307684], 37: [[-3.213213213213213, 3.0330330330330337], 24.545151814407298], 38: [[-3.8338338338338342, 4.454454454454455], 56.56859583446784], 39: [[8.63863863863864, 6.1761761761761775], 164.52105776680082], 40: [[-8.438438438438439, 9.31931931931932], 217.43092285094775], 41: [[-7.4574574574574575, 4.334334334334335], 91.77797055623972], 42: [[0.4904904904904903, 5.555555555555555], 24.627025101746536], 43: [[2.5525525525525516, 6.916916916916918], 48.09879359075367], 44: [[-0.2702702702702702, 1.9519519519519513], 3.222367627445266], 45: [[-3.9939939939939944, -2.8528528528528527], 73.3543924603531], 46: [[-7.697697697697698, 4.634634634634635], 103.0345218585954], 47: [[9.8998998998999, 4.2142142142142145], 164.35097623280808], 48: [[-9.61961961961962, 7.497497497497498], 267.9018874043929], 49: [[9.93993993993994, -4.994994994994995], 116.36334891635644], 50: [[4.674674674674675, -8.018018018018019], 96.25710041663571], 51: [[5.315315315315315, 6.0960960960960975], 64.52180423492217], 52: [[-5.7557557557557555, -8.238238238238239], 244.25729594821277], 53: [[-7.157157157157157, -8.93893893893894], 199.8632666181711], 54: [[7.897897897897899, -4.2742742742742745], 116.52886360080129], 55: [[-5.7757757757757755, -6.116116116116116], 156.0375871179243], 56: [[-4.934934934934935, -4.494494494494495], 100.90894408629659], 57: [[7.217217217217218, 9.85985985985986], 200.89919182397276], 58: [[1.2712712712712708, -8.598598598598599], 123.59978514190936], 59: [[2.7127127127127117, 5.135135135135135], 32.52702358233779], 60: 
[[4.1741741741741745, -8.458458458458459], 115.09881565683536], 61: [[10.0, -6.856856856856857], 257.6549735640019], 62: [[7.457457457457458, -1.3313313313313309], 52.131476132989235], 63: [[-10.0, 1.9119119119119112], 187.9764096203686], 64: [[1.8118118118118112, -3.9339339339339343], 30.14556878301191], 65: [[7.837837837837839, 0.6306306306306304], 53.152781457701266], 66: [[-8.458458458458459, 8.338338338338339], 183.26310732666997], 67: [[-8.618618618618619, 8.398398398398399], 197.5246702657268], 68: [[-7.957957957957958, 8.418418418418419], 190.1110350608908], 69: [[-2.7127127127127126, -6.176176176176176], 120.32206633431693], 70: [[8.67867867867868, -6.536536536536536], 170.95801096813975], 71: [[2.6726726726726717, 4.454454454454455], 17.99836126335988], 72: [[-5.555555555555555, -1.3513513513513509], 54.0555051688006], 73: [[0.41041041041041026, -5.895895895895896], 66.1870766491785], 74: [[-6.796796796796797, 1.0110110110110107], 62.32853252436616], 75: [[-1.9719719719719713, -3.473473473473473], 37.75549860765964], 76: [[-8.358358358358359, -6.756756756756756], 257.21112336177066], 77: [[-1.3313313313313309, -1.551551551551551], 16.855287817791854], 78: [[7.397397397397398, -6.116116116116116], 146.70590533418124], 79: [[-5.7957957957957955, 5.895895895895896], 111.80944804822884], 80: [[5.995995995995996, 8.178178178178179], 142.88139877809846], 81: [[-5.555555555555555, -7.897897897897898], 180.18205644568212], 82: [[8.87887887887888, 0.3503503503503502], 65.18401638022077], 83: [[-0.4504504504504503, 5.535535535535535], 26.357312974429554], 84: [[6.936936936936938, 8.118118118118119], 137.74272085029068], 85: [[6.636636636636638, -5.195195195195195], 133.7662031825131], 86: [[-5.215215215215215, -2.1321321321321323], 89.26729468291018], 87: [[-0.41041041041041026, -1.451451451451451], 10.583154023307221], 88: [[-3.093093093093093, 5.975975975975976], 43.50942033524502], 89: [[4.494494494494495, 1.7917917917917912], 24.853757918449887], 90: 
[[-6.956956956956957, 6.796796796796798], 183.9500432815073], 91: [[6.636636636636638, -8.97897897897898], 134.38759916916507], 92: [[-9.1991991991992, -3.9739739739739743], 136.46294888442455], 93: [[2.7327327327327318, 1.2312312312312308], 5.469940462074692], 94: [[5.035035035035035, 9.47947947947948], 105.15695850087127], 95: [[6.0560560560560575, -9.97997997997998], 149.17470210628332], 96: [[-8.118118118118119, -3.433433433433433], 161.3332123161127], 97: [[-4.734734734734735, 7.577577577577578], 104.22450531869791], 98: [[-0.1501501501501501, 2.5925925925925917], 6.147604921629306], 99: [[-7.5575575575575575, 2.5125125125125116], 148.48515581367664]}
resampled_walker = {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 0, 9: 0, 10: 0, 11: 0, 12: 0, 13: 0, 14: 0, 15: 0, 16: 0, 17: 0, 18: 0, 19: 0, 20: 0, 21: 0, 22: 0, 23: 0, 24: 0, 25: 0, 26: 0, 27: 0, 28: 0, 29: 0, 30: 0, 31: 0, 32: 0, 33: 0, 34: 0, 35: 0, 36: 0, 37: 0, 38: 0, 39: 0, 40: 0, 41: 0, 42: 0, 43: 0, 44: 85, 45: 0, 46: 0, 47: 0, 48: 0, 49: 0, 50: 0, 51: 0, 52: 0, 53: 0, 54: 0, 55: 0, 56: 0, 57: 0, 58: 0, 59: 0, 60: 0, 61: 0, 62: 0, 63: 0, 64: 0, 65: 0, 66: 0, 67: 0, 68: 0, 69: 0, 70: 0, 71: 0, 72: 0, 73: 0, 74: 0, 75: 0, 76: 0, 77: 0, 78: 0, 79: 0, 80: 0, 81: 0, 82: 0, 83: 0, 84: 0, 85: 0, 86: 0, 87: 1, 88: 0, 89: 0, 90: 0, 91: 0, 92: 0, 93: 8, 94: 0, 95: 0, 96: 0, 97: 0, 98: 6, 99: 0}
# Prototype cell: alternative walker reassignment via monte-carlo tau selection,
# using the fixture dicts (taus, walker_pos, resampled_walker) defined above.
# walker_pos = {1:[[5,3], 2], 2:[[3,4], 1], 3:[[1,1], 6], 4:[[2,4], 7]}
# resampled_walker = {1:2, 2:2, 3:-4, 4:0}
# taus = {1:0.4, 2:0.3, 3:0.154, 4:0.146}
calculate_swr = lambda x,y: x/y
# key of the smallest / largest value in a dict
get_min = lambda some_dict: min(some_dict.keys(), key=(lambda n: some_dict[n]))
get_max = lambda some_dict: max(some_dict.keys(), key=(lambda n: some_dict[n]))
# gets those with positive number of resampled walkers
resample_walker = {k:1 for k,v in resampled_walker.items() if v > 0}
# takes the walkers with positive resamplings back into the walker_pos
new_walker_pos = {k:walker_pos[k][:] for k,v in resample_walker.items()}
print(new_walker_pos)
# what we need to do is calculate all the stat weight ratios then pick the highest one and initialize with that
def get_max_tau(taus : dict, new_walker_pos : dict, walker_pos : dict):
    """Choose the next empty walker slot and a donor walker to clone into it.

    Returns ``(slot_key, donor_key)``: ``slot_key`` is a still-unassigned
    walker selected from the tau table, ``donor_key`` is a surviving walker
    picked by a Monte-Carlo style acceptance test.  Relies on the
    module-level helper ``get_min`` plus ``np`` and ``random``.
    """
    # Restrict the tau table to walkers that have not been assigned yet.
    assigned_keys = list(new_walker_pos.keys())
    filtered_tau = {k:v for k,v in taus.items() if k not in assigned_keys}
    print(filtered_tau)
    # NOTE(review): despite the variable name, this picks the *smallest*
    # tau via get_min — confirm whether min or max is the intended criterion.
    max_tau = get_min(filtered_tau)
    # Monte-Carlo acceptance: give every surviving walker a uniform draw and
    # accept it when a fresh uniform beats that draw.  If a whole pass
    # accepts nothing, the while loop retries; the last acceptance within a
    # pass wins (original behaviour, deliberately preserved).
    accepted = None  # BUG FIX: 0 is a valid walker key, so the previous
                     # `accepted = 0` sentinel made walker 0 unacceptable.
    while accepted is None:
        generate_rn = sorted([(i,np.random.uniform(0, 1)) for i,j in new_walker_pos.items()], reverse=True)
        for i,j in enumerate(generate_rn):
            if random.uniform(0,1) > j[1]:
                accepted = j[0]
    return max_tau, accepted
def unassigned(walker_pos, new_walker_pos):
    """Return the subset of walker_pos whose keys are not yet in new_walker_pos.

    Uses direct dict membership (O(1) per key) instead of materialising a
    fresh list of keys for every membership test.
    """
    return {k: v for k, v in walker_pos.items() if k not in new_walker_pos}
def assign_walkers(new_walker_pos, walker_pos):
    """Recursively fill every missing slot in new_walker_pos with a clone
    of an accepted donor walker.

    Mutates ``new_walker_pos`` in place and also returns it (each recursion
    level simply propagates the final dict back up).  Reads the module-level
    ``taus`` table via get_max_tau.  Recursion depth equals the number of
    unassigned walkers — fine for ~100 walkers, but a loop would scale better.
    """
    max_tau, accepted = get_max_tau(taus, new_walker_pos, walker_pos)
    # Shallow-copy the donor's position into the chosen empty slot.
    new_walker_pos[max_tau] = walker_pos[accepted][:]
    # to_append = [max_tau, walker_pos[accepted][:]]
    not_assigned = unassigned(walker_pos, new_walker_pos)
    if len(not_assigned) > 0:
        return assign_walkers(new_walker_pos, walker_pos)
    return new_walker_pos
# Answer to the original question ("how do I return a dictionary from a
# recursion?"): each level `return`s the result of the next, so `new` is the
# completed dict — and it also aliases the in-place-mutated new_walker_pos.
new = assign_walkers(new_walker_pos, walker_pos)
print("# Walkers:{} ,\n New Walker Assignments: {}".format(len(new_walker_pos), new))
# # rejected = {k:[] for k,v in swr_affinity.items() if len(v) > 0}
# deleted_items = list()
# for k,v in resample_affinity_vals.items():
# # rejected = {i:list() for i,j in swr_affinity.items() if len(j) > 0}
# value = [n for m,n in v.items() if m not in deleted_items]
# # print(value)
# # key = [m for m,n in v.items() if m not in deleted_items]
# # print(key)
# sorted_dict = sorted(value, reverse=True)[0:(resampled_walker[k] - resample_walker[k])]
# res_items = [m for m,n in v.items() if n in sorted_dict]
# print(str(res_items) + '\n')
# rejects = len(sorted_dict) - resample_walker[k]
# rejected = sorted_dict[rejects:]
# for i in res_items:
# new_walker_pos[i] = walker_pos[k][:]
# resample_walker[k] += 1
# deleted_items.append(i)
# # for k,v in resample_affinity_vals.items():
# # del resample_affinity_vals[k][i]
# print(deleted_items)
# print(new_walker_pos)
# print(len(new_walker_pos))
# -
# ## New Resampling method for 50 - 200 walkers (Choose Randomly - Coin Flip)
# +
# Compare random-resampling vs tau-resampling population annealing over
# three walker counts, 100 repetitions each; results go to two CSVs.
exp_space = [50, 300, 1000]
free_energies, landscapes, average, best, pops, convs = list(), list(), list(), list(), list(), list()
for i in exp_space:
    print("Current Exploration Space: {}".format(i))
    sys.stdout.flush()
    # Per-walker-count accumulators for the two resampling strategies.
    rnd_pa = {"temp": list(), "walkers": list(), "iter": list(), "exp_space": list(),
              "landscapes": list(), "free_energies": list(), "average": list(),
              "best": list(), 'pops': list(), "convs": list()}
    tau_pa = {"temp": list(), "walkers": list(), "iter": list(), "exp_space": list(),
              "landscapes": list(), "free_energies": list(), "average": list(),
              "best": list(), 'pops': list(), "convs": list()}
    for j in range(100):
        print("Iteration {}".format(j))
        sys.stdout.flush()
        # --- Random-resampling run ---
        start = time.time()
        a = RandomAnneal(maxsteps=101, explore=10, walkers=i)
        (rnd_landscape, rnd_average_cost, rnd_cumulative, rnd_free_energy,
         rnd_best_cost_so_far, rnd_all_best_costs, rnd_population,
         rnd_temperature, rnd_walker_z, rnd_resample_time, rnd_config_time) = a.anneal()
        print("Time for Random Popuation Annealing: {}".format(time.time() - start))
        sys.stdout.flush()
        # --- Tau-resampling run ---
        start = time.time()
        b = TauAnneal(maxsteps=101, explore=10, walkers=i)
        (energy_landscape, average_cost, cumulative, free_energy, best_cost_so_far,
         all_best_costs, population,
         temperature, walker_z, resample_time, config_time) = b.anneal()
        print("Time for Tau Popuation Annealing: {}".format(time.time() - start))
        sys.stdout.flush()
        # BUG FIX: the random run's outputs used to be clobbered by the tau
        # run before being recorded, so both tables held identical tau data.
        # The random results are now kept under rnd_* names and recorded.
        rnd_convergence = rnd_cumulative / np.sum(rnd_population)
        convergence = cumulative / np.sum(population)
        print("Convergence Rate: {}".format(convergence))
        sys.stdout.flush()
        rnd_pa['walkers'].append(i)
        rnd_pa['temp'].append(rnd_temperature[1:])
        rnd_pa['iter'].append(j)
        rnd_pa["landscapes"].append(rnd_landscape)
        rnd_pa["free_energies"].append(rnd_free_energy)
        rnd_pa["average"].append(rnd_average_cost)
        rnd_pa["best"].append(rnd_all_best_costs)
        rnd_pa["pops"].append(rnd_population[1:])
        rnd_pa["convs"].append(rnd_convergence)
        tau_pa['walkers'].append(i)
        tau_pa['temp'].append(temperature[1:])
        tau_pa['iter'].append(j)
        tau_pa["landscapes"].append(energy_landscape)
        tau_pa["free_energies"].append(free_energy)
        tau_pa["average"].append(average_cost)
        tau_pa["best"].append(all_best_costs)
        tau_pa["pops"].append(population[1:])
        tau_pa["convs"].append(convergence)
    rand_pa = pd.DataFrame.from_dict(rnd_pa)
    tau_df = pd.DataFrame.from_dict(tau_pa)
    rand_pa.to_csv('Random_Population.csv')
    # BUG FIX: this line used to write rand_pa again, clobbering the tau
    # results; the tau table is now written to its own CSV.
    tau_df.to_csv('Tau_Population.csv')
    print("Successfully written to CSV!")
    sys.stdout.flush()
# +
# Single TauAnneal sweep over three walker counts, collecting per-run stats.
exp_space = [50, 300, 1000]
free_energies, landscapes, average, best, pops, convs = list(), list(), list(), list(), list(), list()
for i in exp_space:
    a = TauAnneal(maxsteps=101, explore=10, walkers=i)
    (energy_landscape, average_cost, cumulative, free_energy, best_cost_so_far, all_best_costs, population,\
     temperature, walker_z, resample_time, config_time) = a.anneal()
    # Fraction of the total population that converged.
    convergence = cumulative / np.sum(population)
    print("Convergence Rate: {}".format(convergence))
    landscapes.append(energy_landscape)
    free_energies.append(free_energy)
    average.append(average_cost)
    best.append(all_best_costs)
    pops.append(population)
    convs.append(convergence)
# -
# Energy-landscape KDE for each walker count (50 / 300 / 1000).
for i,j in enumerate(landscapes):
    df = pd.DataFrame(j, index=[0]).T.reset_index().rename(columns={"index":"energy", 0:"count"})
    if i == 0:
        count = 50
    elif i == 1:
        count = 300
    else:
        count = 1000
    fig, ax1 = plt.subplots(1, 1)
    plt.title("Energy Landscape Kernel Density Estimate for {} walkers".format(count))
    plt.xlabel("Energy")
    plt.ylabel("Kernel Density Estimate")
    sns.distplot(df['energy'], color='y', label='energy', bins=10)
    plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
# +
# Free energy vs temperature for each walker count in exp_space.
for i,j in enumerate(free_energies):
    df = pd.DataFrame(j, index=[0]).T.reset_index().rename(columns={"index":"temperature", 0:"free_energy"})
    if i == 0:
        count = 50
    elif i == 1:
        count = 300
    else:
        # BUG FIX: was 10000, but exp_space's third entry is 1000 walkers.
        count = 1000
    fig, ax1 = plt.subplots(1, 1)
    plt.title("Free Energy Per Temperature for {} walkers".format(count))
    sns.lineplot(df['temperature'],df['free_energy'], color='y', label='Free Energy')
    plt.xlabel("Temperature")
    plt.ylabel("Free Energy")
    #plt.gca().invert_xaxis()
    plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
# Kullback-Leibler divergence
# cross entropy
# +
# Convergence as a function of walker count for the tau runs above.
fig, ax1 = plt.subplots(1, 1)
plt.title("Convergence v. Number of Walkers".format(count))
sns.lineplot(x=exp_space, y=convs, color='g', label='Solved Problems')
plt.xlabel("Number of Walkers")
plt.ylabel("Fraction of Solved Problems")
#plt.gca().invert_xaxis()
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
# Kullback-Leibler divergence
# cross entropy
# -
# Average vs best cost per temperature for each walker count.
for i,j in enumerate(average):
    if i == 0:
        count = 50
    elif i == 1:
        count = 300
    else:
        count = 1000
    fig, ax1 = plt.subplots(1, 1)
    best_costs = best[i]
    sns.lineplot(temperature[1:], j, label='average_cost')
    sns.lineplot(temperature[1:], best_costs, label='best_cost')
    plt.title("Average Cost over Time for {} walkers".format(count))
    plt.xlabel("Temperature (1/B)")
    plt.ylabel("Average Cost")
# +
# explore = int(5 * 10e4)
# Single sample run of the base PA annealer with 30 walkers.
explore=30
a = PAAnneal(maxsteps=101, explore=10, walkers=explore)
(energy_landscape, average_cost, cumulative, free_energy, best_cost_change, best_cost, population,\
 temperature, walker_z, resample_time, config_time) = a.anneal()
# walker_z_df = pd.DataFrame.from_records(walker_z).rename(columns={0:"temp", 1:"sweep", 2:"walker", 3:"partition_function"})
# walker_z_df.head()
# -
# ## New Resampling method for 50 - 200 walkers (Choose by Taus)
random_free_energies, random_landscapes, random_average, random_best, random_pops, random_convs = list(), list(), list(), list(), list(), list()
# +
# Same sweep with the random-resampling annealer; results go into random_*.
exp_space = [50,300,1000]
for i in exp_space:
    a = RandomAnneal(maxsteps=101, explore=10, walkers=i)
    (energy_landscape, average_cost, cumulative, free_energy, best_cost_so_far, all_best_costs, population,\
     temperature, walker_z, resample_time, config_time) = a.anneal()
    convergence = cumulative / np.sum(population)
    print("Convergence Rate: {}".format(convergence))
    random_landscapes.append(energy_landscape)
    random_free_energies.append(free_energy)
    random_average.append(average_cost)
    random_best.append(all_best_costs)
    random_pops.append(population)
    random_convs.append(convergence)
# -
# KDE of the *random-resampling* landscapes gathered above.
# BUG FIX: this cell used to iterate the stale `landscapes` list from the
# tau section; the random run stores its results in random_landscapes.
for i,j in enumerate(random_landscapes):
    df = pd.DataFrame(j, index=[0]).T.reset_index().rename(columns={"index":"energy", 0:"count"})
    if i == 0:
        count = 50
    elif i == 1:
        count = 300
    else:
        # exp_space is [50, 300, 1000]; the third run previously kept the
        # stale `count` from the prior iteration.
        count = 1000
    fig, ax1 = plt.subplots(1, 1)
    plt.title("Energy Landscape Kernel Density Estimate for {} walkers".format(count))
    plt.xlabel("Energy")
    plt.ylabel("Kernel Density Estimate")
    sns.distplot(df['energy'], color='y', label='energy', bins=10)
    plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
# +
# Free energy per temperature for the *random-resampling* runs.
# BUG FIX: iterated the stale tau-section `free_energies` list; the random
# run stores its results in random_free_energies.
for i,j in enumerate(random_free_energies):
    df = pd.DataFrame(j, index=[0]).T.reset_index().rename(columns={"index":"temperature", 0:"free_energy"})
    if i == 0:
        count = 50
    elif i == 1:
        count = 300
    else:
        # Third entry of exp_space; previously left `count` stale.
        count = 1000
    fig, ax1 = plt.subplots(1, 1)
    plt.title("Free Energy Per Temperature for {} walkers".format(count))
    sns.lineplot(df['temperature'],df['free_energy'], color='y', label='Free Energy')
    plt.xlabel("Temperature")
    plt.ylabel("Free Energy")
    #plt.gca().invert_xaxis()
    plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
# Kullback-Leibler divergence
# cross entropy
# +
# Convergence vs walker count for the random-resampling runs.
fig, ax1 = plt.subplots(1, 1)
plt.title("Convergence v. Number of Walkers".format(count))
# BUG FIX: plot the random-resampling convergences gathered in this
# section, not the stale `convs` list from the tau section.
sns.lineplot(x=exp_space, y=random_convs, color='g', label='Solved Problems')
plt.xlabel("Number of Walkers")
plt.ylabel("Fraction of Solved Problems")
#plt.gca().invert_xaxis()
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
# Kullback-Leibler divergence
# cross entropy
# -
# Average vs best cost per temperature for the random-resampling runs.
# BUG FIX: iterated the stale tau-section `average`/`best` lists; this
# section's results live in random_average / random_best.
for i,j in enumerate(random_average):
    if i == 0:
        count = 50
    elif i == 1:
        count = 300
    else:
        # Third entry of exp_space; previously left `count` stale.
        count = 1000
    fig, ax1 = plt.subplots(1, 1)
    best_costs = random_best[i]
    sns.lineplot(temperature[1:], j, label='average_cost')
    sns.lineplot(temperature[1:], best_costs, label='best_cost')
    plt.title("Average Cost over Time for {} walkers".format(count))
    plt.xlabel("Temperature (1/B)")
    plt.ylabel("Average Cost")
# # One Sample Run (50 Walkers)
# ## Followed by plotting the energy landscape & count / energy landscape & temperature
# +
# Profile the annealer at several walker counts to find hot spots.
import cProfile
testing_profiles = [50, 100, 1000, 10000]
# explore = int(5 * 10e4)
for test in testing_profiles:
    explore = test
    a = PAAnneal(maxsteps=101, explore=10, walkers=explore)
    cProfile.run('a.anneal()')
# +
# Convergence and best-cost as functions of walker count.
explore = [50] + [i for i in range(100, 500, 100)]
convergent = {"explore": list(), "best_cost": list(), "converge":list()}
for i in explore:
    a = PAAnneal(maxsteps=500, walkers=i)
    energy_landscape, average_cost, cumulative, free_energy, best_cost_change, best_cost, population, temperature, walker_z = a.anneal()
    convergent['explore'].append(i)
    convergent['best_cost'].append(best_cost_change)
    convergent['converge'].append(cumulative/np.sum(population))
converge_df = pd.DataFrame.from_dict(convergent)
converge_df.head()
# -
converge_df.plot(x = 'explore', y = 'converge', kind='line', label='convergence')
plt.title('Convergence v. Number of Walkers')
plt.xlabel('explore')
plt.ylabel('fraction of problems converged')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
# Best cost found as a function of walker count.
converge_df.plot(x = 'explore', y = 'best_cost', kind='line', label='best cost')
plt.title('Best Cost v. Number of explorers')
# BUG FIX: the axis labels were copy-pasted from the convergence plot
# above; here x is the walker count ('explore') and y is the best cost.
plt.xlabel('explore')
plt.ylabel('best cost')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
# +
# NOTE(review): `explore` is still the *list* built two cells up, so this
# passes a list as `walkers=` — confirm PAAnneal accepts that, or reset
# explore to an int first.
a = PAAnneal(maxsteps=500, walkers=explore)
energy_landscape, average_cost, cumulative, free_energy, best_cost_change, best_cost, population, temperature, walker_z = a.anneal()
walker_z_df = pd.DataFrame.from_records(walker_z).rename(columns={0:"temp", 1:"sweep", 2:"walker", 3:"partition_function"})
walker_z_df.head()
# +
# Cumulative partition function for a random sample of walkers across temps.
walker_explore = np.sort(np.random.randint(1, explore, size=10))[::-1]
temps = [i for i in range(50, 5000, 500)]
cumulative_sum = {"walker": list(), "temp": list(), "cumulative":list()}
for i in temps:
    for walker in walker_explore:
        sum_df = np.sum(np.cumsum(walker_z_df[(walker_z_df['temp']==i) & (walker_z_df['walker']==walker)])['partition_function'])
        cumulative_sum['walker'].append(walker)
        cumulative_sum['temp'].append(i)
        cumulative_sum['cumulative'].append(sum_df)
df = pd.DataFrame.from_dict(cumulative_sum)
plt.title("Evolution of Partition Function per Sweep")
plt.xlabel("Sweep")
plt.ylabel("Partition Function (Z)")
sns.lineplot(df['temp'], df['cumulative'], color='y', label='walker')
#plt.gca().invert_xaxis()
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
# -
# Mean partition function per sweep, averaged over walkers and temps.
walker_z_df_walker = walker_z_df.groupby(['sweep']).mean().reset_index()
#walker_z_df_walker = walker_z_df_walker[(walker_z_df_walker['temp']==2500) & (walker_z_df_walker['walker']==2000)]
# print(walker_z_df_walker['partition_function'])
plt.title("Evolution of Partition Function per Sweep")
plt.xlabel("Sweep")
plt.ylabel("Partition Function (Z)")
sns.lineplot(walker_z_df_walker['sweep'], walker_z_df_walker['partition_function'], color='y', label='Partition Function')
#plt.gca().invert_xaxis()
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
# Same aggregation keyed by temperature.
# NOTE(review): the title/xlabel still say "Sweep" though x is temperature.
walker_z_df_walker = walker_z_df.groupby(['temp']).mean().reset_index()
#walker_z_df_walker = walker_z_df_walker[(walker_z_df_walker['temp']==i) & (walker_z_df_walker['walker']==2000)]
# print(walker_z_df_walker['partition_function'])
plt.title("Evolution of Partition Function per Sweep")
plt.xlabel("Sweep")
plt.ylabel("Partition Function (Z)")
sns.lineplot(walker_z_df_walker['temp'], walker_z_df_walker['partition_function'], color='y', label='Partition Function')
#plt.gca().invert_xaxis()
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
# +
# Partition-function evolution for one walker (id 25) in five temperature
# bands: 5 sets each of low / medium / high temperatures.
temp_explore = [np.sort(np.random.randint(4000, 5000, size=5))[::-1], np.sort(np.random.randint(3000, 4000, size=5))[::-1], \
                np.sort(np.random.randint(2000, 3000, size=5))[::-1], np.sort(np.random.randint(1000, 2000, size=5))[::-1], \
                np.sort(np.random.randint(0, 1000, size=5))[::-1]]
for temps in temp_explore:
    fig = plt.subplots()
    plt.title("Evolution of Partition Function per Sweep {}".format(round(temps[0],-3)))
    plt.xlabel("Sweep")
    plt.ylabel("Partition Function (Z)")
    for j in temps:
        walker_z_df_walker = walker_z_df[(walker_z_df['temp']==j) & (walker_z_df['walker']==25)]
        walker_z_df_walker.head()
        sns.lineplot(walker_z_df_walker['sweep'], walker_z_df_walker['partition_function'], label='Partition Function at B: {}'.format(j))
    # move the legend outside of the graph
    plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
    plt.savefig('partition_{}.png'.format(round(temps[0],-3)))
# +
# deploy on cloud
#explore = [i for i in range(1, 1001, 50)]
# Run the annealer 100 times and flatten per-temperature stats into one
# long-format table (one row per run x temperature).
pop_anneal = {"run":list(), 'temperature':list(), "energy_landscape":list(), "divergence":list(), \
              "converged_perc":list(), "best_cost":list(), "avg_cost_temp": list(), "temp_pop":list(), \
              "total_pop":list()}
explore = 50
iters = 100
for i in range(100):
    a = Annealer(maxsteps=5000, walkers=explore)
    energy_landscape, average_cost, cumulative, kl_divergence, best_cost, population, temp, walker_z = a.anneal()
    temp = [0] + temp
    temp = temp[:-2]
    total_population = np.sum(population)
    # Replace infinite divergences with 0 before taking absolute values.
    new_divergence = np.abs([0 if math.isinf(v) == True else v for k,v in kl_divergence.items()])
    for j in temp:
        pop_anneal['run'].append(i)
        pop_anneal['temperature'].append(j)
        pop_anneal['energy_landscape'].append(energy_landscape)
        pop_anneal['divergence'].append(new_divergence[j])
        pop_anneal["converged_perc"].append(cumulative/total_population)
        pop_anneal["best_cost"].append(best_cost[2])
        # need to implement average cost every time step
        pop_anneal["avg_cost_temp"].append(average_cost[j])
        pop_anneal["temp_pop"].append(population[j])
        pop_anneal["total_pop"].append(total_population)
anneal_run = pd.DataFrame.from_dict(pop_anneal)
anneal_run.head()
#anneal_run.to_csv("PA_run_test.csv")
# -
# ## Some sample plotting code
# +
# Free energy vs temperature for the latest run.
df = pd.DataFrame(free_energy, index=[0]).T.reset_index().rename(columns={"index":"temperature", 0:"free_energy"})
plt.title("Free Energy Per Temperature")
sns.lineplot(df['temperature'],df['free_energy'], color='y', label='Free Energy')
plt.xlabel("Temperature")
plt.ylabel("Free Energy")
plt.gca().invert_xaxis()
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
# Kullback-Leibler divergence
# cross entropy
# +
# Same plot with free energy rescaled by -1/T.
free = free_energy.copy()
for k,v in free.items():
    free[k] = free[k]/(-1/k)
df = pd.DataFrame(free, index=[0]).T.reset_index().rename(columns={"index":"temperature", 0:"free_energy"})
plt.title("Free Energy Per Temperature")
sns.lineplot(df['temperature'],df['free_energy'], color='y', label='Free Energy')
plt.xlabel("Temperature")
plt.ylabel("Free Energy")
plt.gca().invert_xaxis()
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
# Kullback-Leibler divergence
# cross entropy
# +
# KL divergence per temperature.
df = pd.DataFrame(kl_divergence, index=[0]).T.reset_index().rename(columns={"index":"temperature", 0:"divergence"})
plt.title("KL Divergence Per Temperature")
sns.lineplot(df['temperature'],df['divergence'], color='y', label='Kullbeck Leibler Divergence')
plt.xlabel("Temperature")
plt.ylabel("Divergence")
#plt.gca().invert_xaxis()
plt.legend()
# Kullback-Leibler divergence
# cross entropy
# +
# KDE of the final energy landscape.
df = pd.DataFrame(energy_landscape, index=[0]).T.reset_index().rename(columns={"index":"energy", 0:"count"})
plt.title("Energy Landscape Kernel Density Estimate")
plt.xlabel("Energy")
plt.ylabel("Kernel Density Estimate")
sns.distplot(df['energy'], color='y', label='energy', bins=10)
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
# Kullback-Leibler divergence
# cross entropy
# +
#population = [np.mean(i) for i in pop]
#temp = [np.mean(i) for i in temperature]
# Average vs best cost over the temperature schedule.
best_costs = [i for i in best_cost]
sns.lineplot(temperature, average_cost, label='average_cost')
sns.lineplot(temperature, best_costs, label='best_cost')
plt.title("Average Cost over Time")
plt.xlabel("Temperature (1/B)")
plt.ylabel("Average Cost")
plt.legend()
#plt.gca().invert_xaxis()
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
#plt.xlim(0.5, 0)
# +
#population = [np.mean(i) for i in pop]
#temp = [np.mean(i) for i in temperature]
# Walker population over the temperature schedule.
sns.lineplot(temperature, population, label='number_of_walkers')
plt.title("Number of Walkers over Time")
plt.xlabel("Temperature (1/B)")
plt.ylabel("Number of Walkers")
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.gca().invert_xaxis()
#plt.xlim(0.5, 0)
# -
| Population Annealing/Continuous/Population Annealing (Continuous).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:tensorflow]
# language: python
# name: conda-env-tensorflow-py
# ---
# +
'''Code for fine-tuning Inception V3 for a new task.
Start with Inception V3 network, not including last fully connected layers.
Train a simple fully connected layer on top of these.
'''
import numpy as np
import keras
import random
from keras import optimizers
from keras.preprocessing import image
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential, Model
from keras.layers import Dense, Activation, Flatten, Dropout
import inception_v3 as inception
import vgg16 as VGG
import prepare.collect as pc
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from PIL import Image
'''
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.9
set_session(tf.Session(config=config))
'''
N_CLASSES = 2
IMSIZE = (224, 224)  # VGG16 expects 224x224 RGB input
XML_DIR = "../data/annotations/xmls/"  # bbox annotation XML files
IMG_DIR = "../data/images/"
VAL_RATIO = 0.3  # NOTE(review): used below as the *training* fraction — confirm intent
# TO DO:: Replace these with paths to the downloaded data.
# Training directory
# train_dir = '../data/catdog/train'
# Testing directory
# test_dir = '../data/catdog/validation'
# Load a VGG16 model pre-trained on ImageNet (the file's header mentions
# Inception V3, but the code uses VGG16 throughout).
base_model = VGG.VGG16(weights='imagenet')
print ('Loaded vgg16 model')
# +
# Freeze every layer of the pre-trained base so only the new head trains.
for layer in base_model.layers:
    layer.trainable = False
# Add on new fully connected layers for the output classes.
# x = Dense(1024, activation='relu')(base_model.get_layer('fc2').output)
# x = Dropout(0.5)(x)
# predictions = Dense(N_CLASSES, activation='softmax', name='predictions')(x)
# Branch a new head off the frozen VGG16 'flatten' layer.
base_model_last = base_model.get_layer('flatten').output
# x = Dense(4096, activation='relu', name='fc1-1')(base_model_last)
# x = Dense(4096, activation='relu', name='fc1-2')(x)
# predictions = Dense(N_CLASSES, activation='softmax', name='predictions')(x)
# model = Model(input=base_model.input, output=predictions)
# for layer in model.layers:
#     layer.trainable = False
# Bounding-box regression head: two FC layers then 4 linear outputs,
# interpreted downstream (see testImage) as percent-scaled box coordinates.
y = Dense(4096, activation='relu', name='fc2-1')(base_model_last)
y = Dense(4096, activation='linear', name='fc2-2')(y)
#y = Dense(4096, activation='linear', name='fc2-3')(y)
# y = Dense(2048, activation='relu', name='fc2-4')(y)
aux_predictions = Dense(4, activation='linear', name='aux_predictions')(y)
#model = Model(input=base_model.input, output=predictions)
#model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
# model = Model(input=base_model.input, output=[predictions, aux_predictions])
# model.compile(optimizer='rmsprop',
#               loss={'predictions': 'categorical_crossentropy', 'aux_predictions': 'mean_squared_error'})
# NOTE(review): `input=`/`output=` kwargs are the legacy Keras 1 API.
model = Model(input=base_model.input, output=aux_predictions)
sgd = optimizers.SGD(lr=0.000001)  # very small LR; only the head trains
#adam = optimizers.Adam(lr=0.00001)
model.compile(loss='mean_squared_error', optimizer=sgd, metrics=['accuracy'])
# +
# Show some debug output
# print (model.summary())
# print ('Trainable weights')
#model.save_weights('catdog_pretrain.h5')
#print (model.trainable_weights)
# +
# Build an info tuple for every annotation file, shuffle, and split.
# Downstream indexing suggests tuples are (xml, img_path, label, bbox, size)
# — TODO confirm against prepare.collect.getInfoTupleForXml.
xmlFiles = pc.listAllFiles(XML_DIR)
infoList = list(map(lambda f:pc.getInfoTupleForXml(f,IMG_DIR) ,xmlFiles))
random.shuffle(infoList)
# NOTE(review): with VAL_RATIO = 0.3 the *training* set gets the first 30%
# and validation the remaining 70% — confirm this split is intended.
cutIndex = int(len(infoList)*VAL_RATIO)
train_files=infoList[:cutIndex]
val_files = infoList[cutIndex:]
train_files[50]
# +
#print(val_files)
np.random.seed()
img_datagen = ImageDataGenerator(rescale=1./255)
def to_categorical(y, num_classes=None):
    """Converts a class vector (integers) to binary class matrix.

    E.g. for use with categorical_crossentropy.

    # Arguments
        y: class vector to be converted into a matrix
            (integers from 0 to num_classes).
        num_classes: total number of classes.

    # Returns
        A binary matrix representation of the input.
    """
    labels = np.array(y, dtype='int').ravel()
    if not num_classes:
        # Infer the class count from the largest label present.
        num_classes = np.max(labels) + 1
    # One row per label; row i gets a 1 in column labels[i].
    one_hot = np.zeros((labels.shape[0], num_classes))
    one_hot[np.arange(labels.shape[0]), labels] = 1
    return one_hot
def my_load_img(img_path,img_datagen,size):
    """Load one image, apply the generator's random transform and rescaling,
    and return it as a single (H, W, C) float array (no batch axis)."""
    img = image.load_img(img_path, target_size=size)
    x = image.img_to_array(img)
    # x = img_datagen.img_to_array(img)
    x = img_datagen.random_transform(x)
    x = img_datagen.standardize(x)
    #x = np.expand_dims(x, axis=0)
    return x
def my_img_generator(files,img_datagen,batch_size,bbox=False):
    """Infinite batch generator over `files`.

    Yields (images, bbox targets) when bbox=True, otherwise
    (images, one-hot labels).  Wraps around the file list indefinitely,
    accumulating `batch_size` items per yield.  Assumes each file tuple
    holds (…, img_path, label, bbox, …) at indices 1/2/3 — TODO confirm
    against prepare.collect.
    """
    # index_array = np.random.permutation(len(files))
    index = 0   # position in `files` (wraps around via modulo)
    count = 0   # items accumulated toward the current batch
    img_datas=[]
    img_labels=[]
    img_bboxes=[]
    while 1:
        # create numpy arrays of input data
        # and labels, from each line in the file
        if count < batch_size:
            img_datas.append(my_load_img(files[index][1],img_datagen,IMSIZE))
            # lable=[0.0,0.0]
            # lable[files[index][1]]=1.0
            img_labels.append(files[index][2])
            if bbox:
                img_bboxes.append(np.array(files[index][3]))
            index=(index+1)%len(files)
            count+=1
        else:
            # Batch is full: emit it, then reset the accumulators.
            count=0
            #print(img_datas)
            one_hot_labels=to_categorical(img_labels, num_classes=2)
            if bbox:
                #yield (np.array(img_datas),[np.array(one_hot_labels),np.array(img_bboxes)])
                yield (np.array(img_datas),np.array(img_bboxes))
            else:
                yield (np.array(img_datas),np.array(one_hot_labels))
            img_datas = []
            img_labels = []
            img_bboxes=[]
            # random.shuffle(files)
batch_size=32
# t = next(my_img_generator(train_files,img_datagen,batch_size))
# model.load_weights('catdog_pretrain_nf.h5')
# train_data
# train_data.shape
# bbox=True: generators yield (images, bbox targets).
my_train_generator = my_img_generator(train_files,img_datagen,batch_size,True)
my_val_generator = my_img_generator(val_files,img_datagen,batch_size,True)
#train_datagen = ImageDataGenerator(rescale=1./255)
# train_generator = train_datagen.flow_from_directory(
# train_dir, # this is the target directory
# target_size=IMSIZE, # all images will be resized to 299x299 Inception V3 input
# batch_size=batch_size,
# class_mode='categorical')
#test_datagen = ImageDataGenerator(rescale=1./255)
# test_generator = test_datagen.flow_from_directory(
# test_dir, # this is the target directory
# target_size=IMSIZE, # all images will be resized to 299x299 Inception V3 input
# batch_size=batch_size,
# class_mode='categorical')
#print(next(my_train_generator)[1])
# print(a[1].shape)
# print(a[1])
# -
# Resume from previously saved bbox-head weights (file must already exist).
model.load_weights('catdog_bbox_5.h5',by_name=True)
# +
# my_train_generator = my_img_generator(train_files,img_datagen,32)
# my_val_generator = my_img_generator(val_files,img_datagen,32)
# model.fit_generator(
# my_train_generator,
# samples_per_epoch=128,
# nb_epoch=10,
# validation_data=test_datagen,
# verbose=2,
# nb_val_samples=128)
# Train the head with the legacy Keras 1 fit_generator signature, then
# checkpoint the weights back to the same file.
model.fit_generator(
    my_train_generator,
    samples_per_epoch=128,
    nb_epoch=200,
    validation_data=my_val_generator,
    verbose=2,
    nb_val_samples=128)
model.save_weights('catdog_bbox_5.h5')
# +
# img_path = '../data/cat2.jpg'
# Sanity-check predictions: first on a training image, then on held-out ones.
test_img = train_files[0]
img_path = test_img[1]
# img = image.load_img(img_path, target_size=IMSIZE)
# x = image.img_to_array(img)
# x = np.expand_dims(x, axis=0)
# x = inception.preprocess_input(x)
# x = my_load_img(img_path,img_datagen,IMSIZE)
# x = np.expand_dims(x, axis=0)
# preds = model.predict(x)[0]
# print('Predicted:', preds)
# width=test_img[4][0]
# height= test_img[4][1]
# actual_preds=[preds[0]*width*0.01,preds[1]*height*0.01,preds[2]*width*0.01,preds[3]*height*0.01]
# print('Actual_preds',actual_preds)
def getPredForImg(img_path):
    """Run the bbox model on one image file and return its 4 raw outputs
    (treated downstream as percent-scaled box coordinates)."""
    x = my_load_img(img_path,img_datagen,IMSIZE)
    x = np.expand_dims(x, axis=0)  # add batch axis for predict()
    preds = model.predict(x)[0]
    return preds
def testImage(img_path,preds):
    """Display the image with `preds` drawn as a red bounding box.

    `preds` is interpreted as [xmin, ymin, xmax, ymax] in percent of the
    image dimensions (hence the *0.01 rescale below)."""
    size = Image.open(img_path).size
    width=size[0]
    height=size[1]
    im = np.array(Image.open(img_path), dtype=np.uint8)
    # Create figure and axes
    fig,ax = plt.subplots(1)
    # Display the image
    ax.imshow(im)
    # Scale the percent outputs back to pixel coordinates.
    actual_preds=[preds[0]*width*0.01,preds[1]*height*0.01,preds[2]*width*0.01,preds[3]*height*0.01]
    # Create a Rectangle patch
    rect = patches.Rectangle((actual_preds[0],actual_preds[1]),actual_preds[2]-actual_preds[0],actual_preds[3]-actual_preds[1],linewidth=1,edgecolor='r',facecolor='none')
    # Add the patch to the Axes
    ax.add_patch(rect)
    plt.show()
    return
files = pc.listAllFiles("../data/images/")
for f in val_files[30:31]:
    preds = getPredForImg(f[1])
    print(preds)
    testImage(f[1],preds)
preds = getPredForImg("../data/bill.jpg")
testImage("../data/bill.jpg",preds)
# -
Image.open(img_path).size
| code/notebook - bbox_2xDense.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Lecture9
# ## Tensorboard <hr/>
# ## 5 steps of using TensorBoard
#
# ### 1. From TF graph decide which tensors you want to log
# ```python
# w2_hist = tf.summary.histogram('weights2', W2)
# cost_summ = tf.summary.scalar('cost', cost)
# ```
#
#
# ### 2. Merge all summaries
# ```python
# summary = tf.summary.merge_all()
# ```
#
#
# ### 3. Create writer and add graph
# ```python
# writer = tf.summary.FileWriter('./logs')
# writer.add_graph(sess.graph)
# ```
#
# ### 4. Run summary merge and add_summary
# ```python
# s, _ = sess.run([summary, optimizer], feed_dict = feed_dict)
# writer.add_summary(s, global_step = global_step)
# ```
#
# ### 5. Launch TensorBoard
# ```python
# tensorboard --logdir=./logs
# ```
# <hr/>
# ## Histogram (multi-dimensional tensors)
#
# ```python
# W2 = tf.Variable(tf.random_normal([2,1]))
# b2 = tf.Variable(tf.random_normal([1]))
# hypothesis = tf.sigmoid(tf.matmul(layer1,W2)+b2)
#
# w2_hist = tf.summary.histogram('weights2', W2)
# b2_hist = tf.summary.histogram('biases2', b2)
# hypothesis_hist = tf.summary.histogram('hypothesis',hypothesis)
# ```
#
# <hr/>
# ## Add scope for better graph hierarchy
# ```python
# with tf.name_scope('layer1') as scope:
# W1 = tf.Variable(tf.random_normal([2,2]), name = 'weight1')
# b1 = tf.Variable(tf.random_normal([2]), name = 'bias1')
# layer1 = tf.sigmoid(tf.matmul(X, W1) + b1)
#
# w1_hist = tf.summary.histogram('weights1', W1)
# b1_hist = tf.summary.histogram('biases1', b1)
# layer1_hist = tf.summary.histogram('layer1', layer1)
#
#
# with tf.name_scope('layer2') as scope:
# W2 = tf.Variable(tf.random_normal([2,1]), name = 'weight2')
# b2 = tf.Variable(tf.random_normal([1]), name = 'bias2')
# hypothesis = tf.sigmoid(tf.matmul(layer1, W2) + b2)
#
# w2_hist = tf.summary.histogram('weights2', W2)
# b2_hist = tf.summary.histogram('biases2', b2)
# hypothesis_hist = tf.summary.histogram('hypothesis', hypothesis)
# ```
#
#
# <hr/>
# ## Multiple runs
# tensorboard --logdir=./logs/xor_logs
# ```python
# train = tf.train.GradientDescentOptimizer(learning_rate = 0.1).minimize(cost)
# ...
# writer = tf.summary.FileWriter('./logs/xor_logs')
# ```
# tensorboard --logdir=./logs/xor_logs_r0_01
# ```python
# train = tf.train.GradientDescentOptimizer(learning_rate = 0.01).minimize(cost)
# ...
# writer = tf.summary.FileWriter('./logs/xor_logs_r0_01')
# ```
#
# tensorboard --logdir=./logs
| Lecture9 Tensorboard.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
# BUG FIX: the 'key' column held a single placeholder string ('<KEY>', an
# anonymisation artefact) against six data values, which raises
# "ValueError: arrays must all be same length".  Restore six keys using the
# A/B/C pattern the later cells in this notebook rely on.
df = pd.DataFrame({'key': ['A', 'B', 'C', 'A', 'B', 'C'], 'data':range(6)})
df
def filter_by_mean(x):
    """Predicate for GroupBy.filter: keep groups whose mean 'data' exceeds 3."""
    group_mean = x['data'].mean()
    return group_mean > 3
# Per-group means, then keep only groups passing the mean>3 filter.
df.groupby('key').mean()
df.groupby('key').filter(filter_by_mean)
df = pd.DataFrame({
    'key': ['A', 'B', 'C', 'A', 'B', 'C'],
    'data1': [1, 2, 3, 1, 2, 3],
    'data2': [4, 4, 6, 0, 6, 1]
})
print("DataFrame:")
print(df, "\n")
# groupby on 'key', summing both data columns
print(df.groupby('key').aggregate({'data1': np.sum, 'data2': np.sum}))
# +
# groupby on 'key' plus one data column (two-level group keys)
print(df)
print(df.groupby(['key', 'data1']).sum())
print(df.groupby(['key', 'data2']).sum())
# +
import numpy as np
import pandas as pd
df = pd.DataFrame({
    'key': ['A', 'B', 'C', 'A', 'B', 'C'],
    'data1': [0, 1, 2, 3, 4, 5],
    'data2': [4, 4, 6, 0, 6, 1]
})
print("DataFrame:")
print(df, "\n")
# Use aggregate to compute summary statistics.
# Group by 'key' and show min / median / max of both data columns.
df.groupby('key').aggregate([min,np.median,max])
# Group by 'key' and show the min of data1 and the sum of data2.
df.groupby('key').aggregate({'data1':np.min, 'data2':np.sum})
# -
df.groupby('key').aggregate([min,np.median,max])
df
df.groupby('key').aggregate(min)
df.groupby('key').aggregate([min,np.median,max])
# +
#head() 앞 5개만 보기
# +
#MultiIndex 인덱스 계층을 만드는 것
#a,b 인덱스와 각각 1,2인덱스를 만든다.
df = pd.DataFrame(
np.random.randn(4,2),
index = [['a','a','b','b'],['1','2','1','2']],
columns = ['data1', 'data2'])
df
# -
df = pd.DataFrame(
np.random.randn(4,4),
index = [['a','a','b','b'],['1','2','1','2']])
#columns = ['data1', 'data2','data3','data4'])
df
#columns를 지정해도 계층 지정 됨
df2 = pd.DataFrame(
np.random.randn(4,4),
columns = [['a','a','b','b'],['1','2','1','2']])
df2
#열로 뽑기
df2['a']
df2['b']['1']
#행으로 뽑기 df[열][열_1][인덱스]
df2['a']['1'][0]
#df[columns]: Series로 출력
df[0]
#df[column][index]로 하나씩 뽑을 수 있음
df[1][0]
# +
#pivot-table: 데이터에서 필요한 자료만 뽑아서 새롭게 요약,
#분석 할 수 있는 기능 엑셀에서의 피봇 테이블과 같다
#index: 행 인덱스로 들어갈 key
#column: 열 인덱스로 라벨링될 값
#value: 분석할 데이터
# -
df = pd.DataFrame(columns = ['월별','내역','지출(만 원)','수입(만 원)'])
df['월별'] = [201805,201805,201805,201806,201806,201806,
201807,201807,201807]
df['내역'] = ['관리비','교통비','월급','관리비','교통비','월급',
'관리비','교통비','월급']
df['지출(만 원)'] = [20, 10, 0, 30, 10, 0, 25, 15, 0]
df['수입(만 원)'] = [0,0,40,0,0,50,0,0,60]
df.pivot_table(index = '월별', columns = '내역',
values = ['지출(만 원)', '수입(만 원)'])
import matplotlib.pyplot as plt
# Simplest possible line plot via the pyplot state machine.
x = [1,2,3,4,5]
y = [1,2,3,4,5]
plt.plot(x,y)
# +
#fig.ax = plt.subplots()
x = [1,2,3,4,5]
y = [1,2,3,4,5]
plt.plot(x,y)
plt.title('my first plot')
plt.xlabel('x')
plt.ylabel('y')
# +
x = [1,2,3,4,5]
y = [1,2,3,4,5]
# Reusable template: copy this cell and just change x and y.
fig, ax = plt.subplots()
ax.plot(x,y)
ax.set_title('my first plot')
ax.set_xlabel('x')
ax.set_ylabel('y')
# -
x = np.linspace(0, np.pi*4, 100)  # 100 sample points from 0 to 4*pi
fig, axes = plt.subplots(2,1)  # 2 rows, 1 column of subplots
axes[0].plot(x,np.sin(x))  # sin curve in axes[0]
axes[1].plot(x,np.cos(x))  # cos curve in axes[1]
# +
# Using linestyle, marker and color together
fig, ax = plt.subplots()
x = np.arange(15)
y = x**2
ax.plot(
    x,y,
    linestyle = ":",
    marker = "*",
    color = '#1f77b4'
)
# +
# Line style
x = np.arange(10)
fig, ax = plt.subplots()  # boilerplate — worth memorizing
ax.plot(x, x, linestyle="-")  # the various linestyle forms:
# solid
ax.plot(x, x+2, linestyle="--")
# dashed
ax.plot(x, x+4, linestyle="-.")
# dashdot
ax.plot(x, x+6, linestyle=":")
# dotted
# +
# Marker
x = np.arange(10)
fig, ax = plt.subplots()
ax.plot(x, x, marker=".")  # the various marker shapes:
ax.plot(x, x+2, marker="o")  # large circle
ax.plot(x, x+4, marker='v')  # downward triangle
ax.plot(x, x+6, marker="s")  # square (s: square)
ax.plot(x, x+8, marker="*")
# +
# Adjusting the axis limits
x = np.linspace(0, 10, 1000)  # 1000 points from 0 to 10; fewer points gives an angular curve
fig, ax = plt.subplots()  # boilerplate
ax.plot(x, np.sin(x))  # draw sin(x)
ax.set_xlim(-2, 12)  # lim = limit; sets the visible axis range
ax.set_ylim(-1.5,1.5)
# +
# Legend
fig, ax = plt.subplots()
ax.plot(x, x, label='y=x')  # labeled for the legend
ax.plot(x, x**2, label='y=x^2')  # labeled for the legend
ax.set_xlabel("x")
ax.set_ylabel("y")
# Legend options
ax.legend(  # draws the legend box
    loc='upper left',  # loc: legend position
    shadow=True,  # shadow=True adds a drop shadow
    fancybox=True,  # fancybox=True rounds the box corners
    borderpad=2)  # borderpad=2 adjusts the padding inside the box
# +
x = np.arange(10)
fig, ax = plt.subplots()
ax.plot(
    x, x, label='y=x',  # plotting (x, y) where y = x, hence (x, x)
    marker='o',
    color='blue',
    linestyle=':'
)
ax.plot(
    x, x**2, label='y=x^2',
    marker='^',
    color='red',
    linestyle='--'
)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.legend(
    loc='upper left',
    shadow=True,
    fancybox=True,
    borderpad=2
)
fig.savefig("plot.png")  # save this figure under the name plot.png
# +
# Scatter-style plot using plot() with marker-only styling
fig, ax = plt.subplots()
x = np.arange(10)
ax.plot(x, x**2, "o",
        markersize = 15,
        markerfacecolor = 'white',
        markeredgecolor = 'blue')
# +
# The scatter() function
fig, ax = plt.subplots()
x = np.arange(10)
ax.scatter(
    x, x**2, alpha=0.2)  # alpha is the transparency
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
x = np.arange(10)
ax.plot(
    x, x**2, "o",
    markersize=15,
    markerfacecolor='white',
    markeredgecolor="blue"
)
fig.savefig("plot.png")
# -
fig, ax = plt.subplots()
x = np.random.randn(50)
y = np.random.randn(50)
colors = np.random.randint(0,100,50)
# NOTE(review): `colors` and `sizes` are computed but never used (ax.plot below
# ignores them); presumably this was meant to be ax.scatter(x, y, c=colors, s=sizes).
sizes = 500*np.pi * np.random.randint(50) ** 2
ax.plot(x, y)
# +
# Bar chart
x = np.arange(10)
fig, ax = plt.subplots(figsize=(10,4))  # figsize: (width, height) of the figure
ax.bar(x, x**2)
# +
# Histogram: splits the data into bins (intervals) and plots the counts.
# Unlike a bar chart of categorical data (discrete, separate bars), a
# histogram shows a distribution; a normalized (density) histogram sums to 1.
fig, ax = plt.subplots()
data = np.random.randn(1000)
ax.hist(data, bins=50)  # bins=50: split into 50 intervals
# +
# (stats note) Two-sided 5% significance leaves 2.5% in each tail; areas under
# the curve — not x-values — are the probabilities, and the boundary between
# the acceptance and rejection regions is the critical value.
# +
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.font_manager as fm
# Korean category labels (soccer, baseball, basketball, badminton, table tennis);
# these strings are data and must stay as-is.
x = np.array(["축구", "야구", "농구", "배드민턴", "탁구"])
y = np.array([18, 7, 12, 10, 8])
z = np.random.randn(1000)
fig, axes = plt.subplots(1, 2, figsize=(8, 4))
# Bar chart
axes[0].bar(x, y)
# Histogram
axes[1].hist(z, bins = 50)
fig.savefig("plot.png")
# -
# Stacked bar chart: each bar stacks the x/y/z series on top of one another.
x = np.random.rand(3)
y = np.random.rand(3)
z = np.random.rand(3)
data = [x, y, z]
fig, ax = plt.subplots()
x_ax = np.arange(3)
for i in x_ax:
    # bottom = cumulative height of the already-drawn series
    ax.bar(x_ax, data[i],
           bottom=np.sum(data[:i], axis=0))
ax.set_xticks(x_ax)
ax.set_xticklabels(["A", "B", "C"])
| python practice/pypthon practice 8-2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
import numpy as np
def nonlin(x, deriv=False):
    """Sigmoid nonlinearity.

    With deriv=False, return sigmoid(x). With deriv=True, `x` is assumed to
    already be a sigmoid output, and the derivative x * (1 - x) is returned.
    """
    if deriv == True:  # noqa: E712 -- keep the original equality test semantics
        slope = x * (1 - x)
        return slope
    return 1 / (1 + np.exp(-x))
# Training inputs: 4 samples, 3 features each (third feature is a bias column).
x = np.array([[0,0,1],
              [0,1,1],
              [1,0,1],
              [1,1,1]])

# Target outputs (XOR of the first two input features).
y = np.array([[0],
              [1],
              [1],
              [0]])

# Fixed seed so the random weight initialization is reproducible.
np.random.seed(1)

# Weights in [-1, 1): 3->4 hidden layer (syn0) and 4->1 output layer (syn1).
syn0 = 2*np.random.random((3,4)) - 1
syn1 = 2*np.random.random((4,1)) - 1

# `range` instead of Python-2-only `xrange`: identical iteration behavior,
# and the cell now also runs under Python 3.
for j in range(60000):
    # Forward pass through the two-layer network.
    k0 = x
    k1 = nonlin(np.dot(k0,syn0))
    k2 = nonlin(np.dot(k1,syn1))

    # Output-layer error; report mean absolute error every 10k iterations.
    k2_error = y - k2
    if (j % 10000) == 0:
        print("Error:"+str(np.mean(np.abs(k2_error))))

    # Backpropagation: delta = error * sigmoid'(layer output).
    k2_delta = k2_error*nonlin(k2,deriv=True)
    k1_error = k2_delta.dot(syn1.T)
    k1_delta = k1_error*nonlin(k1,deriv=True)

    # Weight update (implicit learning rate of 1).
    syn1 += k1.T.dot(k2_delta)
    syn0 += k0.T.dot(k1_delta)
# -
# Show the training data and the learned weight matrices.
# print() with a single argument behaves identically on Python 2 and 3,
# unlike the Python-2-only `print x` statement form used originally.
print(x)
print(y)
print(syn0)
print(syn1)
| how_to_do_math_for_deep_learning/demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Advantage Actor-Critic with TensorFlow 2.1
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Setup
# + pycharm={"is_executing": false}
import gym
import logging
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import tensorflow.keras.layers as kl
import tensorflow.keras.losses as kls
import tensorflow.keras.optimizers as ko
# %matplotlib inline
print("TensorFlow Ver: ", tf.__version__)
# + pycharm={"is_executing": false}
# Eager by default!
print("Eager Execution:", tf.executing_eagerly())
print("1 + 2 + 3 + 4 + 5 =", tf.reduce_sum([1, 2, 3, 4, 5]))
# -
# ## Policy & Value Model Class
# + pycharm={"is_executing": false}
class ProbabilityDistribution(tf.keras.Model):
    """Keras model that draws one categorical sample per row of logits."""

    def call(self, logits, **kwargs):
        # Draw a single action index for each batch row, then drop the
        # trailing length-1 sample axis.
        sampled = tf.random.categorical(logits, 1)
        return tf.squeeze(sampled, axis=-1)
class Model(tf.keras.Model):
    """Actor-critic network: separate hidden layers feed a policy head and a value head."""

    def __init__(self, num_actions):
        super().__init__('mlp_policy')
        # Note: no tf.get_variable(), just simple Keras API!
        self.hidden1 = kl.Dense(128, activation='relu')
        self.hidden2 = kl.Dense(128, activation='relu')
        self.value = kl.Dense(1, name='value')
        # Logits are unnormalized log probabilities.
        self.logits = kl.Dense(num_actions, name='policy_logits')
        self.dist = ProbabilityDistribution()

    def call(self, inputs, **kwargs):
        # Keras may hand us a numpy array; make sure we operate on a tensor.
        batch = tf.convert_to_tensor(inputs)
        # Policy and value heads read from independent hidden layers
        # fed by the same input tensor.
        policy_hidden = self.hidden1(batch)
        value_hidden = self.hidden2(batch)
        return self.logits(policy_hidden), self.value(value_hidden)

    def action_value(self, obs):
        # `predict_on_batch` executes `call()` under the hood.
        logits, value = self.predict_on_batch(obs)
        action = self.dist.predict_on_batch(logits)
        # tf.random.categorical(logits, 1) would also work here; routing the
        # sampling through a Keras sub-model keeps everything graph-compatible.
        return np.squeeze(action, axis=-1), np.squeeze(value, axis=-1)
# + pycharm={"name": "#%%\n", "is_executing": false}
# Verify everything works by sampling a single action.
env = gym.make('CartPole-v0')
model = Model(num_actions=env.action_space.n)
model.action_value(env.reset()[None, :])
# -
# ## Advantage Actor-Critic Agent Class
# + pycharm={"is_executing": false}
class A2CAgent:
    """Advantage Actor-Critic (A2C) agent driving a two-headed Keras model."""

    def __init__(self, model, lr=7e-3, gamma=0.99, value_c=0.5, entropy_c=1e-4):
        """Compile `model` with the A2C losses.

        lr: RMSprop learning rate.
        gamma: reward discount factor.
        value_c / entropy_c: weights of the value and entropy loss terms.
        """
        # `gamma` is the discount factor; coefficients are used for the loss terms.
        self.gamma = gamma
        self.value_c = value_c
        self.entropy_c = entropy_c
        self.model = model
        self.model.compile(
            optimizer=ko.RMSprop(lr=lr),
            # Define separate losses for policy logits and value estimate.
            loss=[self._logits_loss, self._value_loss])

    def train(self, env, batch_sz=64, updates=250):
        """Run `updates` batches of `batch_sz` env steps; return per-episode rewards."""
        # Storage helpers for a single batch of data.
        actions = np.empty((batch_sz,), dtype=np.int32)
        rewards, dones, values = np.empty((3, batch_sz))
        observations = np.empty((batch_sz,) + env.observation_space.shape)
        # Training loop: collect samples, send to optimizer, repeat updates times.
        ep_rewards = [0.0]
        next_obs = env.reset()
        for update in range(updates):
            for step in range(batch_sz):
                observations[step] = next_obs.copy()
                actions[step], values[step] = self.model.action_value(next_obs[None, :])
                next_obs, rewards[step], dones[step], _ = env.step(actions[step])
                ep_rewards[-1] += rewards[step]
                if dones[step]:
                    # Episode finished mid-batch: log it and start a new one.
                    ep_rewards.append(0.0)
                    next_obs = env.reset()
                    logging.info("Episode: %03d, Reward: %03d" % (len(ep_rewards) - 1, ep_rewards[-2]))
            # Bootstrap value of the state following the batch (critic estimate).
            _, next_value = self.model.action_value(next_obs[None, :])
            returns, advs = self._returns_advantages(rewards, dones, values, next_value)
            # A trick to input actions and advantages through same API.
            acts_and_advs = np.concatenate([actions[:, None], advs[:, None]], axis=-1)
            # Performs a full training step on the collected batch.
            # Note: no need to mess around with gradients, Keras API handles it.
            losses = self.model.train_on_batch(observations, [acts_and_advs, returns])
            logging.debug("[%d/%d] Losses: %s" % (update + 1, updates, losses))
        return ep_rewards

    def test(self, env, render=False):
        """Play one greedy-sampled episode and return its total reward."""
        obs, done, ep_reward = env.reset(), False, 0
        while not done:
            action, _ = self.model.action_value(obs[None, :])
            obs, reward, done, _ = env.step(action)
            ep_reward += reward
            if render:
                env.render()
        return ep_reward

    def _returns_advantages(self, rewards, dones, values, next_value):
        """Compute discounted returns and advantages for one batch."""
        # `next_value` is the bootstrap value estimate of the future state (critic).
        returns = np.append(np.zeros_like(rewards), next_value, axis=-1)
        # Returns are calculated as discounted sum of future rewards.
        for t in reversed(range(rewards.shape[0])):
            returns[t] = rewards[t] + self.gamma * returns[t + 1] * (1 - dones[t])
        returns = returns[:-1]
        # Advantages are equal to returns - baseline (value estimates in our case).
        advantages = returns - values
        return returns, advantages

    def _value_loss(self, returns, value):
        """Weighted MSE between discounted returns and the critic's estimates."""
        # Value loss is typically MSE between value estimates and returns.
        return self.value_c * kls.mean_squared_error(returns, value)

    def _logits_loss(self, actions_and_advantages, logits):
        """Policy-gradient loss minus a weighted entropy bonus."""
        # A trick to input actions and advantages through the same API.
        actions, advantages = tf.split(actions_and_advantages, 2, axis=-1)
        # Sparse categorical CE loss obj that supports sample_weight arg on `call()`.
        # `from_logits` argument ensures transformation into normalized probabilities.
        weighted_sparse_ce = kls.SparseCategoricalCrossentropy(from_logits=True)
        # Policy loss is defined by policy gradients, weighted by advantages.
        # Note: we only calculate the loss on the actions we've actually taken.
        actions = tf.cast(actions, tf.int32)
        policy_loss = weighted_sparse_ce(actions, logits, sample_weight=advantages)
        # Entropy loss can be calculated as cross-entropy over itself.
        probs = tf.nn.softmax(logits)
        entropy_loss = kls.categorical_crossentropy(probs, probs)
        # We want to minimize policy and maximize entropy losses.
        # Here signs are flipped because the optimizer minimizes.
        return policy_loss - self.entropy_c * entropy_loss
# + pycharm={"name": "#%%\n", "is_executing": false}
# Verify everything works with random weights.
agent = A2CAgent(model)
rewards_sum = agent.test(env)
# Report the episode we already ran; the original called agent.test(env) a
# second time here, discarding `rewards_sum` and wasting a full episode.
print("Total Episode Reward: %d out of 200" % rewards_sum)
# -
# ## Training A2C Agent & Results
# + pycharm={"is_executing": false}
# set to logging.WARNING to disable logs or logging.DEBUG to see losses as well
logging.getLogger().setLevel(logging.INFO)
model = Model(num_actions=env.action_space.n)
agent = A2CAgent(model)
rewards_history = agent.train(env)
print("Finished training! Testing...")
print("Total Episode Reward: %d out of 200" % agent.test(env))
plt.style.use('seaborn')
plt.plot(np.arange(0, len(rewards_history), 5), rewards_history[::5])
plt.xlabel('Episode')
plt.ylabel('Total Reward')
plt.show()
# -
# ## Static Computational Graph
# + pycharm={"is_executing": false}
with tf.Graph().as_default():
print("Eager Execution:", tf.executing_eagerly()) # False
model = Model(num_actions=env.action_space.n)
agent = A2CAgent(model)
rewards_history = agent.train(env)
print("Finished training! Testing...")
print("Total Episode Reward: %d out of 200" % agent.test(env))
# -
# ## Benchmarks
#
# Note: wall time doesn't show the whole picture, it's better to compare CPU time.
# + pycharm={"is_executing": false}
# Generate 100k observations to run benchmarks on.
env = gym.make('CartPole-v0')
obs = np.repeat(env.reset()[None, :], 100000, axis=0)
# -
# ### Eager Benchmark
# + pycharm={"is_executing": false}
# %%time
model = Model(env.action_space.n)
model.run_eagerly = True
print("Eager Execution: ", tf.executing_eagerly())
print("Eager Keras Model:", model.run_eagerly)
_ = model.predict_on_batch(obs)
# -
# ### Static Benchmark
# + pycharm={"is_executing": false}
# %%time
with tf.Graph().as_default():
model = Model(env.action_space.n)
print("Eager Execution: ", tf.executing_eagerly())
print("Eager Keras Model:", model.run_eagerly)
_ = model.predict_on_batch(obs)
# -
# ### Default Benchmark
# + pycharm={"is_executing": false}
# %%time
model = Model(env.action_space.n)
print("Eager Execution: ", tf.executing_eagerly())
print("Eager Keras Model:", model.run_eagerly)
_ = model.predict_on_batch(obs)
| actor-critic-agent-with-tensorflow2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.5 64-bit (''deepgrids-env'': conda)'
# metadata:
# interpreter:
# hash: 2800adbcc015a26669636914ab08461ab6a00fd7d4babb9145b4932d63e56c5c
# name: python3
# ---
# # Vaia
# The storm named "Vaia" hit Northern Italy during the night of October 28<sup>th</sup>, 2018. <br>
# The Sisef (Italian Society of Forestry and Forest Ecology) has defined it as "an unprecedented phenomenon in the last 50 years at least".
#
# This is an example on how to use [Google Earth Engine](https://earthengine.google.com) via [Python](https://gee-python-api.readthedocs.io/en/latest/index.html) to visualize the impact of the storm.
# False color images (Sentinel 2) before and after the event are presented.
#
# **Contact**: <NAME> ([Twitter](https://twitter.com/giuliogenova), [GitHub](https://github.com/GiulioGenova), [Linkedin](https://www.linkedin.com/in/giulio-genova-a54b30a4/))
import ee
import geemap
from geemap import ee_initialize
ee_initialize()
# +
# Region of interest: a lon/lat rectangle over the Dolomites (Northern Italy).
roi= ee.Geometry.Polygon(
    [[[11.314077113461716, 46.52210953571128],
      [11.314077113461716, 46.2744492073579],
      [11.894292567563278, 46.2744492073579],
      [11.894292567563278, 46.52210953571128]]])
# Cloud-filtered Sentinel-2 median composite for the growing season before the storm.
before = ee.ImageCollection('COPERNICUS/S2') \
    .filterDate('2017-03-01', '2017-09-30') \
    .filter(ee.Filter.lt('CLOUDY_PIXEL_PERCENTAGE', 20)) \
    .filterBounds(roi) \
    .median()
# Same composite for the season after the storm (Vaia hit in October 2018).
after = ee.ImageCollection('COPERNICUS/S2') \
    .filterDate('2019-03-01', '2019-09-30') \
    .filter(ee.Filter.lt('CLOUDY_PIXEL_PERCENTAGE', 20)) \
    .filterBounds(roi) \
    .median()
# +
# False-color visualization: NIR/red/green (B8/B4/B3) highlights vegetation loss.
vizParams = {'bands': ['B8', 'B4', 'B3'], 'gain': 0.05,'gamma': 2}
left_layer = geemap.ee_tile_layer(before, vizParams, 'Before Vaia (2017)')
right_layer = geemap.ee_tile_layer(after, vizParams, 'After Vaia (2019)')
# -
# Interactive before/after split map centered on the affected area.
Map = geemap.Map(center=(46.4300,11.5790),zoom = 14)
Map.split_map(left_layer, right_layer)
Map
| notebooks/vaia.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python2
# ---
# +
from six.moves import cPickle as pickle
from IPython import display
import matplotlib.pyplot as plt
% matplotlib inline
% config InlineBackend.figure_format = 'retina'
# +
# x-axis values: accuracy was recorded every 10 epochs from 10 through 500.
epochs = range(10, 501, 10)

# Load the pickled test-accuracy histories. Context managers ensure the file
# handles are closed; the original `pickle.load(open(...))` form leaked them.
with open("GAN-Mode3/GAN-Mode3_acc.pickle", "rb") as f:
    acc_trend_GAN_Mode3 = pickle.load(f)
with open("reg-Mode2/regular-Mode2_acc.pickle", "rb") as f:
    acc_trend_reg_Mode2 = pickle.load(f)
with open("reg-Mode1n/regular-Mode1n_acc.pickle", "rb") as f:
    acc_trend_reg_Mode1n = pickle.load(f)
# -
# Compare the three models' test-accuracy curves on one figure.
plt.figure(figsize=(10,8))
plt.plot(epochs, acc_trend_GAN_Mode3, label="GAN-Mode3")
plt.plot(epochs, acc_trend_reg_Mode2, label="Regular Mode2")
plt.plot(epochs, acc_trend_reg_Mode1n, label="Regular Mode1n")
# Anchor the legend just outside the axes on the upper right.
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.xlabel('Epochs')
plt.ylabel('Test Accuracy')
plt.show()
| model_accuracy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %load_ext autoreload
# %autoreload 2
from __future__ import print_function, absolute_import, division
import tensorflow as tf
print(tf.__version__)
tf.test.is_gpu_available()
import train.qvaluetrainer as qvt
# latest_model=!gsutil ls gs://gomoku-data/model5/export/exporter | grep -v temp | sort | tail -1
latest_model = latest_model[0]
latest_model
estimator = tf.contrib.predictor.from_saved_model(latest_model)
from wgomoku import (
GomokuBoard, HeuristicGomokuPolicy, Heuristics, GomokuTools as gt, data_from_game)
stones=gt.string_to_stones(
'e10g8g5f5f6e7f7f8e8g9h10d9g10f10h8h9i9g7e9j8h11i12e11e12g11f11f12e13g13h14i10g14j9k8i8k4i7')
heuristics = Heuristics(kappa=3.0)
board = GomokuBoard(heuristics=heuristics, N=20, stones=stones, disp_width=10)
policy = HeuristicGomokuPolicy(style=2, topn=5, bias=.5)
board.undo(False).undo(False).undo(False).undo(False).undo()
board.display('current')
from wgomoku import create_sample, to_matrix12
s = create_sample(board.stones, 20, 0)
s.shape
to_matrix12(s)
import numpy as np
# Flatten the board sample into a single feature column for the estimator.
sample={'state': np.transpose([s.flatten()])}
q = estimator(sample)
q = q['output'][0]
# Move the last axis to the front (channels-first layout).
q = np.rollaxis(q, 2, 0)
# Strip a one-cell border on each side of the first channel
# (presumably board padding — TODO confirm against create_sample).
unwrapped = q.astype(int)[0][1:-1].T[1:-1].T
unwrapped
from QFunction import heuristic_QF, value_after, least_significant_move
q = heuristic_QF(board, policy)
q[0].astype(int)
board.get_value()
value_after(board, least_significant_move(board), policy)
27**3
| RL_Tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# # Purpose
#
# Get D3 10 and 20 element colors (hexadecimal) and use with matplotlib plots.
#
# # Retrieve D3 colors
#
# Use jsfiddle and enter the following code in the HTML and Javascript panels:
#
# __HTML__
# ```html
# <script src="https://cdnjs.cloudflare.com/ajax/libs/d3/3.5.6/d3.min.js"></script>
#
# <h3>
# D3 category10 colors
# </h3>
# <div id="cat10"></div>
#
# <h3>
# D3 category20 colors
# </h3>
# <div id="cat20"></div>
#
# <h3>
# D3 category20b colors
# </h3>
# <div id="cat20b"></div>
#
# <h3>
# D3 category20c colors
# </h3>
# <div id="cat20c"></div>
# ```
#
# __Javascript__
# ```javascript
# var color10 = d3.scale.category10();
# var r10 = color10.range();
# document.getElementById("cat10").innerHTML = r10;
#
# var color20 = d3.scale.category20();
# var r20 = color20.range();
# document.getElementById("cat20").innerHTML = r20;
#
# var color20b = d3.scale.category20b();
# var r20b = color20b.range();
# document.getElementById("cat20b").innerHTML = r20b;
#
# var color20c = d3.scale.category20c();
# var r20c = color20c.range();
# document.getElementById("cat20c").innerHTML = r20c;
# ```
#
# Hitting the `Update` and `Run` buttons in the upper left part of the window generates the following output:
# ```
# D3 category10 colors
# #1f77b4,#ff7f0e,#2ca02c,#d62728,#9467bd,#8c564b,#e377c2,#7f7f7f,#bcbd22,#17becf
#
# D3 category20 colors
# #1f77b4,#aec7e8,#ff7f0e,#ffbb78,#2ca02c,#98df8a,#d62728,#ff9896,#9467bd,#c5b0d5,#8c564b,#c49c94,#e377c2,#f7b6d2,#7f7f7f,#c7c7c7,#bcbd22,#dbdb8d,#17becf,#9edae5
#
# D3 category20b colors
# #393b79,#5254a3,#6b6ecf,#9c9ede,#637939,#8ca252,#b5cf6b,#cedb9c,#8c6d31,#bd9e39,#e7ba52,#e7cb94,#843c39,#ad494a,#d6616b,#e7969c,#7b4173,#a55194,#ce6dbd,#de9ed6
#
# D3 category20c colors
# #3182bd,#6baed6,#9ecae1,#c6dbef,#e6550d,#fd8d3c,#fdae6b,#fdd0a2,#31a354,#74c476,#a1d99b,#c7e9c0,#756bb1,#9e9ac8,#bcbddc,#dadaeb,#636363,#969696,#bdbdbd,#d9d9d9
# ```
#
# # Convert hexadecimal colors to python lists
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# D3 category10 palette (hex codes copied from the jsfiddle output above),
# split into a list of individual color strings.
category10_colors = '#1f77b4,#ff7f0e,#2ca02c,#d62728,#9467bd,#8c564b,#e377c2,#7f7f7f,#bcbd22,#17becf'
category10_colors = category10_colors.split(',')
category10_colors
# +
category20_colors = '#1f77b4,#aec7e8,#ff7f0e,#ffbb78,#2ca02c,#98df8a,#d62728,#ff9896,#9467bd,#c5b0d5,#8c564b,#c49c94,#e377c2,#f7b6d2,#7f7f7f,#c7c7c7,#bcbd22,#dbdb8d,#17becf,#9edae5'
category20_colors = category20_colors.split(',')
category20b_colors = '#393b79,#5254a3,#6b6ecf,#9c9ede,#637939,#8ca252,#b5cf6b,#cedb9c,#8c6d31,#bd9e39,#e7ba52,#e7cb94,#843c39,#ad494a,#d6616b,#e7969c,#7b4173,#a55194,#ce6dbd,#de9ed6'
category20b_colors = category20b_colors.split(',')
category20c_colors = '#3182bd,#6baed6,#9ecae1,#c6dbef,#e6550d,#fd8d3c,#fdae6b,#fdd0a2,#31a354,#74c476,#a1d99b,#c7e9c0,#756bb1,#9e9ac8,#bcbddc,#dadaeb,#636363,#969696,#bdbdbd,#d9d9d9'
category20c_colors = category20c_colors.split(',')
# -
# # Create test plots to show colors
# One short line per palette entry, vertically offset so every color is visible.
xvals = np.array([0, 1, 2, 3, 4])
y1 = np.array([2, 3, 4, 5, 5])
delta_y = 0.5
# ## category10 colors
fig, ax = plt.subplots()
for i, color in enumerate(category10_colors):
    ax.plot(xvals, y1 + i*delta_y, color=color)
ax.set_title('Category 10 Colors');
# ## category20 colors
# Three side-by-side panels, one per 20-color palette.
fig, axs = plt.subplots(1, 3, figsize=(18,4))
for ax, colors in zip(axs, [category20_colors, category20b_colors, category20c_colors]):
    for i, color in enumerate(colors):
        ax.plot(xvals, y1 + i*delta_y, color=color)
axs[0].set_title('Category 20 Colors')
axs[1].set_title('Category 20b Colors')
axs[2].set_title('Category 20c Colors');
| python/170415_d3_category20_colors.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.5 64-bit
# name: python3
# ---
# importing libraries
from sqlalchemy import create_engine
import pandas as pd
# Connection string for the AWS RDS Postgres instance.
# NOTE(review): credentials/host are redacted here (<EMAIL> placeholder).
engine = create_engine('postgresql+psycopg2://postgres:<EMAIL>.us-east-2.rds.amazonaws.com:5432/postgres')
# Load the RDS view index_4_Actors into a DataFrame.
actor_Index_DF = pd.read_sql_table('index_4_Actors', con=engine)
actor_Index_DF.head()
# Load and display the movies index view.
movie_Index_DF = pd.read_sql_table('index_4_movies', con=engine)
movie_Index_DF.head()
# Load and display the Names table.
actors_df = pd.read_sql_table('Names', engine)
actors_df.head()
# Boolean-mask filter: keep id/name columns for actors whose name starts with 'F'.
actors_df[['imdb_name_id','name']] [actors_df['name'].str.startswith('F')]
| Jupyter_Notebook_Files/indexList.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Logistic Regression
# You should build a machine learning pipeline using a logistic regression model. In particular, you should do the following:
# - Load the `mnist` dataset using [Pandas](https://pandas.pydata.org/docs/reference/api/pandas.read_csv.html). You can find this dataset in the datasets folder.
# - Split the dataset into training and test sets using [Scikit-Learn](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html).
# - Train and test a logistic regression model using [Scikit-Learn](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html).
# - Check the documentation to identify the most important hyperparameters, attributes, and methods of the model. Use them in practice.
| notebooks/machine-learning-1/logistic_regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
import numpy as np
import ectopylasm as ep
import ipyvolume as ipv
# %load_ext line_profiler
# # Mock data
# Let's do only 100 points here.
# 100 random points in the unit cube, shaped (3, N).
xyz = np.array((np.random.random(100), np.random.random(100), np.random.random(100)))
# # Define shape
# +
# Distance between the two planes bounding the "thick plane" slab.
thickness = 0.2
# plane through the cube's center with a y-axis normal
point = (0.5, 0.5, 0.5)
normal = (0, 1, 0)  # already a unit vector; keep it normalized to one
# -
# # Filter points
import sympy as sy
import tqdm
def filter_points_plane_slow(points_xyz, plane_point, plane_normal, plane_thickness, d=None):
    """
    Select the points that are within the thick plane.

    Sympy-based reference implementation kept for benchmarking; the profile
    later in this notebook shows the per-point Point3D construction dominates
    its runtime.

    points_xyz: a vector of shape (3, N) representing N points in 3D space
    plane_point: a point in the plane
    plane_normal: the normal vector to the plane (x, y, z; any iterable)
    plane_thickness: the thickness of the plane (the distance between the two
    composing planes)
    d [optional]: the constant in the plane equation ax + by + cz + d = 0; if
    specified, `plane_point` will be ignored
    """
    if d is not None:
        plane_point = ep.plane_point_from_d(plane_normal, d)
    # Two bounding planes derived from the central plane (see ep.thick_plane_points).
    point1, point2 = ep.thick_plane_points(plane_point, plane_normal, plane_thickness)
    plane1 = sy.geometry.Plane(sy.geometry.Point3D(point1), normal_vector=plane_normal)
    plane2 = sy.geometry.Plane(sy.geometry.Point3D(point2), normal_vector=plane_normal)
    p_filtered = []
    for p_i in tqdm.tqdm(points_xyz.T):
        # NOTE(review): building a sympy Point3D per point is extremely slow
        # (~92% of runtime in the profile shown below).
        sy_point_i = sy.geometry.Point3D(tuple(p_i))
        # Keep the point when it is within `plane_thickness` of both bounds.
        if plane1.distance(sy_point_i) <= plane_thickness and plane2.distance(sy_point_i) <= plane_thickness:
            p_filtered.append(p_i)
    return p_filtered
plane_points = ep.filter_points_plane(xyz, point, normal, thickness)
# %lprun -f filter_points_plane_slow plane_points = filter_points_plane_slow(xyz, point, normal, thickness)
# This gives the following output:
#
# ```
# Timer unit: 1e-06 s
#
# Total time: 28.1696 s
# File: <ipython-input-13-5c9e992f6bd9>
# Function: filter_points_plane_slow at line 1
#
# Line # Hits Time Per Hit % Time Line Contents
# ==============================================================
# 1 def filter_points_plane_slow(points_xyz, plane_point, plane_normal, plane_thickness, d=None):
# 2 """
# 3 Select the points that are within the thick plane.
# 4
# 5 points_xyz: a vector of shape (3, N) representing N points in 3D space
# 6 plane_point: a point in the plane
# 7 plane_normal: the normal vector to the plane (x, y, z; any iterable)
# 8 plane_thickness: the thickness of the plane (the distance between the two
# 9 composing planes)
# 10 d [optional]: the constant in the plane equation ax + by + cz + d = 0; if
# 11 specified, `plane_point` will be ignored
# 12 """
# 13 1 2.0 2.0 0.0 if d is not None:
# 14 plane_point = ep.plane_point_from_d(plane_normal, d)
# 15 1 16.0 16.0 0.0 point1, point2 = ep.thick_plane_points(plane_point, plane_normal, plane_thickness)
# 16 1 17209.0 17209.0 0.1 plane1 = sy.geometry.Plane(sy.geometry.Point3D(point1), normal_vector=plane_normal)
# 17 1 8052.0 8052.0 0.0 plane2 = sy.geometry.Plane(sy.geometry.Point3D(point2), normal_vector=plane_normal)
# 18
# 19 1 1.0 1.0 0.0 p_filtered = []
# 20 101 91274.0 903.7 0.3 for p_i in tqdm.tqdm(points_xyz.T):
# 21 100 26006837.0 260068.4 92.3 sy_point_i = sy.geometry.Point3D(tuple(p_i))
# 22 100 2046189.0 20461.9 7.3 if plane1.distance(sy_point_i) <= plane_thickness and plane2.distance(sy_point_i) <= plane_thickness:
# 23 17 38.0 2.2 0.0 p_filtered.append(p_i)
# 24 1 2.0 2.0 0.0 return p_filtered
# ```
#
# Really surprising result! I would have thought the distance calculation would be the slowest, but in fact the Point3D construction is **ridiculously** slow! So we definitely need to get rid of this whole `sympy.geometry` thing.
def plane_d(point, normal):
    """Return the constant d of the plane equation ax + by + cz + d = 0."""
    # For a point p on the plane with normal n = (a, b, c): d = -(p . n).
    return -sum(p_i * n_i for p_i, n_i in zip(point, normal))
def point_distance_to_plane(point, plane_point, plane_normal, d=None):
    """
    Get signed distance of point to plane.

    The sign of the resulting distance tells you whether the point is in
    the same or the opposite direction of the plane normal vector.

    point: an iterable of length 3 representing a point in 3D space
    plane_point: a point in the plane
    plane_normal: the normal vector to the plane (x, y, z; any iterable)
    d [optional]: the constant in the plane equation ax + by + cz + d = 0; if
    specified, `plane_point` will be ignored
    """
    if d is None:
        d = plane_d(plane_point, plane_normal)

    nx, ny, nz = plane_normal
    # Signed distance formula, see
    # http://mathworld.wolfram.com/Point-PlaneDistance.html
    numerator = nx * point[0] + ny * point[1] + nz * point[2] + d
    return numerator / np.sqrt(nx * nx + ny * ny + nz * nz)
def filter_points_plane_numpy(points_xyz, plane_point, plane_normal, plane_thickness, d=None):
    """
    Select the points that are within the thick plane.

    points_xyz: a vector of shape (3, N) representing N points in 3D space
    plane_point: a point in the plane
    plane_normal: the normal vector to the plane (x, y, z; any iterable)
    plane_thickness: the thickness of the plane (the distance between the two
    composing planes)
    d [optional]: the constant in the plane equation ax + by + cz + d = 0; if
    specified, `plane_point` will be ignored
    """
    if d is not None:
        plane_point = ep.plane_point_from_d(plane_normal, d)
    bound_a, bound_b = ep.thick_plane_points(plane_point, plane_normal, plane_thickness)

    def _inside_slab(candidate):
        # A point lies inside the slab iff it is within `plane_thickness`
        # of both bounding planes.
        return (abs(point_distance_to_plane(candidate, bound_a, plane_normal)) <= plane_thickness
                and abs(point_distance_to_plane(candidate, bound_b, plane_normal)) <= plane_thickness)

    return [candidate for candidate in points_xyz.T if _inside_slab(candidate)]
# %timeit filter_points_plane_numpy(xyz, point, normal, thickness)
# %lprun -f filter_points_plane_numpy plane_points = filter_points_plane_numpy(xyz, point, normal, thickness)
# This runs significantly faster. Interestingly, in a first iteration I still had tqdm in on the for loop, and that was then the dominant factor with 70% of runtime! Removing it shifted dominance to the distance functions, as we would expect:
#
# ```
# Timer unit: 1e-06 s
#
#
# Total time: 0.001685 s
# File: <ipython-input-68-14cb67a3434b>
# Function: filter_points_plane_numpy at line 1
#
# Line # Hits Time Per Hit % Time Line Contents
# ==============================================================
# 1 def filter_points_plane_numpy(points_xyz, plane_point, plane_normal, plane_thickness, d=None):
# 2 """
# 3 Select the points that are within the thick plane.
# 4
# 5 points_xyz: a vector of shape (3, N) representing N points in 3D space
# 6 plane_point: a point in the plane
# 7 plane_normal: the normal vector to the plane (x, y, z; any iterable)
# 8 plane_thickness: the thickness of the plane (the distance between the two
# 9 composing planes)
# 10 d [optional]: the constant in the plane equation ax + by + cz + d = 0; if
# 11 specified, `plane_point` will be ignored
# 12 """
# 13 1 1.0 1.0 0.1 if d is not None:
# 14 plane_point = ep.plane_point_from_d(plane_normal, d)
# 15 1 11.0 11.0 0.7 point1, point2 = ep.thick_plane_points(plane_point, plane_normal, plane_thickness)
# 16
# 17 1 0.0 0.0 0.0 p_filtered = []
# 18 101 96.0 1.0 5.7 for p_i in points_xyz.T:
# 19 100 759.0 7.6 45.0 distance_1 = abs(point_distance_to_plane(p_i, point1, plane_normal))
# 20 100 727.0 7.3 43.1 distance_2 = abs(point_distance_to_plane(p_i, point2, plane_normal))
# 21 100 77.0 0.8 4.6 if distance_1 <= plane_thickness and distance_2 <= plane_thickness:
# 22 17 14.0 0.8 0.8 p_filtered.append(p_i)
# 23 1 0.0 0.0 0.0 return p_filtered
#
# ```
#
# So this is an increase of a factor more than ~10000 in speed! Note that this is still with profiling on, and line_profiler seems to add an overhead of a factor ~4.
28.1696 / 0.00203
# %lprun -f point_distance_to_plane plane_points = filter_points_plane_numpy(xyz, point, normal, thickness)
# With this, we see that precalculating `d` can actually give an additional ~15% boost.
#
# ```
# Timer unit: 1e-06 s
#
# Total time: 0.004167 s
# File: <ipython-input-34-3113593bd746>
# Function: point_distance_to_plane at line 1
#
# Line # Hits Time Per Hit % Time Line Contents
# ==============================================================
# 1 def point_distance_to_plane(point, plane_point, plane_normal, d=None):
# 2 """
# 3 Get signed distance of point to plane.
# 4
# 5 The sign of the resulting distance tells you whether the point is in
# 6 the same or the opposite direction of the plane normal vector.
# 7
# 8 point: an iterable of length 3 representing a point in 3D space
# 9 plane_point: a point in the plane
# 10 plane_normal: the normal vector to the plane (x, y, z; any iterable)
# 11 d [optional]: the constant in the plane equation ax + by + cz + d = 0; if
# 12 specified, `plane_point` will be ignored
# 13 """
# 14 200 227.0 1.1 5.4 if d is None:
# 15 200 726.0 3.6 17.4 d = plane_d(plane_point, plane_normal)
# 16
# 17 200 168.0 0.8 4.0 a, b, c = plane_normal
# 18 # from http://mathworld.wolfram.com/Point-PlaneDistance.html
# 19 200 3046.0 15.2 73.1 return (a * point[0] + b * point[1] + c * point[2] + d) / np.sqrt(a**2 + b**2 + c**2)```
# +
# def point_distance_to_plane(point, plane_point, plane_normal, d=None):
# """
# Get signed distance of point to plane.
# The sign of the resulting distance tells you whether the point is in
# the same or the opposite direction of the plane normal vector.
# point: an iterable of length 3 representing a point in 3D space
# plane_point: a point in the plane
# plane_normal: the normal vector to the plane (x, y, z; any iterable)
# d [optional]: the constant in the plane equation ax + by + cz + d = 0; if
# specified, `plane_point` will be ignored
# """
# if d is None:
# d = plane_d(plane_point, plane_normal)
# a, b, c = plane_normal
# plane_normal = np.array(plane_normal)
# # from http://mathworld.wolfram.com/Point-PlaneDistance.html
# return (np.sum(plane_normal * np.array(point)) + d) / np.sqrt(np.sum(plane_normal * plane_normal))
# +
# # %lprun -f point_distance_to_plane plane_points = filter_points_plane_numpy(xyz, point, normal, thickness)
# -
# That increases runtime, so let's not.
#
# One last try, precalculating d:
def filter_points_plane_numpy(points_xyz, plane_point, plane_normal, plane_thickness, d=None):
    """
    Select the points that are within the thick plane.

    points_xyz: a vector of shape (3, N) representing N points in 3D space
    plane_point: a point in the plane
    plane_normal: the normal vector to the plane (x, y, z; any iterable)
    plane_thickness: the thickness of the plane (the distance between the two
                     composing planes)
    d [optional]: the constant in the plane equation ax + by + cz + d = 0; if
                  specified, `plane_point` will be ignored
    """
    if d is not None:
        plane_point = ep.plane_point_from_d(plane_normal, d)
    # The thick plane is bounded by two parallel planes; precompute their
    # plane-equation constants once so the per-point work stays minimal.
    bound_point_1, bound_point_2 = ep.thick_plane_points(plane_point, plane_normal, plane_thickness)
    d_bound_1 = plane_d(bound_point_1, plane_normal)
    d_bound_2 = plane_d(bound_point_2, plane_normal)

    def _within_slab(pt):
        # A point is kept when it lies within `plane_thickness` of BOTH
        # bounding planes (signed distances taken against the precomputed d's).
        dist_1 = point_distance_to_plane(pt, None, plane_normal, d=d_bound_1)
        dist_2 = point_distance_to_plane(pt, None, plane_normal, d=d_bound_2)
        return abs(dist_1) <= plane_thickness and abs(dist_2) <= plane_thickness

    return [pt for pt in points_xyz.T if _within_slab(pt)]
# %timeit filter_points_plane_numpy(xyz, point, normal, thickness)
# %lprun -f filter_points_plane_numpy plane_points = filter_points_plane_numpy(xyz, point, normal, thickness)
# Ratio of the previous timing to the new one (in microseconds): ~1.17x speed-up.
872/746
# Again a small gain. In the profiling runs there's too much noise to measure the exact gain, but the timeit run shows at least a factor 1.15 faster runs.
#
# ```
# Timer unit: 1e-06 s
#
# Total time: 0.001374 s
# File: <ipython-input-72-00d6dddaec0d>
# Function: filter_points_plane_numpy at line 1
#
# Line # Hits Time Per Hit % Time Line Contents
# ==============================================================
# 1 def filter_points_plane_numpy(points_xyz, plane_point, plane_normal, plane_thickness, d=None):
# 2 """
# 3 Select the points that are within the thick plane.
# 4
# 5 points_xyz: a vector of shape (3, N) representing N points in 3D space
# 6 plane_point: a point in the plane
# 7 plane_normal: the normal vector to the plane (x, y, z; any iterable)
# 8 plane_thickness: the thickness of the plane (the distance between the two
# 9 composing planes)
# 10 d [optional]: the constant in the plane equation ax + by + cz + d = 0; if
# 11 specified, `plane_point` will be ignored
# 12 """
# 13 1 2.0 2.0 0.1 if d is not None:
# 14 plane_point = ep.plane_point_from_d(plane_normal, d)
# 15 1 12.0 12.0 0.9 plane_point_1, plane_point_2 = ep.thick_plane_points(plane_point, plane_normal, plane_thickness)
# 16 1 3.0 3.0 0.2 d1 = plane_d(plane_point_1, plane_normal)
# 17 1 1.0 1.0 0.1 d2 = plane_d(plane_point_2, plane_normal)
# 18
# 19 1 0.0 0.0 0.0 p_filtered = []
# 20 101 92.0 0.9 6.7 for p_i in points_xyz.T:
# 21 100 595.0 6.0 43.3 distance_1 = point_distance_to_plane(p_i, None, plane_normal, d=d1)
# 22 100 566.0 5.7 41.2 distance_2 = point_distance_to_plane(p_i, None, plane_normal, d=d2)
# 23 100 87.0 0.9 6.3 if abs(distance_1) <= plane_thickness and abs(distance_2) <= plane_thickness:
# 24 17 16.0 0.9 1.2 p_filtered.append(p_i)
# 25 1 0.0 0.0 0.0 return p_filtered
# ```
# That'll do for now. So in total, we went from:
# %timeit filter_points_plane_slow(xyz, point, normal, thickness)
# ... ~7 seconds to ~700 microseconds, i.e. a speed-up factor of 10000. Decent.
# # Test
#
# Are the results the same though?
# Verify that the fast implementation matches the slow reference implementation.
p_slow = filter_points_plane_slow(xyz, point, normal, thickness)
p_numpy = filter_points_plane_numpy(xyz, point, normal, thickness)
# np.array_equal answers "are they the same?" with a single bool; the previous
# elementwise `==` would error or broadcast oddly if the two results ever had
# different lengths, and required eyeballing a whole boolean array.
np.array_equal(np.array(p_slow), np.array(p_numpy))
# Yessur!
| req2.4_filter_planes_faster.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %load_ext autoreload
# %autoreload 2
# +
from IPython.display import Image
from IPython.core.display import display, HTML
# Widen the notebook cells to the full browser width.
display(HTML("<style>.container { width:100% !important; }</style>"))
import numpy as np
import matplotlib.pyplot as plt
from skgaip.fusion.fusion_data import FusionData
# -
# # Load and process data
# FusionData bundles the shot signals, the test-shot list and the trained
# FRNN model (project-local class from skgaip.fusion.fusion_data).
data = FusionData("data/fusion/FRNN_1d_sample/shot_data.npz",
                  "data/fusion/FRNN_1d_sample/test_list.npy",
                  model_path="data/fusion/FRNN_1d_sample/FRNN_1D_sample.h5",
                  normalize=False,
                  headless=False)
# # Collect scores and disruptivity
# NOTE(review): the bare next() presumably warms up / initializes the
# featurize generator before the full pass below — confirm it is needed.
next(data.featurize())
# Per-shot score: the model output at the final timestep, keyed then
# re-ordered to match data.keys so scores and labels align.
scores = {key: X[-1].item() for X, Y, key in data.featurize()}
scores = np.asarray([scores[key] for key in data.keys]).squeeze()
disruptive = np.asarray([data.disruptive[key] for key in data.keys])
# # Compute TPR and FPR
# Sweep the decision threshold over the score range; at each threshold count
# true positives (flagged AND disruptive) and false positives (flagged AND
# non-disruptive).
thresholds = np.linspace(-1, 1, 2001)
tp = np.sum(np.array([np.logical_and(scores > t, disruptive) for t in thresholds]), axis=1)
fp = np.sum(np.array([np.logical_and(scores > t, ~disruptive) for t in thresholds]), axis=1)
tpr = tp / np.sum(disruptive)
fpr = fp / np.sum(~disruptive)
plt.plot(fpr, tpr)
# # Compute AUROC
# Trapezoidal integration of the ROC curve.
# BUG FIX: the original used `tpr[:1]` (just the first element, broadcast
# across the whole sum) instead of `tpr[1:]` (the shifted series), which
# breaks the trapezoid rule sum((x_i - x_{i+1}) * (y_i + y_{i+1}) / 2).
np.dot(fpr[:-1] - fpr[1:], (tpr[:-1] + tpr[1:]) / 2)
| notebooks/2020-07-15 Replicating FRNN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Python Numpy Tutorial
# [Source](https://www.datacamp.com/community/tutorials/python-numpy-tutorial)
#
# A NumPy tutorial for beginners in which you'll learn how to create a NumPy array, use broadcasting, access values, manipulate arrays, and much more.
#
# NumPy is, just like SciPy, Scikit-Learn, Pandas, etc. one of the packages that you just can’t miss when you’re learning data science, mainly because this library provides you with an array data structure that holds some benefits over Python lists, such as: being more compact, faster access in reading and writing items, being more convenient and more efficient.
#
# Today we'll focus precisely on this. This NumPy tutorial will not only show you what NumPy arrays actually are and how you can install Python, but you’ll also learn how to make arrays (even when your data comes from files!), how broadcasting works, how you can ask for help, how to manipulate your arrays and how to visualize them.
#
# ## Content
# * What Is A Python Numpy Array?
# * How To Make NumPy Arrays
# * How NumPy Broadcasting Works
# * How Do Array Mathematics Work?
# * How To Subset, Slice, And Index Arrays
# * How To Manipulate Arrays
# * How To Visualize NumPy Arrays
# * Beyond Data Analysis with NumPy
# ## What Is A Python Numpy Array?
#
# You already read in the introduction that NumPy arrays are a bit like Python lists, but still very much different at the same time. For those of you who are new to the topic, let’s clarify what it exactly is and what it’s good for.
#
# As the name gives away, a NumPy array is a central data structure of the ```numpy``` library. The library’s name is short for “Numeric Python” or “Numerical Python”.
#
# This already gives an idea of what you’re dealing with, right?
#
# In other words, NumPy is a Python library that is the core library for scientific computing in Python. It contains a collection of tools and techniques that can be used to solve mathematical models of problems in Science and Engineering on a computer. One of these tools is a high-performance multidimensional array object that is a powerful data structure for efficient computation of arrays and matrices. To work with these arrays, there’s a vast amount of high-level mathematical functions that operate on these matrices and arrays.
#
# Then, what is an array?
#
# When you look at the print of a couple of arrays, you could see it as a grid that contains values of the same type:
# import the library
import numpy as np
# create a 1-dimensional array (12 elements)
my_array = np.array([1, 2, 3, 4, 5, 6, 7, 8 ,9, 10, 11, 12])
print(type(my_array), my_array)
# create a 2-dimensional array (3 rows x 4 columns)
my_2d_array = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
print(my_2d_array)
# create a 3-dimensional array (shape 2 x 2 x 4)
my_3d_array = np.array([[[1, 2, 3, 4], [5, 6, 7, 8]], [[9, 10, 11, 12], [13, 14, 15, 16]]])
print(my_3d_array)
# You see that, in the example above, the data are integers. The array holds and represents any regular data in a structured way.
#
# However, you should know that, on a structural level, an array is basically nothing but pointers. It’s a combination of a memory address, a data type, a shape, and strides:
#
# * The ```data``` pointer indicates the memory address of the first byte in the array,
# * The data type or ```dtype``` pointer describes the kind of elements that are contained within the array,
# * The ```shape``` indicates the shape of the array, and
# * The ```strides``` are the number of bytes that should be skipped in memory to go to the next element. If your strides are (10,1), you need to proceed one byte to get to the next column and 10 bytes to locate the next row.
#
# Or, in other words, an array contains information about the raw data, how to locate an element and how to interpret an element.
#
# You can easily test this by exploring the numpy array attributes:
# +
# Print out memory address
print('Memory Address', my_2d_array.data)
# Print out the shape of `my_2d_array`
print('Shape', my_2d_array.shape)
# Print out the data type of `my_2d_array`
print('Data Type', my_2d_array.dtype)
# Print out the strides of `my_2d_array`
print('Strides', my_2d_array.strides)
# -
# You see that now, you get a lot more information: for example, the data type that is printed out is ‘int64’, the signed 64-bit integer type; This is a lot more detailed! That also means that the array is stored in memory as 96 bytes (as each integer takes up 8 bytes and you have an array of 12 integers). The strides of the array tell us that you have to skip 8 bytes (one value) to move to the next column, but 32 bytes (4 values) to get to the same position in the next row. As such, the strides for the array will be (32,8).
#
# Note that if you set the data type to ```int32```, the strides tuple that you get back will be ```(16, 4)```, as you will still need to move one value to the next column and 4 values to get the same position. The only thing that will have changed is the fact that each integer will take up 4 bytes instead of 8.
#
# <img src='http://community.datacamp.com.s3.amazonaws.com/community/production/ckeditor_assets/pictures/332/content_arrays-axes.png' />
#
# The array that you see above is, as its name already suggested, a 2-dimensional array: you have rows and columns. The rows are indicated as the “axis 0”, while the columns are the “axis 1”. The number of the axis goes up accordingly with the number of the dimensions: in 3-D arrays, of which you have also seen an example in the previous code chunk, you’ll have an additional “axis 2”. Note that these axes are only valid for arrays that have at least 2 dimensions, as there is no point in having this for 1-D arrays;
#
# These axes will come in handy later when you’re manipulating the shape of your NumPy arrays.
# ## How To Make NumPy Arrays
#
# To make a numpy array, you can just use the ```np.array()``` function. All you need to do is pass a list to it, and optionally, you can also specify the data type of the data. If you want to know more about the possible data types that you can pick, [go](https://docs.scipy.org/doc/numpy/user/basics.types.html) here or consider taking a brief look at [DataCamp’s NumPy cheat sheet](https://www.datacamp.com/community/blog/python-numpy-cheat-sheet/).
#
# There’s no need to go and memorize these NumPy data types if you’re a new user; But you do have to know and care what data you’re dealing with. The data types are there when you need more control over how your data is stored in memory and on disk. Especially in cases where you’re working with extensive data, it’s good that you know to control the storage type.
#
# Don’t forget that, in order to work with the ```np.array()``` function, you need to make sure that the ```numpy``` library is present in your environment. The NumPy library follows an import convention: when you import this library, you have to make sure that you import it as ```np```. By doing this, you’ll make sure that other Pythonistas understand your code more easily.
#
# In the following example you’ll create the my_array array that you have already played around with above:
# +
# Import `numpy` as `np`
import numpy as np
# Make the array `my_array`
# NOTE: this REBINDS `my_array` (previously 1-D) to a 2x4 int32 array;
# later cells that use `my_array` see this version.
my_array = np.array([[1,2,3,4], [5,6,7,8]], dtype=np.int32)
# Print `my_array`
print(my_array, my_array.dtype)
# -
# However, sometimes you don’t know what data you want to put in your array, or you want to import data into a numpy array from another source. In those cases, you’ll make use of initial placeholders or functions to load data from text into arrays, respectively.
#
# The following sections will show you how to do this.
# ### How To Make An “Empty” NumPy Array
#
# What people often mean when they say that they are creating “empty” arrays is that they want to make use of initial placeholders, which you can fill up afterward. You can initialize arrays with ones or zeros, but you can also create arrays that get filled up with evenly spaced values, constant or random values.
#
# However, you can still make a totally empty array, too.
#
# Luckily for us, there are quite a lot of functions to make
#
# Try it all out below!
# Create an array of ones
np.ones((3, 4))
# Create an array of zeros
np.zeros((2, 3, 4), dtype=np.int16)
# Create an array with random values
np.random.random((2, 2))
# Create an empty (uninitialized — contents are arbitrary) array
np.empty((3, 2))
# Create a full array (every element set to the constant 6)
np.full((2, 2), 6)
# Create an array of evenly-spaced values (start 10, stop below 31, STEP 5)
print(np.arange(10, 31, 5))
# Create an array of evenly-spaced values (9 SAMPLES between 0 and 2, inclusive)
np.linspace(0, 2, 9)
# * For some, such as ```np.ones()```, ```np.random.random()```, ```np.empty()```, ```np.full()``` or ```np.zeros()``` the only thing that you need to do in order to make arrays with ones or zeros is pass the shape of the array that you want to make. As an option to ```np.ones()``` and ```np.zeros()```, you can also specify the data type. In the case of ```np.full()```, you also have to specify the constant value that you want to insert into the array.
# * With ```np.linspace()``` and ```np.arange()``` you can make arrays of evenly spaced values. The difference between these two functions is that the last value of the three that are passed in the code chunk above designates either the step value for np.linspace() or a number of samples for np.arange(). What happens in the first is that you want, for example, an array of 9 values that lie between 0 and 2. For the latter, you specify that you want an array to start at 10 and per steps of 5, generate values for the array that you’re creating.
# * Remember that NumPy also allows you to create an identity array or matrix with ```np.eye()``` and ```np.identity()```. An identity matrix is a square matrix of which all elements in the principal diagonal are ones, and all other elements are zeros. When you multiply a matrix with an identity matrix, the given matrix is left unchanged.
# ### How To Load NumPy Arrays From Text
#
# Creating arrays with the help of initial placeholders or with some example data is an excellent way of getting started with ```numpy```. But when you want to get started with data analysis, you’ll need to load data from text files.
#
# With that what you have seen up until now, you won’t really be able to do much. Make use of some specific functions to load data from your files, such as ```loadtxt()``` or ```genfromtxt()```.
#
# Let’s say you have the following text files with data:
# Import your data
# NOTE(review): assumes a local 'data.txt' with a one-line header and three
# columns — not shipped with this notebook; verify before running.
x, y, z = np.loadtxt('data.txt',
                    skiprows=1,
                    unpack=True)
print(x)
print(y)
print(z)
# genfromtxt tolerates missing/invalid entries; here they become -999.
my_array2 = np.genfromtxt('data2.txt',
                      skip_header=1,
                      filling_values=-999)
print(my_array2)
# You see that here, you resort to ```genfromtxt()``` to load the data. In this case, you have to handle some missing values that are indicated by the ```'MISSING'``` strings. Since the ```genfromtxt()``` function converts character strings in numeric columns to ```nan```, you can convert these values to other ones by specifying the filling_values argument. In this case, you choose to set the value of these missing values to -999.
#
# If by any chance, you have values that don’t get converted to ```nan``` by ```genfromtxt()```, there’s always the ```missing_values``` argument that allows you to specify what the missing values of your data exactly are.
#
# But this is not all.
#
# Tip: check out [this page](https://docs.scipy.org/doc/numpy/reference/generated/numpy.genfromtxt.html#numpy.genfromtxt) to see what other arguments you can add to import your data successfully.
#
# You now might wonder what the difference between these two functions really is.
#
# The examples indicated this maybe implicitly, but, in general, ```genfromtxt()``` gives you a little bit more flexibility; It’s more robust than ```loadtxt()```.
#
# Let’s make this difference a little bit more practical: the latter, loadtxt(), only works when each row in the text file has the same number of values; So when you want to handle missing values easily, you’ll typically find it easier to use genfromtxt().
#
# But this is definitely not the only reason.
#
# A brief look on the number of arguments that ```genfromtxt()``` has to offer will teach you that there is really a lot more things that you can specify in your import, such as the maximum number of rows to read or the option to automatically strip white spaces from variables.
# ### How To Save NumPy Arrays
# Once you have done everything that you need to do with your arrays, you can also save them to a file. If you want to save the array to a text file, you can use the ```savetxt()``` function to do this:
# NOTE: rebinds `x` to the 1-D float array [0., 1., 2., 3., 4.].
x = np.arange(0.0, 5.0, 1.0)
print(x)
# Write `x` to a comma-delimited text file.
np.savetxt('test.out', x, delimiter=',')
# Remember that ```np.arange()``` creates a NumPy array of evenly-spaced values. The third value that you pass to this function is the step value.
#
# There are, of course, other ways to save your NumPy arrays to text files. Check out the functions in the table below if you want to get your data to binary files or archives:
#
# | Method | Description |
# | --- | --- |
# | ```save()``` | Save an array to a binary file in NumPy .npy format |
# | ```savez()``` | Save several arrays into an uncompressed .npz archive |
# | ```savez_compressed()``` | Save several arrays into a compressed .npz archive |
# ### How To Inspect Your NumPy Arrays
#
# Besides the array attributes that have been mentioned above, namely, data, shape, dtype and strides, there are some more that you can use to easily get to know more about your arrays. The ones that you might find interesting to use when you’re just starting out are the following:
# NOTE: `my_array` here is the 2x4 int32 array created above, so
# itemsize is 4 bytes and nbytes is 32.
# Print the number of `my_array`'s dimensions
print(my_array.ndim)
# Print the number of `my_array`'s elements
print(my_array.size)
# Print information about `my_array`'s memory layout
print(my_array.flags)
# Print the length of one array element in bytes
print(my_array.itemsize)
# Print the total consumed bytes by `my_array`'s elements
print(my_array.nbytes)
# Also note that, besides the attributes, you also have some other ways of gaining more information on and even tweaking your array slightly:
# Print the length of `my_array` (len gives the first dimension: 2 rows)
print(len(my_array))
# Change the data type of `my_array` (returns a converted COPY)
my_float_array = my_array.astype(float)
print(my_float_array)
# ## How NumPy Broadcasting Works
#
# Before you go deeper into scientific computing, it might be a good idea to first go over what broadcasting exactly is: it’s a mechanism that allows NumPy to work with arrays of different shapes when you’re performing arithmetic operations.
#
# To put it in a more practical context, you often have an array that’s somewhat larger and another one that’s slightly smaller. Ideally, you want to use the smaller array multiple times to perform an operation (such as a sum, multiplication, etc.) on the larger array.
#
# To do this, you use the broadcasting mechanism.
#
# However, there are some rules if you want to use it. And, before you already sigh, you’ll see that these “rules” are very simple and kind of straightforward!
#
# * First off, to make sure that the broadcasting is successful, the dimensions of your arrays need to be compatible. Two dimensions are compatible when they are equal. Consider the following example:
# +
# Initialize `x`
x = np.ones((3, 4))
# Check shape of `x`
print(x.shape)
print(x)
# Initialize `y`
y = np.random.random((3, 4))
# Check shape of `y`
print(y.shape)
print(y)
# Add `x` and `y` — identical shapes, so plain elementwise addition.
x + y
# -
# * Two dimensions are also compatible when one of them is 1:
# +
# Initialize `x`
x = np.ones((3, 4))
# Check shape of `x`
print(x.shape)
print(x)
# Initialize `y`
y = np.arange(4)
# Check shape of `y`
print(y.shape)
print(y)
# Subtract `x` and `y` — (3, 4) and (4,) broadcast: `y` is applied to each row.
x - y
# -
# Note that if the dimensions are not compatible, you will get a ```ValueError```.
#
# *Tip: also test what the size of the resulting array is after you have done the computations! You’ll see that the size is actually the maximum size along each dimension of the input arrays.*
#
# In other words, you see that the result of ```x-y``` gives an array with shape ```(3,4)```: ```y``` had a shape of ```(4,)``` and ```x``` had a shape of ```(3,4)```. The maximum size along each dimension of x and y is taken to make up the shape of the new, resulting array.
#
# Lastly, the arrays can only be broadcast together if they are compatible in all dimensions. Consider the following example:
# +
# Initialize `x` and `y`
x = np.ones((3, 4))
y = np.random.random((5,1,4))
# Add `x` and `y` — (3, 4) and (5, 1, 4) broadcast to a (5, 3, 4) result.
x + y
# -
# You see that, even though ```x``` and ```y``` seem to have somewhat different dimensions, the two can be added together.
#
# That is because they are compatible in all dimensions:
#
# * Array ```x``` has dimensions 3 X 4,
# * Array ```y``` has dimensions 5 X 1 X 4
#
# Since you have seen above that dimensions are also compatible if one of them is equal to 1, you see that these two arrays are indeed a good candidate for broadcasting!
#
# What you will notice is that in the dimension where ```y``` has size 1, and the other array has a size greater than 1 (that is, 3), the array with size 1 (here, ```y```) behaves as if it were copied along that dimension.
#
# Note that the shape of the resulting array will again be the maximum size along each dimension of ```x``` and ```y```: the dimension of the result will be ```(5,3,4)```
#
# In short, if you want to make use of broadcasting, you will rely a lot on the shape and dimensions of the arrays with which you’re working.
#
# But what if the dimensions are not compatible?
#
# What if they are not equal or if one of them is not equal to 1?
#
# You’ll have to fix this by manipulating your array! You’ll see how to do this in one of the next sections.
# ## How Do Array Mathematics Work?
#
# You’ve seen that broadcasting is handy when you’re doing arithmetic operations. In this section, you’ll discover some of the functions that you can use to do mathematics with arrays.
#
# As such, it probably won’t surprise you that you can just use ```+```, ```-```, ```*```, ```/``` or ```%``` to add, subtract, multiply, divide or calculate the remainder of two (or more) arrays. However, a big part of why NumPy is so handy, is because it also has functions to do this. The equivalent functions of the operations that you have seen just now are, respectively, ```np.add()```, ```np.subtract()```, ```np.multiply()```, ```np.divide()``` and ```np.remainder()```.
#
# You can also easily do exponentiation and taking the square root of your arrays with ```np.exp()``` and ```np.sqrt()```, or calculate the sines or cosines of your array with ```np.sin()``` and ```np.cos()```. Lastly, its’ also useful to mention that there’s also a way for you to calculate the natural logarithm with ```np.log()``` or calculate the dot product by applying the ```dot()``` to your array.
# NOTE: `x` is the (3, 4) ones array and `y` the (5, 1, 4) random array from
# the previous cell, so each operation broadcasts to shape (5, 3, 4).
# Add `x` and `y`
np.add(x, y)
# Subtract `x` and `y`
np.subtract(x, y)
# Multiply `x` and `y`
np.multiply(x, y)
# Divide `x` and `y`
np.divide(x, y)
# Calculate the remainder of `x` and `y`
np.remainder(x, y)
# Remember how broadcasting works? Check out the dimensions and the shapes of both x and y in your IPython shell. Are the rules of broadcasting respected?
#
# But there is more.
#
# Check out this small list of aggregate functions:
#
# | Function | Description |
# | --- | --- |
# | ```a.sum()``` | Array-wise sum |
# | ```a.min()``` | Array-wise minimum value |
# | ```b.max(axis=0)``` | Maximum value of an array row |
# | ```b.cumsum(axis=1)``` | Cumulative sum of the elements |
# | ```a.mean()``` | Mean |
# | ```b.median()``` | Median |
# | ```a.corrcoef()``` | Correlation coefficient |
# | ```np.std(b)``` | Standard deviation |
# Besides all of these functions, you might also find it useful to know that there are mechanisms that allow you to compare array elements. For example, if you want to check whether the elements of two arrays are the same, you might use the ```==``` operator. To check whether the array elements are smaller or bigger, you use the ```<``` or ```>``` operators.
#
# This all seems quite straightforward, yes?
#
# However, you can also compare entire arrays with each other! In this case, you use the ```np.array_equal()``` function. Just pass in the two arrays that you want to compare with each other, and you’re done.
#
# Note that, besides comparing, you can also perform logical operations on your arrays. You can start with ```np.logical_or()```, ```np.logical_not()``` and ```np.logical_and()```. This basically works like your typical OR, NOT and AND logical operations;
#
# In the simplest example, you use OR to see whether your elements are the same (for example, 1), or if one of the two array elements is 1. If both of them are 0, you’ll return ```FALSE```. You would use AND to see whether your second element is also 1 and NOT to see if the second element differs from 1.
# `x` AND `y` — elementwise logical AND on the truthiness of each value
np.logical_and(x, y)
# `x` OR `y` — elementwise logical OR
np.logical_or(x, y)
# ## How To Subset, Slice, And Index Arrays
#
# Besides mathematical operations, you might also consider taking just a part of the original array (or the resulting array) or just some array elements to use in further analysis or other operations. In such case, you will need to subset, slice and/or index your arrays.
#
# These operations are very similar to when you perform them on Python lists. If you want to check out the similarities for yourself, or if you want a more elaborate explanation, you might consider checking out DataCamp’s Python list tutorial.
#
# If you have no clue at all on how these operations work, it suffices for now to know these two basic things:
#
# * You use square brackets ```[]``` as the index operator, and
# * Generally, you pass integers to these square brackets, but you can also put a colon ```:``` or a combination of the colon with integers in it to designate the elements/rows/columns you want to select.
#
# Besides from these two points, the easiest way to see how this all fits together is by looking at some examples of subsetting:
# Select the element at the 1st index
# NOTE: `my_array` was re-created as a 2-D array earlier, so this prints a
# whole row, not a scalar.
print(my_array[1])
# Select the element at row 1 column 2
print(my_2d_array[1][2])
# Select the element at row 1 column 2 (single indexing operation)
print(my_2d_array[1,2])
# Select the element at depth 1, row 1, column 2 of the 3-D array
print(my_3d_array[1,1,2])
# Something a little bit more advanced than subsetting, if you will, is slicing. Here, you consider not just particular values of your arrays, but you go to the level of rows and columns. You’re basically working with “regions” of data instead of pure “locations”.
# Select items at index 0 and 1
print(my_array[0:2])
# Select items at row 0 and 1, column 1
print(my_2d_array[0:2,1])
# Select items at row 1
# This is the same as saying `my_3d_array[1,:,:]`
print(my_3d_array[1,...])
# Lastly, there’s also indexing. When it comes to NumPy, there are boolean indexing and advanced or “fancy” indexing.
#
# First up is boolean indexing. Here, instead of selecting elements, rows or columns based on index number, you select those values from your array that fulfill a certain condition.
# Try out a simple example
mask = my_array < 2
print(mask)
print()
print(my_array[mask])
# Equivalent one-liner: index directly with the condition.
print(my_array[my_array < 2])
# Try out a simple example
mask = my_array > 3
print(mask)
print(my_array[mask])
# +
# Specify a condition
bigger_than_3 = (my_3d_array >= 3)
# Use the condition to index our 3d array (returns a flattened 1-D selection)
print(my_3d_array[bigger_than_3])
# +
# Specify a condition — combine conditions with `&` (elementwise AND)
mask = (my_3d_array >= 3) & (my_3d_array < 10)
# Use the condition to index our 3d array
print(my_3d_array[mask])
# -
# Note that, to specify a condition, you can also make use of the logical operators ```|``` (OR) and ```&``` (AND). If you would want to rewrite the condition above in such a way (which would be inefficient, but I demonstrate it here for educational purposes :)), you would get ```bigger_than_3 = (my_3d_array > 3) | (my_3d_array == 3)```.
#
# With the arrays that have been loaded in, there aren’t too many possibilities, but with arrays that contain for example, names or capitals, the possibilities could be endless!
#
# When it comes to fancy indexing, that what you basically do with it is the following: you pass a list or an array of integers to specify the order of the subset of rows you want to select out of the original array.
# Select elements at (1,0), (0,1), (1,2) and (0,0)
print(my_2d_array[[1, 0, 1, 0],[0, 1, 2, 0]])
# Select a subset of the rows and columns (two chained fancy-index steps)
print(my_2d_array[[1, 0, 1, 0]][:,[0,1,2,0]])
# Now, the second statement might seem to make less sense to you at first sight. This is normal. It might make more sense if you break it down:
#
# * If you just execute ```my_2d_array[[1,0,1,0]]```, the result is the following:
# Rows 1, 0, 1, 0 of `my_2d_array`, in that order (rows may repeat).
my_2d_array[[1,0,1,0]]
# * What the second part, namely, ```[:,[0,1,2,0]]```, tells you is that you want to keep all the rows of this result, but that you want to change the order of the columns around a bit. You want to display the columns 0, 1, and 2 as they are right now, but you want to repeat column 0 as the last column instead of displaying column number 3. This will give you the following result:
# All rows; columns reordered to 0, 1, 2 and then column 0 repeated.
my_2d_array[:,[0,1,2,0]]
# ## How To Manipulate Arrays
#
# Performing mathematical operations on your arrays is one of the things that you’ll be doing, but probably most importantly to make this and the broadcasting work is to know how to manipulate your arrays.
#
# Below are some of the most common manipulations that you’ll be doing.
#
# ### How To Transpose Your Arrays
#
# What transposing your arrays actually does is permuting the dimensions of it. Or, in other words, you switch around the shape of the array. Let’s take a small example to show you the effect of transposition:
# Print `my_2d_array`
print(my_2d_array)
# Transpose `my_2d_array` — (3, 4) becomes (4, 3)
print(np.transpose(my_2d_array))
# Or use `T` to transpose `my_2d_array` (equivalent shorthand)
print(my_2d_array.T)
# ### Reshaping Versus Resizing Your Arrays
#
# You might have read in the broadcasting section that the dimensions of your arrays need to be compatible if you want them to be good candidates for arithmetic operations. But the question of what you should do when that is not the case, was not answered yet.
#
# Well, this is where you get the answer!
#
# What you can do if the arrays don’t have the same dimensions, is resize your array. You will then return a new array that has the shape that you passed to the ```np.resize()``` function. If you pass your original array together with the new dimensions, and if that new array is larger than the one that you originally had, the new array will be filled with copies of the original array that are repeated as many times as is needed.
#
# However, if you just apply ```np.resize()``` to the array and you pass the new shape to it, the new array will be filled with zeros.
# Print the shape of `x`
print(x.shape)
print(x)
# Resize `x` to (4,3) and (3,4). Note that `np.resize()` (unlike the
# `ndarray.resize` method) fills the new array with repeated copies of
# the original data when more elements are needed.
np.resize(x, (4, 3))
np.resize(x, (3, 4))
# Besides resizing, you can also reshape your array. This means that you give a new shape to an array without changing its data. The key to reshaping is to make sure that the total size of the new array is unchanged. If you take the example of array ```x``` that was used above, which has a size of 3 X 4 or 12, you have to make sure that the new array also has a size of 12.
#
# If you want to calculate the size of an array with code, make sure to use the size attribute: ```x.size``` or ```x.reshape((2,6)).size```:
# Print the size (total number of elements) of `x` to see what's possible
print(x.size)
# +
# Flatten `x` to a 1-D array
z = x.ravel()
# Print `z`
print(z)
# -
# If all else fails, you can also append an array to your original one or insert or delete array elements to make sure that your dimensions fit with the other array that you want to use for your computations.
#
# Another operation that you might keep handy when you’re changing the shape of arrays is ```ravel()```. This function allows you to flatten your arrays. This means that if you ever have 2D, 3D or n-D arrays, you can just use this function to flatten it all out to a 1-D array.
# ### How To Append Arrays
# When you append arrays to your original array, they are “glued” to the end of that original array. If you want to make sure that what you append does not come at the end of the array, you might consider inserting it. Go to the next section if you want to know more.
#
# Appending is a pretty easy thing to do thanks to the NumPy library; You can just make use of the ```np.append()```.
# +
# Append a 1D array to your `my_array`; `np.append` returns a NEW array
new_array = np.append(my_array, [7, 8, 9, 10, 11, 12])
# Print both: `my_array` is unchanged, `new_array` holds the result
print(my_array)
print(new_array)
# +
# Append an extra column to your `my_2d_array` (axis=1 = column-wise)
new_2d_array = np.append(my_2d_array, [[7], [8], [9]], axis=1)
# Print `new_2d_array`
print(new_2d_array)
# -
# Note how, when you append an extra column to my_2d_array, the axis is specified. Remember that axis 1 indicates the columns, while axis 0 indicates the rows in 2-D arrays.
# ### How To Insert And Delete Array Elements
#
# Next to appending, you can also insert and delete array elements. As you might have guessed by now, the functions that will allow you to do these operations are ```np.insert()``` and ```np.delete()```:
# Insert `5` at index 1 (returns a new array; `my_array` is not modified)
np.insert(my_array, 1, 5)
# +
# Delete the value at index 1 (also returns a new array)
# np.delete(my_array,[1])
# -
# ### How To Join And Split Arrays
#
# You can also ‘merge’ or join your arrays. There are a bunch of functions that you can use for that purpose and most of them are listed below.
#
# Try them out, but also make sure to test out what the shape of the arrays is in the IPython shell. The arrays that have been loaded are ```x```, ```my_array```, ```my_resized_array``` and ```my_2d_array```.
# Concatenate `my_array` and `x`
print(np.concatenate((my_array, x)))
# Stack arrays row-wise (vertically)
print(np.vstack((my_array, my_2d_array)))
# A 3x4 array used by the stacking examples below
my_resized_array = np.array([[91, 92, 93, 94],
[91, 92, 93, 94],
[91, 92, 93, 94]])
print(my_resized_array)
# Stack arrays row-wise with the `np.r_` index trick
print(np.r_[my_resized_array, my_2d_array])
# Stack arrays horizontally (the column dimension grows)
print(np.hstack((my_resized_array, my_2d_array)))
# Stack arrays column-wise
print(np.column_stack((my_resized_array, my_2d_array)))
# Stack arrays column-wise with the `np.c_` index trick
print(np.c_[my_resized_array, my_2d_array])
# You’ll note a few things as you go through the functions:
#
# * The number of dimensions needs to be the same if you want to concatenate two arrays with ```np.concatenate()```. As such, if you want to concatenate an array with ```my_array```, which is 1-D, you’ll need to make sure that the second array that you have, is also 1-D.
# * With ```np.vstack()```, you effortlessly combine ```my_array``` with ```my_2d_array```. You just have to make sure that, as you're stacking the arrays row-wise, that the number of columns in both arrays is the same. As such, you could also add an array with shape ```(2,4)``` or ```(3,4)``` to ```my_2d_array```, as long as the number of columns matches. Stated differently, the arrays must have the same shape along all but the first axis. The same holds also for when you want to use ```np.r_[]```.
# * For ```np.hstack()```, you have to make sure that the number of dimensions is the same and that the number of rows in both arrays is the same. That means that you could stack arrays such as ```(2,3)``` or ```(2,4)``` to ```my_2d_array```, which itself as a shape of (2,4). Anything is possible as long as you make sure that the number of rows matches. This function is still supported by NumPy, but you should prefer ```np.concatenate()``` or ```np.stack()```.
# * With ```np.column_stack()```, you have to make sure that the arrays that you input have the same first dimension. In this case, both shapes are the same, but if ```my_resized_array``` were to be ```(2,1)``` or ```(2,)```, the arrays still would have been stacked.
# * ```np.c_[]``` is another way to concatenate. Here also, the first dimension of both arrays needs to match.
#
# When you have joined arrays, you might also want to split them at some point. Just like you can stack them horizontally, you can also do the same but then vertically. You use ```np.hsplit()``` and ```np.vsplit()```, respectively:
my_stacked_array = np.r_[my_resized_array, my_2d_array]
# Split `my_stacked_array` horizontally (column-wise) into 2 equal parts
print(np.hsplit(my_stacked_array, 2))
# Split `my_stacked_array` vertically (row-wise) into 2 equal parts
print(np.vsplit(my_stacked_array, 2))
# What you need to keep in mind when you're using both of these split functions is probably the shape of your array. Let's take the above case as an example: ```my_stacked_array``` has a shape of ```(6,4)```. If you want to select the index at which you want the split to occur, you have to keep the shape in mind.
# ## How To Visualize NumPy Arrays
# Lastly, something that will definitely come in handy is to know how you can plot your arrays. This can especially be handy in data exploration, but also in later stages of the data science workflow, when you want to visualize your arrays.
#
# ### With np.histogram()
# Contrary to what the function might suggest, the ```np.histogram()``` function doesn’t draw the histogram but it does compute the occurrences of the array that fall within each bin; This will determine the area that each bar of your histogram takes up.
#
# What you pass to the ```np.histogram()``` function then is first the input data or the array that you’re working with. The array will be flattened when the histogram is computed.
# Initialize your array: a small 2x2x4 array of 64-bit integers
my_3d_array = np.array([[[1,2,3,4], [5,6,7,8]], [[1,2,3,4], [9,10,11,12]]], dtype=np.int64)
# Pass the array to `np.histogram()`: returns (counts, bin_edges)
print(np.histogram(my_3d_array))
# Specify the bin edges explicitly (integers 0..12)
print(np.histogram(my_3d_array, bins=range(0,13)))
# You’ll see that as a result, the histogram will be computed: the first array lists the frequencies for all the elements of your array, while the second array lists the bins that would be used if you don’t specify any bins.
#
# If you do specify a number of bins, the result of the computation will be different: the floats will be gone and you’ll see all integers for the bins.
#
# There are still some other arguments that you can specify that can influence the histogram that is computed. You can find all of them [here](https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html).
#
# But what is the point of computing such a histogram if you can’t visualize it?
#
# Visualization is a piece of cake with the help of Matplotlib, but you don’t need ```np.histogram()``` to compute the histogram. ```plt.hist()``` does this for itself when you pass it the (flattened) data and the bins:
# +
# Import numpy and matplotlib
import numpy as np
import matplotlib.pyplot as plt
# Construct the histogram with a flattened 3d array and a range of bins;
# `plt.hist` computes the counts itself, so no `np.histogram` call is needed
plt.hist(my_3d_array.ravel(), bins=range(0,13))
# Add a title to the plot
plt.title('Frequency of My 3D Array Elements')
# Show the plot
plt.show()
# -
# ### Using np.meshgrid()
#
# Another way to (indirectly) visualize your array is by using ```np.meshgrid()```. The problem that you face with arrays is that you need 2-D arrays of x and y coordinate values. With the above function, you can create a rectangular grid out of an array of x values and an array of y values: the ```np.meshgrid()``` function takes two 1D arrays and produces two 2D matrices corresponding to all pairs of (x, y) in the two arrays. Then, you can use these matrices to make all sorts of plots.
#
# ```np.meshgrid()``` is particularly useful if you want to evaluate functions on a grid, as the code below demonstrates:
# +
# Import NumPy and Matplotlib
import numpy as np
import matplotlib.pyplot as plt
# Create an array of sample positions (reused for both axes)
points = np.arange(-5, 5, 0.01)
# Make a meshgrid: two 2-D coordinate matrices from the 1-D sample vector
xs, ys = np.meshgrid(points, points)
# Evaluate the distance from the origin at every grid point
z = np.sqrt(xs ** 2 + ys ** 2)
# Display the image on the axes
plt.imshow(z, cmap=plt.cm.gray)
# Draw a color bar
plt.colorbar()
# Show the plot
plt.show()
# -
| _site/lectures/Week 04 - Data Processing and Visualization Part 1/03.a - NumPy Introduction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <a href="http://landlab.github.io"><img style="float: left" src="../../landlab_header.png"></a>
# # Introduction to Landlab: Creating a simple 2D scarp diffusion model
# <hr>
# <small>For more Landlab tutorials, click here: <a href="https://landlab.readthedocs.io/en/latest/user_guide/tutorials.html">https://landlab.readthedocs.io/en/latest/user_guide/tutorials.html</a></small>
# <hr>
#
# This tutorial illustrates how you can use Landlab to construct a simple two-dimensional numerical model on a regular (raster) grid, using a simple forward-time, centered-space numerical scheme. The example is the erosional degradation of an earthquake fault scarp, and which evolves over time in response to the gradual downhill motion of soil. Here we use a simple "geomorphic diffusion" model for landform evolution, in which the downhill flow of soil is assumed to be proportional to the (downhill) gradient of the land surface multiplied by a transport coefficient.
#
# We start by importing the [numpy](https://numpy.org) and [matplotlib](https://matplotlib.org) libraries:
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# ## Part 1: 1D version using numpy
#
# This example uses a finite-volume numerical solution to the 2D diffusion equation. The 2D diffusion equation in this case is derived as follows. Continuity of mass states that:
#
# $\frac{\partial z}{\partial t} = -\nabla \cdot \mathbf{q}_s$,
#
# where $z$ is elevation, $t$ is time, the vector $\mathbf{q}_s$ is the volumetric soil transport rate per unit width, and $\nabla$ is the divergence operator (here in two dimensions). (Note that we have omitted a porosity factor here; its effect will be subsumed in the transport coefficient). The sediment flux vector depends on the slope gradient:
#
# $\mathbf{q}_s = -D \nabla z$,
#
# where $D$ is a transport-rate coefficient---sometimes called *hillslope diffusivity*---with dimensions of length squared per time. Combining the two, and assuming $D$ is uniform, we have a classical 2D diffusion equation:
#
# $\frac{\partial z}{\partial t} = D \nabla^2 z$.
#
# In this first example, we will create a our 1D domain in $x$ and $z$, and set a value for $D$.
#
# This means that the equation we solve will be in 1D.
#
# $\frac{d z}{d t} = -\frac{d q_s}{dx}$,
#
# where
#
# $q_s = -D \frac{d z}{dx}$
#
# Node spacing, 1-D node positions, and an initially flat elevation profile
dx = 1
x = np.arange(0, 100, dx, dtype=float)
z = np.zeros(x.shape, dtype=float)
D = 0.01  # hillslope transport ("diffusivity") coefficient
# Next we must create our fault by uplifting some of the domain. We will increment all elements of `z` in which `x>50`.
z[x>50] += 100
# Finally, we will diffuse our fault for 1,000 years.
#
# We will use a timestep with a [Courant–Friedrichs–Lewy condition](https://en.wikipedia.org/wiki/Courant–Friedrichs–Lewy_condition) of $C_{cfl}=0.2$. This will keep our solution numerically stable.
#
# $C_{cfl} = \frac{\Delta t D}{\Delta x^2} = 0.2$
# +
# Time step from the CFL stability condition (coefficient 0.2)
dt = 0.2 * dx * dx / D
total_time = 1e3  # total run duration
nts = int(total_time/dt)  # number of explicit time steps
z_orig = z.copy()  # keep the initial profile for the comparison plot
for i in range(nts):
    qs = -D * np.diff(z)/dx  # sediment flux between adjacent nodes
    dzdt = -np.diff(qs)/dx  # rate of elevation change at interior nodes
    z[1:-1] += dzdt*dt  # the two boundary nodes stay fixed
plt.plot(x, z_orig, label="Original Profile")
plt.plot(x, z, label="Diffused Profile")
plt.legend()
# -
# The prior example is pretty simple. If this was all you needed to do, you wouldn't need Landlab.
#
# But what if you wanted...
#
# ... to use the same diffusion model in 2D instead of 1D.
#
# ... to use an irregular grid (in 1 or 2D).
#
# ... wanted to combine the diffusion model with a more complex model.
#
# ... have a more complex model you want to use over and over again with different boundary conditions.
#
# These are the sorts of problems that Landlab was designed to solve.
#
# In the next two sections we will introduce some of the core capabilities of Landlab.
#
# In Part 2 we will use the RasterModelGrid, fields, and a numerical utility for calculating flux divergence.
#
# In Part 3 we will use the HexagonalModelGrid.
#
# In Part 4 we will use the LinearDiffuser component.
#
# ## Part 2: 2D version using Landlab's Model Grids
#
# The Landlab model grids are data structures that represent the model domain (the variable `x` in our prior example). Here we will use `RasterModelGrid` which creates a grid with regularly spaced square grid elements. The RasterModelGrid knows how the elements are connected and how far apart they are.
#
# Lets start by creating a RasterModelGrid class. First we need to import it.
from landlab import RasterModelGrid
#
# ### (a) Explore the RasterModelGrid
#
# Before we make a RasterModelGrid for our fault example, lets explore the Landlab model grid.
#
# Landlab considers the grid as a "dual" graph. Two sets of points, lines and polygons that represent 2D space.
#
# The first graph considers points called "nodes" that are connected by lines called "links". The area that surrounds each node is called a "cell".
#
# First, the nodes
from landlab.plot.graph import plot_graph
# A tiny 4x5 raster grid with 3 m x-spacing and 4 m y-spacing
grid = RasterModelGrid((4, 5), xy_spacing=(3,4))
plot_graph(grid, at="node")
# You can see that the nodes are points and they are numbered with unique IDs from lower left to upper right.
#
# Next the links
plot_graph(grid, at="link")
# which are lines that connect the nodes and each have a unique ID number.
#
# And finally, the cells
plot_graph(grid, at="cell")
# which are polygons centered around the nodes.
#
# Landlab is a "dual" graph because it also keeps track of a second set of points, lines, and polygons ("corners", "faces", and "patches"). We will not focus on them further.
#
# ### (b) Use the RasterModelGrid for 2D diffusion
#
# Lets continue by making a new grid that is bigger. We will use this for our next fault diffusion example.
#
# The syntax in the next line says: create a new *RasterModelGrid* object called **mg**, with 25 rows, 40 columns, and a grid spacing of 10 m.
mg = RasterModelGrid((25, 40), 10.0)  # 25 rows x 40 columns, 10 m spacing
# Note the use of object-oriented programming here. `RasterModelGrid` is a class; `mg` is a particular instance of that class, and it contains all the data necessary to fully describe the topology and geometry of this particular grid.
#
# Next we'll add a *data field* to the grid, to represent the elevation values at grid nodes. The "dot" syntax below indicates that we are calling a function (or *method*) that belongs to the *RasterModelGrid* class, and will act on data contained in **mg**. The arguments indicate that we want the data elements attached to grid nodes (rather than links, for example), and that we want to name this data field `topographic__elevation`. The `add_zeros` method returns the newly created NumPy array.
# Allocate a zero-filled elevation field attached to grid nodes
z = mg.add_zeros('topographic__elevation', at='node')
# The above line of code creates space in memory to store 1,000 floating-point values, which will represent the elevation of the land surface at each of our 1,000 grid nodes.
# Let's plot the positions of all the grid nodes. The nodes' *(x,y)* positions are stored in the arrays `mg.x_of_node` and `mg.y_of_node`, respectively.
# Scatter plot of every node's (x, y) position
plt.plot(mg.x_of_node, mg.y_of_node, '.')
# If we bothered to count, we'd see that there are indeed 1,000 grid nodes, and a corresponding number of `z` values:
len(z)  # one elevation value per node: 25 * 40 = 1000
# Now for some tectonics. Let's say there's a fault trace that angles roughly east-northeast. We can describe the trace with the equation for a line. One trick here: by using `mg.x_of_node`, in the line of code below, we are calculating a *y* (i.e., north-south) position of the fault trace for each grid node---meaning that this is the *y* coordinate of the trace at the *x* coordinate of a given node.
# Fault trace y = 50 + 0.25x, evaluated at every node's x-coordinate
fault_trace_y = 50.0 + 0.25 * mg.x_of_node
# Here comes the earthquake. For all the nodes north of the fault (i.e., those with a *y* coordinate greater than the corresponding *y* coordinate of the fault trace), we'll add elevation equal to 10 meters plus a centimeter for every meter east along the grid (just to make it interesting):
# Uplift nodes north of the trace by 10 m plus 1 cm per meter of easting
z[mg.y_of_node >
fault_trace_y] += 10.0 + 0.01 * mg.x_of_node[mg.y_of_node > fault_trace_y]
# (A little bit of Python under the hood: the statement `mg.y_of_node > fault_trace_y` creates a 1000-element long boolean array; placing this within the index brackets will select only those array entries that correspond to `True` in the boolean array)
#
# Let's look at our newly created initial topography using Landlab's *imshow_node_grid* plotting function (which we first need to import).
from landlab.plot.imshow import imshow_grid
# Color map of the initial, faulted topography
imshow_grid(mg, 'topographic__elevation')
# To finish getting set up, we will define two parameters: the transport ("diffusivity") coefficient, `D`, and the time-step size, `dt`. (The latter is set using the Courant condition for a forward-time, centered-space finite-difference solution; you can find the explanation in most textbooks on numerical methods).
D = 0.01  # m2/yr transport coefficient
# Largest stable explicit step (CFL condition, coefficient 0.2)
dt = 0.2 * mg.dx * mg.dx / D
dt  # bare expression: the notebook displays the computed step size
# Boundary conditions: for this example, we'll assume that the east and west sides are closed to flow of sediment, but that the north and south sides are open. (The order of the function arguments is east, north, west, south)
# Close east and west edges to sediment flux; north and south stay open.
# Argument order is (east, north, west, south).
mg.set_closed_boundaries_at_grid_edges(True, False, True, False)
# *A note on boundaries:* with a Landlab raster grid, all the perimeter nodes are boundary nodes. In this example, there are 24 + 24 + 39 + 39 = 126 boundary nodes. The previous line of code set those on the east and west edges to be **closed boundaries**, while those on the north and south are **open boundaries** (the default). All the remaining nodes are known as **core** nodes. In this example, there are 1000 - 126 = 874 core nodes:
len(mg.core_nodes)  # interior (non-boundary) nodes: 1000 - 126 = 874
# One more thing before we run the time loop: we'll create an array to contain soil flux. In the function call below, the first argument tells Landlab that we want one value for each grid link, while the second argument provides a name for this data *field*:
# One flux value per grid link, in a field named "sediment_flux"
qs = mg.add_zeros('sediment_flux', at='link')
# And now for some landform evolution. We will loop through 25 iterations, representing 50,000 years. On each pass through the loop, we do the following:
#
# 1. Calculate, and store in the array `g`, the gradient between each neighboring pair of nodes. These calculations are done on **links**. The gradient value is a positive number when the gradient is "uphill" in the direction of the link, and negative when the gradient is "downhill" in the direction of the link. On a raster grid, link directions are always in the direction of increasing $x$ ("horizontal" links) or increasing $y$ ("vertical" links).
#
# 2. Calculate, and store in the array `qs`, the sediment flux between each adjacent pair of nodes by multiplying their gradient by the transport coefficient. We will only do this for the **active links** (those not connected to a closed boundary, and not connecting two boundary nodes of any type); others will remain as zero.
#
# 3. Calculate the resulting net flux at each node (positive=net outflux, negative=net influx). The negative of this array is the rate of change of elevation at each (core) node, so store it in a node array called `dzdt'.
#
# 4. Update the elevations for the new time step.
for i in range(25):
    # Gradient of z at links (positive = uphill in the link direction)
    g = mg.calc_grad_at_link(z)
    # Flux only on active links; links at closed boundaries stay zero
    qs[mg.active_links] = -D * g[mg.active_links]
    # Net flux divergence at nodes; its negative is the elevation rate
    dzdt = -mg.calc_flux_div_at_node(qs)
    # Advance core nodes one explicit step; boundary nodes stay fixed
    z[mg.core_nodes] += dzdt[mg.core_nodes] * dt
# Let's look at how our fault scarp has evolved.
# View the diffused fault scarp
imshow_grid(mg, 'topographic__elevation')
# Notice that we have just created and run a 2D model of fault-scarp creation and diffusion with fewer than two dozen lines of code. How long would this have taken to write in C or Fortran?
#
# While it was very very easy to write in 1D, writing this in 2D would mean we would have needed to keep track of the adjacency of the different parts of the grid. This is the primary problem that the Landlab grids are meant to solve.
#
# Think about how difficult this would be to hand code if the grid were irregular or hexagonal. In order to conserve mass and implement the differential equation you would need to know how nodes were connected, how long the links were, and how big each cell was.
#
# We do such an example after the next section.
# ### (c) What's going on under the hood?
#
# This example uses a finite-volume numerical solution to the 2D diffusion equation. The 2D diffusion equation in this case is derived as follows. Continuity of mass states that:
#
# $\frac{\partial z}{\partial t} = -\nabla \cdot \mathbf{q}_s$,
#
# where $z$ is elevation, $t$ is time, the vector $\mathbf{q}_s$ is the volumetric soil transport rate per unit width, and $\nabla$ is the divergence operator (here in two dimensions). (Note that we have omitted a porosity factor here; its effect will be subsumed in the transport coefficient). The sediment flux vector depends on the slope gradient:
#
# $\mathbf{q}_s = -D \nabla z$,
#
# where $D$ is a transport-rate coefficient---sometimes called *hillslope diffusivity*---with dimensions of length squared per time. Combining the two, and assuming $D$ is uniform, we have a classical 2D diffusion equation:
#
# $\frac{\partial z}{\partial t} = D \nabla^2 z$.
#
# For the numerical solution, we discretize $z$ at a series of *nodes* on a grid. The example in this notebook uses a Landlab *RasterModelGrid*, in which every interior node sits inside a cell of width $\Delta x$, but we could alternatively have used any grid type that provides nodes, links, and cells.
#
# The gradient and sediment flux vectors will be calculated at the *links* that connect each pair of adjacent nodes. These links correspond to the mid-points of the cell faces, and the values that we assign to links represent the gradients and fluxes, respectively, along the faces of the cells.
#
# The flux divergence, $\nabla \mathbf{q}_s$, will be calculated by summing, for every cell, the total volume inflows and outflows at each cell face, and dividing the resulting sum by the cell area. Note that for a regular, rectilinear grid, as we use in this example, this finite-volume method is equivalent to a finite-difference method.
#
# To advance the solution in time, we will use a simple explicit, forward-difference method. This solution scheme for a given node $i$ can be written:
#
# $\frac{z_i^{t+1} - z_i^t}{\Delta t} = -\frac{1}{A_i} \sum\limits_{j=1}^{N_i} \delta (l_{ij}) q_s (l_{ij}) \lambda(l_{ij})$.
#
# Here the superscripts refer to time steps, $\Delta t$ is time-step size, $q_s(l_{ij})$ is the sediment flux per width associated with the link that crosses the $j$-th face of the cell at node $i$, $\lambda(l_{ij})$ is the width of the cell face associated with that link ($=\Delta x$ for a regular uniform grid), and $N_i$ is the number of active links that connect to node $i$. The variable $\delta(l_{ij})$ contains either +1 or -1: it is +1 if link $l_{ij}$ is oriented away from the node (in which case positive flux would represent material leaving its cell), or -1 if instead the link "points" into the cell (in which case positive flux means material is entering).
#
# To get the fluxes, we first calculate the *gradient*, $G$, at each link, $k$:
#
# $G(k) = \frac{z(H_k) - z(T_k)}{L_k}$.
#
# Here $H_k$ refers the *head node* associated with link $k$, $T_k$ is the *tail node* associated with link $k$. Each link has a direction: from the tail node to the head node. The length of link $k$ is $L_k$ (equal to $\Delta x$ is a regular uniform grid). What the above equation says is that the gradient in $z$ associated with each link is simply the difference in $z$ value between its two endpoint nodes, divided by the distance between them. The gradient is positive when the value at the head node (the "tip" of the link) is greater than the value at the tail node, and vice versa.
#
# The calculation of gradients in $z$ at the links is accomplished with the `calc_grad_at_link` function. The sediment fluxes are then calculated by multiplying the link gradients by $-D$. Once the fluxes at links have been established, the `calc_flux_div_at_node` function performs the summation of fluxes.
#
# ## Part 3: Hexagonal grid
#
# Next we will use an non-raster Landlab grid.
#
# We start by making a random set of points with x values between 0 and 400 and y values of 0 and 250. We then add zeros to our grid at a field called "topographic__elevation" and plot the node locations.
#
# Note that the syntax here is exactly the same as in the RasterModelGrid example (once the grid has been created).
# +
from landlab import HexModelGrid
mg = HexModelGrid((25, 40), 10, node_layout="rect")
z = mg.add_zeros('topographic__elevation', at='node')
plt.plot(mg.x_of_node, mg.y_of_node, '.')
# -
# Next we create our fault trace and uplift the hanging wall.
#
# We can plot just like we did with the RasterModelGrid.
# Same fault geometry and uplift as in the raster example
fault_trace_y = 50.0 + 0.25 * mg.x_of_node
z[mg.y_of_node >
fault_trace_y] += 10.0 + 0.01 * mg.x_of_node[mg.y_of_node > fault_trace_y]
imshow_grid(mg, "topographic__elevation")
# And we can use the same code as before to create a diffusion model!
#
# Landlab supports multiple grid types. You can read more about them [here](https://landlab.readthedocs.io/en/latest/reference/grid/index.html).
qs = mg.add_zeros('sediment_flux', at='link')
# Identical update loop to the raster case: the grid object hides the
# hexagonal cell geometry behind the same node/link interface
for i in range(25):
    g = mg.calc_grad_at_link(z)
    qs[mg.active_links] = -D * g[mg.active_links]
    dzdt = -mg.calc_flux_div_at_node(qs)
    z[mg.core_nodes] += dzdt[mg.core_nodes] * dt
imshow_grid(mg, 'topographic__elevation')
# ## Part 4: Landlab Components
#
# Finally we will use a Landlab component, called the LinearDiffuser [link to its documentation](https://landlab.readthedocs.io/en/latest/reference/components/diffusion.html).
#
# Landlab was designed to have many of the utilities like `calc_grad_at_link`, and `calc_flux_divergence_at_node` to help you make your own models. Sometimes, however, you may use such a model over and over and over. Then it is nice to be able to put it in its own python class with a standard interface.
#
# This is what a Landlab Component is.
#
# There is a whole [tutorial on components](../component_tutorial/component_tutorial.ipynb) and a [page on the User Guide](https://landlab.readthedocs.io/en/latest/user_guide/components.html). For now we will just show you what the prior example looks like if we use the LinearDiffuser.
#
# First we import it, set up the grid, and uplift our fault block.
# +
from landlab.components import LinearDiffuser
# Rebuild the hex grid and the faulted initial topography
mg = HexModelGrid((25, 40), 10, node_layout="rect")
z = mg.add_zeros('topographic__elevation', at='node')
fault_trace_y = 50.0 + 0.25 * mg.x_of_node
z[mg.y_of_node >
fault_trace_y] += 10.0 + 0.01 * mg.x_of_node[mg.y_of_node > fault_trace_y]
# -
# Next we instantiate a LinearDiffuser. We have to tell the component what value to use for the diffusivity.
# Instantiate the component with the same transport coefficient D
ld = LinearDiffuser(mg, linear_diffusivity=D)
# Finally we run the component forward in time and plot. Like many Landlab components, the LinearDiffuser has a method called "run_one_step" that takes one input, the timestep dt. Calling this method runs the LinearDiffuser forward in time by an increment dt.
# Advance the component 25 steps of length dt, then plot the result
for i in range(25):
    ld.run_one_step(dt)
imshow_grid(mg, 'topographic__elevation')
# Congratulations on making it to the end of this tutorial!
#
# ### Click here for more <a href="https://landlab.readthedocs.io/en/latest/user_guide/tutorials.html">Landlab tutorials</a>
| notebooks/tutorials/fault_scarp/landlab-fault-scarp.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ! pip install requests
# ! pip install colorama
# Optional initialisation, left disabled in this notebook:
#from colorama import init
#init()
from colorama import Fore
# Fore constants are escape sequences that recolor terminal text
print(Fore.GREEN + "Green")
# +
# %%writefile testcol.py
#from colorama import init
#init()
from colorama import Fore
# Each Fore constant switches the foreground color for subsequent text
print(Fore.BLUE + "Blue")
print(Fore.RED + "Red")
# -
# ! python testcol.py
# # Writing a Module
# %%writefile file1.py
def myfunc(x):
    """Return all even integers in the range [0, x)."""
    return list(range(0, x, 2))

list1 = myfunc(21)
# %%writefile file2.py
import file1
# Mutate the module-level list created when file1 was imported, then show it
file1.list1.append(100)
print(file1.list1)
# ! python file2.py
# # Passing command line arguments
# %%writefile file3.py
import sys
import file1
# First command-line argument: the upper bound passed to myfunc
num = int(sys.argv[1])
print(file1.myfunc(num))
# ! python file3.py 11
# # __name__ == "__main__"
# ! python fileone.py
# ! python filetwo.py
| Modules and Packages.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import cv2
import numpy as np
import os
import matplotlib.pyplot as plt
import sys
import tools as tl
# -
# SETTING IMAGES
def show_image(img):
    """Display *img* in grayscale on a large (15x15 inch) figure."""
    figure, axes = plt.subplots(figsize=(15, 15))
    axes.imshow(img, cmap='gray')
    plt.show()
# +
# Test image file names (read from the current working directory)
img_list = ['1.png','2.png','3.png','4.jpg','5.jpg','5.png']
imgs = []   # grayscale versions, used for processing
vises = []  # color versions, used for visualization
for img in img_list:
    tmp = cv2.imread(img, 0)  # flag 0 = load as grayscale
    imgs.append(tmp)
    tmp = cv2.imread(img)  # default flag = load as color
    vises.append(tmp)
    #show_image(tmp)
# -
# GET GRADIENT
def getGradient(gray, x = 0, y = 0, useGradient = True):
    """Binarize `gray` either as a normalized Sobel gradient magnitude
    (useGradient=True) or via inverted Gaussian adaptive thresholding.

    `x` and `y` are the Sobel derivative orders passed straight to cv2.
    Returns a uint8 image of the same shape as `gray`.
    """
    if not useGradient:
        # Inverted binary image from adaptive Gaussian thresholding.
        return cv2.adaptiveThreshold(gray,
                                     255,
                                     cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                     cv2.THRESH_BINARY_INV,
                                     11,
                                     2)
    # Absolute Sobel response, so negative gradients contribute too.
    grad = np.absolute(cv2.Sobel(gray, ddepth=cv2.CV_32F, dx=x, dy=y, ksize=3))
    lo, hi = np.min(grad), np.max(grad)
    if hi - lo > 0:
        # Stretch the response to the full 0-255 uint8 range.
        return (255 * ((grad - lo) / float(hi - lo))).astype("uint8")
    # Completely flat response: return an all-zero image.
    return np.zeros(gray.shape, dtype = "uint8")
# +
# Sobel-gradient binarization of every image (x-derivative only)
bin_imgs = []
###
for img in imgs:
    tmp = getGradient(img,x=1,useGradient = True)
    bin_imgs.append(tmp)
    #show_image(tmp)
# Adaptive-threshold binarization of the same images, for comparison
true_bin_imgs = []
###
for img in imgs:
    tmp = getGradient(img,x=1,useGradient = False)
    true_bin_imgs.append(tmp)
    #show_image(tmp)
# +
# Vertical projection profile: per-row sum of white pixels (scaled by 255),
# paired with a rendering of the profile produced by the `tl` helper module
verps = []
for i in range(len(imgs)):
    verp = np.sum(bin_imgs[i], axis=1) / 255 #initially axis = 1
    drawed_verp = tl.getDrawProjectionVer(imgs[i], verp)
    verps.append([verp,drawed_verp])
    #bigImg = tl.concat_hor((imgs[i],drawed_verp))
    #show_image(bigImg)
# + active=""
# ADAPTIVE FILTER SIZE
# +
# Cut a 2-pixel-wide vertical strip of each drawn profile at half the
# projection maximum and find its contours; the contour heights are used
# below to estimate a characteristic feature height per image.
cons_and_hiers = []
halfs = []
for verp in verps:
    half = int(np.max(verp[0]) / 2)
    halfs.append(half)
    sliceLine = verp[1][:,(half-1):(half+1)]
    contours, hierarchy = cv2.findContours(cv2.cvtColor( sliceLine,
                                           cv2.COLOR_BGR2GRAY),
                                           cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_SIMPLE)
    cons_and_hiers.append([contours, hierarchy])
# +
# Bounding-box height of every contour, grouped per image
global_heights = []
for ch in cons_and_hiers:
    local_heights = []
    for cnt in ch[0]:  # ch[0] is the contour list from cv2.findContours
        x,y,w,h = cv2.boundingRect(cnt)  # only the height h is kept
        local_heights.append(h)
    global_heights.append(local_heights)
#print(global_heights)
# +
median_heights = []
for heights in global_heights:
    # 1.5x the median contour height gives a tolerant per-image estimate
    # of the text-line height.
    median_height = (np.median(np.asarray(heights)) * 1.5).astype(int)
    print("medianHeight", median_height)
    # Bug fix: this append had been commented out, leaving median_heights
    # empty; later cells iterate it for smoothing-kernel sizes and index
    # median_heights[i], which fails on an empty list.
    median_heights.append(median_height)
# -
cnt = 0
for drawed_verp in verps:
    # Draw the red vertical sampling line at x = half on each
    # projection drawing (the strip used for contour analysis above).
    drawed_verp[1] = cv2.line(drawed_verp[1],
                              (halfs[cnt],0),
                              (halfs[cnt],drawed_verp[1].shape[0]),
                              (0,0,255),
                              1)
    cnt+=1
    #print(drawed_verp[1])
# Consolve peaks
# +
kernels = []
verps_convolved = []
drawed_verps_conv = []
# One smoothing kernel per image: the adaptive, median-derived line height.
for median_height in median_heights:
    kernel = median_height
    kernels.append(kernel)
# Smooth each vertical projection with a moving average ("box" filter)
# of its own image's kernel size.  zip() pairs kernel i with projection
# i directly, replacing the original O(n^2) cnt1/cnt2 index-matching
# loops (behaviour is identical for equal-length lists).
for kernel, verp in zip(kernels, verps):
    print(kernel)
    verp_convolved = np.convolve(verp[0], np.ones((kernel,))/kernel, mode='same')
    verps_convolved.append(verp_convolved)
# Render each smoothed projection alongside its source image.
for img, verp_convolved in zip(imgs, verps_convolved):
    drawed_verp_conv = tl.getDrawProjectionVer(img, verp_convolved)
    drawed_verps_conv.append(drawed_verp_conv)
# +
global_band_P1_ranges = []
global_peaks = []
# Band-edge fractions of the peak height (left and right cut-offs).
c1 = 0.2
c2 = 0.4
# Per-image stopping thresholds for the peak search below.
args = [30,30,80,13,15,21]
args1 = [[0.2,0.4],
         [0.2,0.4],
         [0.2,0.4],
         [0.2,0.4],
         [0.2,0.4],
         [0.2,0.4]
         ]
for i in range(len(imgs)):
    band_P1_ranges = []
    peaks = []
    # Iteratively take the highest remaining peak of the smoothed
    # projection, expand it left/right to a band [yb0, yb1), then zero
    # that band out so the next peak can be found.
    while np.max(verps_convolved[i]) > args[i]:
        ybm = np.argmax(verps_convolved[i])
        yb0 = tl.findb0(verps_convolved[i],
                        ybm,
                        args1[i][0] * verps_convolved[i][ybm])
        yb1 = tl.findb1(verps_convolved[i],
                        ybm,
                        args1[i][1] * verps_convolved[i][ybm])
        # Keep only bands taller than the adaptive line height.
        if yb1 - yb0 > median_heights[i]:
            band_P1_ranges.append((yb0,yb1))
            peaks.append((int(verps_convolved[i][ybm]), ybm))
        verps_convolved[i][yb0:yb1] = 0
    global_band_P1_ranges.append(band_P1_ranges)
    global_peaks.append(peaks)
# -
# draw peaks
# NOTE(review): each peak is stored as (profile value, row); cv2.circle
# takes (x, y), so this plots value-vs-row on the projection drawing.
for i in range(len(imgs)):
    for peak in global_peaks[i]:
        cv2.circle(drawed_verps_conv[i], peak, 5, (255,0,0), -1)
# draw bands
bandsImgs = []
for i in range(len(imgs)):
    bandsImg = np.zeros(vises[i].shape, dtype = np.uint8)
    for band in global_band_P1_ranges[i]:
        yt, yb = band
        # Paint the detected text band green on the overlay image.
        bandsImg[yt:yb] = [0,255,0]
        # Horizontal projection of the thresholded band: white pixels per column.
        horp = np.sum(true_bin_imgs[i][yt:yb], axis=0) / 255 #initially axis = 1
        drawed_horp = tl.getDrawProjectionHor(imgs[i], horp)
        # Morphological closing merges nearby columns of the drawing.
        kernel = np.ones((8,8),dtype = np.uint8)
        drawed_horp = cv2.morphologyEx(drawed_horp,cv2.MORPH_CLOSE,kernel)
        kernel = np.ones((2,2),dtype = np.uint8)
        #drawed_horp = cv2.morphologyEx(drawed_horp,cv2.MORPH_OPEN,kernel)
        start = 0
        end = 0
        # Scan columns left-to-right at a per-image probe row, recording
        # [start, end) runs of white (candidate words).
        for x in range(drawed_horp.shape[1]):
            # NOTE(review): max_y / max_ys / limit are loop-invariant but
            # recomputed on every column; left unchanged here.
            max_y = drawed_horp.shape[0]
            max_y-= int(max_y/5)
            max_ys =[int(max_y/50),
                     int(max_y/5),
                     int(max_y/5),
                     int(max_y/5)
                     ,int(max_y/5),
                     int(max_y/5)]
            limit = [
                     6,
                     30,
                     30,
                     30,
                     30,
                     30
                    ]
            cnt = 0
            if start == 0:
                # Entering a white run: record its first column.
                if (drawed_horp[max_ys[i]][x][0] == 255 and drawed_horp[max_ys[i]][x][1] == 255 and drawed_horp[max_ys[i]][x][2] == 255):
                    start = x
            else:
                # Inside a run: the first black column ends it.
                if (drawed_horp[max_ys[i]][x][0] == 0 and drawed_horp[max_ys[i]][x][1] == 0 and drawed_horp[max_ys[i]][x][2] == 0):
                    end = x
                    cnt +=1
            if start != 0 and end != 0:
                if(end - start <limit[i]):
                    # Run shorter than the per-image limit: drop the end
                    # marker and keep scanning (extends the run).
                    end = 0
                else:
                    y = bin_imgs[i].shape[0]
                    # Mark word boundaries directly on the grayscale image.
                    imgs[i][yt:yb,start-1:start+1] = 70
                    imgs[i][yt:yb,end-1:end+1] = 30
                    #imgs[i] = cv2.line(imgs[i] , (start,yt),(start,yb),(255,255,0),1)
                    #imgs[i] = cv2.line(imgs[i] , (end,yt),(end,yb),(0,0,255),1)
                    start = 0
                    end = 0
        bigImg1 = tl.concat_ver((imgs[i][yt:yb],drawed_horp))
        show_image(bigImg1)
    bandsImgs.append(bandsImg)
""" for i in range(len(imgs)):
vises[i] = cv2.addWeighted(vises[i], 0.6, bandsImgs[i], 0.4, 0)
"""
for i in range(len(imgs)):
bigImg1 = tl.concat_hor((imgs[i], verps[i][1], drawed_verps_conv[i]))
fig, ax = plt.subplots(figsize=(50, 50))
ax.imshow(bigImg1, cmap='gray')
plt.show()
| Lab1/.ipynb_checkpoints/Task1-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# <font color='grey'>PROJECT SPECIFICATION</font>
# # Identify Fraud from Enron Email
# ### Quality of Code
# <div class="alert alert-block alert-success">
# <b>CRITERIA</b><br>MEETS SPECIFICATIONS
# </div>
#
#
# <div class="alert alert-block alert-info">
# <b>Functionality:</b><br> Code reflects the description in the answers to questions in the writeup. i.e. code performs the functions documented in the writeup and the writeup clearly specifies the final analysis strategy.
# </div>
#
# <div class="alert alert-block alert-info">
# <b>Usability:</b><br> poi_id.py can be run to export the dataset, list of features and algorithm, so that the final algorithm can be checked easily using tester.py.
# </div>
#
#
# ### Understanding the Dataset and Question
#
#
# <div class="alert alert-block alert-info">
# <b>Data Exploration (related lesson: "Datasets and Questions"):</b><br> Student response addresses the most important characteristics of the dataset and uses these characteristics to inform their analysis.
# </div>
#
# <div class="alert alert-block alert-info">
# <b>Important characteristics include:</b><br><ul><li> total number of data points
# <li>allocation across classes (POI/non-POI)
# <li>number of features used
# <li>are there features with many missing values? etc.
# </div>
#
#
#
# <div class="alert alert-block alert-info">
# <b>Outlier Investigation (related lesson: "Outliers"):</b><br> Student response identifies outlier(s) in the financial data, and explains how they are removed or otherwise handled.
# </div>
#
#
#
#
# ### Optimize Feature Selection/Engineering
# <div class="alert alert-block alert-info">
# <b>Create new features (related lesson: "Feature Selection"):</b><br> At least one new feature is implemented. Justification for that feature is provided in the written response. The effect of that feature on final algorithm performance is tested or its strength is compared to other features in feature selection. The student is not required to include their new feature in their final feature set.
# </div>
#
# <div class="alert alert-block alert-info">
# <b>Intelligently select features (related lesson: "Feature Selection"):</b><br> Univariate or recursive feature selection is deployed, or features are selected by hand (different combinations of features are attempted, and the performance is documented for each one). Features that are selected are reported and the number of features selected is justified. For an algorithm that supports getting the feature importances (e.g. decision tree) or feature scores (e.g. SelectKBest), those are documented as well.
# </div>
#
# <div class="alert alert-block alert-info">
# <b>Properly scale features (related lesson: "Feature Scaling"):</b><br> If algorithm calls for scaled features, feature scaling is deployed.
# </div>
#
# ### Pick and Tune an Algorithm
# <div class="alert alert-block alert-info">
# <b>Pick an algorithm (related lessons: "Naive Bayes" through "Choose Your Own Algorithm"):</b><br> At least two different algorithms are attempted and their performance is compared, with the best performing one used in the final analysis.
# </div>
#
#
# <div class="alert alert-block alert-info">
# <b>Discuss parameter tuning and its importance:</b><br> Response addresses what it means to perform parameter tuning and why it is important.
# </div>
#
# <div class="alert alert-block alert-info">
# <b>Tune the algorithm (related lesson: "Validation"):</b><br> At least one important parameter tuned with at least 3 settings investigated systematically, or any of the following are true:
# <ul>
# <li>GridSearchCV used for parameter tuning</li>
# <li>Several parameters tuned</li>
# <li>Parameter tuning incorporated into algorithm selection (i.e. parameters tuned for more than one algorithm, and best algorithm-tune combination selected for final analysis).</li>
# </ul>
#
# </div>
#
#
# ### Validate and Evaluate
#
# <div class="alert alert-block alert-info">
# <b>Usage of Evaluation Metrics (related lesson: "Evaluation Metrics"):</b><br> At least two appropriate metrics are used to evaluate algorithm performance (e.g. precision and recall), and the student articulates what those metrics measure in context of the project task.
# </div>
#
#
#
#
# <div class="alert alert-block alert-info">
# <b>Discuss validation and its importance.:</b><br> Response addresses what validation is and why it is important.
# </div>
#
#
# <div class="alert alert-block alert-info">
# <b>Validation Strategy (related lesson "Validation"):</b><br> Performance of the final algorithm selected is assessed by splitting the data into training and testing sets or through the use of cross validation, noting the specific type of validation performed.
# </div>
#
#
# <div class="alert alert-block alert-info">
# <b>Algorithm Performance:</b><br>When tester.py is used to evaluate performance, precision and recall are both at least 0.3
# </div>
#
#
#
#
#
#
#
#
| UDACITY PROJECT SPECIFICATION RUBRIC.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## California wildfires 2017 - Thomas Fire analysis
# The Thomas Fire was a massive wildfire that started in early December 2017 in Ventura and Santa Barbara counties and grew into California's largest fire ever.
#
# 
import arcgis
from arcgis import *
from arcgis.mapping import MapImageLayer
# Sign in to the ArcGIS playground portal used throughout the notebook.
gis = GIS("https://python.playground.esri.com/portal", "arcgis_python", "amazing_arcgis_123")
# ## Visualize the extent of damage
# +
from ipywidgets import *
# Tile layer with post-fire Digital Globe imagery (Dec 11th capture).
postfire = MapImageLayer('https://tiles.arcgis.com/tiles/DO4gTjwJVIJ7O9Ca/arcgis/rest/services/Digital_Globe_Imagery_Dec_11th/MapServer')
def side_by_side(address):
    """Return an HBox showing *address* on two maps: the current
    satellite basemap (left) and the post-fire imagery (right)."""
    location = geocode(address)[0]
    satmap1 = gis.map(location)
    satmap1.basemap = 'satellite'
    satmap2 = gis.map(location)
    satmap2.add_layer(postfire)
    satmap1.layout=Layout(flex='1 1', padding='6px', height='450px')
    satmap2.layout=Layout(flex='1 1', padding='6px', height='450px')
    box = HBox([satmap1, satmap2])
    return box
# -
# ### Montclair Dr, Ventura, CA
side_by_side('Montclair Dr, Ventura, CA')
# ### Vista Del Mar Hospital, Ventura, CA
side_by_side('801 Seneca St, Ventura, CA 93001')
# ## Remote Sensing and Image Processing
# Locate the public multispectral Landsat imagery layer (outside this org).
landsat_item = gis.content.search('title:Multispectral Landsat', 'Imagery Layer', outside_org=True)[0]
landsat = landsat_item.layers[0]
landsat_item
# + [markdown] heading_collapsed=true
# ### Select before and after rasters
# + hidden=true
# Area of interest (Web Mercator) around Ventura used to filter scenes.
aoi = {'spatialReference': {'latestWkid': 3857, 'wkid': 102100}, 'type': 'extent',
       'xmax': -13305000, 'xmin': -13315000, 'ymax': 4106000, 'ymin': 4052000}
arcgis.env.analysis_extent = {"xmin":-13337766,"ymin":4061097,"xmax":-13224868,"ymax":4111469,
                              "spatialReference":{"wkid":102100,"latestWkid":3857}}
landsat.extent = aoi
# + hidden=true
import pandas as pd
from datetime import datetime
# Best-quality scenes (Category = 1) intersecting the AOI around the
# fire's start (mid-Nov 2017 to Jan 2018).
selected = landsat.filter_by(where="(Category = 1)",
                             time=[datetime(2017, 11, 15), datetime(2018, 1, 1)],
                             geometry=arcgis.geometry.filters.intersects(aoi))
df = selected.query(out_fields="AcquisitionDate, GroupName, CloudCover, DayOfYear",
                    order_by_fields="AcquisitionDate").sdf
# AcquisitionDate arrives as epoch milliseconds; convert to timestamps.
df['AcquisitionDate'] = pd.to_datetime(df['AcquisitionDate'], unit='ms')
df.tail(5)
# + hidden=true
prefire = landsat.filter_by('OBJECTID=668630') # 2017-11-23
midfire = landsat.filter_by('OBJECTID=681950') # 2017-12-09
# -
# ## Visual Assessment
# +
from arcgis.raster.functions import *
apply(midfire, 'Natural Color with DRA')
# -
# ### Visualize Burn Scars
# Extract the [6, 4, 1] bands to improve visibility of fire and burn scars. This band combination pushes further into the SWIR range of the electromagnetic spectrum, where there is less susceptibility to smoke and haze generated by a burning fire.
extract_band(midfire, [6,4,1])
extract_band(prefire, [6,4,1])
# For comparison, the same area before the fire started shows no burn scar.
# ## Quantitative Assessment
# The **Normalized Burn Ratio (NBR)** can be used to delineate the burnt areas and identify the severity of the fire.
#
# The formula for the NBR is very similar to that of NDVI except that it uses near-infrared band 5 and the short-wave infrared band 7:
# \begin{align}
# {\mathbf{NBR}} = \frac{\mathbf{B_5} - \mathbf{B_7}}{\mathbf{B_5} + \mathbf{B_7}} \\
# \end{align}
#
# The NBR equation was designed to be calculated from reflectance, but it can be calculated from radiance and digital_number_(dn) with changes to the burn severity table below.
#
# For a given area, NBR is calculated from an image just prior to the burn and a second NBR is calculated for an image immediately following the burn. Burn extent and severity is judged by taking the difference between these two index layers:
#
# \begin{align}
# {\Delta \mathbf{NBR}} = \mathbf{NBR_{prefire}} - \mathbf{NBR_{postfire}} \\
# \end{align}
#
# The meaning of the ∆NBR values can vary by scene, and interpretation in specific instances should always be based on some field assessment. However, the following table from the USGS FireMon program can be useful as a first approximation for interpreting the NBR difference:
#
#
# | \begin{align}{\Delta \mathbf{NBR}} \end{align} | Burn Severity |
# | ------------- |:-------------:|
# | 0.1 to 0.27 | Low severity burn |
# | 0.27 to 0.44 | Medium severity burn |
# | 0.44 to 0.66 | Moderate severity burn |
# | > 0.66 | High severity burn |
#
# [Source: http://wiki.landscapetoolbox.org/doku.php/remote_sensing_methods:normalized_burn_ratio]
# ### Use Band Arithmetic and Map Algebra
# +
# NBR = (NIR - SWIR2) / (NIR + SWIR2) using bands 5 and 7.
# NOTE(review): the +1000 in the denominator presumably damps noise /
# avoids division by zero in no-data pixels — confirm intent.
nbr_prefire = band_arithmetic(prefire, "(b5 - b7) / (b5 + b7+1000)")
nbr_postfire = band_arithmetic(midfire, "(b5 - b7) / (b5 + b7+1000)")
# dNBR: positive values indicate vegetation loss (burn).
nbr_diff = nbr_prefire - nbr_postfire
# -
# Classify dNBR into the four USGS FireMon severity classes and colour them.
burnt_areas = colormap(remap(nbr_diff,
                             input_ranges=[0.1, 0.27,   # low severity
                                           0.27, 0.44,  # medium severity
                                           0.44, 0.66,  # moderate severity
                                           0.66, 1.00], # high severity burn
                             output_values=[1, 2, 3, 4],
                             no_data_ranges=[-1, 0.1], astype='u8'),
                       colormap=[[4, 0xFF, 0xC3, 0], [3, 0xFA, 0x8E, 0], [2, 0xF2, 0x55, 0], [1, 0xE6, 0, 0]])
burnt_areas.draw_graph()
# + [markdown] heading_collapsed=true
# ### Area calculation
# + hidden=true
ext = {"xmax": -13246079.10806628, "ymin": 4035733.9433013694, "xmin": -13438700.419344831, "ymax": 4158033.188557592,
"spatialReference": {"wkid": 102100, "latestWkid": 3857}, "type": "extent"}
pixx = (ext['xmax'] - ext['xmin']) / 1200.0
pixy = (ext['ymax'] - ext['ymin']) / 450.0
res = burnt_areas.compute_histograms(ext, pixel_size={'x':pixx, 'y':pixy})
numpix = 0
histogram = res['histograms'][0]['counts'][1:]
for i in histogram:
numpix += i
# -
# ### Report burnt area
# +
from IPython.display import HTML
# Convert the burnt pixel count to square metres, then to acres.
sqmarea = numpix * pixx * pixy # in sq. m
acres = 0.00024711 * sqmarea # in acres
HTML('<h3>Thomas fire has consumed <i>{:,} acres</i> till {}</h3>.'.format(int(acres), df.iloc[-1]['AcquisitionDate'].date()))
# +
import matplotlib.pyplot as plt
# %matplotlib inline
# Pie chart of the four severity-class pixel counts.
plt.title('Distribution by severity', y=-0.1)
plt.pie(histogram, labels=['Low Severity', 'Medium Severity', 'Moderate Severity', 'High Severity']);
plt.axis('equal');
# -
# ### Visualize burnt areas
m = gis.map('Carpinteria, CA')
m
m.add_layer([midfire, burnt_areas])
# ## Raster to Feature layer conversion
# Use Raster Analytics and Geoanalytics to convert the burnt area raster to a feature layer. The `to_features()` method converts the raster to a feature layer and `create_buffers()` fills holes in the features and dissolves them to output one feature that covers the extent of the Thomas Fire.
# +
from arcgis.geoanalytics.use_proximity import create_buffers
# Convert the burnt-area raster to polygons, keep only large ones
# (> 3 km^2) to drop speckle, then buffer by 100 m and dissolve into a
# single footprint feature.
fire_item = burnt_areas.to_features(output_name='ThomasFire_Boundary')
fire_layer = fire_item.layers[0]
fire_layer.filter = 'st_area_sh > 3000000'
# Bug fix: the original passed an undefined name `lyr`; the filtered
# layer created above is `fire_layer`.
fire = create_buffers(fire_layer, 100, 'Meters', dissolve_option='All', multipart=True, output_name='ThomasFire')
# -
fire = gis.content.search('Thomas_Fire', 'Feature Layer')[0]
fire
vectormap = gis.map('Carpinteria, CA')
vectormap.basemap = 'dark-gray'
vectormap.add_layer(fire)
vectormap
# ## Impact Assessment
# + [markdown] heading_collapsed=true
# ### Compute infrastructure and human impact
# + hidden=true
from arcgis.geoenrichment import enrich
from arcgis.features import SpatialDataFrame
# Fire footprint geometry used as a spatial filter for the road queries.
sdf = SpatialDataFrame.from_layer(fire.layers[0])
fire_geometry = sdf.iloc[0].SHAPE
sa_filter = geometry.filters.intersects(geometry=fire_geometry, sr=4326)
# US Census TIGERweb road layers clipped to the fire footprint.
secondary_roads_layer = FeatureLayer("https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/Transportation_LargeScale/MapServer/1")
secondary_roads = secondary_roads_layer.query(geometry_filter=sa_filter, out_sr=4326)
local_roads_layer = FeatureLayer("https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/Transportation_LargeScale/MapServer/2")
local_roads = local_roads_layer.query(geometry_filter=sa_filter, out_sr=4326)
def age_pyramid(df):
    """Plot a population age pyramid (males left, females right).

    Expects an enriched demographics frame whose columns are named
    ``MALE<age>`` / ``FEM<age>`` in 5-year buckets; returns the pyplot
    module so the caller can display or tweak the figure.
    """
    import warnings
    import seaborn as sns
    import matplotlib.pyplot as plt
    # %matplotlib inline
    warnings.simplefilter(action='ignore', category=FutureWarning)
    pd.options.mode.chained_assignment = None
    plt.style.use('ggplot')
    # Bug fix: select the sex/age columns from the *argument* df; the
    # original referenced the global `impacted_people`, which broke
    # reuse with any other frame.
    df = df[[x for x in df.columns if 'MALE' in x or 'FEM' in x]]
    sf = pd.DataFrame(df.sum())
    # Raw-string regex avoids the invalid-escape DeprecationWarning.
    sf['age'] = sf.index.str.extract(r'(\d+)').astype('int64')
    f = sf[sf.index.str.startswith('FEM')]
    m = sf[sf.index.str.startswith('MALE')]
    f = f.sort_values(by='age', ascending=False).set_index('age')
    m = m.sort_values(by='age', ascending=False).set_index('age')
    popdf = pd.concat([f, m], axis=1)
    popdf.columns = ['F', 'M']
    popdf['agelabel'] = popdf.index.map(str) + ' - ' + (popdf.index+4).map(str)
    # Negate male counts so their bars extend left of zero.
    popdf.M = -popdf.M
    sns.barplot(x="F", y="agelabel", color="#CC6699", label="Female", data=popdf, edgecolor='none')
    sns.barplot(x="M", y="agelabel", color="#008AB8", label="Male", data=popdf, edgecolor='none')
    plt.ylabel('Age group')
    plt.xlabel('Number of people');
    return plt;
# -
# ### Visualize affected roads on map
# +
# Street basemap with the affected road features drawn on top.
impactmap = gis.map('Carpinteria, CA')
impactmap.basemap = 'streets'
impactmap
# -
impactmap.draw([local_roads, secondary_roads])
# ### Age Pyramid of affected population
# Enrich the fire footprint with Esri 'Age' demographics, then plot.
impacted_people = enrich(sdf, 'Age')
age_pyramid(impacted_people);
# 
| samples/04_gis_analysts_data_scientists/california_wildfires_2017_thomas_fire_analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Train a ready to use TensorFlow model with a simple pipeline
# +
import os
import sys
import warnings
warnings.filterwarnings("ignore")
import numpy as np
import matplotlib.pyplot as plt
# the following line is not required if BatchFlow is installed as a python package.
sys.path.append("../..")
from batchflow import Pipeline, B, C, D, F, V
from batchflow.opensets import MNIST, CIFAR10, CIFAR100
from batchflow.models.tf import ResNet18
# -
# BATCH_SIZE might be increased for modern GPUs with lots of memory (4GB and higher).
BATCH_SIZE = 64
# # Create a dataset
# [MNIST](http://yann.lecun.com/exdb/mnist/) is a dataset of handwritten digits frequently used as a baseline for machine learning tasks.
#
# Downloading MNIST database might take a few minutes to complete.
dataset = MNIST(bar=True)
# There are also predefined CIFAR10 and CIFAR100 datasets.
# # Define a pipeline config
# Config allows to create flexible pipelines which take parameters.
#
# For instance, if you put a model type into config, you can run a pipeline against different models.
#
# See [a list of available models](https://analysiscenter.github.io/batchflow/intro/tf_models.html#ready-to-use-models) to choose the one which fits you best.
config = dict(model=ResNet18)
# # Create a template pipeline
# A template pipeline is not linked to any dataset. It's just an abstract sequence of actions, so it cannot be executed, but it serves as a convenient building block.
# The model class is injected via C('model'); input shapes/classes are
# resolved lazily from the batch (B) and dataset (D) at run time.
train_template = (Pipeline()
                  .init_variable('loss_history', [])
                  .init_model('dynamic', C('model'), 'conv_nn',
                              config={'inputs/images/shape': B.image_shape,
                                      'inputs/labels/classes': D.num_classes,
                                      'initial_block/inputs': 'images'})
                  .to_array()
                  .train_model('conv_nn', fetches='loss', images=B.images, labels=B.labels,
                               save_to=V('loss_history', mode='a'))
)
# # Train the model
# Apply a dataset and a config to a template pipeline to create a runnable pipeline:
# Bind the template to the train split and the config to get a runnable pipeline.
train_pipeline = (train_template << dataset.train) << config
# Run the pipeline (it might take from a few minutes to a few hours depending on your hardware)
train_pipeline.run(BATCH_SIZE, shuffle=True, n_epochs=1, drop_last=True, bar=True, prefetch=1)
# Note that the progress bar often increments by 2 at a time - that's prefetch in action.
# It does not give much here, though, since almost all time is spent in model training which is performed under a thread-lock one batch after another without any parallelism (otherwise the model would not learn anything as different batches would rewrite one another's model weights updates).
# Plot the per-iteration training loss accumulated in the pipeline variable.
plt.figure(figsize=(15, 5))
plt.plot(train_pipeline.v('loss_history'))
plt.xlabel("Iterations"), plt.ylabel("Loss")
plt.show()
# # Test the model
# It is much faster than training, but if you don't have GPU it would take some patience.
# Evaluation pipeline: reuse the trained model, collect predictions and
# classification metrics over the test split.
test_pipeline = (dataset.test.p
                 .import_model('conv_nn', train_pipeline)
                 .init_variable('predictions')
                 .init_variable('metrics')
                 .to_array()
                 .predict_model('conv_nn', fetches='predictions', images=B.images, save_to=V('predictions'))
                 .gather_metrics('class', targets=B.labels, predictions=V('predictions'),
                                 fmt='logits', axis=-1, save_to=V('metrics', mode='a'))
                 .run(BATCH_SIZE, shuffle=True, n_epochs=1, drop_last=False, bar=True)
)
# Let's get the accumulated [metrics information](https://analysiscenter.github.io/batchflow/intro/models.html#model-metrics)
metrics = test_pipeline.get_variable('metrics')
# Or a shorter version: `metrics = test_pipeline.v('metrics')`
# Now we can easily calculate any metrics we need
metrics.evaluate('accuracy')
metrics.evaluate(['false_positive_rate', 'false_negative_rate'], multiclass=None)
# # Save the model
# After learning the model, you may need to save it. It's easy to do this.
train_pipeline.save_model_now('conv_nn', path='path/to/save')
# ## What's next?
# See [the image augmentation tutorial](./06_image_augmentation.ipynb) or return to the [table of contents](./00_description.ipynb).
| examples/tutorials/03_ready_to_use_model_tf.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [Root]
# language: python
# name: Python [Root]
# ---
# <h3> ABSTRACT </h3>
# All CMEMS in situ data products can be found and downloaded after [registration](http://marine.copernicus.eu/services-portfolio/register-now/) via [CMEMS catalogue](http://marine.copernicus.eu/services-portfolio/access-to-products/).
#
# Such channel is advisable just for sporadic netCDF downloading because when operational, interaction with the web user interface is not practical. In this context though, the use of scripts for ftp file transference is a much more advisable approach.
#
# As long as every line of such files contains information about the netCDFs contained within the different directories [see at tips why](https://github.com/CopernicusMarineInsitu/INSTACTraining/blob/master/tips/README.md), it is possible for users to loop over its lines to download only those that match a number of specifications such as spatial coverage, time coverage, provider, data_mode, parameters or file_name related (region, data type, TS or PF, platform code, or/and platform category, timestamp).
# <h3>PREREQUISITES</h3>
# - [credentias](http://marine.copernicus.eu/services-portfolio/register-now/)
# - aimed [in situ product name](http://cmems-resources.cls.fr/documents/PUM/CMEMS-INS-PUM-013.pdf)
# - aimed [hosting distribution unit](https://github.com/CopernicusMarineInsitu/INSTACTraining/blob/master/tips/README.md)
# - aimed [index file](https://github.com/CopernicusMarineInsitu/INSTACTraining/blob/master/tips/README.md)
#
# i.e:
# CMEMS credentials and product selection (fill in before running).
user = '' #type CMEMS user name within quotes
password = ''#type CMEMS password within quotes
product_name = 'INSITU_BAL_NRT_OBSERVATIONS_013_032' #type aimed CMEMS in situ product
distribution_unit = 'cmems.smhi.se' #type aimed hosting institution
index_file = 'index_latest.txt' #type aimed index file name
# <h3>DOWNLOAD</h3>
# 1. Index file download
import ftplib
# Connect to the hosting unit and fetch the product's index file.
ftp=ftplib.FTP(distribution_unit,user,password)
ftp.cwd("Core")
ftp.cwd(product_name)
remote_filename= index_file
local_filename = remote_filename
# Use a context manager so the local file is closed even if the
# transfer fails part-way (the original leaked the handle on error).
with open(local_filename, 'wb') as local_file:
    ftp.retrbinary('RETR ' + remote_filename, local_file.write)
ftp.quit()
#ready when 221 Goodbye.!
# <h3>QUICK VIEW</h3>
# Reading a random line of the index file to know more about the information it contains.
import numpy as np
import pandas as pd
from random import randint

# Parse the index (6 header lines) into a structured array, one record
# per netCDF file listed in the catalogue.
index = np.genfromtxt(index_file, skip_header=6, unpack=False, delimiter=',', dtype=None,
                      names=['catalog_id', 'file_name', 'geospatial_lat_min', 'geospatial_lat_max',
                             'geospatial_lon_min', 'geospatial_lon_max',
                             'time_coverage_start', 'time_coverage_end',
                             'provider', 'date_update', 'data_mode', 'parameters'])

# Bug fix: random.randint is inclusive on BOTH ends, so
# randint(0, len(index)) could yield len(index) and raise IndexError.
dataset = randint(0,len(index)-1) #random line of the index file
values = [index[dataset]['catalog_id'], '<a href='+index[dataset]['file_name']+'>'+index[dataset]['file_name']+'</a>', index[dataset]['geospatial_lat_min'], index[dataset]['geospatial_lat_max'],
          index[dataset]['geospatial_lon_min'], index[dataset]['geospatial_lon_max'], index[dataset]['time_coverage_start'],
          index[dataset]['time_coverage_end'], index[dataset]['provider'], index[dataset]['date_update'], index[dataset]['data_mode'],
          index[dataset]['parameters']]
headers = ['catalog_id', 'file_name', 'geospatial_lat_min', 'geospatial_lat_max',
           'geospatial_lon_min', 'geospatial_lon_max',
           'time_coverage_start', 'time_coverage_end',
           'provider', 'date_update', 'data_mode', 'parameters']
df = pd.DataFrame(values, index=headers, columns=[dataset])
df.style
# <h3>FILTERING CRITERIA</h3>
# Regarding the above glimpse, it is possible to filter by 12 criteria. As an example we will set up next a filter to only download those files that contain a specific [data_type](http://www.socib.es/users/protllan/CMEMS/marineWeek/reveal.js-master/coding/#/treeSection).
# 1. Aimed data_type
variable = 'MO'  # NOTE(review): unused below — the selection loop defines its own aimed_data_type
# 2. netCDF filtering/selection
# Remember that depending on the folder we are, the data_type tag in the file_name is at an specific place: for history directory is at position 2 and for monthly and latest directory it would be at position 3. See more at [naming convention](https://github.com/CopernicusMarineInsitu/INSTACTraining/blob/master/images/naming.png) or [PUM](http://archimer.ifremer.fr/doc/00324/43494/)
#read file lines (iterate over them)
selected_netCDFs = [];
#set up a selection criteria: i.e specific data_type
#history: position 2
#monthly and latest: position 3
# Loop-invariant settings hoisted out of the loop (the original
# re-assigned them on every iteration).
position = 3
aimed_data_type = 'MO'#choose a data_type: i.e mooring
for netCDF in index:
    file_name = netCDF['file_name'].decode('utf-8')
    # The netCDF file name is the last path component of the ftp URL.
    last_idx_slash = file_name.rfind('/')
    ncdf_file_name = file_name[last_idx_slash+1:]
    data_type = ncdf_file_name.split('_')[position] #index_latest (above)
    if data_type == aimed_data_type :
        selected_netCDFs.append(file_name)

print("total: " +str(len(selected_netCDFs)))
# <h3> SELECTION DOWNLOAD </h3>
for nc in selected_netCDFs:
    # Bug fix: the original derived every path from `file_name`, a stale
    # variable left over from the previous cell's loop, so each
    # iteration re-downloaded the same file.  Use the loop item `nc`.
    last_idx_slash = nc.rfind('/')
    ncdf_file_name = nc[last_idx_slash+1:]
    # URL layout: ftp://<host>/<folders...>/<file>; index 2 is the host,
    # indices 3..-1 are the directory chain.
    folders = nc.split('/')[3:len(nc.split('/'))-1]
    host = nc.split('/')[2] #distribution unit
    ftp=ftplib.FTP(host,user,password)
    for folder in folders:
        ftp.cwd(folder)
    # Context manager closes the local file even if the transfer fails.
    with open(ncdf_file_name, 'wb') as local_file:
        ftp.retrbinary('RETR '+ncdf_file_name, local_file.write)
    ftp.quit()
| PythonNotebooks/indexFileNavigation/index_file_navigation_by_datatype.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="Phb6EtsJkikD" colab_type="code" colab={}
import pandas as pd
# + id="luBbG6HmnFs_" colab_type="code" colab={}
# Quick look at one of the feature CSVs.
# NOTE(review): this loads lstm400.8.csv, but the training cells below
# read lstm400.10.csv via input_file — confirm which is intended.
data = pd.read_csv("lstm400.8.csv",engine ='python')
# + id="n7MKJBCInP9S" colab_type="code" outputId="4e60b998-4a8c-4409-d8fb-419bb71dfbca" colab={"base_uri": "https://localhost:8080/", "height": 244}
data.head()
# + [markdown] id="yX1UNlpEQc-O" colab_type="text"
# Data splitting (train/test)
# + id="Ah7L5eGYQb2j" colab_type="code" colab={}
from keras.layers import Dense, Dropout, LSTM, Embedding,MaxPooling1D,ConvLSTM2D,RNN
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
input_file = "lstm400.10.csv"

def load_data(test_split = 0.2):
    """Load the 400-feature CSV, min-max scale X, and return
    (X_train, y_train, X_test, y_test) with the X splits padded.

    NOTE(review): keras pad_sequences defaults to an int dtype, which
    would truncate the [0, 1]-scaled floats — confirm this is intended.
    """
    print ('Loading data...')
    df = pd.read_csv(input_file)
    # First 400 columns are features, last column is the binary label.
    X = df.iloc[:,:400].values
    y = df.iloc[:,-1].values
    # Scale every feature column to [0, 1].
    min_max_scaler = preprocessing.MinMaxScaler()
    X = min_max_scaler.fit_transform(X)
    #X = preprocessing.scale(X)
    print(X)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_split)
    return pad_sequences(X_train), y_train, pad_sequences(X_test), y_test
# + id="MYdSfk5yn3wM" colab_type="code" outputId="f440596f-dcc6-4565-aff5-b342ff9ada76" colab={"base_uri": "https://localhost:8080/", "height": 1000}
def create_model(input_length):
    """Build a two-layer LSTM binary classifier over integer sequences.

    NOTE(review): uses legacy Keras 1.x argument names (``output_dim``,
    ``inner_activation``, and ``nb_epoch`` below); modern Keras expects
    ``units`` / ``recurrent_activation`` / ``epochs`` — confirm the
    installed version supports these.
    """
    print ('Creating model...')
    model = Sequential()
    # input_dim=188 implies token ids < 188 — TODO confirm vocabulary size.
    model.add(Embedding(input_dim = 188, output_dim = 50, input_length = input_length))
    model.add(LSTM(output_dim=256, activation='sigmoid', inner_activation='hard_sigmoid', return_sequences=True))
    model.add(Dropout(0.5))
    # Halve the sequence length between the two LSTM layers.
    model.add(MaxPooling1D(pool_size=2, strides=None, padding='valid', data_format='channels_last'))
    model.add(LSTM(output_dim=256, activation='sigmoid', inner_activation='hard_sigmoid'))
    model.add(Dropout(0.5))
    # Single sigmoid unit -> binary class probability.
    model.add(Dense(1, activation='sigmoid'))
    print ('Compiling...')
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    model.summary()
    return model

X_train, y_train, X_test, y_test = load_data()
model = create_model(len(X_train[0]))
print ('Fitting model...')
hist = model.fit(X_train, y_train, batch_size=1, nb_epoch=20, validation_split = 0.1, verbose = 1)
score, acc = model.evaluate(X_test, y_test, batch_size=1)
print('Test score:', score)
print('Test accuracy:', acc)
# + id="SecFMBWSsqJQ" colab_type="code" outputId="1a34d820-3b09-4a7d-89d3-5762c655bde0" colab={"base_uri": "https://localhost:8080/", "height": 168}
X_train, y_train, X_test, y_test = load_data()
import numpy as np
# Add a trailing channel axis so each timestep is a 1-D feature vector
# (samples, timesteps, 1), and make the labels explicit column vectors.
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
y_train = np.reshape(y_train, (y_train.shape[0], 1))
y_test = np.reshape(y_test, (y_test.shape[0], 1))
print(X_train.shape,X_test.shape)
# + id="m3X3sLt0_uYK" colab_type="code" outputId="392cc774-754a-4def-b693-3b8749a18935" colab={"base_uri": "https://localhost:8080/", "height": 205}
from __future__ import print_function
import numpy as np
from keras.callbacks import Callback
from keras.layers import Dense
from keras.layers import LSTM
from keras.models import Sequential
from numpy.random import choice
USE_SEQUENCES = False
USE_STATELESS_MODEL = False
# you can try all four possible combinations of
# USE_SEQUENCES and USE_STATELESS_MODEL
max_len = 400   # timesteps per logical sequence
batch_size = 1
class ResetStatesCallback(Callback):
    """Clear the stateful LSTM's hidden state at every sequence boundary.

    Batches arrive one timestep at a time; after ``max_len`` of them a
    new logical sequence starts and the recurrent state must be reset.
    """

    def __init__(self):
        self.counter = 0

    def on_batch_begin(self, batch, logs={}):
        # A multiple of max_len means the previous sequence just ended.
        if not self.counter % max_len:
            self.model.reset_states()
        self.counter += 1
print('sequences_x_train shape:', X_train.shape)
print('sequences_y_train shape:', y_train.shape)
print('sequences_x_test shape:', X_test.shape)
print('sequences_y_test shape:', y_test.shape)
if USE_STATELESS_MODEL:
    # Stacked stateless LSTM: the whole (max_len, 1) sequence is presented
    # at once and Keras manages the recurrent state internally.
    print('Build STATELESS model...')
    model = Sequential()
    model.add(LSTM(10, input_shape=(max_len, 1), return_sequences=True))
    model.add(LSTM(10, input_shape=(max_len, 1), return_sequences=False))
    # Bug fix: Dense() has no `optimizer` argument -- the original
    # `Dense(1, optimizer='rmsprop')` raises a TypeError. The output layer
    # needs a sigmoid activation for binary classification (matching the
    # stateful branch below); the optimizer is chosen in compile().
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    model.summary()
    print('Train...')
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=15,
              validation_data=(X_test, y_test), shuffle=False, callbacks=[ResetStatesCallback()])
    score, acc = model.evaluate(X_test, y_test, batch_size=batch_size, verbose=0)
    print('___________________________________')
    print('Test score:', score)
    print('Test accuracy:', acc)
else:
    # STATEFUL MODEL
    # The sequence is fed one timestep per batch; batch_input_shape=(1, 1, 1)
    # means batch of 1, 1 timestep, 1 feature, and stateful=True carries the
    # hidden state across batches until reset_states() is called.
    print('Build STATEFUL model...')
    model = Sequential()
    model.add(LSTM(10,batch_input_shape=(1, 1, 1), return_sequences=False,stateful=True))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    # Flatten every sequence into single-timestep samples; each label is
    # repeated max_len times so x and y stay aligned per timestep.
    x = np.expand_dims(np.expand_dims(X_train.flatten(), axis=1), axis=1)
    y = np.expand_dims(np.array([[v] * max_len for v in y_train.flatten()]).flatten(), axis=1)
    # Warm-up fit over the flattened data; ResetStatesCallback clears the
    # state every max_len batches (i.e. at each original sequence boundary).
    model.fit(x,
              y,
              callbacks=[ResetStatesCallback()],
              batch_size=1,
              shuffle=False)
    print('Train...')
    for epoch in range(15):
        mean_tr_acc = []
        mean_tr_loss = []
        for i in range(len(X_train)):
            y_true = y_train[i]
            # Train one timestep per batch, keeping state within the
            # sequence and resetting it at the end of the sequence.
            for j in range(max_len):
                tr_loss, tr_acc = model.train_on_batch(np.expand_dims(np.expand_dims(X_train[i][j], axis=1), axis=1),
                                                       np.array([y_true]))
                mean_tr_acc.append(tr_acc)
                mean_tr_loss.append(tr_loss)
            model.reset_states()
        print('accuracy training = {}'.format(np.mean(mean_tr_acc)))
        print('loss training = {}'.format(np.mean(mean_tr_loss)))
        print('___________________________________')
        # Evaluate the same way: one timestep per batch, reset per sequence.
        mean_te_acc = []
        mean_te_loss = []
        for i in range(len(X_test)):
            for j in range(max_len):
                te_loss, te_acc = model.test_on_batch(np.expand_dims(np.expand_dims(X_test[i][j], axis=1), axis=1),
                                                      y_test[i])
                mean_te_acc.append(te_acc)
                mean_te_loss.append(te_loss)
            model.reset_states()
            # Second pass produces per-timestep predictions; the values are
            # discarded -- presumably only the final state reset matters here.
            for j in range(max_len):
                y_pred = model.predict_on_batch(np.expand_dims(np.expand_dims(X_test[i][j], axis=1), axis=1))
            model.reset_states()
        print('accuracy testing = {}'.format(np.mean(mean_te_acc)))
        print('loss testing = {}'.format(np.mean(mean_te_loss)))
        print('___________________________________')
# + id="x71zbJbuAMu8" colab_type="code" colab={}
| new_LSTM.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# > **How to run this notebook (command-line)?**
# 1. Install the `ReinventCommunity` environment:
# `conda env create -f environment.yml`
# 2. Activate the environment:
# `conda activate ReinventCommunity`
# 3. Execute `jupyter`:
# `jupyter notebook`
# 4. Copy the link to a browser
#
#
# # `REINVENT 2.0`: sampling mode demo
# The *reinforcement learning* mode can be used to train an agent to find molecules that maximize a user-defined score (typically comprised by a number of score components - check out the respective notebooks). While each iteration of the agent will output a number of molecules (batch), it might be necessary to generate more afterwards, e.g. if more ideas are required to push a project forward. This can be achieved with the *sampling* running mode of `REINVENT 2.0`, which takes a (trained) agent and generates more ideas which is illustrated by this notebook.
#
# To proceed, please update the following code block such that it reflects your system's installation and execute it.
# +
# load dependencies
import os
import re
import json
import tempfile
# --------- change these path variables as required
reinvent_dir = os.path.expanduser("~/Desktop/Projects/Publications/2020/2020-04_REINVENT_2.0/Reinvent")
reinvent_env = os.path.expanduser("~/miniconda3/envs/reinvent_shared.v2.1")
output_dir = os.path.expanduser("~/Desktop/REINVENT_sampling_demo")
# --------- do not change
# get the notebook's root path
try: ipynb_path
except NameError: ipynb_path = os.getcwd()
# if required, generate a folder to store the results
try:
os.mkdir(output_dir)
except FileExistsError:
pass
# -
# ## Setting up the configuration
# `REINVENT` has an entry point that loads a specified `JSON` file on startup. `JSON` is a low-level data format that allows to specify a fairly large number of parameters in a cascading fashion very quickly. The parameters are structured into *blocks* which can in turn contain blocks or simple values, such as *True* or *False*, strings and numbers. In this tutorial, we will go through the different blocks step-by-step, explaining their purpose and potential values for given parameters. Note, that while we will write out the configuration as a `JSON` file in the end, in `python` we handle the same information as a simple `dict`.
# initialize the dictionary
configuration = {
"version": 2, # we are going to use REINVENT's newest release
"run_type": "sampling" # other run types: "scoring", "validation",
# "transfer_learning",
# "reinforcement_learning" and
# "create_model"
}
# add block to specify whether to run locally or not and
# where to store the results and logging
configuration["logging"] = {
"sender": "http://127.0.0.1", # only relevant if "recipient" is set to "remote"
"recipient": "local", # either to local logging or use a remote REST-interface
"logging_path": os.path.join(output_dir, "progress.log"), # where the run's output is stored
"job_name": "Scoring mode demo", # set an arbitrary job name for identification
"job_id": "demo" # only relevant if "recipient" is set to "remote"
}
# We will need to specify a path to an agent (parameter `model_path`), which can be a prior or trained agent. For the purpose of this notebook, we will use a prior shipped with the `REINVENT 2.0` repository.
# +
# set up the file path, where the generated molecules are
# deposited (as SMILES)
output_SMILES_path = os.path.join(output_dir, "sampled", "sampled.smi")
# add the "parameters" block
configuration["parameters"] = {
"model_path": os.path.join(reinvent_dir, # path to prior or trained agent
"data",
"augmented.prior"),
"output_smiles_path": output_SMILES_path, # output path where SMILES will be deposited
"num_smiles": 1024, # specifies, how many molecules are to be sampled
"batch_size": 128, # internal batch size; should match the one used
# during training
"with_likelihood": False # also provide the log-likelihood
}
# -
# We do not need to add any scoring function definition (in contrast to the e.g. *reinforcement learning* or *scoring* running modes). The reason is that the agent has (hopefully) "learned" to find resonable molecules (defined by the scoring function) and stores that knowledge internally in its weights.
#
# We now have successfully filled the dictionary and will write it out as a `JSON` file in the output directory. Please have a look at the file before proceeding in order to see how the paths have been inserted where required and the `dict` -> `JSON` translations (e.g. `True` to `true`) have taken place.
# write the configuration file to the disc
configuration_JSON_path = os.path.join(output_dir, "sampling_config.json")
with open(configuration_JSON_path, 'w') as f:
json.dump(configuration, f, indent=4, sort_keys=True)
# ## Run `REINVENT`
# Now it is time to execute `REINVENT` locally. As we will not update any weights, execution should be very fast. The result will be a `SMILES` file in the `sampled` directory specified. If you want to generate the scores for these, you can make use of the *scoring mode* (see respective notebook for more details).
#
# The command-line execution looks like this:
# ```
# # activate environment
# conda activate reinvent_shared.v2.1
#
# # execute REINVENT
# python <your_path>/input.py <config>.json
# ```
# +
# %%capture captured_err_stream --no-stderr
# execute REINVENT from the command-line
# !python {reinvent_dir}/input.py {configuration_JSON_path}
# -
# print the output to a file, just to have it for documentation
with open(os.path.join(output_dir, "run.err"), 'w') as file:
file.write(captured_err_stream.stdout)
# print the resulting SMILES file
# !head -n 15 {output_dir}/sampled/sampled.smi
| notebooks/Sampling_Demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Estatística Descritiva com apoio do *Python*
# ## As bibliotecas *numpy* e *pandas*
# + [markdown] slideshow={"slide_type": "slide"}
# * Vamos apresentar vários métodos estatísticos desenvolvidos para *Series* e *DataFrames*.
# + slideshow={"slide_type": "-"}
import numpy as np
import pandas as pd
# + [markdown] slideshow={"slide_type": "slide"}
# ## Distribuição de Frequência
#
# * Uma distribuição de frequência é uma tabela que contém um resumo dos dados obtido em uma amostra.
#
# * A distribuição é organizada em formato de tabela, e cada entrada da tabela contém a frequência dos dados em um determinado intervalo, ou em um grupo.
# + [markdown] slideshow={"slide_type": "slide"}
# * Abaixo vemos um exemplo simplificado de tabela de distribuição de frequência:
#
# | Alturas em metros | Número dos Alunos |
# | :-------------: |:-------------:|
# |1,50 $|\!-$ 1,60 | 5 |
# | 1,60 $|\!-$ 1,70 | 15 |
# | 1,70 $|\!-$ 1,80 | 17 |
# | 1,80 $|\!-$ 1,90 | 3 |
# | Total | 40 |
# + [markdown] slideshow={"slide_type": "slide"}
# ### Construção de uma distribuição de frequência
#
# * Para ilustrar como se constrói uma distribuição de frequência, nós vamos considerar um exemplo específico.
#
# * Assim, suponha que uma pesquisa foi feita, e o seguinte conjunto de dados foi obtido:
#
# * **Dados Brutos**: 24-23-22-28-35-21-23-33-34-24-21-25-36-26-22-30-32-25-26-33-34-21-31-25-31-26-25-35-33-31.
#
#
# -
dados = [24,23,22,28,35,21,23,33,34,24,21,25,36,26,22,30,32,25,26,33,34,21,31,25,31,26,25,35,33,31]
# + [markdown] slideshow={"slide_type": "slide"}
# #### Rol de dados
#
# * A primeira coisa que fazemos é ordenar os dados do menor para o maior, formando o *rol de dados*:
#
# * **Rol de dados**: 21-21-21-22-22-23-23-24-25-25-25-25-26-26-26-28-30-31-31-31-32-33-33-33-34-34-34-35-35-36.
# -
np.sort(dados)
# + [markdown] slideshow={"slide_type": "slide"}
# #### Amplitude Total
#
# * Em seguida, calculamos a *amplitude total*, ou seja, o maior valor obtido na amostra subtraído do menor
# valor obtido na amostra:
#
# * **Amplitude Total**: R = 36-21 = 15.
# -
R = np.max(dados) - np.min(dados); R
# + [markdown] slideshow={"slide_type": "slide"}
# #### Tamanho Amostral
#
# * Vamos calcular, agora, o tamanho amostral, ou seja, o número de observações obtidas na amostra.
#
# * **Tamanho Amostral**: n = 30.
#
# -
n = len(dados); n
# * Para Series e DataFrames o método **count()** retorna o total de valores.
n = pd.Series(dados).count(); n
# + [markdown] slideshow={"slide_type": "slide"}
# #### Número de Classes
#
# * Queremos, agora, dividir a amostra em uma quantidade de grupos que formarão os intervalos. Cada grupo é chamado de *classe*,
# assim, queremos definir o *número de classes* a ser considerado na tabela de *distribuição de frequência*:
#
# * **Número de Classes**: K.
#
# * K=5 para $n\leq 25$ e $K\approx \sqrt{n}$, para $n>25$.
#
# * Fórmula de Sturges $K\approx 1+3,22\log n$.
#
# * Logo, pela primeira regra temos $K=\sqrt{30}\approx 5,48 \approx 6$, e pela segunda regra
# $K\approx 1+3,22\log 30\approx 5,75 \approx 6.$ Desta forma, em ambos os casos temos $K=6$, que será o valor considerado.
#
# + [markdown] slideshow={"slide_type": "slide"}
# Número de classes padrão:
# + slideshow={"slide_type": "-"}
# Standard rule: K = 5 for small samples (n < 25), otherwise K = ceil(sqrt(n)).
if n<25:
    K = 5
else:
    K = np.ceil(np.sqrt(n))
K
# + [markdown] slideshow={"slide_type": "-"}
# Número de classes fórmula de Sturges:
# -
KFS = np.ceil( 1 + 3.22*np.log10(n)); KFS
# + [markdown] slideshow={"slide_type": "slide"}
# #### Amplitude das Classes
#
# * O próximo passo é saber o comprimento de cada intervalo a ser considerado, ou seja, calcular a amplitude de cada classe. Queremos que todas as classes tenham a mesma amplitude e portanto, temos:
#
# * **Amplitude das Classes**: $h=\frac{R}{K}=\frac{15}{6}=2,5\approx 3$.
# -
h = np.ceil(R/K); h
# + [markdown] slideshow={"slide_type": "slide"}
# #### Limites das Classes
# * Vamos agora definir os *limites das classes*. Para tanto, começamos com o menor valor obtido da amostra, ou equivalentemente, o primeiro valor do *rol de dados*, e vamos somando a amplitude para definir cada limite de intervalo:
#
# | Classes |
# | :-------------:
# | 21 $|\!-$ 24 |
# | 24 $|\!-$ 27 |
# | 27 $|\!-$ 30 |
# | 30 $|\!-$ 33 |
# | 33 $|\!-$ 36 |
# | 36 $|\!-$ 39 |
# -
bins = [np.min(dados) + i*h.astype('int') for i in range(K.astype('int')+1)]; bins
# + [markdown] slideshow={"slide_type": "slide"}
# #### Frequência dos Dados
#
# * Agora, calculamos as frequências dos dados em cada intervalo e, chamada também de *frequência absoluta*. E finalmente montamos a tabela de *Distribuição de Frequência*.
#
# | Classes | Frequência |
# | :-------------:| :-------------:|
# | 21 $|\!-$ 24 | 7 |
# | 24 $|\!-$ 27 | 9 |
# | 27 $|\!-$ 30 | 1 |
# | 30 $|\!-$ 33 | 5 |
# | 33 $|\!-$ 36 | 7 |
# | 36 $|\!-$ 39 | 1 |
# + [markdown] slideshow={"slide_type": "slide"}
# No *pandas*, a função **cut** cria classes a partir dos dados e o método **value_counts()** cria uma tabela de frequências. Combinando os dois obtemos uma *Distribuição de Frequência*.
# -
pd.cut(dados, bins=bins, right=False).value_counts()
# + slideshow={"slide_type": "slide"}
def n_classes(dados: pd.Series, tipo='Padrão'):
    """Return the number of classes K for a frequency distribution.

    Parameters
    ----------
    dados : pd.Series (anything supporting len())
        Sample observations.
    tipo : str
        'Padrão'  -> K = 5 if n < 25 else ceil(sqrt(n));
        'Sturges' -> K = 1 + ceil(log2(n)).

    Raises
    ------
    ValueError
        For an unsupported `tipo` (the original silently returned None,
        which surfaced later as a confusing TypeError in callers).
    """
    n_obs = len(dados)
    if tipo == 'Padrão':
        return 5 if n_obs < 25 else np.ceil(np.sqrt(n_obs)).astype(int)
    if tipo == 'Sturges':
        return (1 + np.ceil(np.log2(n_obs))).astype(int)
    raise ValueError("tipo desconhecido: {!r}".format(tipo))
def amplitude_classes(dados: pd.Series, tipo='Padrão', arredondar=True):
    """Class width h = R / K (R = sample range, K = n_classes).

    With `arredondar=True` the width is rounded up with ceil.
    Improvement: the original computed the range and called n_classes twice
    (once per branch of the conditional expression); now computed once.
    """
    amplitude = (dados.max() - dados.min()) / n_classes(dados, tipo=tipo)
    return np.ceil(amplitude) if arredondar else amplitude
def construir_tabela(dados, tipo='Padrão', direita=False, arredondar=True):
    """Build the grouped frequency table for `dados`.

    Computes K equal-width classes (via n_classes / amplitude_classes),
    bins the data with pd.cut and returns the per-class counts as a
    Series named 'Frequência', in class order.
    """
    serie = pd.Series(dados)
    k = n_classes(serie, tipo=tipo)
    h = amplitude_classes(serie, tipo=tipo, arredondar=arredondar)
    limites = [serie.min() + i * h for i in range(k + 1)]
    contagens = pd.cut(serie, bins=limites, right=direita).value_counts(sort=False)
    return contagens.rename('Frequência')
def formatar_intervalos(intervalos, prec, direita=False):
    """Round each interval's endpoints to `prec` decimals.

    Returns a list of pd.Interval closed on the right when `direita`,
    otherwise closed on the left.
    """
    lado = 'right' if direita else 'left'
    arredondados = []
    for intervalo in intervalos:
        arredondados.append(pd.Interval(left=np.round(intervalo.left, prec),
                                        right=np.round(intervalo.right, prec),
                                        closed=lado))
    return arredondados
# + slideshow={"slide_type": "slide"}
def dist_freq(dados, tipo='Padrão', prec=2, direita=False, arredondar=True, exibir_total=True):
    """Return the frequency-distribution DataFrame for `dados`.

    The index ('Classes') holds the rounded class intervals; with
    `exibir_total` a final 'Total' row is appended. Empty classes
    (frequency 0) are filtered out.
    """
    tabela = pd.DataFrame(construir_tabela(dados, tipo=tipo, direita=direita, arredondar=arredondar))
    tabela.index = formatar_intervalos(tabela.index.array, prec, direita=direita)
    tabela.index = tabela.index.rename('Classes')
    if exibir_total:
        linha_total = pd.DataFrame({'Frequência': tabela['Frequência'].sum()}, index=['Total'])
        linha_total.index = linha_total.index.rename('Classes')
        tabela = pd.concat([tabela, linha_total])
    return tabela.query('Frequência>0')
# + slideshow={"slide_type": "slide"}
dist_freq(dados)
# + slideshow={"slide_type": "slide"}
z=np.random.normal(0,1,200)
dist_freq(z)
# -
np.min(z)
np.max(z)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Medidas de Posição
#
# * As medidas de posição são valores que representam a tendência de concentração dos dados observados.
#
# * As mais importantes são as _medidas de tendência central_.
#
# * As três medidas de tendência central mais utilizadas são: *Média*, *Moda* e *Mediana*.
#
# + [markdown] slideshow={"slide_type": "slide"}
# ### Média
#
# * É um valor que representa uma característica do conjunto de dados. Essa característica é tal que a soma dos dados é preservada. A média é obtida a partir de todos os elementos da distribuição e do tamanho da amostra.
#
# * *Notação*: representamos a média de um conjunto de dados por $\overline{X}$.
#
# * Calculamos a média aritmética pela fórmula:
# $$
# \overline{X}=\sum_{i=1}^{n}\frac{X_i}{n}.
# $$
#
# + [markdown] slideshow={"slide_type": "slide"}
#
# * Para Series e DataFrames o método **mean()** retorna a média dos valores.
# -
pd.Series(dados).mean()
# + [markdown] slideshow={"slide_type": "slide"}
# #### Média para dados agrupados em intervalos
#
# * No caso em que temos os dados agrupados em intervalos, utilizamos a frequência e o ponto médio de cada classes para calcular a média pela fórmula:
# $$
# \overline{X}=\sum_{i=1}^{K}\frac{F_i\cdot pm_i}{n},
# $$
# onde $K$ é o número de classes, $F_i$ é a frequência da $i$-ésima classe e $pm_i$ é o ponto médio da $i$-ésima classe.
# + slideshow={"slide_type": "slide"}
dist_freq_pm = dist_freq(dados, exibir_total=False)
intervalos = dist_freq_pm.index.array
dist_freq_pm['Ponto Médio'] = [(intervalo.left+intervalo.right)/2 for intervalo in intervalos]
dist_freq_pm
# -
media = (dist_freq_pm['Frequência']*dist_freq_pm['Ponto Médio']).sum()/dist_freq_pm['Frequência'].sum()
media
# + slideshow={"slide_type": "slide"}
def media_dist_freq(d_freq):
    """Mean of grouped data: sum(F_i * midpoint_i) / sum(F_i).

    Accepts the output of dist_freq with or without the trailing 'Total'
    row (detected by the index array no longer being an IntervalArray).
    Fix: always work on a copy, so the caller's DataFrame never gains an
    extra 'Ponto Médio' column (the original only copied in the
    'Total'-row branch).
    """
    if type(d_freq.index.array).__name__ != 'IntervalArray':
        d_freq = d_freq.head(-1)  # drop the 'Total' row
    d_freq = d_freq.copy()  # never mutate the caller's DataFrame
    intervalos = d_freq.index.array
    d_freq['Ponto Médio'] = [(intervalo.left + intervalo.right) / 2 for intervalo in intervalos]
    return (d_freq['Frequência'] * d_freq['Ponto Médio']).sum() / d_freq['Frequência'].sum()
# -
media_dist_freq(dist_freq(dados))
media_dist_freq(dist_freq(z))
# + [markdown] slideshow={"slide_type": "slide"}
# ### Moda
#
# * Definimos a moda de um conjunto de dados como o valor mais frequente deste conjunto.
#
# * *Notação*: representamos a moda de um conjunto de dados por $Mo$.
#
# * *Exemplo*:
#
# * 1, 2, 4, 5 e 8 - não existe valor mais frequente - não existe moda (Amodal).
# * 2, 2, 3, 7 e 8 - $Mo$ = 2 (Unimodal).
# * 1, 1, 10, 5, 5, 8, 7, 2 - $Mo$ = 1 e 5 (Bimodal).
#
# * Para Series e DataFrames o método **mode()** retorna a moda dos valores.
#
# + slideshow={"slide_type": "slide"}
pd.Series(dados).mode()
# -
pd.Series(z).mode()
# + [markdown] slideshow={"slide_type": "slide"}
# #### Moda em dados agrupados em intervalos
#
# * Neste caso, utiliza-se a fórmula de Czuber identificando a *classe modal*, isto é, a classe com a maior frequencia.
#
# $$
# {\rm Mo}=l_{\rm Mo} + \left[\frac{h(F_{\rm Mo} - F_{\rm ant})}{2 F_{\rm Mo}-(F_{\rm ant}+F_{\rm Pos})} \right],
# $$
# onde:
#
# $h$ é a amplitude intervalar,
#
# $F_{\rm Mo}$ é a frequência da *classe modal*,
#
# $l_{\rm Mo}$ é o limite inferior da *classe modal*,
#
# $F_{\rm ant}$ é a frequência da classe anterior à *classe modal*,
#
# $F_{\rm Pos}$ é a frequência da classe posterior à classe modal.
#
#
# +
def encontra_indices_modais(d_freq):
    """Return the positional indices of the modal class(es).

    The trailing 'Total' row, when present (index not an IntervalArray),
    is dropped first. Supports multimodal tables: every position whose
    frequency equals the maximum is returned.
    """
    if type(d_freq.index.array).__name__ != 'IntervalArray':
        d_freq = d_freq.head(-1).copy()
    frequencias = d_freq['Frequência'].to_numpy()
    return np.flatnonzero(frequencias == frequencias.max())
def encontra_freq_anterior(d_freq):
    """Frequency of the class immediately before each modal class.

    Returns one value per modal class; a modal class at position 0 has no
    predecessor, so 0 is prepended (F_ant = 0 in the Czuber formula).
    Fix: `.iloc` requires integer positions -- the original indexed with a
    float array, which raises a TypeError on modern pandas; valid positions
    are now cast to int before indexing.
    """
    idx_modal = encontra_indices_modais(d_freq).astype('float')
    idx_anterior = idx_modal - 1
    idx_anterior[idx_anterior < 0] = np.nan  # no class before the first one
    validos = idx_anterior[~np.isnan(idx_anterior)].astype(int)
    freq_anterior = d_freq['Frequência'].iloc[validos].to_numpy()
    if np.isnan(idx_anterior[0]):
        freq_anterior = np.insert(freq_anterior, 0, 0)
    return freq_anterior
def encontra_freq_posterior(d_freq):
    """Frequency of the class immediately after each modal class.

    Returns one value per modal class; a modal class in the last position
    has no successor, so 0 is appended (F_pos = 0 in the Czuber formula).
    Fixes: `.iloc` with a float array raises on modern pandas (positions
    are cast to int), and the local variable no longer shadows the sibling
    function `n_classes`.
    """
    idx_modal = encontra_indices_modais(d_freq).astype('float')
    total_classes = d_freq.shape[0]
    idx_posterior = idx_modal + 1
    idx_posterior[idx_posterior >= total_classes] = np.nan  # no class after the last
    validos = idx_posterior[~np.isnan(idx_posterior)].astype(int)
    freq_posterior = d_freq['Frequência'].iloc[validos].to_numpy()
    if np.isnan(idx_posterior[-1]):
        freq_posterior = np.append(freq_posterior, 0)
    return freq_posterior
def moda_dist_freq(d_freq):
    """Mode of grouped data via Czuber's formula:
        Mo = l_Mo + h*(F_Mo - F_ant) / (2*F_Mo - (F_ant + F_pos))
    Returns an array with one value per modal class (multimodal support).
    """
    if(type(d_freq.index.array).__name__ != 'IntervalArray'):
        d_freq = d_freq.head(-1).copy()  # drop the 'Total' row
    # Rebuild the index as an IntervalArray so .left/.right are available.
    # NOTE(review): passing the IntervalArray class (not a dtype) to astype
    # may raise on newer pandas versions -- confirm against the pinned
    # pandas; also, when the index is already an IntervalArray this assigns
    # onto the caller's DataFrame (same values, but still a mutation).
    d_freq.index = d_freq.index.array.astype(pd.arrays.IntervalArray)
    idx_modal = encontra_indices_modais(d_freq)
    # h: class width, assumed constant (taken from the first class)
    h = d_freq.index.array[0].right-d_freq.index.array[0].left
    lMo = d_freq.index.array[idx_modal].left.array  # lower limits of modal classes
    FMo = d_freq.iloc[idx_modal]['Frequência'].array  # modal frequencies
    FPos = encontra_freq_posterior(d_freq)
    FAnt = encontra_freq_anterior(d_freq)
    return lMo + (h*(FMo-FAnt))/(2*FMo - (FAnt+FPos))
# -
moda_dist_freq(dist_freq(z))
moda_dist_freq(dist_freq(dados))
# + [markdown] slideshow={"slide_type": "slide"}
# ### Mediana
#
# * Definimos a mediana de um conjunto de dados como o valor que divide um o *rol de dados* em duas partes com a mesma quantidade de dados.
#
# * Notação: representamos a mediana de um conjunto de dados por $Md$.
#
# * O *elemento mediano*, $E_{\rm Md}$, aponta o local no *rol de dados* onde a mediana está localizada. A mediana será o valor assumido na posição $E_{\rm Md}$.
#
# * Se o tamanho amostral $n$ é ímpar, temos que $E_{\rm Md} = \frac{(n+1)}{2}$.
#
# * Caso tamanho amostral seja par, teremos dois valores possíveis para o elemento mediano: $\frac{n}{2}$ e $\frac{n}{2}+1$. Neste caso a mediana será a média dos valores assumidos nestas posições.
# + [markdown] slideshow={"slide_type": "slide"}
# * Exemplos:
#
# * 1, 2, 4, 5, 8. Como $n$ é ímpar, temos $E_{\rm Md} = 3$, e $Md = 4$.
#
# * 2, 2, 3, 7, 8, 10. Aqui $n$ é par, assim $E_{\rm Md,1} = \frac{6}{2} = 3$ e $E_{\rm Md,2} = \frac{6}{2}+1 = 4$. Daí ${ Md} = \frac{3+7}{2} = 5$.
#
# * Para Series e DataFrames o método **median()** retorna a mediana dos valores.
# -
pd.Series(dados).median()
pd.Series(z).median()
# + [markdown] slideshow={"slide_type": "slide"}
# #### Mediana em dados agrupados em intervalos
#
# * Neste caso, utilizamos $E_{\rm Md} = \frac{n}{2}$ independentemente de $n$ ser par ou ímpar.
#
# * A *classe mediana* é a primeira classe tal que $F_{\rm ac} \geq E_{\rm Md}$.
# + [markdown] slideshow={"slide_type": "slide"}
# * Definimos a *mediana* pela fórmula
#
# $$
# {\rm Md} = l_{\rm Md} + h\cdot\left[ \frac{E_{\rm Md} - F_{\rm ac,ant}}{F_{\rm Md}}\right],
# $$
#
# onde,
#
# $l_{\rm Md}$ é o limite inferior da *classe mediana*,
#
# $h$ é a amplitude do intervalo,
#
# $F_{\rm ac,ant}$ é a frequência acumulada da classe anterior à *classe mediana*,
#
# $F_{\rm Md}$ é a frequência da *classe mediana*.
# + [markdown] slideshow={"slide_type": "slide"}
# * Para Series e DataFrames o método **cumsum()** retorna a soma acumulada dos valores.
# + slideshow={"slide_type": "-"}
d_freq_temp = dist_freq(dados, exibir_total=False)
d_freq_temp['Freq Acumulada'] = d_freq_temp['Frequência'].cumsum()
d_freq_temp
# + slideshow={"slide_type": "slide"}
def mediana_dist_freq(d_freq):
    """Median of grouped data:
        Md = l_Md + h * (E_Md - F_ac_ant) / F_Md
    where the median class is the first whose cumulative frequency reaches
    E_Md = n/2.

    Fixes: (1) always operate on a copy so the caller's DataFrame is never
    given a 'Freq Acumulada' column or a rebound index; (2) the original
    called astype with the IntervalArray *class* where a dtype is expected,
    which raises on newer pandas -- after dropping the 'Total' row the
    interval index is rebuilt with pd.IntervalIndex instead.
    """
    if type(d_freq.index.array).__name__ != 'IntervalArray':
        # drop the 'Total' row and restore a proper interval index
        d_freq = d_freq.head(-1).copy()
        d_freq.index = pd.IntervalIndex(list(d_freq.index))
    else:
        d_freq = d_freq.copy()
    n_obs = d_freq['Frequência'].sum()
    # class width, assumed constant (taken from the first class)
    h = d_freq.index.array[0].right - d_freq.index.array[0].left
    d_freq['Freq Acumulada'] = d_freq['Frequência'].cumsum()
    acumulada = d_freq['Freq Acumulada']
    EMd = n_obs / 2
    classe_mediana = d_freq[acumulada >= EMd].iloc[0]  # first class reaching E_Md
    lMd = classe_mediana.name.left
    FMd = classe_mediana['Frequência']
    anteriores = d_freq[acumulada < EMd]
    FAcAnt = anteriores.iloc[-1]['Freq Acumulada'] if len(anteriores) else 0
    return lMd + h * (EMd - FAcAnt) / FMd
# -
mediana_dist_freq(dist_freq(z))
mediana_dist_freq(dist_freq(dados))
# + [markdown] slideshow={"slide_type": "slide"}
# ## Medidas de Dispersão
#
#
# * As medidas de dispersão medem o grau de variabilidade dos elementos de uma distribuição;
#
# * O valor zero indica ausência de dispersão;
#
# * A dispersão aumenta à medida que aumenta o valor da medida de dispersão.
#
# * As principais Medidas de Dispersão: *Amplitude*, *Desvio Médio*, *Variância*, *Desvio Padrão*.
# + [markdown] slideshow={"slide_type": "slide"}
# * Motivação para as medidas de dispersão
#
# |Alunos||| Notas||| Média|
# |:--:|:--:|:--:|:--:|:--:|:--:|:--:|
# |Antônio|5|5|5|5|5|5|
# |João |6|4|5|4|6|5|
# |José |10|5|5|5|0|5|
# |Pedro |10|10|5|0|0|5|
#
#
# * Observa-se que:
# * As notas de Antônio não variaram;
#
# * As notas de João variaram menos do que as notas de José;
#
# * As notas de Pedro variaram mais do que as notas de todos os outros alunos.
# + [markdown] slideshow={"slide_type": "slide"}
# ### Amplitude
#
# * A amplitude nos fornece uma idéia do campo de variação dos elementos. Mais precisamente, ela fornece a maior variação possível dos dados.
#
# * A amplitude é dada pela fórmula:
# $$
# R = X_{\max} - X_{\min}.
# $$
# onde, $X_{\max}$ é o máximo dos valores nos dados e $X_{\min}$ é o mínimo dos valores nos dados.
# * Para Series e DataFrames os métodos **max()** e **min()** retornam respectivamente o máximo e mínimo dos valores.
# -
R = pd.Series(dados).max()-pd.Series(dados).min(); R
# + [markdown] slideshow={"slide_type": "slide"}
# ### Desvio Médio
#
# * Desejando-se medir a dispersão dos dados em relação a média, parece interessante a análise dos desvios em torno da média. Isto é, análise dos desvios:
#
# $$
# d_i=(X_i-\overline{X}).
# $$
#
# * Mas a soma de todos os desvios é igual a zero. Isto é:
#
# $$
# \sum_{i=1}^{n} d_i= \sum_{i=1}^{n} (X_i-\overline{X})= \sum_{i=1}^{n}X_i-\sum_{i=1}^{n}\overline{X}=\sum_{i=1}^{n}X_i-{n}\overline{X}=
# $$
#
# $$
# =\sum_{i=1}^{n}X_i-n\frac{\sum_{i=1}^{n}X_i}{n}= \sum_{i=1}^{n}X_i-\sum_{i=1}^{n}X_i=0.
# $$
# + [markdown] slideshow={"slide_type": "slide"}
# * Logo, será preciso encontrar uma maneira de se trabalhar com os desvios sem que a soma dê zero. Dessa forma, define-se o *desvio médio*.
#
# * Notação: representamos o *desvio médio* de um conjunto de dados por $DM$.
#
# * Portanto, definimos o *desvio médio* pela fórmula:
# $$
# DM=\sum_{i=1}^{n} \frac{|d_i|}{n}= \sum_{i=1}^{n} \frac{|X_i-\overline{X}|}{n}.
# $$
# * Para Series e DataFrames o método **mad()** retorna o *desvio médio* dos valores.
# -
pd.Series(dados).mad()
pd.Series(z).mad()
# + [markdown] slideshow={"slide_type": "slide"}
# #### Desvio médio em dados agrupados em intervalos
#
# * No caso em que temos os dados agrupados em intervalos, utilizamos a frequência e o ponto médio de cada classes para calcular a *desvio médio* pela fórmula:
#
# $$
# DM=\sum_{i=1}^{K} \frac{|d_i|\cdot F_i}{n}= \sum_{i=1}^{K} \frac{|pm_i-\overline{X}|\cdot F_i}{n}.
# $$
#
# onde $K$ é o número de classes, $F_i$ é a frequência da $i$-ésima classe e $pm_i$ é o ponto médio da $i$-ésima classe.
# + slideshow={"slide_type": "slide"}
def dm_dist_freq(d_freq):
    """Mean absolute deviation for grouped data:
        DM = sum(F_i * |pm_i - mean|) / sum(F_i)
    with pm_i the class midpoints and the mean from media_dist_freq.
    Fix: always operate on a copy so the caller's DataFrame does not gain
    a 'Ponto Médio' column (the original only copied when a 'Total' row
    was present).
    """
    if type(d_freq.index.array).__name__ != 'IntervalArray':
        d_freq = d_freq.head(-1)  # drop the 'Total' row
    d_freq = d_freq.copy()  # never mutate the caller's DataFrame
    intervalos = d_freq.index.array
    d_freq['Ponto Médio'] = [(intervalo.left + intervalo.right) / 2 for intervalo in intervalos]
    desvios = np.abs(d_freq['Ponto Médio'] - media_dist_freq(d_freq))
    return (d_freq['Frequência'] * desvios).sum() / d_freq['Frequência'].sum()
# -
dm_dist_freq(dist_freq(dados))
dm_dist_freq(dist_freq(z))
# + [markdown] slideshow={"slide_type": "slide"}
# **Observações**:
#
# * A *amplitude* não mede bem a dispersão dos dados porque, usam-se apenas os valores extremos, ao invés de utilizar todos os elementos da distribuição.
#
# * O *desvio médio* é mais vantajoso que a *amplitude*, visto que leva em consideração todos os valores da distribuição e é menos sensível a *outliers*.
#
# * No entanto, *desvio médio* não é tão frequentemente empregado no ajuste de modelos, pois não apresenta propriedades matemáticas interessantes, porém é bastante utilizado na validação e comparação de modelos.
# + [markdown] slideshow={"slide_type": "slide"}
# ### Variância
#
# * A *variância* é a medida de dispersão mais utilizada. É o quociente entre a soma dos quadrados dos desvios e o número de elementos.
# * Assim, temos a seguinte definição de *variância populacional* que a é dada pela fórmula:
#
# $$
# \sigma^2=\sum_{i=1}^{N} \frac{d_i^2}{N}= \sum_{i=1}^{N} \frac{(X_i-\overline{X})^2}{N}.
# $$
# onde $\sigma^2$ indica a variância populacional e lê-se sigma ao quadrado ou sigma dois. Neste caso, $\overline{X}$ e $N$ da formúla representam a média populacional e o tamanho populacional, respectivamente.
# + [markdown] slideshow={"slide_type": "slide"}
# ### Variância Amostral
#
# * Temos a seguinte definição de *variância amostral* que a é dada pela fórmula:
#
# $$
# S^2=\sum_{i=1}^{n} \frac{d_i^2}{n-1}= \sum_{i=1}^{n} \frac{(X_i-\overline{X})^2}{n-1}.
# $$
#
# * Para Series e DataFrames o método **var()** retorna a *variância amostral* dos valores.
# -
pd.Series(dados).var()
pd.Series(z).var()
# + [markdown] slideshow={"slide_type": "slide"}
# #### Variância amostral em dados agrupados em intervalos
#
# * No caso em que temos os dados agrupados em intervalos, utilizamos a frequência e o ponto médio de cada classes para calcular a *variância* pela fórmula:
#
# $$
# S^2=\sum_{i=1}^{K} \frac{d_i^2\cdot F_i}{n-1}= \sum_{i=1}^{K} \frac{(pm_i-\overline{X})^2\cdot F_i}{n-1}.
# $$
#
# onde $K$ é o número de classes, $F_i$ é a frequência da $i$-ésima classe e $pm_i$ é o ponto médio da $i$-ésima classe.
# + slideshow={"slide_type": "slide"}
def var_dist_freq(d_freq):
    """Sample variance for grouped data:
        S^2 = sum(F_i * (pm_i - mean)^2) / (n - 1)
    with pm_i the class midpoints and the mean from media_dist_freq.
    Fix: always operate on a copy so the caller's DataFrame does not gain
    a 'Ponto Médio' column (the original only copied when a 'Total' row
    was present).
    """
    if type(d_freq.index.array).__name__ != 'IntervalArray':
        d_freq = d_freq.head(-1)  # drop the 'Total' row
    d_freq = d_freq.copy()  # never mutate the caller's DataFrame
    intervalos = d_freq.index.array
    d_freq['Ponto Médio'] = [(intervalo.left + intervalo.right) / 2 for intervalo in intervalos]
    desvios2 = (d_freq['Ponto Médio'] - media_dist_freq(d_freq)) ** 2
    return (d_freq['Frequência'] * desvios2).sum() / (d_freq['Frequência'].sum() - 1)
# -
var_dist_freq(dist_freq(dados))
var_dist_freq(dist_freq(z))
# + [markdown] slideshow={"slide_type": "slide"}
# ### Desvio Padrão
#
# * Temos também outra medida de dispersão, que é a raiz quadrada da variância, chamada de *desvio padrão*. Assim,
#
# $$
# \sigma = \sqrt{\sigma^2} \quad \hbox{é o desvio padrão populacional}
# $$
#
# e
#
# $$
# S = \sqrt{S^2} \quad \hbox{é o desvio padrão amostral.}
# $$
#
# * Para o cálculo do *desvio padrão* deve-se primeiramente determinar o valor da variância e, em seguida, extrair a raiz quadrada desse resultado.
# * Para Series e DataFrames o método **std()** retorna o *desvio padrão amostral* dos valores.
# + slideshow={"slide_type": "slide"}
pd.Series(dados).std()
# -
np.sqrt(pd.Series(dados).var())
# + slideshow={"slide_type": "-"}
pd.Series(z).std()
# + slideshow={"slide_type": "-"}
np.sqrt(pd.Series(z).var())
# + [markdown] slideshow={"slide_type": "slide"}
# #### Desvio Padrão para dados agrupados em intervalos
# -
def dp_dist_freq(d_freq):
    """Sample standard deviation for grouped data: sqrt of the grouped variance."""
    variance = var_dist_freq(d_freq)
    return np.sqrt(variance)
# Grouped-data standard deviation for both datasets.
dp_dist_freq(dist_freq(dados))
dp_dist_freq(dist_freq(z))
# + [markdown] slideshow={"slide_type": "slide"}
# ## Resumo Estatístico de uma *Serie* ou *DataFrame*
#
# Para obtermos um resumo estatístico de uma *Serie* ou *DataFrame* do *pandas* utilizamos o método **describe**.
#
# O método **describe** exclui observações faltantes por padrão.
# + [markdown] slideshow={"slide_type": "slide"}
# Exemplos:
# + slideshow={"slide_type": "-"}
# Summary statistics (count, mean, std, quartiles, min/max) of each dataset.
pd.Series(dados).describe()
# + slideshow={"slide_type": "-"}
pd.DataFrame(z).describe()
# + [markdown] slideshow={"slide_type": "slide"}
# **Observações**
#
# * Se as entradas da *Serie* não forem numéricas o método *describe* retornará uma tabela contendo as quantidades de valores únicos, qual o valor mais frequente e qual a quantidade de elementos do valor mais frequente.
#
# * No caso de um *DataFrame* que contenha colunas numéricas e colunas não-numéricas, o método *describe* só irá considerar as colunas numéricas.
# + [markdown] slideshow={"slide_type": "slide"}
# Exemplos:
# + slideshow={"slide_type": "-"}
serie_ex1 = pd.Series(['a','b','c','d','e','f','g','h','i','j'])  # non-numeric (object) Series
serie_ex2 = pd.Series(range(10))  # numeric Series
# -
# For an object-dtype Series, describe() reports count, unique, top and freq instead.
serie_ex1.describe()
serie_ex2.describe()
# + [markdown] slideshow={"slide_type": "slide"}
# Exemplo:
# -
# Build a DataFrame with one object column and one numeric column.
df_exemplo = pd.concat([serie_ex1, serie_ex2], axis=1)
# + slideshow={"slide_type": "-"}
df_exemplo
# + [markdown] slideshow={"slide_type": "slide"}
# Exemplo:
# -
# With mixed dtypes, describe() keeps only the numeric column by default.
df_exemplo.describe()
# + [markdown] slideshow={"slide_type": "slide"}
# **Observação**: Podemos controlar o que será considerado no describe utilizando os argumentos *include* ou *exclude*. No caso, devemos colocar como argumento uma lista contendo os tipos a serem incluídos ou excluídos. Existem vários tipos que podem ser considerados para serem incluídos ou excluídos. Para uma lista dos tipos disponíveis, por favor consultem a documentação da função **select_dtypes()**.
# + [markdown] slideshow={"slide_type": "slide"}
# Exemplos:
# + slideshow={"slide_type": "-"}
# Summarize only the non-numeric columns.
df_exemplo.describe(exclude='number')
# -
# Equivalent here: explicitly include the object-dtype columns only.
df_exemplo.describe(include='object')
# + [markdown] slideshow={"slide_type": "slide"}
# Exemplo:
# + slideshow={"slide_type": "-"}
# Include every column; statistics that do not apply to a column show as NaN.
df_exemplo.describe(include='all')
| _build/html/_sources/ipynb/08b-estatistica-descritiva.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# # Rigid-body transformations in three-dimensions
#
# Marcos Duarte
# The kinematics of a rigid body is completely described by its pose, i.e., its position and orientation in space (and the corresponding changes, translation and rotation). In a three-dimensional space, at least three coordinates and three angles are necessary to describe the pose of the rigid body, totalizing six degrees of freedom for a rigid body.
#
# In motion analysis, to describe a translation and rotation of a rigid body with respect to a coordinate system, typically we attach another coordinate system to the rigid body and determine a transformation between these two coordinate systems.
#
# A transformation is any function mapping a set to another set. For the description of the kinematics of rigid bodies, we are interested only in what is called rigid or Euclidean transformations (denoted as SE(3) for the three-dimensional space) because they preserve the distance between every pair of points of the body (which is considered rigid by definition). Translations and rotations are examples of rigid transformations (a reflection is also an example of rigid transformation but this changes the right-hand axis convention to a left hand, which usually is not of interest). In turn, rigid transformations are examples of [affine transformations](https://en.wikipedia.org/wiki/Affine_transformation). Examples of other affine transformations are shear and scaling transformations (which preserves angles but not lengths).
#
# We will follow the same rationale as in the notebook [Rigid-body transformations in a plane (2D)](http://nbviewer.ipython.org/github/demotu/BMC/blob/master/notebooks/Transformation2D.ipynb) and we will skip the fundamental concepts already covered there. So, you if haven't done yet, you should read that notebook before continuing here.
# ## Translation
#
# A pure three-dimensional translation of a rigid body (or a coordinate system attached to it) in relation to other rigid body (with other coordinate system) is illustrated in the figure below.
# <br>
# <figure><img src='./../images/translation3D.png' alt='translation 3D'/> <figcaption><center><i>Figure. A point in three-dimensional space represented in two coordinate systems, with one coordinate system translated.</i></center></figcaption> </figure>
#
# The position of point $\mathbf{P}$ originally described in the $xyz$ (local) coordinate system but now described in the $\mathbf{XYZ}$ (Global) coordinate system in vector form is:
#
# $$ \mathbf{P_G} = \mathbf{L_G} + \mathbf{P_l} $$
#
# Or in terms of its components:
#
# $$ \begin{array}{}
# \mathbf{P_X} =& \mathbf{L_X} + \mathbf{P}_x \\
# \mathbf{P_Y} =& \mathbf{L_Y} + \mathbf{P}_y \\
# \mathbf{P_Z} =& \mathbf{L_Z} + \mathbf{P}_z
# \end{array} $$
#
# And in matrix form:
#
# $$ \begin{bmatrix}
# \mathbf{P_X} \\
# \mathbf{P_Y} \\
# \mathbf{P_Z}
# \end{bmatrix} =
# \begin{bmatrix}
# \mathbf{L_X} \\
# \mathbf{L_Y} \\
# \mathbf{L_Z}
# \end{bmatrix} +
# \begin{bmatrix}
# \mathbf{P}_x \\
# \mathbf{P}_y \\
# \mathbf{P}_z
# \end{bmatrix} $$
#
# From classical mechanics, this is an example of [Galilean transformation](http://en.wikipedia.org/wiki/Galilean_transformation).
#
# Let's use Python to compute some numeric examples:
# Import the necessary libraries
import numpy as np
# suppress scientific notation for small numbers:
# (precision=4 limits printed decimals; suppress=True avoids 1e-17-style output)
np.set_printoptions(precision=4, suppress=True)
# For example, if the local coordinate system is translated by $ \mathbf{L_G}=[1, 2, 3] $ in relation to the Global coordinate system, a point with coordinates $ \mathbf{P_l}=[4, 5, 6] $ at the local coordinate system will have the position $ \mathbf{P_G}=[5, 7, 9] $ at the Global coordinate system:
LG = np.array([1, 2, 3]) # translation of the local frame (Numpy array)
Pl = np.array([4, 5, 6]) # point position in the local frame
PG = LG + Pl             # point position in the Global frame
PG
# This operation also works if we have more than one point (NumPy tries to guess how to handle vectors with different dimensions):
Pl = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) # 2D array with 3 rows and 3 columns (one point per row)
PG = LG + Pl  # broadcasting adds LG to each row (each point)
PG
# ## Rotation
#
# A pure three-dimensional rotation of a $xyz$ (local) coordinate system in relation to other $\mathbf{XYZ}$ (Global) coordinate system and the position of a point in these two coordinate systems are illustrated in the next figure (remember that this is equivalent to describing a rotation between two rigid bodies).
# <br>
# <figure><img src='./../images/rotation3D.png' alt='rotation 3D'/> <figcaption><center><i>A point in three-dimensional space represented in two coordinate systems, with one system rotated.</i></center></figcaption> </figure>
#
# In analogy to the rotation in two dimensions, we can calculate the rotation matrix that describes the rotation of the $xyz$ (local) coordinate system in relation to the $\mathbf{XYZ}$ (Global) coordinate system using the direction cosines between the axes of the two coordinate systems:
#
# $$ \mathbf{R_{Gl}} = \begin{bmatrix}
# cos\mathbf{X}x & cos\mathbf{X}y & cos\mathbf{X}z \\
# cos\mathbf{Y}x & cos\mathbf{Y}y & cos\mathbf{Y}z \\
# cos\mathbf{Z}x & cos\mathbf{Z}y & cos\mathbf{Z}z
# \end{bmatrix} $$
#
# Note however that for rotations around more than one axis, these angles will not lie in the main planes ($\mathbf{XY, YZ, ZX}$) of the $\mathbf{XYZ}$ coordinate system, as illustrated in the figure below for the direction angles of the $y$ axis only. Thus, the determination of these angles by simple inspection, as we have done for the two-dimensional case, would not be simple.
# <br>
# <figure>
# <img src='./../images/directioncosine3D.png' width=260 alt='direction angles 3D'/> <figcaption><center><i>Figure. Definition of direction angles for the $y$ axis of the local coordinate system in relation to the $\mathbf{XYZ}$ Global coordinate system.</i></center></figcaption>
# </figure>
#
# Note that the nine angles shown in the matrix above for the direction cosines are obviously redundant since only three angles are necessary to describe the orientation of a rigid body in the three-dimensional space.
#
# An important characteristic of angles in the three-dimensional space is that angles cannot be treated as vectors: the result of a sequence of rotations of a rigid body around different axes depends on the order of the rotations, as illustrated in the next figure.
# <br>
# <figure>
# <img src='./../images/rotationsseqs2.png' alt='rotations'/><figcaption><i>Figure. The result of a sequence of rotations around different axes of a coordinate system depends on the order of the rotations. In the first example (first row), the rotations are around a Global (fixed) coordinate system. In the second example (second row), the rotations are around a local (rotating) coordinate system.</i></figcaption>
# </figure>
#
# Let's focus now on how to understand rotations in the three-dimensional space, looking at the rotations between coordinate systems (or between rigid bodies). Later we will apply what we have learned to describe the position of a point in these different coordinate systems.
# ### Euler angles
#
# There are different ways to describe a three-dimensional rotation of a rigid body (or of a coordinate system). Probably, the most straightforward solution would be to use a [spherical coordinate system](http://nbviewer.ipython.org/github/demotu/BMC/blob/master/notebooks/ReferenceFrame.ipynb#Spherical-coordinate-system), but spherical coordinates would be difficult to give an anatomical or clinical interpretation. A solution that has been often employed in biomechanics to handle rotations in the three-dimensional space is to use Euler angles. Under certain conditions, Euler angles can have an anatomical interpretation, but this representation also has some caveats. Let's see the Euler angles now.
#
# [Leonhard Euler](https://en.wikipedia.org/wiki/Leonhard_Euler) in the XVIII century showed that two three-dimensional coordinate systems with a common origin can be related by a sequence of up to three elemental rotations about the axes of the local coordinate system, where no two successive rotations may be about the same axis, which now are known as [Euler (or Eulerian) angles](http://en.wikipedia.org/wiki/Euler_angles).
#
# #### Elemental rotations
#
# First, let's see rotations around a fixed Global coordinate system as we did for the two-dimensional case. The next figure illustrates elemental rotations of the local coordinate system around each axis of the fixed Global coordinate system.
# <br>
# <figure>
# <img src='./../images/rotations.png' alt='rotations'/> <figcaption><center><i>Figure. Elemental rotations of the $xyz$ coordinate system around each axis, $\mathbf{X}$, $\mathbf{Y}$, and $\mathbf{Z}$, of the fixed $\mathbf{XYZ}$ coordinate system. Note that for better clarity, the axis around where the rotation occurs is shown perpendicular to this page for each elemental rotation.</i></center></figcaption>
# </figure>
#
# The rotation matrices for the elemental rotations around each axis of the fixed $\mathbf{XYZ}$ coordinate system (rotations of the local coordinate system in relation to the Global coordinate system) are shown next.
#
# Around $\mathbf{X}$ axis:
#
# $$ \mathbf{R_{Gl,\:X}} =
# \begin{bmatrix}
# 1 & 0 & 0 \\
# 0 & cos\alpha & -sin\alpha \\
# 0 & sin\alpha & cos\alpha
# \end{bmatrix} $$
#
# Around $\mathbf{Y}$ axis:
#
# $$ \mathbf{R_{Gl,\:Y}} =
# \begin{bmatrix}
# cos\beta & 0 & sin\beta \\
# 0 & 1 & 0 \\
# -sin\beta & 0 & cos\beta
# \end{bmatrix} $$
#
# Around $\mathbf{Z}$ axis:
#
# $$ \mathbf{R_{Gl,\:Z}} =
# \begin{bmatrix}
# cos\gamma & -sin\gamma & 0\\
# sin\gamma & cos\gamma & 0 \\
# 0 & 0 & 1
# \end{bmatrix} $$
#
# These matrices are the rotation matrices for the case of two-dimensional coordinate systems plus the corresponding terms for the third axes of the local and Global coordinate systems, which are parallel.
# To understand why the terms for the third axes are 1's or 0's, for instance, remember they represent the cosine directors. The cosines between $\mathbf{X}x$, $\mathbf{Y}y$, and $\mathbf{Z}z$ for the elemental rotations around respectively the $\mathbf{X}$, $\mathbf{Y}$, and $\mathbf{Z}$ axes are all 1 because $\mathbf{X}x$, $\mathbf{Y}y$, and $\mathbf{Z}z$ are parallel ($cos 0^o$). The cosines of the other elements are zero because the axis around where each rotation occurs is perpendicular to the other axes of the coordinate systems ($cos 90^o$).
#
# The rotation matrices for the elemental rotations this time around each axis of the $xyz$ coordinate system (rotations of the Global coordinate system in relation to the local coordinate system), similarly to the two-dimensional case, are simply the transpose of the above matrices as shown next.
#
# Around $x$ axis:
#
# $$ \mathbf{R}_{\mathbf{lG},\;x} =
# \begin{bmatrix}
# 1 & 0 & 0 \\
# 0 & cos\alpha & sin\alpha \\
# 0 & -sin\alpha & cos\alpha
# \end{bmatrix} $$
#
# Around $y$ axis:
#
# $$ \mathbf{R}_{\mathbf{lG},\;y} =
# \begin{bmatrix}
# cos\beta & 0 & -sin\beta \\
# 0 & 1 & 0 \\
# sin\beta & 0 & cos\beta
# \end{bmatrix} $$
#
# Around $z$ axis:
#
# $$ \mathbf{R}_{\mathbf{lG},\;z} =
# \begin{bmatrix}
# cos\gamma & sin\gamma & 0\\
# -sin\gamma & cos\gamma & 0 \\
# 0 & 0 & 1
# \end{bmatrix} $$
#
# Notice this is equivalent to instead of rotating the local coordinate system by $\alpha, \beta, \gamma$ in relation to axes of the Global coordinate system, to rotate the Global coordinate system by $-\alpha, -\beta, -\gamma$ in relation to the axes of the local coordinate system; remember that $cos(-\:\cdot)=cos(\cdot)$ and $sin(-\:\cdot)=-sin(\cdot)$.
#
# The fact that we chose to rotate the local coordinate system by a counterclockwise (positive) angle in relation to the Global coordinate system is just a matter of convention.
# #### Sequence of rotations
#
# Consider now a sequence of elemental rotations around the $\mathbf{X}$, $\mathbf{Y}$, and $\mathbf{Z}$ axes of the fixed $\mathbf{XYZ}$ coordinate system illustrated in the next figure.
# <br>
# <figure><img src='./../images/rotations_XYZ.png' alt='rotations'/> <figcaption><center><i>Figure. Sequence of elemental rotations of the $xyz$ coordinate system around each axis, $\mathbf{X}$, $\mathbf{Y}$, and $\mathbf{Z}$, of the fixed $\mathbf{XYZ}$ coordinate system.</i></center></figcaption> </figure>
#
# This sequence of elemental rotations (each one of the local coordinate system with respect to the fixed Global coordinate system) is mathematically represented by a multiplication between the rotation matrices:
#
# $$ \begin{array}{l l}
# \mathbf{R_{Gl,\;XYZ}} & = \mathbf{R_{Z}} \mathbf{R_{Y}} \mathbf{R_{X}} \\
# \\
# & = \begin{bmatrix}
# cos\gamma & -sin\gamma & 0\\
# sin\gamma & cos\gamma & 0 \\
# 0 & 0 & 1
# \end{bmatrix}
# \begin{bmatrix}
# cos\beta & 0 & sin\beta \\
# 0 & 1 & 0 \\
# -sin\beta & 0 & cos\beta
# \end{bmatrix}
# \begin{bmatrix}
# 1 & 0 & 0 \\
# 0 & cos\alpha & -sin\alpha \\
# 0 & sin\alpha & cos\alpha
# \end{bmatrix} \\
# \\
# & =
# \begin{bmatrix}
# cos\beta\:cos\gamma \;&\;
# sin\alpha\:sin\beta\:cos\gamma-cos\alpha\:sin\gamma \;&\;
# cos\alpha\:sin\beta\:cos\gamma+sin\alpha\:sin\gamma \;\;\; \\
# cos\beta\:sin\gamma \;&\;
# sin\alpha\:sin\beta\:sin\gamma+cos\alpha\:cos\gamma \;&\;
# cos\alpha\:sin\beta\:sin\gamma-sin\alpha\:cos\gamma \;\;\; \\
# -sin\beta \;&\; sin\alpha\:cos\beta \;&\; cos\alpha\:cos\beta \;\;\;
# \end{bmatrix}
# \end{array} $$
#
# Note that the order of the multiplication of the matrices is from right to left (first the second rightmost matrix times the rightmost matrix, then the leftmost matrix times this result).
#
# We can check this matrix multiplication using [Sympy](http://sympy.org/en/index.html):
# +
#import the necessary libraries
from IPython.core.display import Math, display
import sympy as sym
cos, sin = sym.cos, sym.sin
a, b, g = sym.symbols('alpha, beta, gamma')  # symbolic rotation angles about X, Y, Z
# Elemental rotation matrices of xyz in relation to XYZ:
RX = sym.Matrix([[1, 0, 0], [0, cos(a), -sin(a)], [0, sin(a), cos(a)]])
RY = sym.Matrix([[cos(b), 0, sin(b)], [0, 1, 0], [-sin(b), 0, cos(b)]])
RZ = sym.Matrix([[cos(g), -sin(g), 0], [sin(g), cos(g), 0], [0, 0, 1]])
# Rotation matrix of xyz in relation to XYZ:
# note the right-to-left order of the sequence X, Y, Z
RXYZ = RZ*RY*RX
display(Math(sym.latex(r'\mathbf{R_{Gl,\;XYZ}}=') + sym.latex(RXYZ, mat_str='matrix')))
# -
# For instance, we can calculate the numerical rotation matrix for these sequential elemental rotations by $90^o$ around $\mathbf{X,Y,Z}$:
# turn the symbolic matrix into a numeric (numpy) function of the three angles
R = sym.lambdify((a, b, g), RXYZ, 'numpy')
# evaluate it at 90 degrees (pi/2 rad) about each axis
R = R(np.pi/2, np.pi/2, np.pi/2)
display(Math(r'\mathbf{R_{Gl,\;XYZ}}(90^o, 90^o, 90^o) =' + \
             sym.latex(sym.Matrix(R).n(chop=True, prec=3))))
# Examining the matrix above and the correspondent previous figure, one can see they agree: the rotated $x$ axis (first column of the above matrix) has value -1 in the $\mathbf{Z}$ direction $[0,0,-1]$, the rotated $y$ axis (second column) is at the $\mathbf{Y}$ direction $[0,1,0]$, and the rotated $z$ axis (third column) is at the $\mathbf{X}$ direction $[1,0,0]$.
#
# We also can calculate the sequence of elemental rotations around the $x$, $y$, and $z$ axes of the rotating $xyz$ coordinate system illustrated in the next figure.
# <br>
# <figure>
# <img src='./../images/rotations_xyz2.png' alt='rotations'/> <figcaption><center><i>Figure. Sequence of elemental rotations of a second $xyz$ local coordinate system around each axis, $x$, $y$, and $z$, of the rotating $xyz$ coordinate system.</i></center></figcaption>
# </figure>
#
# Likewise, this sequence of elemental rotations (each one of the local coordinate system with respect to the rotating local coordinate system) is mathematically represented by a multiplication between the rotation matrices (which are the inverse of the matrices for the rotations around $\mathbf{X,Y,Z}$ as we saw earlier):
#
# $$ \begin{array}{l l}
# \mathbf{R}_{\mathbf{lG},\;xyz} & = \mathbf{R_{z}} \mathbf{R_{y}} \mathbf{R_{x}} \\
# \\
# & = \begin{bmatrix}
# cos\gamma & sin\gamma & 0\\
# -sin\gamma & cos\gamma & 0 \\
# 0 & 0 & 1
# \end{bmatrix}
# \begin{bmatrix}
# cos\beta & 0 & -sin\beta \\
# 0 & 1 & 0 \\
# sin\beta & 0 & cos\beta
# \end{bmatrix}
# \begin{bmatrix}
# 1 & 0 & 0 \\
# 0 & cos\alpha & sin\alpha \\
# 0 & -sin\alpha & cos\alpha
# \end{bmatrix} \\
# \\
# & =
# \begin{bmatrix}
# cos\beta\:cos\gamma \;&\;
# sin\alpha\:sin\beta\:cos\gamma+cos\alpha\:sin\gamma \;&\;
# cos\alpha\:sin\beta\:cos\gamma-sin\alpha\:sin\gamma \;\;\; \\
# -cos\beta\:sin\gamma \;&\;
# -sin\alpha\:sin\beta\:sin\gamma+cos\alpha\:cos\gamma \;&\;
# cos\alpha\:sin\beta\:sin\gamma+sin\alpha\:cos\gamma \;\;\; \\
# sin\beta \;&\; -sin\alpha\:cos\beta \;&\; cos\alpha\:cos\beta \;\;\;
# \end{bmatrix}
# \end{array} $$
#
# As before, the order of the multiplication of the matrices is from right to left (first the second rightmost matrix times the rightmost matrix, then the leftmost matrix times this result).
#
# Once again, we can check this matrix multiplication using [Sympy](http://sympy.org/en/index.html):
a, b, g = sym.symbols('alpha, beta, gamma')  # symbolic rotation angles about x, y, z
# Elemental rotation matrices of xyz (local):
# (these are the transposes of the Global-axis matrices RX, RY, RZ)
Rx = sym.Matrix([[1, 0, 0], [0, cos(a), sin(a)], [0, -sin(a), cos(a)]])
Ry = sym.Matrix([[cos(b), 0, -sin(b)], [0, 1, 0], [sin(b), 0, cos(b)]])
Rz = sym.Matrix([[cos(g), sin(g), 0], [-sin(g), cos(g), 0], [0, 0, 1]])
# Rotation matrix of xyz' in relation to xyz:
Rxyz = Rz*Ry*Rx
Math(sym.latex(r'\mathbf{R}_{\mathbf{lG},\;xyz}=') + sym.latex(Rxyz, mat_str='matrix'))
# For instance, let's calculate the numerical rotation matrix for these sequential elemental rotations by $90^o$ around $x,y,z$:
# numeric evaluation of the local-sequence matrix at 90 degrees about each axis
R = sym.lambdify((a, b, g), Rxyz, 'numpy')
R = R(np.pi/2, np.pi/2, np.pi/2)
display(Math(r'\mathbf{R}_{\mathbf{lG},\;xyz}(90^o, 90^o, 90^o) =' + \
             sym.latex(sym.Matrix(R).n(chop=True, prec=3))))
# Examining the above matrix and the correspondent previous figure, one can see they also agree: the rotated $x$ axis (first column of the above matrix) is at the $\mathbf{Z}$ direction $[0,0,1]$, the rotated $y$ axis (second column) is at the $\mathbf{-Y}$ direction $[0,-1,0]$, and the rotated $z$ axis (third column) is at the $\mathbf{X}$ direction $[1,0,0]$.
#
# Examining the $\mathbf{R_{Gl,\;XYZ}}$ and $\mathbf{R}_{lG,\;xyz}$ matrices one can see that negating the angles from one of the matrices results in the other matrix. That is, the rotations of $xyz$ in relation to $\mathbf{XYZ}$ by $\alpha, \beta, \gamma$ result in the same matrix as the rotations of $\mathbf{XYZ}$ in relation to $xyz$ by $-\alpha, -\beta, -\gamma$, as we saw for the elemental rotations.
# Let's check that:
# There is another property of the rotation matrices for the different coordinate systems: the rotation matrix, for example from the Global to the local coordinate system for the $xyz$ sequence, is just the transpose of the rotation matrix for the inverse operation (from the local to the Global coordinate system) of the inverse sequence ($\mathbf{ZYX}$) and vice-versa:
# +
# Rotation matrix of xyz in relation to XYZ:
display(Math(sym.latex(r'\mathbf{R_{GL,\;XYZ}}(\alpha,\beta,\gamma) \quad =') + \
             sym.latex(RXYZ, mat_str='matrix')))
# Elemental rotation matrices of XYZ in relation to xyz and negate all the angles:
# (.T transposes each local-axis matrix back to the Global-axis form)
Rx_n = sym.Matrix([[1, 0, 0], [0, cos(-a), -sin(-a)], [0, sin(-a), cos(-a)]]).T
Ry_n = sym.Matrix([[cos(-b), 0, sin(-b)], [0, 1, 0], [-sin(-b), 0, cos(-b)]]).T
Rz_n = sym.Matrix([[cos(-g), -sin(-g), 0], [sin(-g), cos(-g), 0], [0, 0, 1]]).T
# Rotation matrix of XYZ in relation to xyz:
Rxyz_n = Rz_n*Ry_n*Rx_n
display(Math(sym.latex(r'\mathbf{R}_{\mathbf{lG},\;xyz}(-\alpha,-\beta,-\gamma)=') + \
             sym.latex(Rxyz_n, mat_str='matrix')))
# Check that the two matrices are equal:
print('\n')
display(Math(sym.latex(r'\mathbf{R_{GL,\;XYZ}}(\alpha,\beta,\gamma) \;==\;' + \
             r'\mathbf{R}_{\mathbf{lG},\;xyz}(-\alpha,-\beta,-\gamma)')))
RXYZ == Rxyz_n
# -
# Rotation matrix for the ZYX sequence about the Global (fixed) axes;
# its transpose equals the local xyz-sequence matrix with angles swapped.
RZYX = RX*RY*RZ
display(Math(sym.latex(r'\mathbf{R_{Gl,\;ZYX}^T}=') + sym.latex(RZYX.T, mat_str='matrix')))
print('\n')  # fixed: original had a stray quote, print('\n'') — a SyntaxError
display(Math(sym.latex(r'\mathbf{R}_{\mathbf{lG},\;xyz}(\alpha,\beta,\gamma) \;==\;' + \
             r'\mathbf{R_{Gl,\;ZYX}^T}(\gamma,\beta,\alpha)')))
# Check that the two matrices are equal:
Rxyz == RZYX.T
# #### The 12 different sequences of Euler angles
#
# The Euler angles are defined in terms of rotations around a rotating local coordinate system. As we saw for the sequence of rotations around $x, y, z$, the axes of the local rotated coordinate system are not fixed in space because after the first elemental rotation, the other two axes rotate.
#
# Other sequences of rotations could be produced without combining axes of the two different coordinate systems (Global and local) for the definition of the rotation axes. There is a total of 12 different sequences of three elemental rotations that are valid and may be used for describing the rotation of a coordinate system with respect to another coordinate system:
#
# $$ xyz \quad xzy \quad yzx \quad yxz \quad zxy \quad zyx $$
#
# $$ xyx \quad xzx \quad yzy \quad yxy \quad zxz \quad zyz $$
#
# The first six sequences (first row) are all around different axes, they are usually referred as Cardan or Tait–Bryan angles. The other six sequences (second row) have the first and third rotations around the same axis, but keep in mind that the axis for the third rotation is not at the same place anymore because it changed its orientation after the second rotation. The sequences with repeated axes are known as proper or classic Euler angles.
#
# Which order to use it is a matter of convention, but because the order affects the results, it's fundamental to follow a convention and report it. In Engineering Mechanics (including Biomechanics), the $xyz$ order is more common; in Physics the $zxz$ order is more common (but the letters chosen to refer to the axes are arbitrary, what matters is the directions they represent). In Biomechanics, the order for the Cardan angles is most often based on the angle of most interest or of most reliable measurement. Accordingly, the axis of flexion/extension is typically selected as the first axis, the axis for abduction/adduction is the second, and the axis for internal/external rotation is the last one. We will see about this order later. The $zyx$ order is commonly used to describe the orientation of a ship or aircraft and the rotations are known as the nautical angles: yaw, pitch and roll, respectively (see next figure).
# <br>
# <figure><img src='https://upload.wikimedia.org/wikipedia/commons/thumb/1/16/Yaw_Axis.svg/319px-Yaw_Axis.svg.png' alt='translation and rotation 3D'/> <figcaption><center><i>Figure. The principal axes of an aircraft and the names for the rotations around these axes (<a href="https://en.wikipedia.org/wiki/Euler_angles">image from Wikipedia</a>).</i></center></figcaption> </figure>
#
# If instead of rotations around the rotating local coordinate system we perform rotations around the fixed Global coordinate system, we will have other 12 different sequences of three elemental rotations, these are called simply rotation angles. So, in total there are 24 possible different sequences of three elemental rotations, but the 24 orders are not independent; with the 12 different sequences of Euler angles at the local coordinate system we can obtain the other 12 sequences at the Global coordinate system.
#
# The Python function `euler_rotmat.py` (code at the end of this text) determines the rotation matrix in algebraic form for any of the 24 different sequences (and sequences with only one or two axes can be inputed). This function also determines the rotation matrix in numeric form if a list of up to three angles are inputed.
#
# For instance, the rotation matrix in algebraic form for the $zxz$ order of Euler angles at the local coordinate system and the correspondent rotation matrix in numeric form after three elemental rotations by $90^o$ each are:
import sys
sys.path.insert(1, r'./../functions')  # make the local helper module importable
from euler_rotmat import euler_rotmat
# algebraic (Ra) and numeric (Rn) rotation matrices for the local zxz sequence
Ra, Rn = euler_rotmat(order='zxz', frame='local', angles=[90, 90, 90])
# #### Line of nodes
#
# The second axis of rotation in the rotating coordinate system is also referred as the nodal axis or line of nodes; this axis coincides with the intersection of two perpendicular planes, one from each Global (fixed) and local (rotating) coordinate systems. The figure below shows an example of rotations and the nodal axis for the $xyz$ sequence of the Cardan angles.
#
# <div class='center-align'><figure><img src='./../images/Node.png' alt='rotations'/> <figcaption><center><i>Figure. First row: example of rotations for the $xyz$ sequence of the Cardan angles. The Global (fixed) $XYZ$ coordinate system is shown in green, the local (rotating) $xyz$ coordinate system is shown in blue. The nodal axis (<b>N</b>, shown in red) is defined by the intersection of the $YZ$ and $xy$ planes and all rotations can be described in relation to this nodal axis or to a perpendicular axis to it. Second row: starting from no rotation, the local coordinate system is rotated by $\alpha$ around the $x$ axis, then by $\beta$ around the rotated $y$ axis, and finally by $\gamma$ around the twice rotated $z$ axis. Note that the line of nodes coincides with the $y$ axis for the second rotation. </i></center></figcaption> </figure></div>
# #### Determination of the Euler angles
#
# Once a convention is adopted, the corresponding three Euler angles of rotation can be found.
# For example, for the $\mathbf{R}_{xyz}$ rotation matrix:
# algebraic rotation matrix for the local xyz Cardan sequence (no angles given)
R = euler_rotmat(order='xyz', frame='local')
# The corresponding Cardan angles for the `xyz` sequence can be given by:
#
# $$ \begin{array}{}
# \alpha = arctan\left(\frac{sin(\alpha)}{cos(\alpha)}\right) = arctan\left(\frac{-\mathbf{R}_{21}}{\;\;\;\mathbf{R}_{22}}\right) \\
# \\
# \beta = arctan\left(\frac{sin(\beta)}{cos(\beta)}\right) = arctan\left(\frac{\mathbf{R}_{20}}{\sqrt{\mathbf{R}_{00}^2+\mathbf{R}_{10}^2}}\right) \\
# \\
# \gamma = arctan\left(\frac{sin(\gamma)}{cos(\gamma)}\right) = arctan\left(\frac{-\mathbf{R}_{10}}{\;\;\;\mathbf{R}_{00}}\right)
# \end{array} $$
#
# Note that we prefer to use the mathematical function `arctan` rather than simply `arcsin` because the latter cannot for example distinguish $45^o$ from $135^o$ and also for better numerical accuracy. See the text [Angular kinematics in a plane (2D)](http://nbviewer.ipython.org/github/demotu/BMC/blob/master/notebooks/AngularKinematics2D.ipynb) for more on these issues.
#
# And here is a Python function to compute the Euler angles of rotations from the Global to the local coordinate system for the $xyz$ Cardan sequence:
def euler_angles_from_rot_xyz(rot_matrix, unit='deg'):
    """Compute Euler (Cardan) angles from a rotation matrix for the xyz sequence.

    Parameters
    ----------
    rot_matrix : array_like, at least (3, 3)
        Rotation matrix (local w.r.t. Global) for the xyz sequence;
        only the upper-left 3x3 block is used.
    unit : str, optional
        'deg' (default) returns the angles in degrees; anything else, radians.

    Returns
    -------
    numpy.ndarray, shape (3,)
        The angles (alpha, beta, gamma) of the xyz Cardan sequence.
    """
    import numpy as np
    # np.asarray copies only when needed; the original np.array(..., copy=False)
    # raises a ValueError on NumPy >= 2.0 whenever a copy is actually required.
    R = np.asarray(rot_matrix, dtype=np.float64)[:3, :3]
    angles = np.zeros(3)
    # arctan2 (not arcsin) resolves the correct quadrant and is numerically
    # more robust near +-90 degrees.
    angles[0] = np.arctan2(-R[2, 1], R[2, 2])
    angles[1] = np.arctan2( R[2, 0], np.sqrt(R[0, 0]**2 + R[1, 0]**2))
    angles[2] = np.arctan2(-R[1, 0], R[0, 0])
    if unit[:3].lower() == 'deg':  # convert from rad to degree
        angles = np.rad2deg(angles)
    return angles
# For instance, consider sequential rotations of 45$^o$ around $x,y,z$. The resultant rotation matrix is:
# Numeric (Rn) and algebraic (Ra) rotation matrices for 45 deg rotations around x, y, z
Ra, Rn = euler_rotmat(order='xyz', frame='local', angles=[45, 45, 45], showA=False)
# Let's check that calculating back the Cardan angles from this rotation matrix using the `euler_angles_from_rot_xyz()` function:
# (should recover the three 45 deg angles)
euler_angles_from_rot_xyz(Rn, unit='deg')
# We could implement a function to calculate the Euler angles for any of the 12 sequences (in fact, plus another 12 sequences if we consider all the rotations from and to the two coordinate systems), but this is tedious. There is a smarter solution using the concept of [quaternion](http://en.wikipedia.org/wiki/Quaternion), but we won't see that now.
# Let's see a problem with using Euler angles known as gimbal lock.
# ### Gimbal lock
#
# [Gimbal lock](http://en.wikipedia.org/wiki/Gimbal_lock) is the loss of one degree of freedom in a three-dimensional coordinate system that occurs when an axis of rotation is placed parallel with another previous axis of rotation and two of the three rotations will be around the same direction given a certain convention of the Euler angles. This "locks" the system into rotations in a degenerate two-dimensional space. The system is not really locked in the sense it can't be moved or reach the other degree of freedom, but it will need an extra rotation for that.
# For instance, let's look at the $zxz$ sequence of rotations by the angles $\alpha, \beta, \gamma$:
#
# $$ \begin{array}{l l}
# \mathbf{R}_{zxz} & = \mathbf{R_{z}} \mathbf{R_{x}} \mathbf{R_{z}} \\
# \\
# & =
# \begin{bmatrix}
# cos\gamma & sin\gamma & 0\\
# -sin\gamma & cos\gamma & 0 \\
# 0 & 0 & 1
# \end{bmatrix}
# \begin{bmatrix}
# 1 & 0 & 0 \\
# 0 & cos\beta & sin\beta \\
# 0 & -sin\beta & cos\beta
# \end{bmatrix}
# \begin{bmatrix}
# cos\alpha & sin\alpha & 0\\
# -sin\alpha & cos\alpha & 0 \\
# 0 & 0 & 1
# \end{bmatrix}
# \end{array} $$
#
# Which results in:
# Symbols for the first, second, and third rotation angles
a, b, g = sym.symbols('alpha, beta, gamma')
# Elemental rotation matrices of xyz (local):
# Rz uses the first angle (alpha), Rx the second (beta), Rz2 the third (gamma)
Rz = sym.Matrix([[cos(a), sin(a), 0], [-sin(a), cos(a), 0], [0, 0, 1]])
Rx = sym.Matrix([[1, 0, 0], [0, cos(b), sin(b)], [0, -sin(b), cos(b)]])
Rz2 = sym.Matrix([[cos(g), sin(g), 0], [-sin(g), cos(g), 0], [0, 0, 1]])
# Rotation matrix for the zxz sequence (product taken right to left:
# the first rotation, Rz, is the rightmost factor):
Rzxz = Rz2*Rx*Rz
Math(sym.latex(r'\mathbf{R}_{zxz}=') + sym.latex(Rzxz, mat_str='matrix'))
#
# Let's examine what happens with this rotation matrix when the rotation around the second axis ($x$) by $\beta$ is zero:
#
# $$ \begin{array}{l l}
# \mathbf{R}_{zxz}(\alpha, \beta=0, \gamma) =
# \begin{bmatrix}
# cos\gamma & sin\gamma & 0\\
# -sin\gamma & cos\gamma & 0 \\
# 0 & 0 & 1
# \end{bmatrix}
# \begin{bmatrix}
# 1 & 0 & 0 \\
# 0 & 1 & 0 \\
# 0 & 0 & 1
# \end{bmatrix}
# \begin{bmatrix}
# cos\alpha & sin\alpha & 0\\
# -sin\alpha & cos\alpha & 0 \\
# 0 & 0 & 1
# \end{bmatrix}
# \end{array} $$
#
# The second matrix is the identity matrix and has no effect on the product of the matrices, which will be:
# With beta = 0 the middle (Rx) matrix is the identity, so only the two z rotations remain
Rzxz = Rz2*Rz
# Label fixed: this is the zxz sequence (was mislabeled R_{xyz})
Math(sym.latex(r'\mathbf{R}_{zxz}(\alpha, \beta=0, \gamma)=') + \
     sym.latex(Rzxz, mat_str='matrix'))
# Which simplifies to:
# (the two z rotations collapse into a single rotation by alpha + gamma)
Rzxz = sym.simplify(Rzxz)
Math(sym.latex(r'\mathbf{R}_{zxz}(\alpha, \beta=0, \gamma)=') + \
     sym.latex(Rzxz, mat_str='matrix'))
# Despite different values of $\alpha$ and $\gamma$ the result is a single rotation around the $z$ axis given by the sum $\alpha+\gamma$. In this case, of the three degrees of freedom one was lost (the other degree of freedom was set by $\beta=0$). For movement analysis, this means for example that one angle will be undetermined because everything we know is the sum of the two angles obtained from the rotation matrix. We can set the unknown angle to zero but this is arbitrary.
#
# In fact, we already dealt with another example of gimbal lock when we looked at the $xyz$ sequence with rotations by $90^o$. See the figure representing these rotations again and perceive that the first and third rotations were around the same axis because the second rotation was by $90^o$. Let's do the matrix multiplication replacing only the second angle by $90^o$ (using the `euler_rotmat.py` function):
# With the second angle fixed at 90 deg, the first and third rotations act around the same direction (gimbal lock)
Ra, Rn = euler_rotmat(order='xyz', frame='local', angles=[None, 90., None], showA=False)
# Once again, one degree of freedom was lost and we will not be able to uniquely determine the three angles for the given rotation matrix and sequence.
#
# Possible solutions to avoid the gimbal lock are: choose a different sequence; do not rotate the system by the angle that puts the system in gimbal lock (in the examples above, avoid $\beta=90^o$); or add an extra fourth parameter in the description of the rotation angles.
#
# But if we have a physical system where we measure or specify exactly three Euler angles in a fixed sequence to describe or control it, and we can't avoid the system to assume certain angles, then we might have to say "Houston, we have a problem". A famous situation where the problem occurred was during the Apollo 13 mission. This is an actual conversation between crew and mission control during the Apollo 13 mission (Corke, 2011):
#
# >Mission clock: 02 08 12 47
# Flight: *Go, Guidance.*
# Guido: *He’s getting close to gimbal lock there.*
# Flight: *Roger. CapCom, recommend he bring up C3, C4, B3, B4, C1 and C2 thrusters, and advise he’s getting close to gimbal lock.*
# CapCom: *Roger.*
#
# *Of note, it was not a gimbal lock that caused the accident with the Apollo 13 mission, the problem was an oxygen tank explosion.*
# ## Determination of the rotation matrix
#
# A typical way to determine the rotation matrix for a rigid body in biomechanics is to use motion analysis to measure the position of at least three non-colinear markers placed on the rigid body, and then calculate a basis with these positions, analogue to what we have described in the notebook [Rigid-body transformations in a plane (2D)](http://nbviewer.ipython.org/github/demotu/BMC/blob/master/notebooks/Transformation2D.ipynb).
#
# ### Basis
#
# If we have the position of three markers: **m1**, **m2**, **m3**, a basis (formed by three orthogonal versors) can be found as:
#
# - First axis, **v1**, the vector **m2-m1**;
# - Second axis, **v2**, the cross product between the vectors **v1** and **m3-m1**;
# - Third axis, **v3**, the cross product between the vectors **v1** and **v2**.
#
# Then, each of these vectors is normalized, resulting in three orthogonal versors.
#
# For example, given the positions m1 = [1,0,0], m2 = [0,1,0], m3 = [0,0,1], a basis can be found:
# +
# Marker positions in the Global coordinate system
m1 = np.array([1, 0, 0])
m2 = np.array([0, 1, 0])
m3 = np.array([0, 0, 1])
# Build three mutually orthogonal axes from the markers:
# first axis along m2-m1, second perpendicular to the marker plane,
# third perpendicular to the first two.
v1 = m2 - m1
v2 = np.cross(v1, m3 - m1)
v3 = np.cross(v1, v2)
print('Versors:')
# Normalize each axis to unit length (versors)
v1, v2, v3 = (v / np.linalg.norm(v) for v in (v1, v2, v3))
print('v1 =', v1)
print('v2 =', v2)
print('v3 =', v3)
# Cross products of unit orthogonal vectors have norm 1 (sanity check)
print('\nNorm of each versor:\n',
      np.linalg.norm(np.cross(v1, v2)),
      np.linalg.norm(np.cross(v1, v3)),
      np.linalg.norm(np.cross(v2, v3)))
# -
# Remember from the text [Rigid-body transformations in a plane (2D)](http://nbviewer.ipython.org/github/demotu/BMC/blob/master/notebooks/Transformation2D.ipynb) that the versors of this basis are the columns of the $\mathbf{R_{Gl}}$ and the rows of the $\mathbf{R_{lG}}$ rotation matrices, for instance:
# Versors as rows -> rotation matrix from the Global to the local system
RlG = np.array([v1, v2, v3])
print('Rotation matrix from Global to local coordinate system:\n', RlG)
# And the corresponding angles of rotation using the $xyz$ sequence are:
euler_angles_from_rot_xyz(RlG)  # Cardan angles (deg) of this arbitrary basis
# These angles don't mean anything now because they are angles of the axes of the arbitrary basis we computed. In biomechanics, if we want an anatomical interpretation of the coordinate system orientation, we define the versors of the basis oriented with anatomical axes (e.g., for the shoulder, one versor would be aligned with the long axis of the upperarm).
# We will see how to perform this computation later. Now we will combine translation and rotation in a single transformation.
# ## Translation and Rotation
#
# Consider the case where the local coordinate system is translated and rotated in relation to the Global coordinate system as illustrated in the next figure.
# <br>
# <figure><img src='./../images/transrot3D.png' alt='translation and rotation 3D'/> <figcaption><center><i>Figure. A point in three-dimensional space represented in two coordinate systems, with one system translated and rotated.</i></center></figcaption> </figure>
#
# The position of point $\mathbf{P}$ originally described in the local coordinate system, but now described in the Global coordinate system in vector form is:
#
# $$ \mathbf{P_G} = \mathbf{L_G} + \mathbf{R_{Gl}}\mathbf{P_l} $$
#
# This means that we first *disrotate* the local coordinate system and then correct for the translation between the two coordinate systems. Note that we can't invert this order: the point position is expressed in the local coordinate system and we can't add this vector to another vector expressed in the Global coordinate system, first we have to convert the vectors to the same coordinate system.
#
# If now we want to find the position of a point at the local coordinate system given its position in the Global coordinate system, the rotation matrix and the translation vector, we have to invert the expression above:
#
# $$ \begin{array}{l l}
# \mathbf{P_G} = \mathbf{L_G} + \mathbf{R_{Gl}}\mathbf{P_l} \implies \\
# \\
# \mathbf{R_{Gl}^{-1}}\cdot\mathbf{P_G} = \mathbf{R_{Gl}^{-1}}\left(\mathbf{L_G} + \mathbf{R_{Gl}}\mathbf{P_l}\right) \implies \\
# \\
# \mathbf{R_{Gl}^{-1}}\mathbf{P_G} = \mathbf{R_{Gl}^{-1}}\mathbf{L_G} + \mathbf{R_{Gl}^{-1}}\mathbf{R_{Gl}}\mathbf{P_l} \implies \\
# \\
# \mathbf{P_l} = \mathbf{R_{Gl}^{-1}}\left(\mathbf{P_G}-\mathbf{L_G}\right) = \mathbf{R_{Gl}^T}\left(\mathbf{P_G}-\mathbf{L_G}\right) \;\;\;\;\; \text{or} \;\;\;\;\; \mathbf{P_l} = \mathbf{R_{lG}}\left(\mathbf{P_G}-\mathbf{L_G}\right)
# \end{array} $$
#
# The expression above indicates that to perform the inverse operation, to go from the Global to the local coordinate system, we first translate and then rotate the coordinate system.
# ### Transformation matrix
#
# It is possible to combine the translation and rotation operations in only one matrix, called the transformation matrix:
#
# $$ \begin{bmatrix}
# \mathbf{P_X} \\
# \mathbf{P_Y} \\
# \mathbf{P_Z} \\
# 1
# \end{bmatrix} =
# \begin{bmatrix}
# . & . & . & \mathbf{L_{X}} \\
# . & \mathbf{R_{Gl}} & . & \mathbf{L_{Y}} \\
# . & . & . & \mathbf{L_{Z}} \\
# 0 & 0 & 0 & 1
# \end{bmatrix}
# \begin{bmatrix}
# \mathbf{P}_x \\
# \mathbf{P}_y \\
# \mathbf{P}_z \\
# 1
# \end{bmatrix} $$
#
# Or simply:
#
# $$ \mathbf{P_G} = \mathbf{T_{Gl}}\mathbf{P_l} $$
#
# Remember that in general the transformation matrix is not orthonormal, i.e., its inverse is not equal to its transpose.
#
# The inverse operation, to express the position at the local coordinate system in terms of the Global reference system, is:
#
# $$ \mathbf{P_l} = \mathbf{T_{Gl}^{-1}}\mathbf{P_G} $$
#
# And in matrix form:
#
# $$ \begin{bmatrix}
# \mathbf{P_x} \\
# \mathbf{P_y} \\
# \mathbf{P_z} \\
# 1
# \end{bmatrix} =
# \begin{bmatrix}
# \cdot & \cdot & \cdot & \cdot \\
# \cdot & \mathbf{R^{-1}_{Gl}} & \cdot & -\mathbf{R^{-1}_{Gl}}\:\mathbf{L_G} \\
# \cdot & \cdot & \cdot & \cdot \\
# 0 & 0 & 0 & 1
# \end{bmatrix}
# \begin{bmatrix}
# \mathbf{P_X} \\
# \mathbf{P_Y} \\
# \mathbf{P_Z} \\
# 1
# \end{bmatrix} $$
# ### Example with actual motion analysis data
#
# *The data for this example is taken from page 183 of David Winter's book.*
# Consider the following marker positions placed on a leg (described in the laboratory coordinate system with coordinates $x, y, z$ in cm, the $x$ axis points forward and the $y$ axes points upward): lateral malleolus (**lm** = [2.92, 10.10, 18.85]), medial malleolus (**mm** = [2.71, 10.22, 26.52]), fibular head (**fh** = [5.05, 41.90, 15.41]), and medial condyle (**mc** = [8.29, 41.88, 26.52]). Define the ankle joint center as the centroid between the **lm** and **mm** markers and the knee joint center as the centroid between the **fh** and **mc** markers. An anatomical coordinate system for the leg can be defined as: the quasi-vertical axis ($y$) passes through the ankle and knee joint centers; a temporary medio-lateral axis ($z$) passes through the two markers on the malleolus, an anterior-posterior as the cross product between the two former calculated orthogonal axes, and the origin at the ankle joint center.
# a) Calculate the anatomical coordinate system for the leg as described above.
# b) Calculate the rotation matrix and the translation vector for the transformation from the anatomical to the laboratory coordinate system.
# c) Calculate the position of each marker and of each joint center at the anatomical coordinate system.
# d) Calculate the Cardan angles using the $zxy$ sequence for the orientation of the leg with respect to the laboratory (but remember that the letters chosen to refer to axes are arbitrary, what matters is the directions they represent).
# calculation of the joint centers
# Marker positions in the laboratory coordinate system (cm), Winter (2009) p. 183
mm = np.array([2.71, 10.22, 26.52])   # medial malleolus
lm = np.array([2.92, 10.10, 18.85])   # lateral malleolus
fh = np.array([5.05, 41.90, 15.41])   # fibular head
mc = np.array([8.29, 41.88, 26.52])   # medial condyle
# Joint centers as centroids of the corresponding marker pairs
ajc = (mm + lm)/2
kjc = (fh + mc)/2
print('Position of the ankle joint center:', ajc)  # fixed typo: "Poition"
print('Position of the knee joint center:', kjc)   # fixed typo: "Poition"
# calculation of the anatomical coordinate system axes (basis)
y = kjc - ajc                 # quasi-vertical axis through the joint centers
x = np.cross(y, mm - lm)      # anterior-posterior axis, perpendicular to y and the malleoli line
z = np.cross(x, y)            # medio-lateral axis, perpendicular to x and y
print('Versors:')
x = x/np.linalg.norm(x)
y = y/np.linalg.norm(y)
z = z/np.linalg.norm(z)
print('x =', x)
print('y =', y)
print('z =', z)
Oleg = ajc                    # origin of the anatomical system at the ankle joint center
print('\nOrigin =', Oleg)
# Rotation matrices
RGl = np.array([x, y, z]).T   # local-to-Global: versors as columns
print('Rotation matrix from the anatomical to the laboratory coordinate system:\n', RGl)
RlG = RGl.T                   # Global-to-local: orthonormal, so inverse == transpose
print('\nRotation matrix from the laboratory to the anatomical coordinate system:\n', RlG)
# Translational vector
OG = np.array([0, 0, 0])  # Laboratory coordinate system origin
LG = Oleg - OG
print('Translational vector from the anatomical to the laboratory coordinate system:\n', LG)
# To get the coordinates from the laboratory (global) coordinate system to the anatomical (local) coordinate system:
#
# $$ \mathbf{P_l} = \mathbf{R_{lG}}\left(\mathbf{P_G}-\mathbf{L_G}\right) $$
# position of each marker and of each joint center at the anatomical coordinate system
# P_l = R_lG (P_G - L_G): translate to the anatomical origin, then rotate.
# Fixed: np.dot(v, RlG) with a 1-D v computes RlG.T @ v (i.e., RGl @ v), the
# inverse transformation; the correct form is np.dot(RlG, v).
mml = np.dot(RlG, mm - LG)
lml = np.dot(RlG, lm - LG)
fhl = np.dot(RlG, fh - LG)
mcl = np.dot(RlG, mc - LG)
ajcl = np.dot(RlG, ajc - LG)
kjcl = np.dot(RlG, kjc - LG)
print('Coordinates of mm in the anatomical system:\n', mml)
print('Coordinates of lm in the anatomical system:\n', lml)
print('Coordinates of fh in the anatomical system:\n', fhl)
print('Coordinates of mc in the anatomical system:\n', mcl)
print('Coordinates of kjc in the anatomical system:\n', kjcl)
print('Coordinates of ajc in the anatomical system (origin):\n', ajcl)
# ## Problems
#
# 1. For the example about how the order of rotations of a rigid body affects the orientation shown in a figure above, deduce the rotation matrices for each of the 4 cases shown in the figure. For the first two cases, deduce the rotation matrices from the global to the local coordinate system and for the other two examples, deduce the rotation matrices from the local to the global coordinate system.
#
# 2. Consider the data from problem 7 in the notebook [Frame of reference](http://nbviewer.ipython.org/github/demotu/BMC/blob/master/notebooks/ReferenceFrame.ipynb) where the following anatomical landmark positions are given (units in meters): RASIS=[0.5,0.8,0.4], LASIS=[0.55,0.78,0.1], RPSIS=[0.3,0.85,0.2], and LPSIS=[0.29,0.78,0.3]. Deduce the rotation matrices for the global to anatomical coordinate system and for the anatomical to global coordinate system.
#
# 3. For the data from the last example, calculate the Cardan angles using the $zxy$ sequence for the orientation of the leg with respect to the laboratory (but remember that the letters chosen to refer to axes are arbitrary, what matters is the directions they represent).
# ## References
#
# - <NAME> (2011) [Robotics, Vision and Control: Fundamental Algorithms in MATLAB](http://www.petercorke.com/RVC/). Springer-Verlag Berlin.
# - <NAME>, <NAME>, <NAME>, <NAME> (2013) [Research Methods in Biomechanics](http://books.google.com.br/books?id=gRn8AAAAQBAJ). 2nd Edition. Human Kinetics.
# - [Maths - Euler Angles](http://www.euclideanspace.com/maths/geometry/rotations/euler/).
# - <NAME>, <NAME>, <NAME> (1994) [A Mathematical Introduction to Robotic Manipulation](http://www.cds.caltech.edu/~murray/mlswiki/index.php/Main_Page). Boca Raton, CRC Press.
# - <NAME>, <NAME> (2013) [Introduction to Statics and Dynamics](http://ruina.tam.cornell.edu/Book/index.html). Oxford University Press.
# - <NAME>, <NAME>, <NAME>, <NAME> (2009) [Robotics - Modelling, Planning and Control](http://books.google.com.br/books/about/Robotics.html?hl=pt-BR&id=jPCAFmE-logC). Springer-Verlag London.
# - Winter DA (2009) [Biomechanics and motor control of human movement](http://books.google.com.br/books?id=_bFHL08IWfwC). 4 ed. Hoboken, USA: Wiley.
# - Zatsiorsky VM (1997) [Kinematics of Human Motion](http://books.google.com.br/books/about/Kinematics_of_Human_Motion.html?id=Pql_xXdbrMcC&redir_esc=y). Champaign, Human Kinetics.
# ## Function `euler_rotmat.py`
# +
# # %load ./../functions/euler_rotmat.py
# #!/usr/bin/env python
"""Euler rotation matrix given sequence, frame, and angles."""
from __future__ import division, print_function
__author__ = '<NAME>, https://github.com/demotu/BMC'
__version__ = 'euler_rotmat.py v.1 2014/03/10'
def euler_rotmat(order='xyz', frame='local', angles=None, unit='deg',
                 str_symbols=None, showA=True, showN=True):
    """Euler rotation matrix given sequence, frame, and angles.

    This function calculates the algebraic rotation matrix (3x3) for a given
    sequence ('order' argument) of up to three elemental rotations of a given
    coordinate system ('frame' argument) around another coordinate system, the
    Euler (or Eulerian) angles [1]_.

    This function also calculates the numerical values of the rotation matrix
    when numerical values for the angles are given for each rotation axis.
    Use None as value if the rotation angle for the particular axis is unknown.

    The symbols for the angles are: alpha, beta, and gamma for the first,
    second, and third rotations, respectively.
    The matrix product is calculated from right to left and in the specified
    sequence for the Euler angles. The first letter will be the first rotation.

    The function will print and return the algebraic rotation matrix and the
    numerical rotation matrix if angles were given.

    Parameters
    ----------
    order : string, optional (default = 'xyz')
        Sequence for the Euler angles, any combination of the letters
        x, y, and z with 1 to 3 letters is accepted to denote the
        elemental rotations. The first letter will be the first rotation.

    frame : string, optional (default = 'local')
        Coordinate system for which the rotations are calculated.
        Valid values are 'local' or 'global'.

    angles : list, array, or bool, optional (default = None)
        Numeric values of the rotation angles ordered as the 'order'
        parameter. Enter None for a rotation with unknown value.

    unit : str, optional (default = 'deg')
        Unit of the input angles.

    str_symbols : list of strings, optional (default = None)
        New symbols for the angles, for instance, ['theta', 'phi', 'psi']

    showA : bool, optional (default = True)
        True (1) displays the Algebraic rotation matrix in rich format.
        False (0) to not display.

    showN : bool, optional (default = True)
        True (1) displays the Numeric rotation matrix in rich format.
        False (0) to not display.

    Returns
    -------
    R : Matrix Sympy object
        Rotation matrix (3x3) in algebraic format.

    Rn : Numpy array or Matrix Sympy object (only if angles are given)
        Numeric rotation matrix (if values for all angles were given) or
        an algebraic matrix with some of the algebraic angles substituted
        by the corresponding given numeric values.

    Notes
    -----
    This code uses Sympy, the Python library for symbolic mathematics, to
    calculate the algebraic rotation matrix and shows this matrix in latex form
    possibly for using with the IPython Notebook, see [1]_.

    References
    ----------
    .. [1] http://nbviewer.ipython.org/github/duartexyz/BMC/blob/master/Transformation3D.ipynb

    Examples
    --------
    >>> # import function
    >>> from euler_rotmat import euler_rotmat
    >>> # Default options: xyz sequence, local frame and show matrix
    >>> R = euler_rotmat()
    >>> # XYZ sequence (around global (fixed) coordinate system)
    >>> R = euler_rotmat(frame='global')
    >>> # Enter numeric values for all angles and show both matrices
    >>> R, Rn = euler_rotmat(angles=[90, 90, 90])
    >>> # show what is returned
    >>> euler_rotmat(angles=[90, 90, 90])
    >>> # show only the rotation matrix for the elemental rotation at x axis
    >>> R = euler_rotmat(order='x')
    >>> # zxz sequence and numeric value for only one angle
    >>> R, Rn = euler_rotmat(order='zxz', angles=[None, 0, None])
    >>> # input values in radians:
    >>> import numpy as np
    >>> R, Rn = euler_rotmat(order='zxz', angles=[None, np.pi, None], unit='rad')
    >>> # shows only the numeric matrix
    >>> R, Rn = euler_rotmat(order='zxz', angles=[90, 0, None], showA='False')
    >>> # Change the angles' symbols
    >>> R = euler_rotmat(order='zxz', str_symbols=['theta', 'phi', 'psi'])
    >>> # Negativate the angles' symbols
    >>> R = euler_rotmat(order='zxz', str_symbols=['-theta', '-phi', '-psi'])
    >>> # all algebraic matrices for all possible sequences for the local frame
    >>> s=['xyz','xzy','yzx','yxz','zxy','zyx','xyx','xzx','yzy','yxy','zxz','zyz']
    >>> for seq in s: R = euler_rotmat(order=seq)
    >>> # all algebraic matrices for all possible sequences for the global frame
    >>> for seq in s: R = euler_rotmat(order=seq, frame='global')
    """

    import numpy as np
    import sympy as sym
    try:
        # rich (LaTeX) display is only available inside IPython/Jupyter
        from IPython.core.display import Math, display
        ipython = True
    except:
        ipython = False

    # None entries become nan in the float array, so unknown angles
    # can be detected below with np.isnan
    angles = np.asarray(np.atleast_1d(angles), dtype=np.float64)
    if ~np.isnan(angles).all():
        if len(order) != angles.size:
            raise ValueError("Parameters 'order' and 'angles' (when " +
                             "different from None) must have the same size.")

    x, y, z = sym.symbols('x, y, z')
    sig = [1, 1, 1]  # sign of each angle (a leading '-' in a symbol negates it)
    if str_symbols is None:
        a, b, g = sym.symbols('alpha, beta, gamma')
    else:
        s = str_symbols
        if s[0][0] == '-': s[0] = s[0][1:]; sig[0] = -1
        if s[1][0] == '-': s[1] = s[1][1:]; sig[1] = -1
        if s[2][0] == '-': s[2] = s[2][1:]; sig[2] = -1
        a, b, g = sym.symbols(s)
    # maps axis letter -> placeholder symbol and rotation index -> angle symbol
    var = {'x': x, 'y': y, 'z': z, 0: a, 1: b, 2: g}
    # Elemental rotation matrices for xyz (local)
    cos, sin = sym.cos, sym.sin
    Rx = sym.Matrix([[1, 0, 0], [0, cos(x), sin(x)], [0, -sin(x), cos(x)]])
    Ry = sym.Matrix([[cos(y), 0, -sin(y)], [0, 1, 0], [sin(y), 0, cos(y)]])
    Rz = sym.Matrix([[cos(z), sin(z), 0], [-sin(z), cos(z), 0], [0, 0, 1]])

    if frame.lower() == 'global':
        # global (fixed) frame rotations are the transposes of the local ones;
        # uppercase letters flag the global sequence in the displayed LaTeX
        Rs = {'x': Rx.T, 'y': Ry.T, 'z': Rz.T}
        order = order.upper()
    else:
        Rs = {'x': Rx, 'y': Ry, 'z': Rz}
        order = order.lower()

    R = Rn = sym.Matrix(sym.Identity(3))
    str1 = r'\mathbf{R}_{%s}( ' %frame # last space needed for order=''
    #str2 = [r'\%s'%var[0], r'\%s'%var[1], r'\%s'%var[2]]
    str2 = [1, 1, 1]
    # build the product right to left: the first letter in 'order'
    # is the first (rightmost) rotation
    for i in range(len(order)):
        Ri = Rs[order[i].lower()].subs(var[order[i].lower()], sig[i] * var[i])
        R = Ri * R
        if sig[i] > 0:
            str2[i] = '%s:%s' %(order[i], sym.latex(var[i]))
        else:
            str2[i] = '%s:-%s' %(order[i], sym.latex(var[i]))
        str1 = str1 + str2[i] + ','
        if ~np.isnan(angles).all() and ~np.isnan(angles[i]):
            # substitute the numeric value (in rad) for this angle in Rn
            if unit[:3].lower() == 'deg':
                angles[i] = np.deg2rad(angles[i])
            Rn = Ri.subs(var[i], angles[i]) * Rn
            #Rn = sym.lambdify(var[i], Ri, 'numpy')(angles[i]) * Rn
            str2[i] = str2[i] + '=%.0f^o' %np.around(np.rad2deg(angles[i]), 0)
        else:
            Rn = Ri * Rn
    Rn = sym.simplify(Rn) # for trigonometric relations

    try:
        # nsimplify only works if there are symbols
        Rn2 = sym.latex(sym.nsimplify(Rn, tolerance=1e-8).n(chop=True, prec=4))
    except:
        Rn2 = sym.latex(Rn.n(chop=True, prec=4))
        # there are no symbols, pass it as Numpy array
        Rn = np.asarray(Rn)

    if showA and ipython:
        display(Math(str1[:-1] + ') =' + sym.latex(R, mat_str='matrix')))

    if showN and ~np.isnan(angles).all() and ipython:
        str2 = ',\;'.join(str2[:angles.size])
        display(Math(r'\mathbf{R}_{%s}(%s)=%s' %(frame, str2, Rn2)))

    if np.isnan(angles).all():
        return R
    else:
        return R, Rn
| notebooks/Transformation3D.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="MGSXn0USOhtu" pycharm={"name": "#%% md\n"}
# # Evaluation of a QA System
#
# [](https://colab.research.google.com/github/deepset-ai/haystack/blob/master/tutorials/Tutorial5_Evaluation.ipynb)
#
# To be able to make a statement about the performance of a question-answering system, it is important to evaluate it. Furthermore, evaluation allows us to determine which parts of the system can be improved.
# -
# ### Prepare environment
#
# #### Colab: Enable the GPU runtime
# Make sure you enable the GPU runtime to experience decent speed in this tutorial.
# **Runtime -> Change Runtime type -> Hardware accelerator -> GPU**
#
# <img src="https://raw.githubusercontent.com/deepset-ai/haystack/master/docs/_src/img/colab_gpu_runtime.jpg">
# + pycharm={"name": "#%%\n"}
# Make sure you have a GPU running
# !nvidia-smi
# + [markdown] colab_type="text" id="E6H_7lAmOht8"
# ## Start an Elasticsearch server
# You can start Elasticsearch on your local machine instance using Docker. If Docker is not readily available in your environment (eg., in Colab notebooks), then you can manually download and execute Elasticsearch from source.
# + colab={} colab_type="code" id="vgmFOp82Oht_" pycharm={"name": "#%%\n"}
# Install the latest release of Haystack in your own environment
# #! pip install farm-haystack
# Install the latest master of Haystack
# !pip install grpcio-tools==1.34.1
# !pip install git+https://github.com/deepset-ai/haystack.git
# + colab={} colab_type="code" id="tNoaWcDKOhuL" pycharm={"is_executing": true, "name": "#%%\n"}
# In Colab / No Docker environments: Start Elasticsearch from source
# ! wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.9.2-linux-x86_64.tar.gz -q
# ! tar -xzf elasticsearch-7.9.2-linux-x86_64.tar.gz
# ! chown -R daemon:daemon elasticsearch-7.9.2
import os
from subprocess import Popen, PIPE, STDOUT
# Launch Elasticsearch in the background; stdout and stderr are merged into one pipe.
# preexec_fn drops the child process to uid 1 before exec — NOTE(review): presumably
# because Elasticsearch refuses to run as root (e.g. in Colab); assumes uid 1
# ("daemon") exists in the target environment — confirm.
es_server = Popen(['elasticsearch-7.9.2/bin/elasticsearch'],
                   stdout=PIPE, stderr=STDOUT,
                   preexec_fn=lambda: os.setuid(1)  # as daemon
                  )
# wait until ES has started
# ! sleep 30
# + colab={"base_uri": "https://localhost:8080/", "height": 54} colab_type="code" id="w0MHgxrYOhur" outputId="9e530bf3-44b1-4ea1-86e2-8be0bb9163ad" pycharm={"name": "#%%\n"}
from farm.utils import initialize_device_settings

# Select the compute device (use_cuda=True requests GPU; see FARM docs for fallback behavior)
device, n_gpu = initialize_device_settings(use_cuda=True)
# + colab={"base_uri": "https://localhost:8080/", "height": 87} colab_type="code" id="tTXxr6TAOhuz" outputId="99a4e32b-e0ec-4c94-dab3-1a09c53d4dc1" pycharm={"name": "#%%\n"}
from haystack.preprocessor.utils import fetch_archive_from_http

# Download evaluation data, which is a subset of Natural Questions development set containing 50 documents
doc_dir = "../data/nq"
s3_url = "https://s3.eu-central-1.amazonaws.com/deepset.ai-farm-qa/datasets/nq_dev_subset_v2.json.zip"
# Fetch the zip archive and unpack it into doc_dir
fetch_archive_from_http(url=s3_url, output_dir=doc_dir)
# -
# make sure these indices do not collide with existing ones, the indices will be wiped clean before data is inserted
doc_index = "tutorial5_docs"      # Elasticsearch index holding the evaluation documents
label_index = "tutorial5_labels"  # Elasticsearch index holding the gold labels
# + colab={} colab_type="code" id="B_NEtezLOhu5" pycharm={"name": "#%%\n"}
# Connect to Elasticsearch
from haystack.document_store.elasticsearch import ElasticsearchDocumentStore

# create_index=False: do not create the default index here (the tutorial indices
# are created by add_eval_data below); document embeddings (768-dim) are stored
# in the "emb" field, which is excluded from returned metadata
document_store = ElasticsearchDocumentStore(host="localhost", username="", password="", index="document",
                                            create_index=False, embedding_field="emb",
                                            embedding_dim=768, excluded_meta_data=["emb"])
# + colab={"base_uri": "https://localhost:8080/", "height": 71} colab_type="code" id="bRFsQUAJOhu_" outputId="56b84800-c524-4418-9664-e2720b66a1af" pycharm={"name": "#%%\n"}
from haystack.preprocessor import PreProcessor

# Add evaluation data to Elasticsearch Document Store
# We first delete the custom tutorial indices to not have duplicate elements
# and also split our documents into shorter passages using the PreProcessor
preprocessor = PreProcessor(
    split_length=500,                      # target passage length per split
    split_overlap=0,                       # no overlap between consecutive passages
    split_respect_sentence_boundary=False, # allow splits mid-sentence
    clean_empty_lines=False,               # keep the raw text untouched
    clean_whitespace=False
)
document_store.delete_all_documents(index=doc_index)
document_store.delete_all_documents(index=label_index)
# Index the SQuAD-style evaluation file: documents and labels go to separate indices
document_store.add_eval_data(
    filename="../data/nq/nq_dev_subset_v2.json",
    doc_index=doc_index,
    label_index=label_index,
    preprocessor=preprocessor
)

# Let's prepare the labels that we need for the retriever and the reader
labels = document_store.get_all_labels_aggregated(index=label_index)
# + [markdown] colab_type="text" id="gy8YwmSYOhvE" pycharm={"name": "#%% md\n"}
# ## Initialize components of QA-System
# + colab={} colab_type="code" id="JkhaPMIJOhvF" pycharm={"name": "#%%\n"}
# Initialize Retriever
from haystack.retriever.sparse import ElasticsearchRetriever

# Sparse retriever using Elasticsearch's BM25 ranking over the document store
retriever = ElasticsearchRetriever(document_store=document_store)

# Alternative: Evaluate DensePassageRetriever
# Note, that DPR works best when you index short passages < 512 tokens as only those tokens will be used for the embedding.
# Here, for nq_dev_subset_v2.json we have avg. num of tokens = 5220(!).
# DPR still outperforms Elastic's BM25 by a small margin here.
# from haystack.retriever.dense import DensePassageRetriever
# retriever = DensePassageRetriever(document_store=document_store,
#                                   query_embedding_model="facebook/dpr-question_encoder-single-nq-base",
#                                   passage_embedding_model="facebook/dpr-ctx_encoder-single-nq-base",
#                                   use_gpu=True,
#                                   embed_title=True,
#                                   max_seq_len=256,
#                                   batch_size=16,
#                                   remove_sep_tok_from_untitled_passages=True)
#document_store.update_embeddings(retriever, index=doc_index)
# + colab={"base_uri": "https://localhost:8080/", "height": 725, "referenced_widgets": ["398e8dc496594a5f8e65daecc3ddad4a", "ee436c1e0fb24802b5d0706299ce7e81", "9f5b31e32c5c4398a6eede32431ad55e", "479f015ad2e8412d96a34ddb59a054d1", "<KEY>", "<KEY>", "93e868a4b6384840b3d245391ee2915a", "<KEY>", "18250ec6840147658d1e038138f8aba0", "<KEY>", "49daa700f016433e88b492490b1a8d89", "08adb4accbe649fd9d21da2246c00d63", "c840113abcc44f84a88e6120fe198fba", "<KEY>", "75ed8c01b4ed4c37a024a6c24036e35a", "<KEY>", "ec3c00e615164fb488ebfb51d8ac9d9e", "9da0f76f26294b27982e080b8af6e28b", "ef78454fe89347ea8f6f3148e23440db", "<KEY>", "cb53b988f7df44d29ff7efcb0a236fce", "<KEY>", "<KEY>", "<KEY>", "9597bad322c34d02a0c10dd66b21813e", "62661d74e0ea462cb6b8e574aa17dc2f", "71416028306340ee8987d703b506245b", "100352c2499749e48ed10f3dee4f569d", "dd518bb2a8d141f58118e2cf7e140ffb", "5957d73fcef34606ac8eac654b9584d4", "5c8d0662ec12422a83525292cc6a51ac", "45469f8ddb3b4df8aab3e7ca3fbe6921", "<KEY>", "1314e2ff61264f81acaf0042efd0cf5a", "fce574414e154590a4a642362db98421", "<KEY>", "706c079c7d484975ad69a91b3f95e9f3", "<KEY>", "ef088eda6fe04abe958059620da9b5d0", "e1819597e4d840b19cfa8aba00ef3ef1", "19173b1d00314580a4f40efa0df4b174", "<KEY>", "9da1585dcdd440fa8b2e8351ffeba348", "<KEY>", "01d9cd6656494aef83a3947945c0acdd", "<KEY>", "<KEY>", "<KEY>"]} colab_type="code" id="cW3Ypn_gOhvK" outputId="89ad5598-1017-499f-c986-72bba2a3a6cb" pycharm={"name": "#%%\n"}
# Initialize Reader
from haystack.reader.farm import FARMReader
reader = FARMReader("deepset/roberta-base-squad2", top_k=4, return_no_answer=True)
# + pycharm={"name": "#%%\n"}
from haystack.eval import EvalAnswers, EvalDocuments
# Here we initialize the nodes that perform evaluation
eval_retriever = EvalDocuments()
eval_reader = EvalAnswers()
# + [markdown] colab_type="text" id="qwkBgzh5OhvR" pycharm={"name": "#%% md\n"}
# ## Evaluation of Retriever
# Here we evaluate only the retriever, based on whether the gold_label document is retrieved.
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="YzvLhnx3OhvS" outputId="1d45f072-0ae0-4864-8ccc-aa12303a8d04" pycharm={"name": "#%%\n"}
## Evaluate Retriever on its own
retriever_eval_results = retriever.eval(top_k=20, label_index=label_index, doc_index=doc_index)
## Retriever Recall is the proportion of questions for which the correct document containing the answer is
## among the correct documents
print("Retriever Recall:", retriever_eval_results["recall"])
## Retriever Mean Avg Precision rewards retrievers that give relevant documents a higher rank
print("Retriever Mean Avg Precision:", retriever_eval_results["map"])
# + [markdown] colab_type="text" id="fjZRnB6bOhvW" pycharm={"name": "#%% md\n"}
# ## Evaluation of Reader
# Here we evaluate only the reader in a closed domain fashion i.e. the reader is given one query
# and one document and metrics are calculated on whether the right position in this text is selected by
# the model as the answer span (i.e. SQuAD style)
# + colab={"base_uri": "https://localhost:8080/", "height": 203} colab_type="code" id="Lgsgf4KaOhvY" outputId="24d3755e-bf2e-4396-f1a2-59c925cc54d3" pycharm={"name": "#%%\n"}
# Evaluate Reader on its own
reader_eval_results = reader.eval(document_store=document_store, device=device, label_index=label_index, doc_index=doc_index)
# Evaluation of Reader can also be done directly on a SQuAD-formatted file without passing the data to Elasticsearch
#reader_eval_results = reader.eval_on_file("../data/nq", "nq_dev_subset_v2.json", device=device)
## Reader Top-N-Accuracy is the proportion of predicted answers that match with their corresponding correct answer
print("Reader Top-N-Accuracy:", reader_eval_results["top_n_accuracy"])
## Reader Exact Match is the proportion of questions where the predicted answer is exactly the same as the correct answer
print("Reader Exact Match:", reader_eval_results["EM"])
## Reader F1-Score is the average overlap between the predicted answers and the correct answers
print("Reader F1-Score:", reader_eval_results["f1"])
# + [markdown] colab_type="text" id="7i84KXONOhvc" pycharm={"name": "#%% md\n"}
# ## Evaluation of Retriever and Reader (Open Domain)
# Here we evaluate retriever and reader in open domain fashion i.e. a document is considered
# correctly retrieved if it contains the answer string within it. The reader is evaluated based purely on the
# predicted string, regardless of which document this came from and the position of the extracted span.
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="yLpMHAexOhvd" outputId="fd74be7d-5c8e-4eb9-a653-062427b74347" pycharm={"name": "#%%\n"}
from haystack import Pipeline
# Here is the pipeline definition
p = Pipeline()
p.add_node(component=retriever, name="ESRetriever", inputs=["Query"])
p.add_node(component=eval_retriever, name="EvalRetriever", inputs=["ESRetriever"])
p.add_node(component=reader, name="QAReader", inputs=["EvalRetriever"])
p.add_node(component=eval_reader, name="EvalReader", inputs=["QAReader"])
results = []
# + pycharm={"name": "#%%\n"}
# This is how to run the pipeline
for l in labels:
res = p.run(
query=l.question,
top_k_retriever=10,
labels=l,
top_k_reader=10,
index=doc_index,
)
results.append(res)
# + pycharm={"name": "#%%\n"}
# When we have run evaluation using the pipeline, we can print the results
n_queries = len(labels)
eval_retriever.print()
print()
retriever.print_time()
print()
eval_reader.print(mode="reader")
print()
reader.print_time()
print()
eval_reader.print(mode="pipeline")
# -
# ## About us
#
# This [Haystack](https://github.com/deepset-ai/haystack/) notebook was made with love by [deepset](https://deepset.ai/) in Berlin, Germany
#
# We bring NLP to the industry via open source!
# Our focus: Industry specific language models & large scale QA systems.
#
# Some of our other work:
# - [German BERT](https://deepset.ai/german-bert)
# - [GermanQuAD and GermanDPR](https://deepset.ai/germanquad)
# - [FARM](https://github.com/deepset-ai/FARM)
#
# Get in touch:
# [Twitter](https://twitter.com/deepset_ai) | [LinkedIn](https://www.linkedin.com/company/deepset-ai/) | [Slack](https://haystack.deepset.ai/community/join) | [GitHub Discussions](https://github.com/deepset-ai/haystack/discussions) | [Website](https://deepset.ai)
#
# By the way: [we're hiring!](https://apply.workable.com/deepset/)
| tutorials/Tutorial5_Evaluation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # DATA ANALYTICS LGM VIRTUAL INTERNSHIP PROGRAM 2021
# ## Task: Exploratory Data Analysis on Dataset - Terrorism (1970 - 2017)
#
#
# - ### As a security/defense analyst, task is trying to find out the hot zone of terrorism.
# 
#
# ### Author: <NAME>
# ## Import Library
# +
# Importing the libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings("ignore")
# -
# ## Data Import
# Read the Global Terrorism Database CSV into a DataFrame.
tr =pd.read_csv("globalterrorism.csv")
tr
tr.head()
# +
# Summary statistics (count, mean, min, max, std, quartiles) for numeric columns.
tr.describe()
# +
# Column dtypes and non-null counts.
tr.info()
# -
# (rows, columns) of the raw dataset.
tr.shape
# +
# Count of missing values per column.
tr.isnull().sum()
# -
# ## Data Cleaning
# Rename the raw GTD column names to readable ones used in the rest of the notebook.
tr.rename(columns={'iyear':'Year','imonth':'Month','iday':"day",'gname':'Group','country_txt':'Country','region_txt':'Region','provstate':'State','city':'City','latitude':'latitude',
                   'longitude':'longitude','summary':'summary','attacktype1_txt':'Attacktype','targtype1_txt':'Targettype','weaptype1_txt':'Weapon','nkill':'kill',
                   'nwound':'Wound'},inplace=True)
# Keep only the columns needed for the analysis below.
tr = tr[['Year','Month','day','Country','State','Region','City','latitude','longitude',"Attacktype",'kill',
       'Wound','target1','summary','Group','Targettype','Weapon','motive']]
tr.head()
# Replace missing kill/wound counts with 0 before summing.
tr['Wound'] = tr['Wound'].fillna(0)
tr['kill'] = tr['kill'].fillna(0)
# Casualties = killed + wounded per incident.
# NOTE(review): column name keeps the original "Casualities" spelling since
# later cells may reference it.
tr['Casualities'] = tr['kill'] + tr['Wound']
tr.info()
# ## Data Visualization
# ### Number of Attacking Each Year
# +
# Number of attacks per year, 1970-2017.
year = tr['Year'].unique()
years_count = tr['Year'].value_counts(dropna = False).sort_index()
plt.figure(figsize = (18,10))
sns.barplot(x = year,y = years_count,palette = "tab10")
plt.xticks(rotation = 50,fontsize=15)
plt.yticks(fontsize=15)
plt.xlabel('Attacking Year',fontsize=20)
plt.ylabel('Number of Attacks Each Year',fontsize=20)
plt.title('Attacks In Years',fontsize=30)
plt.show()
# -
# ### Terrorist Activities By Region In Each Year
# Area chart: attack counts per region over time (crosstab of Year x Region).
pd.crosstab(tr.Year, tr.Region).plot(kind='area',stacked=False,figsize=(20,10))
plt.title('Terrorist Activities By Region In Each Year',fontsize=20)
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
plt.ylabel('Number of Attacks',fontsize=15)
plt.xlabel("Year",fontsize=15)
plt.show()
# ### People Died Due To Attack
# Total people killed per year.
df = tr[['Year','kill']].groupby(['Year']).sum()
fig, ax4 = plt.subplots(figsize=(20,10))
df.plot(kind='bar',alpha=0.7,ax=ax4)
plt.xticks(rotation = 50,fontsize=15)
plt.yticks(fontsize=15)
plt.title("People Died Due To Attack",fontsize=22)
# FIX: label typo "peope" -> "people".
plt.ylabel("Number of killed people",fontsize=20)
plt.xlabel('Year',fontsize=20)
top_side = ax4.spines["top"]
top_side.set_visible(False)
right_side = ax4.spines["right"]
right_side.set_visible(False)
# ### Top 10 Affected Countries
plt.subplots(figsize=(20,10))
sns.barplot(tr['Country'].value_counts()[:10].index,tr['Country'].value_counts()[:10].values,palette='YlOrBr_r')
plt.title("Top 10 Affected Countries ",fontsize=20)
plt.xlabel('Countries', fontsize=18)
plt.ylabel('Count',fontsize=18)
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
plt.show()
# ### Total count of attacks
attack = tr.Country.value_counts()[:10]
attack
# tr['City'].value_counts().to_frame().sort_values('City',axis=0,ascending=False).head(10).plot(kind='bar',figsize=(20,10),color='blue')
# plt.xticks(rotation = 50)
# plt.xlabel("City",fontsize=15)
# plt.ylabel("Number of attack",fontsize=15)
# plt.title("Top 10 most effected city",fontsize=20)
# plt.show()
# ### Number of Killed
# People killed, grouped by attack type.
tr[['Attacktype','kill']].groupby(["Attacktype"],axis=0).sum().plot(kind='bar',figsize=(20,10),color=['greenyellow'])
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
plt.title("Number of Killed ",fontsize=20)
plt.ylabel('Number of People',fontsize=15)
plt.xlabel('Attack Type',fontsize=15)
plt.show()
# ### Name of Attack Type
# Count of incidents by attack type.
tr['Attacktype'].value_counts().plot(kind='bar',figsize=(20,10),color='magenta')
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
plt.xlabel("Attack Type",fontsize=18)
plt.ylabel("Number of Attack",fontsize=18)
plt.title("Name of Attack Type",fontsize=20)
plt.show()
# ### Total Number of wounded
tr[['Attacktype','Wound']].groupby(["Attacktype"],axis=0).sum().plot(kind='bar',figsize=(20,10),color=['darkred'])
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
plt.title("Number of wounded ",fontsize=20)
plt.ylabel('Number of people',fontsize=15)
plt.xlabel('Attack type',fontsize=15)
plt.show()
# ### Attacks by Target Type
# Count of incidents by target type.
plt.subplots(figsize=(20,10))
sns.countplot(tr["Targettype"],order=tr['Targettype'].value_counts().index,palette="gist_heat",edgecolor=sns.color_palette("mako"));
plt.xticks(rotation=90,fontsize=15)
plt.yticks(fontsize=15)
# FIX: this chart plots Targettype counts, not attack types per year --
# the original xlabel/title were mislabeled.
plt.xlabel("Target type",fontsize=15)
plt.ylabel("count",fontsize=15)
plt.title("Attacks by Target Type",fontsize=20)
plt.show()
# ### Top 10 terrorist group attack
# Drop the "Unknown" attribution bucket before ranking groups.
tr['Group'].value_counts().to_frame().drop('Unknown').head(10).plot(kind='bar',color='olive',figsize=(20,10))
plt.title("Top 10 terrorist group attack",fontsize=20)
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
plt.xlabel("terrorist group name",fontsize=18)
plt.ylabel("Attack number",fontsize=18)
plt.show()
tr.Group.value_counts().drop('Unknown')[0:10]
# ### Top 10 terrorist group kill people
tr[['Group','kill']].groupby(['Group'],axis=0).sum().drop('Unknown').sort_values('kill',ascending=False).head(10).plot(kind='bar',color='crimson',figsize=(20,10))
plt.title("Top 10 terrorist group kill people",fontsize=22)
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
plt.xlabel("terrorist group name",fontsize=20)
plt.ylabel("No of killed people",fontsize=20)
plt.show()
# Total number of kills per (group, country) pair, top 10, excluding "Unknown".
df=tr[['Group','Country','kill']]
df=df.groupby(['Group','Country'],axis=0).sum().sort_values('kill',ascending=False).drop('Unknown').reset_index().head(10)
df
# Total number of people killed across the whole dataset.
kill = tr.loc[:,'kill']
print('Number of people killed by terror attack:', int(sum(kill.dropna())))
# Pivot table: total kills per attack type.
typeKill = tr.pivot_table(columns='Attacktype', values='kill', aggfunc='sum')
typeKill
# Pivot table: total kills per country.
countryKill = tr.pivot_table(columns='Country', values='kill', aggfunc='sum')
countryKill
# ### Conclusion :
#
# - Country with the most attacks:
# **Iraq**
#
# - Total no. of attacks in Iraq:
# **24636**
#
# - City with the most attacks:
# **Baghdad**
#
# - Region with the most attacks:
# **Middle East & North Africa**
#
# - Year with the most attacks:
# **2014**
#
# - Month with the most attacks:
# **5**
#
# - Group with the most attacks:
# **Taliban**
#
# - Total no. of attacks by Taliban:
# **7478**
#
# - The most target types per year:
# **Private citizens & Property**
#
# - Group with the most kills:
# **Islamic State of Iraq and the Levant (ISIL)**
#
# - Most Attack Types:
# **Bomb Explosion**
#
# - Number of people killed by terror attack:
# **411868**
# ## Thank You!!
#
# ### <NAME>
| Terrorism Analytics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# ## Obtaining the Efficient Frontier - Part II
# *Suggested Answers follow (usually there are multiple ways to solve a problem in Python).*
# Ok, let’s continue the exercise from the last lecture.
# You already downloaded the data and generated two random weightings.
# +
import numpy as np
import pandas as pd
from pandas_datareader import data as wb
import matplotlib.pyplot as plt
# %matplotlib inline
# Download daily adjusted close prices for the two assets from Yahoo Finance.
assets = ['WMT', 'FB']
pf_data = pd.DataFrame()
for a in assets:
    pf_data[a] = wb.DataReader(a, data_source = 'yahoo', start = '2014-1-1')['Adj Close']
# +
# Daily log returns; one random weighting normalized to sum to 1.
log_returns = np.log(pf_data / pf_data.shift(1))
num_assets = len(assets)
weights = np.random.random(num_assets)
weights /= np.sum(weights)
weights
# -
# Now, estimate the expected Portfolio Return, Variance, and Volatility.
# Expected Portfolio Return (annualized with 250 trading days):
np.sum(weights * log_returns.mean()) * 250
# Expected Portfolio Variance (w' * Cov * w, annualized):
np.dot(weights.T, np.dot(log_returns.cov() * 250, weights))
# Expected Portfolio Volatility (sqrt of the variance):
np.sqrt(np.dot(weights.T,np.dot(log_returns.cov() * 250, weights)))
# ***
# The rest of this exercise will be a reproduction of what we did in the previous video.
# 1) Create two empty lists. Name them pf_returns and pf_volatilites.
pfolio_returns = []
pfolio_volatilities = []
# 2) Create a loop with 1,000 iterations that will generate random weights, summing to 1, and will append the obtained values for the portfolio returns and the portfolio volatilities to pf_returns and pf_volatilities, respectively.
# +
# Monte-Carlo sample of 1,000 random portfolios on the efficient-frontier cloud.
for x in range (1000):
    weights = np.random.random(num_assets)
    weights /= np.sum(weights)
    pfolio_returns.append(np.sum(weights * log_returns.mean()) * 250)
    pfolio_volatilities.append(np.sqrt(np.dot(weights.T,np.dot(log_returns.cov() * 250, weights))))
pfolio_returns, pfolio_volatilities
# -
# 3) Transform the obtained lists into NumPy arrays and reassign them to pf_returns and pf_volatilites. Once you have done that, the two objects will be NumPy arrays.
# +
pfolio_returns = np.array(pfolio_returns)
pfolio_volatilities = np.array(pfolio_volatilities)
pfolio_returns, pfolio_volatilities
| 23 - Python for Finance/5_Markowitz Portfolio Optimization/3_Obtaining the Efficient Frontier in Python - Part II (5:18)/Obtaining the Efficient Frontier - Part II - Solution.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="t3Xq_P3TGwTx" colab_type="text"
# # DQN implementation with PyTorch using PongNoFrameskip-v4 benchmark.
#
# In this notebook, we implement Deep Q-Network (DQN), one of the rainforcement learning algorithm, using `PyTorch`.
# This code refers to [jmichaux/dqn-pytorch](https://github.com/jmichaux/dqn-pytorch).
# + [markdown] id="1QfzACsx863h" colab_type="text"
# In this code, we propose the new method of improve the escape from local-minimum of rainforcement learning. Our method takeing multi-agent approach. In addition, we also define the agent durabiity inspired by Evolutionary computation.
# + [markdown] id="iKSJOvllG1Iu" colab_type="text"
# ## Setup
# + id="FytgoxIUPaIp" colab_type="code" outputId="bc8c2e3a-2339-476a-f3b0-e337e503ee1f" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# !apt-get install -y cmake zlib1g-dev libjpeg-dev xvfb ffmpeg xorg-dev python-opengl libboost-all-dev libsdl2-dev swig freeglut3-dev
# !pip install -U gym imageio PILLOW pyvirtualdisplay 'gym[atari]' 'pyglet==1.3.2' pyopengl scipy JSAnimation opencv-python pillow h5py pyyaml hyperdash pyvirtualdisplay hyperdash
# !apt-get install xvfb
# + id="kFlP_AMJKDp_" colab_type="code" colab={}
# Mount Google Drive so model checkpoints and logs persist across Colab sessions.
from google.colab import drive
drive.mount('/content/drive')
# + id="AvKEUG38QgWK" colab_type="code" colab={}
# !cp /content/drive/My\ Drive/Colab\ Notebooks/MT/Utils/xdpyinfo /usr/bin/
# !cp /content/drive/My\ Drive/Colab\ Notebooks/MT/Utils/libXxf86dga.* /usr/lib/x86_64-linux-gnu/
# !chmod +x /usr/bin/xdpyinfo
# + id="8O2eiLR1ceiF" colab_type="code" colab={}
# !hyperdash signup --github
# + [markdown] id="JPU0ipdJHy5S" colab_type="text"
# ## Package Import
# + id="02TShsX7QtEU" colab_type="code" colab={}
import copy
from collections import namedtuple
from itertools import count
import math
import random
import numpy as np
import os
import time
import json
import gym
from collections import deque
from hyperdash import Experiment
import cv2
import pyvirtualdisplay
import base64
import IPython
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torchvision.transforms as T
# + [markdown] id="BDl5L2b0RPOZ" colab_type="text"
# ## Hyper parameters
# + id="xD9eJ4egQ3fg" colab_type="code" colab={}
# Runtime settings
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# NOTE(review): the namedtuple typename is misspelled 'Transion'; it is a
# runtime string, so left unchanged here, but consider fixing it file-wide.
Transition = namedtuple('Transion', ('state', 'action', 'next_state', 'reward'))
cv2.ocl.setUseOpenCL(False)
time_stamp = str(int(time.time()))
# Seed python/numpy RNGs for reproducibility.
# NOTE(review): torch.manual_seed is not set, so network init is not seeded.
random.seed(0)
np.random.seed(0)
# Hyper parameters
BATCH_SIZE = 32  # @param  # minibatch size for replay sampling
GAMMA = 0.99  # @param  # discount factor
EPS_START = 1  # @param  # initial epsilon for epsilon-greedy exploration
EPS_END = 0.02  # @param  # final epsilon
EPS_DECAY = 1000000  # @param  # exponential epsilon decay constant (steps)
TARGET_UPDATE = 1000  # @param  # steps between target-network syncs
DEFAULT_DURABILITY = 1000  # @param  # initial durability of each agent
LEARNING_RATE = 1e-4  # @param
INITIAL_MEMORY = 10000  # @param  # replay warm-up size before learning
MEMORY_SIZE = 10 * INITIAL_MEMORY  # @param
DEFAULT_DURABILITY_DECREASED_LEVEL = 1  # @param
DEFAULT_DURABILITY_INCREASED_LEVEL = 1  # @param
DURABILITY_CHECK_FREQUENCY = 80  # @param
# Some settings
ENV_NAME = "PongNoFrameskip-v4"  # @param
EXP_NAME = "PongNoFrameskip-v4_" + time_stamp  # @param
RENDER = False  # @param
RUN_NAME = "videos_proposal"  # @param
output_directory = os.path.abspath(
    os.path.join(os.path.curdir, "/content/drive/My Drive/Colab Notebooks/MT/Runs", ENV_NAME + "_" + RUN_NAME + "_" + time_stamp))
TRAIN_LOG_FILE_PATH = output_directory + "/" + ENV_NAME + "_train_" + time_stamp + ".log"  # @param
TEST_LOG_FILE_PATH = output_directory + "/" + ENV_NAME + "_test_" + time_stamp + ".log"  # @param
PARAMETER_LOG_FILE_PATH = output_directory + "/" + ENV_NAME + "_params_" + time_stamp + ".json"  # @param
if not os.path.exists(output_directory):
    os.makedirs(output_directory)
# Snapshot of the run configuration (written out for reproducibility).
# NOTE(review): DEFAULT_DURABILITY_INCREASED_LEVEL is defined above but not
# included in this dict -- confirm whether that is intentional.
hyper_params = {"BATCH_SIZE": BATCH_SIZE, "GAMMA": GAMMA, "EPS_START": EPS_START,
                "EPS_END": EPS_END, "EPS_DECAY": EPS_DECAY,
                "TARGET_UPDATE": TARGET_UPDATE,
                "DEFAULT_DURABILITY": DEFAULT_DURABILITY,
                "LEARNING_RATE": LEARNING_RATE,
                "INITIAL_MEMORY": INITIAL_MEMORY, "MEMORY_SIZE": MEMORY_SIZE,
                "DEFAULT_DURABILITY_DECREASED_LEVEL": DEFAULT_DURABILITY_DECREASED_LEVEL,
                "DURABILITY_CHECK_FREQUENCY": DURABILITY_CHECK_FREQUENCY,
                "ENV_NAME" : ENV_NAME, "EXP_NAME": EXP_NAME,
                "TRAIN_LOG_FILE_PATH": TRAIN_LOG_FILE_PATH,
                "TEST_LOG_FILE_PATH": TEST_LOG_FILE_PATH,
                "PARAMETER_LOG_FILE_PATH": PARAMETER_LOG_FILE_PATH,
                "RENDER": str(RENDER)}
# + [markdown] id="Y42MelxTn2q1" colab_type="text"
# ## Define the Replay memory
# + id="bzrZB3BZRUH0" colab_type="code" colab={}
class ReplayMemory(object):
    """Fixed-capacity ring buffer of Transition tuples for experience replay."""

    def __init__(self, capacity):
        self.capacity = capacity
        self.memory = []
        self.position = 0  # index of the next slot to write

    def push(self, *args):
        """Store one transition, overwriting the oldest entry once full."""
        if len(self.memory) == self.capacity:
            self.memory[self.position] = Transition(*args)
        else:
            self.memory.append(Transition(*args))
        self.position = (self.position + 1) % self.capacity

    def sample(self, batch_size):
        """Return `batch_size` transitions drawn uniformly without replacement."""
        return random.sample(self.memory, batch_size)

    def __len__(self):
        return len(self.memory)
class PrioritizedReplay(object):
    """Placeholder for prioritized experience replay -- not implemented yet."""
    def __init__(self, capacity):
        pass
# + [markdown] id="yLtkGPWnn8kJ" colab_type="text"
# ## Define the DQNs
#
# Now we define the two types of DQN. One is simple q-network using 3 layers CNN. On the other one is batch normalaized 4 layers CNN.
# + id="4GlLM0L5R6HG" colab_type="code" colab={}
SQRT2 = math.sqrt(2.0)  # gradient scale factor used by the dueling heads
ACT = nn.ReLU           # activation class used by all networks below

class DQN(torch.jit.ScriptModule):
    """Plain three-conv DQN for stacked 84x84 frame inputs.

    Input is expected as uint8 pixels with shape (batch, in_channels, 84, 84);
    output is one Q-value per action.
    """

    def __init__(self, in_channels=4, n_actions=14):
        super(DQN, self).__init__()
        trunk = [
            nn.Conv2d(in_channels, 32, kernel_size=8, stride=4), ACT(),
            nn.Conv2d(32, 64, kernel_size=4, stride=2), ACT(),
            nn.Conv2d(64, 64, kernel_size=3, stride=1), ACT(),
        ]
        self.convs = nn.Sequential(*trunk)
        self.fc = nn.Sequential(
            nn.Linear(7 * 7 * 64, 512),
            ACT(),
            nn.Linear(512, n_actions),
        )

    @torch.jit.script_method
    def forward(self, x):
        # Pixels arrive as uint8; rescale to [0, 1] before the conv trunk.
        scaled = x.float() / 255
        features = self.convs(scaled)
        flat = features.view(features.size(0), -1)
        return self.fc(flat)
class DDQN(torch.jit.ScriptModule):
    """Dueling DQN (Wang et al., 2016): a shared conv trunk feeding separate
    advantage and value heads, combined as Q = V + A - mean(A).
    """

    # TorchScript constant declaration must be a class attribute.
    # (The original assigned __constants__ to a *local* variable inside
    # __init__, where it had no effect.)
    __constants__ = ['n_actions']

    def __init__(self, in_channels=4, n_actions=14):
        super(DDQN, self).__init__()
        self.n_actions = n_actions
        self.convs = nn.Sequential(
            nn.Conv2d(in_channels, 32, kernel_size=8, stride=4),
            ACT(),
            nn.Conv2d(32, 64, kernel_size=4, stride=2),
            ACT(),
            nn.Conv2d(64, 64, kernel_size=3, stride=1),
            ACT()
        )
        # Advantage head: one output per action.
        self.fc_adv = nn.Sequential(
            nn.Linear(7 * 7 * 64, 512),
            ACT(),
            nn.Linear(512, n_actions)
        )
        # Value head: a single state value.
        self.fc_val = nn.Sequential(
            nn.Linear(7 * 7 * 64, 512),
            ACT(),
            nn.Linear(512, 1)
        )

        def scale_grads_hook(module, grad_input, grad_output):
            """Scale gradients flowing back into the shared trunk by 1/sqrt(2),
            as in the dueling-architecture paper.

            register_backward_hook passes (module, grad_input, grad_output);
            the original named these in the opposite order, which was
            misleading (it did modify grad_input, which is the correct
            tensor to scale). The returned tuple replaces grad_input.
            Guard against None entries, which the original would crash on.
            """
            return tuple(g / SQRT2 if g is not None else None for g in grad_input)

        self.fc_adv.register_backward_hook(scale_grads_hook)
        self.fc_val.register_backward_hook(scale_grads_hook)

    @torch.jit.script_method
    def forward(self, x):
        # uint8 pixels -> [0, 1] floats.
        x = x.float() / 255
        x = self.convs(x)
        x = x.view(x.size(0), -1)
        adv = self.fc_adv(x)
        val = self.fc_val(x)
        # Subtract the mean advantage so V and A are identifiable.
        return val + adv - adv.mean(1).unsqueeze(1)

    @torch.jit.script_method
    def value(self, x):
        """Return only the state-value head's output V(s)."""
        x = x.float() / 255
        x = self.convs(x)
        x = x.view(x.size(0), -1)
        return self.fc_val(x)
class LanderDQN(torch.jit.ScriptModule):
    """Two-hidden-layer MLP Q-network for low-dimensional vector observations
    (e.g. LunarLander states): n_state -> nhid -> nhid -> n_actions.
    """

    def __init__(self, n_state, n_actions, nhid=64):
        super(LanderDQN, self).__init__()
        stack = [
            nn.Linear(n_state, nhid), ACT(),
            nn.Linear(nhid, nhid), ACT(),
            nn.Linear(nhid, n_actions),
        ]
        self.layers = nn.Sequential(*stack)

    @torch.jit.script_method
    def forward(self, x):
        return self.layers(x)
class RamDQN(torch.jit.ScriptModule):
    """MLP Q-network for RAM-state observations, funneling
    n_state -> 256 -> 128 -> 64 -> n_actions.
    """

    def __init__(self, n_state, n_actions):
        super(RamDQN, self).__init__()
        mods = [nn.Linear(n_state, 256), ACT()]
        mods += [nn.Linear(256, 128), ACT()]
        mods += [nn.Linear(128, 64), ACT()]
        mods.append(nn.Linear(64, n_actions))
        self.layers = nn.Sequential(*mods)

    @torch.jit.script_method
    def forward(self, x):
        return self.layers(x)
class DQNbn(nn.Module):
    """Batch-normalized variant of the three-conv DQN.

    Args:
        in_channels (int): number of stacked input frames (channels)
        n_actions (int): number of Q-value outputs
    """

    def __init__(self, in_channels=4, n_actions=14):
        super(DQNbn, self).__init__()
        # Attribute names are kept identical to the original so that saved
        # state_dicts remain loadable.
        self.conv1 = nn.Conv2d(in_channels, 32, kernel_size=8, stride=4)
        self.bn1 = nn.BatchNorm2d(32)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=4, stride=2)
        self.bn2 = nn.BatchNorm2d(64)
        self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=1)
        self.bn3 = nn.BatchNorm2d(64)
        self.fc4 = nn.Linear(7 * 7 * 64, 512)
        self.head = nn.Linear(512, n_actions)

    def forward(self, x):
        # uint8 pixels -> [0, 1] floats.
        out = x.float() / 255
        # conv -> batch-norm -> ReLU, three times.
        for conv, bn in ((self.conv1, self.bn1),
                         (self.conv2, self.bn2),
                         (self.conv3, self.bn3)):
            out = F.relu(bn(conv(out)))
        out = F.relu(self.fc4(out.view(out.size(0), -1)))
        return self.head(out)
# class DQN(nn.Module):
# def __init__(self, in_channels=4, n_actions=14):
# """
# Initialize Deep Q Network
# Args:
# in_channels (int): number of input channels
# n_actions (int): number of outputs
# """
# super(DQN, self).__init__()
# self.conv1 = nn.Conv2d(in_channels, 32, kernel_size=8, stride=4)
# # self.bn1 = nn.BatchNorm2d(32)
# self.conv2 = nn.Conv2d(32, 64, kernel_size=4, stride=2)
# # self.bn2 = nn.BatchNorm2d(64)
# self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=1)
# # self.bn3 = nn.BatchNorm2d(64)
# self.fc4 = nn.Linear(7 * 7 * 64, 512)
# self.head = nn.Linear(512, n_actions)
# def forward(self, x):
# x = x.float() / 255
# x = F.relu(self.conv1(x))
# x = F.relu(self.conv2(x))
# x = F.relu(self.conv3(x))
# x = F.relu(self.fc4(x.view(x.size(0), -1)))
# return self.head(x)
# + [markdown] id="4FPJHZkWoCis" colab_type="text"
# ## Define the Agent
# + id="g77AFM28oDRy" colab_type="code" colab={}
class Agent:
    """One DQN learner: policy/target networks, its own replay memory and
    optimizer, epsilon-greedy action selection, and a "durability" counter
    used by the notebook's multi-agent selection scheme.
    """

    def __init__(self, policy_net, target_net, durability, optimizer, name):
        self.policy_net = policy_net
        self.target_net = target_net
        # Start the target network as an exact copy of the policy network.
        self.target_net.load_state_dict(policy_net.state_dict())
        self.durability = durability
        self.optimizer = optimizer
        self.name = name
        self.memory = ReplayMemory(MEMORY_SIZE)
        self.steps_done = 0
        self.total_reward = 0.0

    def select_action(self, state):
        """Epsilon-greedy action selection with exponentially decaying epsilon."""
        sample = random.random()
        eps_threshold = EPS_END + (EPS_START - EPS_END) * math.exp(-1. * self.steps_done / EPS_DECAY)
        self.steps_done += 1
        if sample > eps_threshold:
            with torch.no_grad():
                # NOTE(review): device is hard-coded to 'cuda' here; probably
                # should use the module-level `device` -- confirm.
                return self.policy_net(state.to('cuda')).max(1)[1].view(1,1)
        else:
            # NOTE(review): assumes 4 valid actions; verify this matches
            # env.action_space.n for the chosen environment.
            return torch.tensor([[random.randrange(4)]], device=device, dtype=torch.long)

    def optimize_model(self):
        """Sample a minibatch from replay memory and take one DQN gradient step."""
        if len(self.memory) < BATCH_SIZE:
            return
        transitions = self.memory.sample(BATCH_SIZE)
        # Transpose the batch: a list of Transitions becomes one Transition
        # whose fields are tuples (all states, all actions, ...).
        batch = Transition(*zip(*transitions))
        actions = tuple((map(lambda a: torch.tensor([[a]], device='cuda'), batch.action)))
        rewards = tuple((map(lambda r: torch.tensor([r], device='cuda'), batch.reward)))
        # Mask of transitions whose episode did not terminate at this step.
        non_final_mask = torch.tensor(
            tuple(map(lambda s: s is not None, batch.next_state)),
            device=device, dtype=torch.bool)
        non_final_next_states = torch.cat([s for s in batch.next_state
                                           if s is not None]).to('cuda')
        state_batch = torch.cat(batch.state).to('cuda')
        action_batch = torch.cat(actions)
        reward_batch = torch.cat(rewards)
        # Q(s, a) for the actions actually taken.
        state_action_values = self.policy_net(state_batch).gather(1, action_batch)
        # max_a' Q_target(s', a'); zero for terminal states.
        next_state_values = torch.zeros(BATCH_SIZE, device=device)
        next_state_values[non_final_mask] = self.target_net(non_final_next_states).max(1)[0].detach()
        expected_state_action_values = (next_state_values * GAMMA) + reward_batch
        loss = F.smooth_l1_loss(state_action_values, expected_state_action_values.unsqueeze(1))
        self.optimizer.zero_grad()
        loss.backward()
        # Clamp gradients element-wise to [-1, 1] for stability.
        for param in self.policy_net.parameters():
            param.grad.data.clamp_(-1, 1)
        self.optimizer.step()

    # --- simple accessors / mutators ------------------------------------

    def get_state(self):
        return self.state

    def set_state(self, state):
        self.state = state

    def set_env(self, env):
        self.env = env

    def get_env(self):
        return self.env

    def set_action(self, action):
        self.action = action

    def get_action(self):
        return self.action

    def get_durability(self):
        return self.durability

    def get_policy_net(self):
        return self.policy_net

    def reduce_durability(self, value):
        self.durability = self.durability - value

    def heal_durability(self, value):
        self.durability = self.durability + value

    def set_done_state(self, done):
        self.done = done

    def set_total_reward(self, reward):
        # BUG FIX: the original wrote to `self.reward`, so `total_reward`
        # never accumulated and get_total_reward() always returned 0.0.
        self.total_reward = self.total_reward + reward

    def get_total_reward(self):
        return self.total_reward

    def set_step_retrun_value(self, obs, reward, done, info):
        # Store the tuple returned by env.step(). (Method name keeps the
        # original "retrun" spelling for caller compatibility.)
        self.obs = obs
        self.reward = reward
        self.done = done
        self.info = info

    def is_done(self):
        return self.done
# + [markdown] id="yLx6x64KoE9Y" colab_type="text"
# ## Define the Environment
#
# **TODO: Make sure to create environment class**
# + id="MF7Ve2SdR6dL" colab_type="code" colab={}
# def make_env(env, stack_frames=True, episodic_life=True, clip_rewards=False, scale=False):
# if episodic_life:
# env = EpisodicLifeEnv(env)
#
# env = NoopResetEnv(env, noop_max=30)
# env = MaxAndSkipEnv(env, skip=4)
# if 'FIRE' in env.unwrapped.get_action_meanings():
# env = FireResetEnv(env)
#
# env = WarpFrame(env)
# if stack_frames:
# env = FrameStack(env, 4)
# if clip_rewards:
# env = ClipRewardEnv(env)
# return env
def get_state(obs):
    """Convert an HWC observation (e.g. a LazyFrames stack) into a
    1 x C x H x W torch tensor for the networks above."""
    frame = np.array(obs)               # materialize (copies LazyFrames -> ndarray)
    chw = frame.transpose((2, 0, 1))    # HWC -> CHW
    return torch.from_numpy(chw).unsqueeze(0)  # add the batch dimension
class Environment:
    """Builds the wrapped Atari environment used for training."""

    def __init__(self):
        self.env = gym.make(ENV_NAME)
        self.env = self.make_env(self.env)

    def get_env(self):
        return self.env

    def make_env(self, env, stack_frames=True, episodic_life=True, clip_rewards=False, scale=False):
        """Apply the standard DeepMind-style Atari wrapper stack.

        BUG FIX: the original passed `self.env` (the raw env) instead of the
        local `env` to NoopResetEnv, MaxAndSkipEnv and FireResetEnv, which
        silently discarded the wrappers applied earlier in the chain
        (e.g. EpisodicLifeEnv and NoopResetEnv themselves).
        """
        if episodic_life:
            env = EpisodicLifeEnv(env)
        env = NoopResetEnv(env, noop_max=30)
        env = MaxAndSkipEnv(env, skip=4)
        if 'FIRE' in env.unwrapped.get_action_meanings():
            env = FireResetEnv(env)
        env = WarpFrame(env)
        if stack_frames:
            env = FrameStack(env, 4)
        if clip_rewards:
            env = ClipRewardEnv(env)
        return env

    @staticmethod
    def get_state(obs):
        """Convert an HWC observation into a 1 x C x H x W torch tensor.

        FIX: declared @staticmethod -- the original defined `obs` in the
        `self` slot, so instance calls would have tried to convert the
        Environment object itself.
        """
        state = np.array(obs)
        state = state.transpose((2, 0, 1))
        state = torch.from_numpy(state)
        return state.unsqueeze(0)
class RewardScaler(gym.RewardWrapper):
    """Scale every reward by 0.1 (for algorithms sensitive to reward magnitude)."""
    def reward(self, reward):
        return reward * 0.1
class ClipRewardEnv(gym.RewardWrapper):
    """Clip rewards to their sign, the standard DQN reward-clipping trick."""
    def __init__(self, env):
        gym.RewardWrapper.__init__(self, env)

    def reward(self, reward):
        """Bin reward to {+1, 0, -1} by its sign."""
        return np.sign(reward)
class LazyFrames(object):
    """Memory-saving view over a list of stacked frames.

    Frames shared between consecutive observations are stored only once; the
    concatenated array (along the channel axis) is materialized lazily and
    cached on first access. Convert to a numpy array only right before
    feeding the model.
    """

    def __init__(self, frames):
        self._frames = frames
        self._out = None  # cached concatenation, built on first access

    def _force(self):
        # Concatenate once along axis 2 (channels), then drop the frame list
        # so only the combined array is retained.
        if self._out is None:
            self._out = np.concatenate(self._frames, axis=2)
            self._frames = None
        return self._out

    def __array__(self, dtype=None):
        arr = self._force()
        return arr if dtype is None else arr.astype(dtype)

    def __len__(self):
        return len(self._force())

    def __getitem__(self, i):
        return self._force()[i]
class FrameStack(gym.Wrapper):
    def __init__(self, env, k):
        """Stack k last frames.
        Returns lazy array, which is much more memory efficient.

        Observations returned by reset()/step() are LazyFrames, not ndarrays;
        callers convert with np.array() just before inference.
        See Also
        --------
        baselines.common.atari_wrappers.LazyFrames
        """
        gym.Wrapper.__init__(self, env)
        self.k = k
        # Ring buffer of the k most recent frames.
        self.frames = deque([], maxlen=k)
        shp = env.observation_space.shape
        # Observation space grows along the channel axis: (H, W, C*k).
        self.observation_space = gym.spaces.Box(low=0, high=255, shape=(shp[0], shp[1], shp[2] * k), dtype=env.observation_space.dtype)

    def reset(self):
        # Fill the buffer with k copies of the initial frame.
        ob = self.env.reset()
        for _ in range(self.k):
            self.frames.append(ob)
        return self._get_ob()

    def step(self, action):
        ob, reward, done, info = self.env.step(action)
        self.frames.append(ob)
        return self._get_ob(), reward, done, info

    def _get_ob(self):
        # Buffer must be full before an observation can be emitted.
        assert len(self.frames) == self.k
        return LazyFrames(list(self.frames))
class WarpFrame(gym.ObservationWrapper):
    def __init__(self, env):
        """Warp frames to 84x84 grayscale as done in the Nature paper and later work."""
        gym.ObservationWrapper.__init__(self, env)
        self.width = 84
        self.height = 84
        # Single-channel uint8 output, shape (84, 84, 1).
        self.observation_space = gym.spaces.Box(low=0, high=255,
                                                shape=(self.height, self.width, 1), dtype=np.uint8)

    def observation(self, frame):
        # RGB -> grayscale, then downscale; keep a trailing channel axis.
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        frame = cv2.resize(frame, (self.width, self.height), interpolation=cv2.INTER_AREA)
        return frame[:, :, None]
class FireResetEnv(gym.Wrapper):
    def __init__(self, env=None):
        """For environments where the user need to press FIRE for the game to start."""
        super(FireResetEnv, self).__init__(env)
        # Action 1 must be FIRE and the env must have at least 3 actions.
        assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
        assert len(env.unwrapped.get_action_meanings()) >= 3

    def step(self, action):
        return self.env.step(action)

    def reset(self):
        # Press FIRE (action 1) then UP/RIGHT (action 2) after reset; retry
        # if either immediately ends the episode.
        self.env.reset()
        obs, _, done, _ = self.env.step(1)
        if done:
            self.env.reset()
        obs, _, done, _ = self.env.step(2)
        if done:
            self.env.reset()
        return obs
class EpisodicLifeEnv(gym.Wrapper):
    def __init__(self, env=None):
        """Make end-of-life == end-of-episode, but only reset on true game over.
        Done by DeepMind for the DQN and co. since it helps value estimation.
        """
        super(EpisodicLifeEnv, self).__init__(env)
        self.lives = 0
        self.was_real_done = True
        self.was_real_reset = False

    def step(self, action):
        obs, reward, done, info = self.env.step(action)
        self.was_real_done = done
        # check current lives, make loss of life terminal,
        # then update lives to handle bonus lives
        lives = self.env.unwrapped.ale.lives()
        if lives < self.lives and lives > 0:
            # for Qbert sometimes we stay in lives == 0 condition for a few frames
            # so it's important to keep lives > 0, so that we only reset once
            # the environment advertises done.
            done = True
        self.lives = lives
        return obs, reward, done, info

    def reset(self):
        """Reset only when lives are exhausted.
        This way all states are still reachable even though lives are episodic,
        and the learner need not know about any of this behind-the-scenes.
        """
        if self.was_real_done:
            obs = self.env.reset()
            self.was_real_reset = True
        else:
            # no-op step to advance from terminal/lost life state
            obs, _, _, _ = self.env.step(0)
            self.was_real_reset = False
        self.lives = self.env.unwrapped.ale.lives()
        return obs
class MaxAndSkipEnv(gym.Wrapper):
    """Repeat each action `skip` times, returning the pixel-wise max of the
    last two raw frames (removes Atari sprite flicker)."""

    def __init__(self, env=None, skip=4):
        super(MaxAndSkipEnv, self).__init__(env)
        # Holds the most recent raw observations for max pooling over time.
        self._obs_buffer = deque(maxlen=2)
        self._skip = skip

    def step(self, action):
        accumulated_reward = 0.0
        done = None
        for _ in range(self._skip):
            obs, reward, done, info = self.env.step(action)
            self._obs_buffer.append(obs)
            accumulated_reward += reward
            if done:
                break
        # Element-wise maximum over the (up to two) buffered frames.
        max_frame = np.max(np.stack(self._obs_buffer), axis=0)
        return max_frame, accumulated_reward, done, info

    def reset(self):
        """Clear the frame buffer and seed it with the first observation."""
        self._obs_buffer.clear()
        obs = self.env.reset()
        self._obs_buffer.append(obs)
        return obs
class NoopResetEnv(gym.Wrapper):
    """Randomize the initial state by taking 1..noop_max no-op actions on reset.

    Action 0 is assumed to be NOOP.
    """

    def __init__(self, env=None, noop_max=30):
        super(NoopResetEnv, self).__init__(env)
        self.noop_max = noop_max
        self.override_num_noops = None
        assert env.unwrapped.get_action_meanings()[0] == 'NOOP'

    def step(self, action):
        # This wrapper only changes reset behavior; steps are delegated as-is.
        return self.env.step(action)

    def reset(self):
        """Reset, then perform a random number of no-ops in [1, noop_max]."""
        self.env.reset()
        noops = (self.override_num_noops
                 if self.override_num_noops is not None
                 else np.random.randint(1, self.noop_max + 1))
        assert noops > 0
        obs = None
        for _ in range(noops):
            obs, _, done, _ = self.env.step(0)
            if done:
                obs = self.env.reset()
        return obs
# + [markdown] id="hv-Dqojqm7Nh" colab_type="text"
# ## Deprecated code
#
# **These functions were moved to the Agent class.**
# + [markdown] id="mTc_yZ_YSNqk" colab_type="text"
# @deprecated
# def select_action(state):
# global steps_done
# sample = random.random()
# eps_threshold = EPS_END + (EPS_START - EPS_END)* \
# math.exp(-1. * steps_done / EPS_DECAY)
# steps_done += 1
# if sample > eps_threshold:
# with torch.no_grad():
# return policy_net(state.to('cuda')).max(1)[1].view(1,1)
# else:
# return torch.tensor([[random.randrange(4)]], device=device, dtype=torch.long)
#
#
# @deprecated
# def optimize_model():
# if len(memory) < BATCH_SIZE:
# return
# transitions = memory.sample(BATCH_SIZE)
# """
# zip(*transitions) unzips the transitions into
# Transition(*) creates new named tuple
# batch.state - tuple of all the states (each state is a tensor)
# batch.next_state - tuple of all the next states (each state is a tensor)
# batch.reward - tuple of all the rewards (each reward is a float)
# batch.action - tuple of all the actions (each action is an int)
# """
# batch = Transition(*zip(*transitions))
#
# actions = tuple((map(lambda a: torch.tensor([[a]], device='cuda'), batch.action)))
# rewards = tuple((map(lambda r: torch.tensor([r], device='cuda'), batch.reward)))
#
# non_final_mask = torch.tensor(
# tuple(map(lambda s: s is not None, batch.next_state)),
# device=device, dtype=torch.uint8)
#
# non_final_next_states = torch.cat([s for s in batch.next_state
# if s is not None]).to('cuda')
#
#
# state_batch = torch.cat(batch.state).to('cuda')
# action_batch = torch.cat(actions)
# reward_batch = torch.cat(rewards)
#
# state_action_values = policy_net(state_batch).gather(1, action_batch)
#
# next_state_values = torch.zeros(BATCH_SIZE, device=device)
# next_state_values[non_final_mask] = target_net(non_final_next_states).max(1)[0].detach()
# expected_state_action_values = (next_state_values * GAMMA) + reward_batch
#
# loss = F.smooth_l1_loss(state_action_values, expected_state_action_values.unsqueeze(1))
#
# optimizer.zero_grad()
# loss.backward()
# for param in policy_net.parameters():
# param.grad.data.clamp_(-1, 1)
# optimizer.step()
#
# + [markdown] id="lTymzujboNgi" colab_type="text"
# ## Define the train steps
#
# In my research, make this code multi-agent (**Note**: Multi-agent here means multiple independent agents sharing a task environment)
# + id="b0rY86vwSU3c" colab_type="code" colab={}
# TODO : Move the deprecated functions into the Agent class
def train(envs, agents, core_env, core_agent, n_episodes, agent_n, exp, render=False):
    """
    Training step.

    Multiple candidate agents propose actions; a core agent replays the best
    candidate's action in the main environment.  Each candidate agent has its
    own environment and a durability value.  Every DURABILITY_CHECK_FREQUENCY
    steps, agents that were not selected as best have their durability
    reduced; exhausted agents are removed between episodes.

    Parameters
    ----------
    envs: list of Environment
        List of environments for the multi-agent setup
    agents: list of Agent
        Multi-agents that create action candidates for core_agent
    core_env: Environment
        Main environment of this train step
    core_agent: Agent
        Main agent of this train step
    n_episodes: int
        The number of episodes
    agent_n : int
        The number of agents
    exp: Experiment
        The Experiment object used by hyperdash
    render: boolean, default False
        Flag for whether to render the environment
    """
    for episode in range(n_episodes):
        print("episode: {}".format(episode))
        # 0. Initialize the environment, state and agent params
        obs = core_env.reset()
        core_state = get_state(obs)
        core_agent.total_reward = 0.0
        core_agent.set_state(core_state)
        for agent in agents:
            obs = agent.get_env().reset()
            state = get_state(obs)
            agent.set_state(state)
            agent.total_reward = 0.0
        for t in count():
            # 1. Each candidate agent selects an action for the core env state
            for agent in agents:
                agent.set_env(core_agent.get_env())
                action = agent.select_action(agent.get_state())
                agent.set_action(action)
            # 2. Advance each candidate agent one step and train it
            for agent in agents:
                obs, reward, done, info = agent.get_env().step(agent.get_action())
                agent.set_step_retrun_value(obs, reward, done, info)
                agent.set_total_reward(reward)
                if not done:
                    next_state = get_state(obs)
                else:
                    next_state = None
                reward = torch.tensor([reward], device=device)
                # Transitions are stored on the CPU to keep GPU memory free.
                agent.memory.push(agent.get_state(), agent.get_action().to('cpu'),
                                  next_state, reward.to('cpu'))
                agent.set_state(next_state)
                if agent.steps_done > INITIAL_MEMORY:
                    agent.optimize_model()
                    if agent.steps_done % TARGET_UPDATE == 0:
                        agent.target_net.load_state_dict(agent.policy_net.state_dict())
            # ---------------
            # Proposal method
            # ---------------
            # 3. Select the best agent in this step (ties broken at random)
            reward_list = [agent.get_total_reward() for agent in agents]
            best_agents = [i for i, v in enumerate(reward_list) if v == max(reward_list)]
            best_agent_index = random.choice(best_agents)
            best_agent = agents[best_agent_index]
            best_agent.heal_durability(DEFAULT_DURABILITY_INCREASED_LEVEL)
            # Best_agent information
            # exp.log("Current best agent: {}".format(best_agent.name))
            # 4. Periodically reduce the durability of the non-best agents
            if t % DURABILITY_CHECK_FREQUENCY == 0:
                if len(agents) > 1:
                    index = [i for i in range(len(agents)) if i not in best_agents]
                    for i in index:
                        agents[i].reduce_durability(DEFAULT_DURABILITY_DECREASED_LEVEL)
            # 5. Main step of the core agent: replay the best agent's action
            core_agent_action = best_agent.get_action()
            core_agent.set_action(core_agent_action)
            core_obs, core_reward, core_done, core_info = core_agent.get_env().step(
                core_agent.get_action())
            core_agent.set_step_retrun_value(core_obs, core_reward, core_done, core_info)
            core_agent.set_done_state(core_done)
            core_agent.set_total_reward(core_reward)
            if not core_done:
                core_next_state = get_state(core_obs)
            else:
                core_next_state = None
            core_reward = torch.tensor([core_reward], device=device)
            core_agent.memory.push(core_agent.get_state(),
                                   core_agent.get_action().to('cpu'),
                                   core_next_state, core_reward.to('cpu'))
            core_agent.set_state(core_next_state)
            if core_agent.steps_done > INITIAL_MEMORY:
                core_agent.optimize_model()
                if core_agent.steps_done % TARGET_UPDATE == 0:
                    core_agent.target_net.load_state_dict(core_agent.policy_net.state_dict())
            if core_agent.is_done():
                print("\n")
                break
        # 6. Remove agents whose durability is exhausted.
        # NOTE: the original deleted elements while iterating the same list,
        # which skips the element following each deletion; rebuilding the
        # list in place removes every exhausted agent correctly.
        if len(agents) > 1 and episode % DURABILITY_CHECK_FREQUENCY == 0:
            agents[:] = [agent for agent in agents if agent.durability > 0]
        # ----------------------
        # End of proposal method
        # ----------------------
        exp.metric("total_reward", core_agent.get_total_reward())
        out_str = 'Total steps: {} \t Episode: {}/{} \t Total reward: {}'.format(
            core_agent.steps_done, episode, t, core_agent.total_reward)
        if episode % 20 == 0:
            print(out_str)
            out_str = str("\n" + out_str + "\n")
            exp.log(out_str)
        else:
            print(out_str)
            exp.log(out_str)
    # Close the core environment.  The original called env.close(), but no
    # `env` exists in this function's scope (it silently relied on a global).
    core_env.close()
# + [markdown] id="8r34-vdKoRXs" colab_type="text"
# ## Define the test steps
# + id="jUzw2mQ3T--m" colab_type="code" colab={}
# TODO : Move the deprecated functions into the Agent class
def test(env, n_episodes, policy, exp, render=True):
    """Run greedy evaluation episodes with `policy`, recording video as mp4.

    Parameters
    ----------
    env: gym.Env
        Environment to evaluate in (will be wrapped in a Monitor)
    n_episodes: int
        Number of evaluation episodes
    policy: torch.nn.Module
        Network mapping a state tensor to Q-values
    exp: Experiment
        The Experiment object used by hyperdash
    render: boolean, default True
        Flag for whether to render the environment
    """
    # Save video as mp4 in the specified directory
    env = gym.wrappers.Monitor(env, './videos/' + 'dqn_pong_video')
    for episode in range(n_episodes):
        obs = env.reset()
        # BUGFIX: the original called env.get_state(obs), but the wrapped gym
        # env has no get_state(); use the module-level helper, as train() does.
        state = get_state(obs)
        total_reward = 0.0
        for t in count():
            # Greedy action: argmax over Q-values (no exploration at test time)
            action = policy(state.to('cuda')).max(1)[1].view(1, 1)
            if render:
                env.render()
                time.sleep(0.02)
            obs, reward, done, info = env.step(action)
            total_reward += reward
            if not done:
                next_state = get_state(obs)
            else:
                next_state = None
            state = next_state
            if done:
                out_str = "Finished Episode {} with reward {}".format(
                    episode, total_reward)
                print(out_str)
                exp.log(out_str)
                # NOTE(review): 'wt' truncates the log each episode, so only
                # the last episode's line survives — confirm this is intended.
                with open(TEST_LOG_FILE_NAME, 'wt') as f:
                    f.write(out_str)
                break
    env.close()
# + [markdown] id="Y89M-d5CoSgs" colab_type="text"
# ## Main steps
# + colab_type="code" id="P3NprG-6BBgW" colab={}
# Create Agent
# Build the pool of candidate agents: two DQN-based and two DDQN-based.
# NOTE(review): cnn-dqn0/cnn-dqn1 share one DQN policy/target network and
# optimizer, cnn-ddqn0/cnn-ddqn1 share one DDQN pair, and the core agent also
# shares the DQN networks — confirm this weight sharing is intended rather
# than independent copies per agent.
agents = []
policy_net_0 = DQN(n_actions=4).to(device)
target_net_0 = DQN(n_actions=4).to(device)
optimizer_0 = optim.Adam(policy_net_0.parameters(), lr=LEARNING_RATE)
agents.append(Agent(policy_net_0, target_net_0, DEFAULT_DURABILITY,
                    optimizer_0, "cnn-dqn0"))
agents.append(Agent(policy_net_0, target_net_0, DEFAULT_DURABILITY,
                    optimizer_0, "cnn-dqn1"))
policy_net_1 = DDQN(n_actions=4).to(device)
target_net_1 = DDQN(n_actions=4).to(device)
optimizer_1 = optim.Adam(policy_net_1.parameters(), lr=LEARNING_RATE)
agents.append(Agent(policy_net_1, target_net_1, DEFAULT_DURABILITY,
                    optimizer_1, "cnn-ddqn0"))
agents.append(Agent(policy_net_1, target_net_1, DEFAULT_DURABILITY,
                    optimizer_1, "cnn-ddqn1"))
core_agent = Agent(policy_net_0, target_net_0, DEFAULT_DURABILITY, optimizer_0,
                   "core")
# Number of candidate agents, used to size the environment list below.
AGENT_N = len(agents)
# + id="c3E3SgmPxxDM" colab_type="code" colab={}
# time_stamp = str(int(time.time()))
# Record the agent count alongside the other hyper-parameters and persist
# them as JSON so the run configuration can be reproduced later.
hyper_params["AGENT_N"] = AGENT_N
json_params = json.dumps(hyper_params)
if not os.path.exists(output_directory):
    os.makedirs(output_directory)
with open(PARAMETER_LOG_FILE_PATH, 'wt') as f:
    f.write(json_params)
# + id="C_X0a09mUKtx" colab_type="code" colab={}
# Deprecated code
# create networks
# policy_net = DQN(n_actions=4).to(device)
# target_net = DQN(n_actions=4).to(device)
# target_net.load_state_dict(policy_net.state_dict())
# + id="CaUBflT4zvZJ" colab_type="code" colab={}
# create environment
# TODO: Create Environment class
# env = gym.make(ENV_NAME)
# env = make_env(env)
# Build one private environment per candidate agent, plus the core
# environment used by the core agent's main loop.
envs = []
for i in range(AGENT_N):
    env = Environment()
    env = env.get_env()  # unwrap to the underlying gym env
    envs.append(env)
core_env = Environment()
core_env = core_env.get_env()
# Attach each environment to its agent.
for agent, env in zip(agents, envs):
    agent.set_env(env)
core_agent.set_env(core_env)
# + id="wvzpJPerUSIe" colab_type="code" outputId="5a104bdf-d799-4851-9fc7-695b3d279156" colab={"base_uri": "https://localhost:8080/", "height": 238}
# setup optimizer
# optimizer = optim.Adam(policy_net.parameters(), lr=LEARNING_RATE)
# steps_done = 0
# Deprecated
# initialize replay memory
# memory = ReplayMemory(MEMORY_SIZE)
# Hyperdash experiment
# Hyperdash experiment: record every hyper-parameter for this run so they
# appear alongside the logged metrics.
exp = Experiment(EXP_NAME, capture_io=False)
print("Learning rate:{}".format(LEARNING_RATE))
exp.param("Learning rate", LEARNING_RATE)
exp.param("Environment", ENV_NAME)
exp.param("Batch size", BATCH_SIZE)
exp.param("Gamma", GAMMA)
exp.param("Episode start", EPS_START)
exp.param("Episode end", EPS_END)
exp.param("Episode decay", EPS_DECAY)
exp.param("Target update", TARGET_UPDATE)
exp.param("Render", str(RENDER))
exp.param("Initial memory", INITIAL_MEMORY)
exp.param("Memory size", MEMORY_SIZE)
# + id="RD3exzskzIiY" colab_type="code" colab={}
# train model
train(envs, agents, core_env, core_agent, 400, AGENT_N, exp)
exp.end()
# Save the trained core agent's policy network.  The original referenced a
# bare `policy_net`, which is never defined in this notebook flow (its
# creation is commented out above) and would raise NameError.
torch.save(core_agent.policy_net, output_directory + "/dqn_pong_model")
# + id="X9XQKShT4C2_" colab_type="code" colab={}
# EB
exp.end()
# + id="dY3kuj5wUUTY" colab_type="code" colab={}
# test model
test_env = Environment()
# BUGFIX: the original read `env.get_env()` here, but `env` is the leftover
# loop variable from the setup cell (already an unwrapped gym env with no
# get_env()); unwrap the freshly created test environment instead.
test_env = test_env.get_env()
policy_net = torch.load(output_directory + "/dqn_pong_model")
exp_test = Experiment(str(EXP_NAME + "_test_step"), capture_io=False)
test(test_env, 1, policy_net, exp_test, render=False)
exp_test.end()
# + [markdown] id="0X4JjgzwoYwk" colab_type="text"
# ## Video visualization
# + colab_type="code" id="BSXD_DlKwl70" colab={}
# Start a virtual X display so rendering/video capture works headlessly
# (e.g. on Colab), and point the DISPLAY env var at it.
display = pyvirtualdisplay.Display(visible=0, size=(1400, 900)).start()
os.environ["DISPLAY"] = ":" + str(display.display) + "." + str(display.screen)
# + id="1Ewt5ZVWUgIH" colab_type="code" colab={}
def embed_mp4(filename):
    """Embed an mp4 file in the notebook as a base64-encoded HTML5 video tag."""
    # Use a context manager so the file handle is closed promptly
    # (the original left it open).
    with open(filename, 'rb') as video_file:
        video = video_file.read()
    b64 = base64.b64encode(video)
    tag = '''
    <video width="640" height="480" controls>
    <source src="data:video/mp4;base64,{0}" type="video/mp4">
    Your browser does not support the video tag.
    </video>'''.format(b64.decode())
    return IPython.display.HTML(tag)
# + id="9Uem40STcclw" colab_type="code" colab={}
embed_mp4("/content/videos/dqn_pong_video/openaigym.video.0.122.video000000.mp4")
# + colab_type="code" id="ONlurrP13Xgu" colab={}
# # !mv /content/drive/My\ Drive/Colab\ Notebooks/MT/pong_videos /content/drive/My\ Drive/Colab\ Notebooks/MT/pong_videos_1567682751
# # !mv /content/dqn_pong_model /content/drive/My\ Drive/Colab\ Notebooks/MT/pong_videos_1567682751/
# + id="dkMzJ4wU11Rl" colab_type="code" colab={}
# !mkdir /content/drive/My\ Drive/Colab\ Notebooks/MT/pong_videos_1568005544
# + id="BvsieYOEBfP1" colab_type="code" colab={}
# !mv ./PongNoFrameskip-v4_*.log /content/drive/My\ Drive/Colab\ Notebooks/MT/pong_videos_1568005544/
# !mv ./dqn_pong_model /content/drive/My\ Drive/Colab\ Notebooks/MT/pong_videos_1568005544/
# !mv ./videos /content/drive/My\ Drive/Colab\ Notebooks/MT/pong_videos_1568005544/
| Notebooks/Proposal_SF_PongNFS_0_20191018.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Lecture 10 - eigenvalues and eigenvectors
#
# An eigenvector $\boldsymbol{x}$ and corresponding eigenvalue $\lambda$ of a square matrix $\boldsymbol{A}$ satisfy
#
# $$
# \boldsymbol{A} \boldsymbol{x} = \lambda \boldsymbol{x}
# $$
#
# Rearranging this expression,
#
# $$
# \left( \boldsymbol{A} - \lambda \boldsymbol{I}\right) \boldsymbol{x} = \boldsymbol{0}
# $$
#
# The above equation has solutions (other than $\boldsymbol{x} = \boldsymbol{0}$) if
#
# $$
# \det \left( \boldsymbol{A} - \lambda \boldsymbol{I}\right) = 0
# $$
#
# Computing the determinant of an $n \times n$ matrix requires solution of an $n$th degree polynomial. It is known how to compute roots of polynomials up to and including degree four (e.g., see <http://en.wikipedia.org/wiki/Quartic_function>). For matrices with $n > 4$, numerical methods must be used to compute eigenvalues and eigenvectors.
#
# An $n \times n$ matrix will have $n$ eigenvalue/eigenvector pairs (eigenpairs).
#
#
# ## Computing eigenvalues with NumPy
#
# NumPy provides a function to compute eigenvalues and eigenvectors. To demonstrate how to compute eigenpairs, we first create a $5 \times 5$ symmetric matrix:
# +
# Import NumPy and seed random number generator to make generated matrices deterministic
import numpy as np
# Fixed seed so the generated matrices (and hence the printed eigenpairs)
# are the same on every run.
np.random.seed(1)
# Create a symmetric matrix with random entries
# (A + A.T is symmetric for any square A).
A = np.random.rand(5, 5)
A = A + A.T
print(A)
# -
# We can compute the eigenvectors and eigenvalues using the NumPy function `linalg.eig`
# +
# Compute eigenvectors of A
# np.linalg.eig returns (eigenvalues, eigenvectors); column i of the
# eigenvector matrix corresponds to eigenvalue i.
evalues, evectors = np.linalg.eig(A)
print("Eigenvalues: {}".format(evalues))
print("Eigenvectors: {}".format(evectors))
# -
# The $i$th column of `evectors` is the $i$th eigenvector.
#
# ## Symmetric matrices
#
# Note that the above eigenvalues and the eigenvectors are real valued. This is always the case for symmetric matrices. Another feature of symmetric matrices is that the eigenvectors are orthogonal. We can verify this for the above matrix:
# We can also check that the second eigenpair is indeed an eigenpair (Python/NumPy use base 0, so the second eigenpair has index 1):
# +
import itertools
# Build pairs (0,0), (0,1), . . . (0, n-1), (1, 2), (1, 3), . . .
pairs = itertools.combinations_with_replacement(range(len(evectors)), 2)
# Compute dot product of eigenvectors x_{i} \cdot x_{j}
# For a symmetric matrix, distinct eigenvectors are orthogonal, so the
# off-diagonal products should be (numerically) zero.
for p in pairs:
    e0, e1 = p[0], p[1]
    print ("Dot product of eigenvectors {}, {}: {}".format(e0, e1, evectors[:, e0].dot(evectors[:, e1])))
# -
print("Testing Ax and (lambda)x: \n {}, \n {}".format(A.dot(evectors[:,1]), evalues[1]*evectors[:,1]))
# ## Non-symmetric matrices
#
# In general, the eigenvalues and eigenvectors of a non-symmetric, real-valued matrix are complex. We can see this by example:
# +
# A generic (non-symmetric) random matrix: its eigenpairs are complex in general.
B = np.random.rand(5, 5)
evalues, evectors = np.linalg.eig(B)
print(f"Eigenvalues: {evalues}")
print(f"Eigenvectors: {evectors}")
# -
# Unlike symmetric matrices, the eigenvectors are in general not orthogonal, which we can test:
# Compute dot product of eigenvectors x_{i} \cdot x_{j}
# For a non-symmetric matrix the eigenvectors are generally not orthogonal,
# so the off-diagonal products are typically nonzero.
pairs = itertools.combinations_with_replacement(range(len(evectors)), 2)
for p in pairs:
    e0, e1 = p[0], p[1]
    print ("Dot product of eigenvectors {}, {}: {}".format(e0, e1, evectors[:, e0].dot(evectors[:, e1])))
| Lecture10.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# # Generalizing Failure Circumstances
#
# One central question in debugging is: _Does this bug occur in other situations, too?_ In this chapter, we present a technique that is set to _generalize_ the circumstances under which a failure occurs. The DDSET algorithm takes a failure-inducing input and breaks it into individual elements. For each element, it tries to find whether it can be replaced by others in the same category, and if so, it _generalizes_ the concrete element to the very category. The result is a _pattern_ that characterizes the failure condition: "The failure occurs for all inputs of the form `(<expr> * <expr>)`."
# -
from bookutils import YouTubeVideo
YouTubeVideo("PV22XtIQU1s")
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# **Prerequisites**
#
# * You should have read the [chapter on _delta debugging_](DeltaDebugger.ipynb).
# + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "skip"}
import bookutils
# -
import DeltaDebugger
# + [markdown] slideshow={"slide_type": "skip"}
# ## Synopsis
# <!-- Automatically generated. Do not edit. -->
#
# To [use the code provided in this chapter](Importing.ipynb), write
#
# ```python
# >>> from debuggingbook.DDSetDebugger import <identifier>
# ```
#
# and then make use of the following features.
#
#
# This chapter provides a class `DDSetDebugger`, implementing the DDSET algorithm for generalizing failure-inducing inputs. The `DDSetDebugger` is used as follows:
#
# ```python
# with DDSetDebugger(grammar) as dd:
# function(args...)
# dd
# ```
#
# Here, `function(args...)` is a failing function call (= raises an execption) that takes at least one string argument; `grammar` is an [input grammar in fuzzingbook format](https://www.fuzzingbook.org/html/Grammars.html) that matches the format of this argument.
#
# The result is a call of `function()` with an _abstract failure-inducing input_ – a variant of the concrete input in which parts are replaced by placeholders in the form `<name>`, where `<name>` is a nonterminal in the grammar. The failure has been verified to occur for a number of instantiations of `<name>`.
#
# Here is an example of how `DDSetDebugger` works. The concrete failing input `<foo>"bar</foo>` is generalized to an _abstract failure-inducing input_:
#
# ```python
# >>> with DDSetDebugger(SIMPLE_HTML_GRAMMAR) as dd:
# >>> remove_html_markup('<foo>"bar</foo>')
# >>> dd
# remove_html_markup(s='<opening-tag>"<plain-text><closing-tag>')
# ```
# The abstract input tells us that the failure occurs for whatever opening and closing HTML tags as long as there is a double quote between them.
#
# A programmatic interface is available as well. `generalize()` returns a mapping of argument names to (generalized) values:
#
# ```python
# >>> dd.generalize()
# {'s': '<opening-tag>"<plain-text><closing-tag>'}
# ```
# Using `fuzz()`, the abstract input can be instantiated to further concrete inputs, all set to produce the failure again:
#
# ```python
# >>> for i in range(10):
# >>> print(dd.fuzz())
# remove_html_markup(s='<R W5128555F35xrs=\'\'>"</hmarQ9>')
# remove_html_markup(s='<K18 s="" g=\'\'>"Y</V0>')
# remove_html_markup(s='<M0D74 m=\'\' R=\'\'>"</xXD01>')
# remove_html_markup(s='<do>"</Z>')
# remove_html_markup(s='<I j=\'\'>"X</r9>')
# remove_html_markup(s='<I7>"l </e>')
# remove_html_markup(s='<o3 B2l4v="Z" P="\t">"</X7P>')
# remove_html_markup(s='<i9 J="" B=\'\'>"</m>')
# remove_html_markup(s='<V>"</lM2>')
# remove_html_markup(s='<P4n>"</uj1>')
#
# ```
# `DDSetDebugger` can be customized by passing a subclass of `TreeGeneralizer`, which does the gist of the work; for details, see its constructor.
# The full class hierarchy is shown below.
#
# 
#
#
# -
# ## A Failing Program
#
# As with previous chapters, we use `remove_html_markup()` as our ongoing example. This function is set to remove HTML markup tags (like `<em>`) from a given string `s`, returning the plain text only. We use the version from [the chapter on assertions](Assertions.ipynb), using an assertion as postcondition checker.
def remove_html_markup(s):  # type: ignore
    """Remove HTML tags from `s`, returning the plain text only.

    This is the chapter's running example; it contains a deliberate defect
    (operator precedence in the quote check) that the chapter analyzes, so
    the behavior must not be 'fixed' here.
    """
    in_tag = False
    in_quote = False
    result = ""

    for ch in s:
        if ch == '<' and not in_quote:
            in_tag = True
        elif ch == '>' and not in_quote:
            in_tag = False
        elif ch == '"' or ch == "'" and in_tag:
            # NOTE: `and` binds tighter than `or`, so a double quote toggles
            # quote mode even outside a tag -- the intentional bug.
            in_quote = not in_quote
        elif not in_tag:
            result = result + ch

    # postcondition
    assert '<' not in result and '>' not in result

    return result
# For the most inputs, `remove_html_markup()` works just as expected:
remove_html_markup("Be <em>quiet</em>, he said")
# There are inputs, however, for which it fails:
BAD_INPUT = '<foo>"bar</foo>'
from ExpectError import ExpectError
with ExpectError(AssertionError):
remove_html_markup(BAD_INPUT)
from bookutils import quiz
# In contrast to the other chapters, our aim now is not to immediately go and debug `remove_html_markup()`. Instead, we focus on another important question:
#
# > Under which conditions precisely does `remove_html_markup()` fail?
# This question can be generalized to
#
# > What is the set of inputs for which `remove_html_markup()` fails?
# Our plan for this is to _generalize_ concrete inputs (such as `BAD_INPUT`) into *abstract failure-inducing inputs*. These are patterns formed from a concrete input, but in which specific _placeholders_ indicate sets of inputs that are permitted. In the abstract failure-inducing input
#
# ```html
# <opening-tag>"bar<closing-tag>
# ```
#
# for instance, `<opening-tag>` and `<closing-tag>` are placeholders for opening and closing HTML tags, respectively. The pattern indicates that any opening HTML tag and closing HTML tag can be present in the input, as long as the enclosed text reads `"bar`.
# Given a concrete failure-inducing input, our aim is to _generalize_ it as much as possible to such an abstract failure-inducing input. The resulting pattern should then
#
# * capture the _circumstances_ under which the program fails;
# * allow for _test generation_ by instantiating the placeholders;
# * help ensuring our fix is as _general as possible_.
quiz("If `s = '<foo>\"bar</foo>'` (i.e., `BAD_INPUT`), "
"what is the value of `out` such that the assertion fails?",
[
'`bar`',
'`bar</foo>`',
'`"bar</foo>`',
'`<foo>"bar</foo>`',
], '9999999 // 4999999')
# ## Grammars
#
# To determine abstract failure-inducing inputs, we need means to determine and characterize _sets of inputs_ – known in computer science as _languages_. To formally describe languages, the field of *formal languages* has devised a number of *language specifications* that describe a language. *Regular expressions* represent the simplest class of these languages to denote sets of strings: The regular expression `[a-z]*`, for instance, denotes a (possibly empty) sequence of lowercase letters. *Automata theory* connects these languages to automata that accept these inputs; *finite state machines*, for instance, can be used to specify the language of regular expressions.
# Regular expressions are great for not-too-complex input formats, and the associated finite state machines have many properties that make them great for reasoning. To specify more complex inputs, though, they quickly encounter limitations. At the other end of the language spectrum, we have *universal grammars* that denote the language accepted by *Turing machines*. A Turing machine can compute anything that can be computed; and with Python being Turing-complete, this means that we can also use a Python program $p$ to specify or even enumerate legal inputs. But then, computer science theory also tells us that each such program has to be written specifically for the input to be considered, which is not the level of automation we want.
# The middle ground between regular expressions and Turing machines is covered by *grammars*. Grammars are among the most popular (and best understood) formalisms to formally specify input languages. Using a grammar, one can express a wide range of the properties of an input language. Grammars are particularly great for expressing the *syntactical structure* of an input, and are the formalism of choice to express nested or recursive inputs. The grammars we use are so-called *context-free grammars*, one of the easiest and most popular grammar formalisms.
# A grammar is defined as a mapping of _nonterminal_ symbols (denoted in `<angle brackets>` to lists of alternative _expansions_, which are strings containing _terminal_ symbols and possibly more _nonterminal_ symbols. To make the writing of grammars as simple as possible, we adopt the [fuzzingbook](https://www.fuzzingbook.org/) format that is based on strings and lists.
import fuzzingbook
# Fuzzingbook grammars take the format of a _mapping_ between symbol names and expansions, where expansions are _lists_ of alternatives.
# ignore
from typing import Any, Callable, Optional, Type, Tuple
from typing import Dict, Union, List, cast, Generator
Grammar = Dict[str, # A grammar maps strings...
List[
Union[str, # to list of strings...
Tuple[str, Dict[str, Any]] # or to pairs of strings and attributes.
]
]
]
# A one-rule grammar for digits thus takes the form
# A minimal one-rule grammar: <start> expands directly into a single digit.
DIGIT_GRAMMAR: Grammar = {
    "<start>":
        ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
}
# which means that the `<start>` symbol can be expanded into any of the digits listed.
# A full grammar for arithmetic expressions looks like this:
# Grammar for arithmetic expressions.  The rule ordering encodes the usual
# precedence: +/- bind loosest, then */ /, then factors (signs, parentheses,
# numbers).
EXPR_GRAMMAR: Grammar = {
    "<start>":
        ["<expr>"],
    "<expr>":
        ["<term> + <expr>", "<term> - <expr>", "<term>"],
    "<term>":
        ["<factor> * <term>", "<factor> / <term>", "<factor>"],
    "<factor>":
        ["+<factor>",
         "-<factor>",
         "(<expr>)",
         "<integer>.<integer>",  # decimal numbers
         "<integer>"],
    "<integer>":
        ["<digit><integer>", "<digit>"],
    "<digit>":
        ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
}
# From such a grammar, one can easily generate inputs that conform to the grammar.
from fuzzingbook.GrammarFuzzer import GrammarFuzzer
simple_expr_fuzzer = GrammarFuzzer(EXPR_GRAMMAR)
for i in range(10):
fuzz_expr = simple_expr_fuzzer.fuzz()
print(fuzz_expr)
# Nonterminals as found in the grammar make natural _placeholders_ in abstract failure-inducing inputs. If we know, for instance, that it is not just the concrete failure-inducing input
#
# ```python
# (2 * 3)
# ```
#
# but the abstract failure-inducing input
#
# ```html
# (<expr> * <expr>)
# ```
#
# that causes the failure, we immediately see that the error is due to the multiplication operator rather than its operands.
# Coming back to our `remove_html_markup()` example, let us create a simple grammar for HTML expressions. A `<html>` element is either plain text or tagged text.
# + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"}
# Simple HTML grammar, built up incrementally over the following cells.
SIMPLE_HTML_GRAMMAR: Grammar = {
    "<start>":
        ["<html>"],
    # An HTML element is either plain text or text enclosed in tags.
    "<html>":
        ["<plain-text>", "<tagged-text>"],
}
# -
# Plain text is a simple (possibly empty) sequence of letter, digits, punctuation, and whitespace. (Note how `<plain-text>` is either empty or some character followed by more plain text.) The characters `<` and `>` are not allowed, though.
import string
# + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"}
SIMPLE_HTML_GRAMMAR.update({
    # Plain text: a possibly empty sequence of characters, excluding < and >.
    "<plain-text>":
        ["", "<plain-char><plain-text>"],
    "<plain-char>":
        ["<letter>", "<digit>", "<other>", "<whitespace>"],
    "<letter>": list(string.ascii_letters),
    "<digit>": list(string.digits),
    # All punctuation except the angle brackets reserved for tags.
    "<other>": list(string.punctuation.replace('<', '').replace('>', '')),
    "<whitespace>": list(string.whitespace)
})
# -
# Tagged text is a bit more complicated. We have opening tags `<foo>`, followed by some more HTML material, and then closed by a closing tag `</foo>`. (We do not insist that the two tags match.) A self-closing tag has the form `<br/>`. For compatibility reasons, we also allow just opening tags without closing tags, as in `<img>`.
# + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"}
SIMPLE_HTML_GRAMMAR.update({
    # Tagged text: <tag>...</tag>, a self-closing <tag/>, or a lone opening
    # tag (for compatibility).  Matching tag names are not enforced.
    "<tagged-text>":
        ["<opening-tag><html><closing-tag>",
         "<self-closing-tag>",
         "<opening-tag>"],
})
# -
# Since the characters `<` and `>` are already reserved for denoting nonterminal symbols, we use the special nonterminal symbols `<lt>` and `<gt>` that expand into `<` and `>`, respectively,
# + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"}
SIMPLE_HTML_GRAMMAR.update({
    "<opening-tag>":
        ["<lt><id><gt>",
         "<lt><id><attrs><gt>"],
    # < and > are reserved for nonterminal names, so the literal angle
    # brackets get their own nonterminals.
    "<lt>": ["<"],
    "<gt>": [">"],
    # Tag identifier: a letter followed by more letters/digits.
    "<id>":
        ["<letter>", "<id><letter>", "<id><digit>"],
    "<closing-tag>":
        ["<lt>/<id><gt>"],
    "<self-closing-tag>":
        ["<lt><id><attrs>/<gt>"],
})
# -
# Finally, HTML tags can have attributes, which are enclosed in quotes.
# + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"}
SIMPLE_HTML_GRAMMAR.update({
    "<attrs>":
        ["<attr>", "<attr><attrs>" ],
    # Attribute values may be single- or double-quoted.
    "<attr>":
        [" <id>='<plain-text>'",
         ' <id>="<plain-text>"'],
})
# -
# Again, we can generate inputs from the grammar.
simple_html_fuzzer = GrammarFuzzer(SIMPLE_HTML_GRAMMAR)
for i in range(10):
fuzz_html = simple_html_fuzzer.fuzz()
print(repr(fuzz_html))
# Such inputs, of course, are great for systematic testing. Our sister book, [the fuzzing book](https://www.fuzzingbook.org/), covers these and more.
# ## Derivation Trees
#
# To produce inputs from a grammar, the fuzzingbook `GrammarFuzzer` makes use of a structure called a *derivation tree* (also known as *syntax tree*). A derivation tree encodes the individual expansion steps undertaken while producing the output.
DerivationTree = Tuple[str, Optional[List[Any]]]
# Let us illustrate derivation trees by example, using the last HTML output we produced.
fuzz_html
# The `GrammarFuzzer` attribute `derivation_tree` holds the last tree used to produced this input. We can visualize the tree as follows:
# ignore
from graphviz import Digraph
# ignore
def display_tree(tree: DerivationTree) -> Digraph:
    """Render `tree` as a graphviz `Digraph`.

    Wraps `fuzzingbook.GrammarFuzzer.display_tree`, customizing the
    node style: nonterminals in blue, terminals in green."""
    def graph_attr(dot: Digraph) -> None:
        # Borderless boxes in a monospace font
        dot.attr('node', shape='box', color='white', margin='0.0,0.0')
        dot.attr('node',
                 fontname="'Fira Mono', 'Source Code Pro', 'Courier', monospace")

    def node_attr(dot: Digraph, nid: str, symbol: str, ann: str) -> None:
        # Start from the fuzzingbook defaults, then override the color:
        # symbols in <...> are nonterminals (blue); everything else
        # is terminal text (green)
        fuzzingbook.GrammarFuzzer.default_node_attr(dot, nid, symbol, ann)
        if symbol.startswith('<'):
            dot.node(repr(nid), fontcolor='#0060a0')
        else:
            dot.node(repr(nid), fontcolor='#00a060')
        dot.node(repr(nid), scale='2')

    return fuzzingbook.GrammarFuzzer.display_tree(tree,
                                                  node_attr=node_attr,
                                                  graph_attr=graph_attr)
display_tree(simple_html_fuzzer.derivation_tree)
# From top to bottom, we see that the input was constructed from a `<start>` symbol, which then expanded into `html`, which then expanded into HTML text, and so on. Multiple children in a tree stand for a concatenation of individual symbols.
# Internally, these trees come as pairs `(symbol, children)`, where `symbol` is the name of a node (say, `<html>`), and `children` is a (possibly empty) list of subtrees. Here are the topmost nodes of the above tree:
import pprint
pp = pprint.PrettyPrinter(depth=7)
pp.pprint(simple_html_fuzzer.derivation_tree)
# To produce abstract failure-inducing patterns, we will work on this very structure. The idea is to
#
# 1. systematically replace subtrees by other, generated, compatible subtrees (e.g. replace one `<html>` subtree in the concrete input by some other generated `<html>` subtree);
# 2. see whether these subtrees also result in failures; and
# 3. if they do, use the nonterminal (`<html>`) as a placeholder in the pattern.
#
# This will involve some subtree manipulation, construction, and finally testing. First of all, though, we need to be able to turn an _existing input_ into a derivation tree.
# ## Parsing
#
# The activity of creating a structure out of an unstructured input is called _parsing_. Generally speaking, a _parser_ uses a _grammar_ to create a _derivation tree_ (also called *parse tree* in parsing contexts) from a string input.
# Again, there's a whole body of theory (and practice!) around constructing parsers. We make our life simple by using an existing parser (again, from [the fuzzing book](https://www.fuzzingbook.org/Parser.html)), which does just what we want. The `EarleyParser` is instantiated with a grammar such as `SIMPLE_HTML_GRAMMAR`:
from fuzzingbook.Parser import Parser, EarleyParser # minor dependency
simple_html_parser = EarleyParser(SIMPLE_HTML_GRAMMAR)
# Its method `parse()` returns an iterator over multiple possible derivation trees. (There can be multiple trees because the grammar could be ambiguous). We are only interested in the first such tree. Let us parse `BAD_INPUT` and inspect the resulting ~parse tree~ ~syntax tree~ derivation tree:
bad_input_tree = list(simple_html_parser.parse(BAD_INPUT))[0]
display_tree(bad_input_tree)
# This derivation tree has the same structure as the one created from our `GrammarFuzzer` above. We see how the `<tagged-text>` is composed of three elements:
#
# 1. an `<opening-tag>` (`<foo>`);
# 2. a `<html>` element which becomes `<plain-text>` (`"bar`); and
# 3. a `<closing-tag>` (`</foo>`).
# We can easily turn the tree back into a string. The method `tree_to_string()` traverses the tree left-to-right and joins all nonterminal symbols.
from fuzzingbook.GrammarFuzzer import tree_to_string, all_terminals
tree_to_string(bad_input_tree)
assert tree_to_string(bad_input_tree) == BAD_INPUT
# With this, we can now
#
# * parse an input into a tree structure;
# * (re-)create parts of the tree structure; and
# * turn the tree back into an input string.
# ## Mutating the Tree
#
# We introduce a class `TreeMutator` that is set to mutate a tree. Its constructor takes a grammar and a tree.
from fuzzingbook.Grammars import is_valid_grammar
class TreeMutator:
    """Grammar-based mutations of derivation trees."""

    def __init__(self, grammar: Grammar, tree: DerivationTree,
                 fuzzer: Optional[GrammarFuzzer] = None, log: Union[bool, int] = False):
        """
        Constructor.
        `grammar` is the underlying grammar;
        `tree` is the derivation tree to operate on;
        `fuzzer` is the grammar fuzzer to use (default: `GrammarFuzzer`);
        `log` turns on logging (use an int for verbosity levels).
        """
        assert is_valid_grammar(grammar)

        self.grammar = grammar
        self.tree = tree
        self.log = log
        # Fall back to a plain GrammarFuzzer if none was supplied
        self.fuzzer = fuzzer if fuzzer is not None else GrammarFuzzer(grammar)
# ### Referencing Subtrees
# To reference individual elements in the tree, we introduce the concept of a _path_. A path is a list of numbers indicating the children (starting with 0) we should follow. A path `[0, 0, 0, ..., 0]` stands for the leftmost child in a tree.
TreePath = List[int]
# The method `get_subtree()` returns the subtree for a given path.
class TreeMutator(TreeMutator):
    def get_subtree(self, path: TreePath, tree: Optional[DerivationTree] = None) -> DerivationTree:
        """Access a subtree based on `path` (a list of children numbers)"""
        node = self.tree if tree is None else tree

        # Walk down the tree, one child index at a time;
        # stop early if we hit a leaf (children is None)
        for child_index in path:
            _, children = node
            if children is None:
                break
            node = children[child_index]

        return node
# Here's `get_subtree()` in action. We instantiate a `TreeMutator` on the `BAD_INPUT` tree as shown above and return the element at the path `[0, 0, 1, 0]` – i.e. follow the leftmost edge twice, than the second-to-leftmost edge, then the leftmost edge again. This gives us the `<plain-text>` subtree representing the string `"bar`:
def bad_input_tree_mutator() -> TreeMutator:
return TreeMutator(SIMPLE_HTML_GRAMMAR, bad_input_tree, log=2)
plain_text_subtree = bad_input_tree_mutator().get_subtree([0, 0, 1, 0])
pp.pprint(plain_text_subtree)
tree_to_string(plain_text_subtree)
# ignore
def primes_generator() -> Generator[int, None, None]:
    """Yield the prime numbers 2, 3, 5, 7, ... without end."""
    # Trial division against previously found primes, odd candidates only.
    # Adapted from https://www.python.org/ftp/python/doc/nluug-paper.ps
    known_primes = [2]
    yield 2

    candidate = 3
    while True:
        is_prime = True
        for p in known_primes:
            if p * p > candidate:
                # No divisor up to sqrt(candidate): it is prime
                break
            if candidate % p == 0:
                is_prime = False
                break
        if is_prime:
            known_primes.append(candidate)
            yield candidate
        candidate += 2
# ignore
prime_numbers = primes_generator()
quiz("In `bad_input_tree`, what is "
" the subtree at the path `[0, 0, 2, 1]` as string?",
[
f"`{tree_to_string(bad_input_tree_mutator().get_subtree([0, 0, 2, 0]))}`",
f"`{tree_to_string(bad_input_tree_mutator().get_subtree([0, 0, 2, 1]))}`",
f"`{tree_to_string(bad_input_tree_mutator().get_subtree([0, 0, 2]))}`",
f"`{tree_to_string(bad_input_tree_mutator().get_subtree([0, 0, 0]))}`",
], 'next(prime_numbers)', globals()
)
# ### Creating new Subtrees
#
# The method `new_tree()` creates a new subtree for the given `<start_symbol>` according to the rules of the grammar. It invokes `expand_tree()` on the given `GrammarFuzzer` – a method that takes an initial (empty) tree and expands it until no more expansions are left.
class TreeMutator(TreeMutator):
    def new_tree(self, start_symbol: str) -> DerivationTree:
        """Return a freshly generated subtree for `start_symbol`."""
        if self.log >= 2:
            print(f"Creating new tree for {start_symbol}")

        # Start from a childless node and let the fuzzer expand it fully
        empty_node = (start_symbol, None)
        return self.fuzzer.expand_tree(empty_node)
# Here is an example of `new_tree()`:
plain_text_tree = cast(TreeMutator, bad_input_tree_mutator()).new_tree('<plain-text>')
display_tree(plain_text_tree)
tree_to_string(plain_text_tree)
# ### Mutating the Tree
#
# With us now being able to
# * access a particular path in the tree (`get_subtree()`) and
# * create a new subtree (`new_tree()`),
#
# we can mutate the tree at a given path. This is the task of `mutate()`.
class TreeMutator(TreeMutator):
    def mutate(self, path: TreePath, tree: Optional[DerivationTree] = None) -> DerivationTree:
        """Return a new tree mutated at `path`"""
        if tree is None:
            tree = self.tree
        assert tree is not None

        symbol, children = tree

        # At the target node (or upon reaching a leaf):
        # replace it by a freshly generated subtree for the same symbol
        if not path or children is None:
            return self.new_tree(symbol)

        # Otherwise, rebuild this node with the mutated child substituted
        index, *remainder = path
        updated_children = list(children)
        updated_children[index] = self.mutate(remainder, children[index])
        return symbol, updated_children
# Here is an example of `mutate()` in action. We mutate `bad_input_tree` at the path `[0, 0, 1, 0]` – that is, `<plain-text>`:
mutated_tree = cast(TreeMutator, bad_input_tree_mutator()).mutate([0, 0, 1, 0])
display_tree(mutated_tree)
# We see that the `<plain-text>` subtree is now different, which also becomes evident in the string representation.
tree_to_string(mutated_tree)
# ## Generalizing Trees
#
# Now for the main part – finding out which parts of a tree can be generalized. Our idea is to _test_ a finite number of mutations to a subtree (say, 10). If all of these tests fail as well, then we assume we can generalize the subtree to a placeholder.
# We introduce a class `TreeGeneralizer` for this purpose. On top of `grammar` and `tree` already used for the `TreeMutator` constructor, the `TreeGeneralizer` also takes a `test` function.
class TreeGeneralizer(TreeMutator):
    """Determine which parts of a derivation tree can be generalized."""

    def __init__(self, grammar: Grammar, tree: DerivationTree, test: Callable,
                 max_tries_for_generalization: int = 10, **kwargs: Any) -> None:
        """
        Constructor. `grammar` and `tree` are as in `TreeMutator`.
        `test` is a function that takes a string and either
          * raises an exception, indicating test failure,
          * or returns normally, indicating test success.
        `max_tries_for_generalization` is how many instantiations
        must fail before a subtree is deemed generalizable.
        """
        super().__init__(grammar, tree, **kwargs)
        self.max_tries_for_generalization = max_tries_for_generalization
        self.test = test
# The `test` function is used in `test_tree()`, returning `False` if the test fails (raising an exception), and `True` if the test passes (no exception).
class TreeGeneralizer(TreeGeneralizer):
    def test_tree(self, tree: DerivationTree) -> bool:
        """Return True if testing `tree` passes, else False"""
        s = tree_to_string(tree)
        if self.log:
            print(f"Testing {repr(s)}...", end="")

        try:
            self.test(s)
        except Exception as exc:
            # Any exception counts as a failing test
            if self.log:
                print(f"FAIL ({type(exc).__name__})")
            return False

        # No exception: the test passed
        if self.log:
            print("PASS")
        return True
# ### Testing for Generalization
#
# The `can_generalize()` method brings the above methods together. It creates a number of tree mutations at the given path, and returns True if all of them produce a failure. (Note that this is not as sophisticated as our [delta debugger](DeltaDebugger.ipynb) implementation, which also checks that the _same_ error occurs.)
class TreeGeneralizer(TreeGeneralizer):
    def can_generalize(self, path: TreePath, tree: Optional[DerivationTree] = None) -> bool:
        """Return True if the subtree at `path` can be generalized."""
        # Generalizable = every tried random instantiation of the subtree
        # still makes the test fail. A single passing instantiation
        # refutes the generalization (short-circuits via all()).
        return all(not self.test_tree(self.mutate(path, tree))
                   for _ in range(self.max_tries_for_generalization))
# Let us put `TreeGeneralizer` into action. We can directly use `remove_html_markup()` as test function.
def bad_input_tree_generalizer(**kwargs: Any) -> TreeGeneralizer:
return TreeGeneralizer(SIMPLE_HTML_GRAMMAR, bad_input_tree,
remove_html_markup, **kwargs)
# On our `BAD_INPUT` (and its tree), can we generalize the root `<html>` node? In other words, does the failure occur for all possible `<html>` inputs?
bad_input_tree_generalizer(log=True).can_generalize([0])
# The answer is no. The first alternative passes the test; hence no generalization.
# How about the middle `<plain_text>` part? Can we generalize this?
bad_input_tree_generalizer(log=True).can_generalize([0, 0, 1, 0])
# The answer is no – just as above.
# How about the closing tag? Can we generalize this one?
bad_input_tree_generalizer(log=True).can_generalize([0, 0, 2])
# Yes, we can! All alternate instantiations of `<closing-tag>` result in a failure.
quiz("Is this also true for `<opening-tag>`?",
[
"Yes",
"No"
], '("Yes" == "Yes") + ("No" == "No")')
# Note that the above does not hold for `<opening-tag>`. If the attribute value contains a quote character, it will extend to the end of the input. This is another error, but not caught by our assertion; hence, the input will be flagged as passing:
BAD_ATTR_INPUT = '<foo attr="\'">bar</foo>'
remove_html_markup(BAD_ATTR_INPUT)
# The effect of this is that there are patterns for `<opening-tag>` which do not cause the failure to occur; hence, `<opening-tag>` is not a fully valid generalization.
# This, however, becomes apparent only if one of our generated tests includes a quote character in the attribute value. Since quote characters are as likely (or as unlikely) to appear as other characters, this effect may not become apparent in our default 10 tests:
bad_input_tree_generalizer().can_generalize([0, 0, 0])
# It will become apparent, however, as we increase the number of tests:
bad_input_tree_generalizer(max_tries_for_generalization=100, log=True).can_generalize([0, 0, 0])
# We see that our approach may _overgeneralize_ – producing a generalization that may be too lenient. In practice, this is not too much of a problem, as we would be interested in characterizing cases that trigger the failure, rather than characterizing a small subset that does not trigger the failure.
# ### Generalizable Paths
#
# Using `can_generalize()`, we can devise a method `generalizable_paths()` that returns all paths in the tree that can be generalized.
class TreeGeneralizer(TreeGeneralizer):
    def find_paths(self,
                   predicate: Callable[[TreePath, DerivationTree], bool],
                   path: Optional[TreePath] = None,
                   tree: Optional[DerivationTree] = None) -> List[TreePath]:
        """
        Return a list of all paths for which `predicate` holds.
        `predicate` is a function `predicate`(`path`, `tree`), where
        `path` denotes a subtree in `tree`. If `predicate()` returns
        True, `path` is included in the returned list.
        """
        if path is None:
            path = []
        assert path is not None

        if tree is None:
            tree = self.tree
        assert tree is not None

        # NOTE(review): the node examined is looked up via `get_subtree(path)`,
        # i.e. relative to `self.tree`; `tree` is only passed to `predicate`.
        # Looks intentional (predicate gets the whole tree) - confirm.
        symbol, children = self.get_subtree(path)

        # If the whole subtree at `path` qualifies, do not descend further
        if predicate(path, tree):
            return [path]

        paths = []
        if children is not None:
            for i, child in enumerate(children):
                # Only recurse into nonterminal children;
                # terminals cannot be generalized
                child_symbol, _ = child
                if child_symbol in self.grammar:
                    paths += self.find_paths(predicate, path + [i])

        return paths

    def generalizable_paths(self) -> List[TreePath]:
        """Return a list of all paths whose subtrees can be generalized."""
        return self.find_paths(self.can_generalize)
# Here is `generalizable_paths()` in action. We obtain all (paths to) subtrees that can be generalized:
bad_input_generalizable_paths = \
cast(TreeGeneralizer, bad_input_tree_generalizer()).generalizable_paths()
bad_input_generalizable_paths
# To convert these subtrees into abstract failure-inducing patterns, the method `generalize_path()` returns a copy of the tree with the subtree replaced by a nonterminal without children:
class TreeGeneralizer(TreeGeneralizer):
    def generalize_path(self, path: TreePath,
                        tree: Optional[DerivationTree] = None) -> DerivationTree:
        """Return a copy of the tree in which the subtree at `path`
        is generalized (= replaced by a nonterminal without children)"""
        if tree is None:
            tree = self.tree
        assert tree is not None

        symbol, children = tree

        # Reached the target (or a leaf): keep the nonterminal,
        # strip its children - this is the "placeholder" form
        if not path or children is None:
            return symbol, None

        # Rebuild this node, substituting the generalized child
        index, *remainder = path
        replaced_children = list(children)
        replaced_children[index] = self.generalize_path(remainder, children[index])
        return symbol, replaced_children
# The function `all_terminals()` expands these placeholders:
all_terminals(cast(TreeGeneralizer, bad_input_tree_generalizer()).generalize_path([0, 0, 0]))
# Finally, the method `generalize()` obtains a tree in which all generalizable paths actually are generalized:
class TreeGeneralizer(TreeGeneralizer):
    def generalize(self) -> DerivationTree:
        """Returns a copy of the tree in which all generalizable subtrees
        are generalized (= replaced by nonterminals without children)"""
        result = self.tree
        assert result is not None

        # Apply each generalization in turn, threading the tree through
        for generalizable_path in self.generalizable_paths():
            result = self.generalize_path(generalizable_path, result)

        return result
abstract_failure_inducing_input = cast(TreeGeneralizer, bad_input_tree_generalizer()).generalize()
# This gives us the final generalization of `BAD_INPUT`. In the abstract failure-inducing input, all generalizable elements are generalized.
all_terminals(abstract_failure_inducing_input)
# We see that to obtain the failure, it suffices to have an `<opening-tag>`, followed by a quote and (any) `<plain-text>` and (any) `<closing-tag>`. Clearly, all that it takes to produce the failure is to have a double quote in the plain text.
# Also note how this diagnosis was reached through _experiments_ only – just as with [delta debugging](DeltaDebugger.ipynb), we could treat the program under test as a black box. In contrast to delta debugging, however, we obtain an _abstraction_ that generalizes the circumstances under which a given failure occurs.
# ## Fuzzing with Patterns
#
# One neat side effect of abstract failure-inducing patterns is that they can be easily instantiated into further test cases, all set to reproduce the failure in question. This gives us a test suite we can later test our fix against.
# The method `fuzz_tree()` takes a tree representing an abstract failure-inducing input and instantiates all missing subtrees.
import copy
class TreeGeneralizer(TreeGeneralizer):
    def fuzz_tree(self, tree: DerivationTree) -> DerivationTree:
        """Return an instantiated copy of `tree`."""
        # Expand a deep copy so the abstract pattern itself stays intact
        return self.fuzzer.expand_tree(copy.deepcopy(tree))
bitg = cast(TreeGeneralizer, bad_input_tree_generalizer())
for i in range(10):
print(all_terminals(bitg.fuzz_tree(abstract_failure_inducing_input)))
# We can take these inputs and see whether they reproduce the failure in question:
# +
# Measure how often instantiations of the abstract pattern
# reproduce the failure. Note the naming: `successes` counts runs
# where the AssertionError IS reproduced (the pattern "succeeded"),
# `failures` counts runs where it is not.
successes = 0
failures = 0
trials = 1000

for i in range(trials):
    test_input = all_terminals(
        bitg.fuzz_tree(abstract_failure_inducing_input))

    try:
        remove_html_markup(test_input)
    except AssertionError:
        successes += 1
    else:
        failures += 1
# -
successes, failures
# We get an overall failure rate of ~98%, which is not bad at all.
failures / 1000
# In our case, it is _overgeneralization_ (as discussed above) that is responsible for not reaching a 100% rate. (In all generality, we are trying to approximate the behavior of a Turing machine with a context free grammar, which is, well, always an approximation.) However, even a lower rate would still be useful, as any additional test case that reproduces a failure helps in ensuring the final fix is complete.
# ## Putting it all Together
#
# Let us now put together all this in a more convenient package that does not require the user to parse and unparse derivation trees.
# Our `DDSetDebugger` is modeled after the `DeltaDebugger` from [the chapter on delta debugging](DeltaDebugger.ipynb). It is to be used as
#
# ```python
# with DDSetDebugger(grammar) as dd:
# some_failing_function(...)
# ```
#
# After that, evaluating `dd` yields a generalized abstract failure-inducing input as a string.
# Since `DDSetDebugger` accepts only one grammar, the function to be debugged should have exactly one string argument (besides other arguments); this string must fit the grammar.
# ### Constructor
# The constructor puts together the various components. It allows for customization by subclassing.
from DeltaDebugger import CallCollector
class DDSetDebugger(CallCollector):
    """
    Debugger implementing the DDSET algorithm for abstracting failure-inducing inputs.
    """

    def __init__(self, grammar: Grammar,
                 generalizer_class: Type = TreeGeneralizer,
                 parser: Optional[Parser] = None,
                 **kwargs: Any) -> None:
        """Constructor.
        `grammar` is an input grammar in fuzzingbook format.
        `generalizer_class` is the tree generalizer class to use
        (default: `TreeGeneralizer`)
        `parser` is the parser to use (default: `EarleyParser(grammar)`).
        All other keyword args are passed to the tree generalizer, notably:
        `fuzzer` - the fuzzer to use (default: `GrammarFuzzer`), and
        `log` - enables debugging output if True.
        """
        super().__init__()
        assert is_valid_grammar(grammar)

        self.grammar = grammar
        self.generalizer_class = generalizer_class
        self.parser = parser if parser is not None else EarleyParser(grammar)
        self.kwargs = kwargs

        # Caches filled by generalize(); reused by later fuzz() calls
        self.generalizers: Dict[str, TreeGeneralizer] = {}
        self.generalized_trees: Dict[str, DerivationTree] = {}
        self.generalized_args: Dict[str, Any] = {}
# ### Generalizing Arguments
# The method `generalize()` is the main entry point. For all string arguments collected in the first function call, it generalizes the arguments and returns an abstract failure-inducing string.
class DDSetDebugger(DDSetDebugger):
    def generalize(self) -> Dict[str, Any]:
        """
        Generalize arguments seen. For each function argument,
        produce an abstract failure-inducing input that characterizes
        the set of inputs for which the function fails.
        """
        # Cached: generalization is expensive, so compute it only once
        if self.generalized_args:
            return self.generalized_args

        self.generalized_args = copy.deepcopy(self.args())
        self.generalized_trees = {}
        self.generalizers = {}

        for arg in self.args():
            # NOTE(review): `test` captures the loop variable `arg` by
            # reference (late binding). This is safe only because
            # `gen.generalize()` below invokes `test` within the same
            # iteration; a stored generalizer re-invoking `test` later
            # would see the *last* `arg`.
            def test(value: Any) -> Any:
                return self.call({arg: value})

            value = self.args()[arg]
            # Only string arguments can be parsed with the grammar;
            # all other arguments are left as they are
            if isinstance(value, str):
                tree = list(self.parser.parse(value))[0]
                gen = self.generalizer_class(self.grammar, tree, test,
                                             **self.kwargs)
                generalized_tree = gen.generalize()

                self.generalizers[arg] = gen
                self.generalized_trees[arg] = generalized_tree
                self.generalized_args[arg] = all_terminals(generalized_tree)

        return self.generalized_args
class DDSetDebugger(DDSetDebugger):
    def __repr__(self) -> str:
        """Return a string representation of the generalized call."""
        generalized = self.generalize()
        return self.format_call(generalized)
# Here is an example of how `DDSetDebugger` would be used on our `BAD_INPUT` example. Simply evaluating the debugger yields a call with a generalized input.
with DDSetDebugger(SIMPLE_HTML_GRAMMAR) as dd:
remove_html_markup(BAD_INPUT)
dd
# ### Fuzzing
#
# The `fuzz()` method produces instantiations of the abstract failure-inducing pattern.
class DDSetDebugger(DDSetDebugger):
    def fuzz_args(self) -> Dict[str, Any]:
        """
        Return arguments randomly instantiated
        from the abstract failure-inducing pattern.
        """
        # Make sure the abstract patterns have been computed
        if not self.generalized_trees:
            self.generalize()

        args = copy.deepcopy(self.generalized_args)
        for name in args:
            # Only arguments with an abstract tree get re-instantiated;
            # everything else keeps its (copied) original value
            if name in self.generalized_trees:
                generalizer = self.generalizers[name]
                concrete_tree = generalizer.fuzz_tree(self.generalized_trees[name])
                args[name] = all_terminals(concrete_tree)

        return args

    def fuzz(self) -> str:
        """
        Return a call with arguments randomly instantiated
        from the abstract failure-inducing pattern.
        """
        return self.format_call(self.fuzz_args())
# Here are some examples of `fuzz()` in action:
with DDSetDebugger(SIMPLE_HTML_GRAMMAR) as dd:
remove_html_markup(BAD_INPUT)
dd.fuzz()
dd.fuzz()
dd.fuzz()
# These can be fed into `eval()`, set to produce more failing calls.
with ExpectError(AssertionError):
eval(dd.fuzz())
# ## More Examples
#
# Let us apply `DDSetDebugger` on more examples.
# ### Square Root
#
# Our first example is the `square_root()` function from [the chapter on assertions](Assertions.ipynb).
from Assertions import square_root # minor dependency
# The `square_root()` function fails on a value of `-1`:
with ExpectError(AssertionError):
square_root(-1)
# We define a grammar for its arguments:
# + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"}
INT_GRAMMAR: Grammar = {
"<start>":
["<int>"],
"<int>":
["<positive-int>", "-<positive-int>"],
"<positive-int>":
["<digit>", "<nonzero-digit><positive-int>"],
"<nonzero-digit>": list("123456789"),
"<digit>": list(string.digits),
}
# -
# The test function takes a string and converts it into an integer:
def square_root_test(s: str) -> None:
    """Test adapter: convert the string argument `s` to an int
    and feed it to `square_root()` (fails for negative values)."""
    return square_root(int(s))
# With this, we can go and see whether we can generalize a failing input:
with DDSetDebugger(INT_GRAMMAR, log=True) as dd_square_root:
square_root_test("-1")
dd_square_root
# Success! Using `DDSetDebugger`, we have nicely generalized the failure-inducing input to a pattern `-<positive-int>` that translates into "any negative number".
# ### Middle
#
# The `middle()` function from [the chapter on statistical debugging](StatisticalDebugger.ipynb) returns the middle of three numerical values `x`, `y`, and `z`.
from StatisticalDebugger import middle # minor dependency
# We set up a test function that evaluates a string – a tuple of three arguments – and then tests `middle()`:
def middle_test(s: str) -> None:
    """Test adapter: evaluate `s` as a tuple of three numbers and
    check that `middle()` indeed returns their median."""
    # NOTE(review): `eval()` on the input string - fine for a notebook
    # demo, but never do this on untrusted input
    x, y, z = eval(s)
    assert middle(x, y, z) == sorted([x, y, z])[1]
# The grammar for the three numbers simply puts three integers together:
# + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"}
XYZ_GRAMMAR: Grammar = {
"<start>":
["<int>, <int>, <int>"],
"<int>":
["<positive-int>", "-<positive-int>"],
"<positive-int>":
["<digit>", "<nonzero-digit><positive-int>"],
"<nonzero-digit>": list("123456789"),
"<digit>": list(string.digits),
}
# -
# Here is an example of `middle()` failing:
with ExpectError(AssertionError):
middle_test("2, 1, 3")
# What happens if we debug this with `DDSetDebugger`? We see that there is no abstraction at the syntax level that could characterize this failure:
with DDSetDebugger(XYZ_GRAMMAR, log=True) as dd_middle:
middle_test("2, 1, 3")
dd_middle
# So, while there are failures that can be nicely characterized using abstractions of input elements, `middle()` is not one of them. Which is good, because this means that all our other techniques such as [statistical debugging](StatisticalDebugger.ipynb) and [dynamic invariants](DynamicInvariants.ipynb) still have a use case :-)
# ## Synopsis
# This chapter provides a class `DDSetDebugger`, implementing the DDSET algorithm for generalizing failure-inducing inputs. The `DDSetDebugger` is used as follows:
#
# ```python
# with DDSetDebugger(grammar) as dd:
# function(args...)
# dd
# ```
#
# Here, `function(args...)` is a failing function call (= raises an exception) that takes at least one string argument; `grammar` is an [input grammar in fuzzingbook format](https://www.fuzzingbook.org/html/Grammars.html) that matches the format of this argument.
#
# The result is a call of `function()` with an _abstract failure-inducing input_ – a variant of the concrete input in which parts are replaced by placeholders in the form `<name>`, where `<name>` is a nonterminal in the grammar. The failure has been verified to occur for a number of instantiations of `<name>`.
# Here is an example of how `DDSetDebugger` works. The concrete failing input `<foo>"bar</foo>` is generalized to an _abstract failure-inducing input_:
with DDSetDebugger(SIMPLE_HTML_GRAMMAR) as dd:
remove_html_markup('<foo>"bar</foo>')
dd
# The abstract input tells us that the failure occurs for whatever opening and closing HTML tags as long as there is a double quote between them.
# A programmatic interface is available as well. `generalize()` returns a mapping of argument names to (generalized) values:
dd.generalize()
# Using `fuzz()`, the abstract input can be instantiated to further concrete inputs, all set to produce the failure again:
for i in range(10):
print(dd.fuzz())
# `DDSetDebugger` can be customized by passing a subclass of `TreeGeneralizer`, which does the gist of the work; for details, see its constructor.
# The full class hierarchy is shown below.
# ignore
from ClassDiagram import display_class_hierarchy
# ignore
display_class_hierarchy([DDSetDebugger, TreeGeneralizer],
public_methods=[
CallCollector.__init__,
CallCollector.__enter__,
CallCollector.__exit__,
CallCollector.function,
CallCollector.args,
CallCollector.exception,
CallCollector.call, # type: ignore
DDSetDebugger.__init__,
DDSetDebugger.__repr__,
DDSetDebugger.fuzz,
DDSetDebugger.fuzz_args,
DDSetDebugger.generalize,
], project='debuggingbook')
# + [markdown] button=false new_sheet=true run_control={"read_only": false}
# ## Lessons Learned
#
# * Generalizing failure-inducing inputs can yield important information for which inputs and under which circumstances a failure occurs.
# * Generalizing failure-inducing inputs is most useful if the input can be split into multiple elements, of which only a part are relevant for producing the error.
# * As they help in _parsing_ and _producing_ input, _grammars_ can play an important role in testing and debugging.
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# ## Next Steps
#
# Our [next chapter](Repairer.ipynb) introduces _automated repair_ of programs, building on the fault localization and generalization mechanisms introduced so far.
# -
# ## Background
#
# Our `DDSetDebugger` class implements the DDSET algorithm as introduced by Gopinath et al. in \cite{Gopinath2020}. A [full-fledged implementation of DDSET](https://rahul.gopinath.org/post/2020/07/15/ddset/) with plenty of details and experiments is available as a Jupyter Notebook. Our implementation follows the [simplified implementation of DDSET, as described by Gopinath](https://rahul.gopinath.org/post/2020/08/03/simple-ddset/).
#
# The potential for determining how input features relate to bugs is not nearly explored yet.
# The ALHAZEN work by Kampmann et al. \cite{Kampmann2020} generalizes over DDSET in a different way, by investigating _semantic_ features of input elements such as their numeric interpretation or length and their correlation with failures. Like DDSET, ALHAZEN also uses a feedback loop to strengthen or refute its hypotheses.
#
# In recent work \cite{Gopinath2021}, Gopinath has extended the concept of DDSET further. His work on _evocative expressions_ introduces a _pattern language_ in which arbitrary DDSET-like patterns can be combined into Boolean formula that even more precisely capture and produce failure circumstances. In particular, evocative expressions can _specialize_ grammars towards Boolean pattern combinations, thus allowing for great flexibility in testing and debugging.
# + [markdown] button=false new_sheet=true run_control={"read_only": false}
# ## Exercises
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# ### Exercise 1: Generalization and Specialization
#
# Consider the abstract failure-inducing input for `BAD_INPUT` we determined:
# -
all_terminals(abstract_failure_inducing_input)
# + [markdown] button=false new_sheet=false run_control={"read_only": false} solution2="hidden" solution2_first=true
# 1. How does it change if you increase the number of test runs, using `max_tries_for_generalization`?
# 2. What is the success rate of the new pattern?
# + [markdown] slideshow={"slide_type": "skip"} solution2="hidden"
# **Solution.** We can compute this by increasing `max_tries_for_generalization`:
# + slideshow={"slide_type": "skip"} solution2="hidden"
more_precise_bitg = \
cast(TreeGeneralizer, bad_input_tree_generalizer(max_tries_for_generalization=100))
more_precise_abstract_failure_inducing_input = \
more_precise_bitg.generalize()
# + cell_style="split" slideshow={"slide_type": "skip"} solution2="hidden"
all_terminals(more_precise_abstract_failure_inducing_input)
# + [markdown] slideshow={"slide_type": "skip"} solution2="hidden"
# We see that we still have an opening tag; however, it no longer assumes attributes.
# + [markdown] slideshow={"slide_type": "skip"} solution2="hidden"
# The success rate can be computed as before:
# + slideshow={"slide_type": "skip"} solution2="hidden"
successes = 0
failures = 0
trials = 1000
for i in range(trials):
test_input = all_terminals(
more_precise_bitg.fuzz_tree(
more_precise_abstract_failure_inducing_input))
try:
remove_html_markup(test_input)
except AssertionError:
successes += 1
else:
failures += 1
# + slideshow={"slide_type": "skip"} solution2="hidden"
successes, failures
# + slideshow={"slide_type": "skip"} solution2="hidden"
failures / 1000
# + [markdown] slideshow={"slide_type": "skip"} solution2="hidden"
# We see that the success rate is now more than 99%, which is better than before. On the other hand, the pattern is now overly _special_, since there are `<opening-tags>` with attributes such that the failure occurs (but also some that cancel out the error).
| notebooks/DDSetDebugger.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# # NEF summary
#
# The Neural Engineering Framework (NEF)
# is one set of theoretical methods that are used in
# Nengo for constructing neural models.
# The NEF is based on [<NAME>'s (2003) book](
# https://mitpress.mit.edu/books/neural-engineering) from MIT Press.
# This notebook introduces the three main principles
# discussed in that book and implemented in Nengo.
# +
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import nengo
from nengo.dists import Uniform
from nengo.processes import WhiteSignal
from nengo.utils.ensemble import tuning_curves
from nengo.utils.ipython import hide_input
from nengo.utils.matplotlib import rasterplot
def aligned(n_neurons, radius=0.9):
    """Return evenly spaced intercepts and alternating +/-1 encoders.

    The intercepts span [-radius, radius] and each one is multiplied by
    the sign of its neuron's encoder, so tuning curves come out aligned.
    ``n_neurons`` is expected to be even (encoders are tiled in pairs).
    """
    signs = np.tile([[1], [-1]], (n_neurons // 2, 1))
    points = np.linspace(-radius, radius, n_neurons) * signs[:, 0]
    return points, signs
hide_input()
# -
# ## Principle 1: Representation
#
# ### Encoding
#
# Neural populations represent time-varying signals
# through their spiking responses.
# A signal is a vector of real numbers of arbitrary length.
# This example is a 1D signal going from -1 to 1 in 1 second.
model = nengo.Network(label="NEF summary")
with model:
input = nengo.Node(lambda t: t * 2 - 1)
input_probe = nengo.Probe(input)
# +
with nengo.Simulator(model) as sim:
sim.run(1.0)
plt.figure()
plt.plot(sim.trange(), sim.data[input_probe], lw=2)
plt.title("Input signal")
plt.xlabel("Time (s)")
plt.xlim(0, 1);
hide_input()
# -
# These signals drive neural populations
# based on each neuron's *tuning curve*
# (which is similar to the current-frequency curve,
# if you're familiar with that).
#
# The tuning curve describes how much
# a particular neuron will fire as a function of the input signal.
intercepts, encoders = aligned(8) # Makes evenly spaced intercepts
with model:
A = nengo.Ensemble(
8,
dimensions=1,
intercepts=intercepts,
max_rates=Uniform(80, 100),
encoders=encoders)
# +
with nengo.Simulator(model) as sim:
eval_points, activities = tuning_curves(A, sim)
plt.figure()
plt.plot(eval_points, activities, lw=2)
plt.xlabel("Input signal")
plt.ylabel("Firing rate (Hz)");
hide_input()
# -
# We can drive these neurons with our input signal
# and observe their spiking activity over time.
with model:
nengo.Connection(input, A)
A_spikes = nengo.Probe(A.neurons)
# +
with nengo.Simulator(model) as sim:
sim.run(1)
plt.figure()
ax = plt.subplot(1, 1, 1)
rasterplot(sim.trange(), sim.data[A_spikes], ax)
ax.set_xlim(0, 1)
ax.set_ylabel('Neuron')
ax.set_xlabel('Time (s)');
hide_input()
# -
# ### Decoding
#
# We can estimate the input signal
# originally encoded by decoding the pattern of spikes.
# To do this, we first filter the spike train
# with a temporal filter that accounts for
# postsynaptic current (PSC) activity.
model = nengo.Network(label="NEF summary")
with model:
input = nengo.Node(lambda t: t * 2 - 1)
input_probe = nengo.Probe(input)
intercepts, encoders = aligned(8) # Makes evenly spaced intercepts
A = nengo.Ensemble(8, dimensions=1,
intercepts=intercepts,
max_rates=Uniform(80, 100),
encoders=encoders)
nengo.Connection(input, A)
A_spikes = nengo.Probe(A.neurons, synapse=0.01)
# +
with nengo.Simulator(model) as sim:
sim.run(1)
scale = 180
plt.figure()
for i in range(A.n_neurons):
plt.plot(sim.trange(), sim.data[A_spikes][:, i] - i * scale)
plt.xlim(0, 1)
plt.ylim(scale * (-A.n_neurons + 1), scale)
plt.ylabel("Neuron")
plt.yticks(
np.arange(scale / 1.8, (-A.n_neurons + 1) * scale, -scale),
np.arange(A.n_neurons))
hide_input()
# -
# Then we multiply those filtered spike trains
# with decoding weights and sum them together
# to give an estimate of the input based on the spikes.
#
# The decoding weights are determined
# by minimizing the squared difference
# between the decoded estimate and the actual input signal.
with model:
A_probe = nengo.Probe(A, synapse=0.01) # 10ms PSC filter
# +
with nengo.Simulator(model) as sim:
sim.run(1)
plt.figure()
plt.plot(sim.trange(), sim.data[input_probe], label="Input signal")
plt.plot(sim.trange(), sim.data[A_probe], label="Decoded estimate")
plt.legend(loc="best")
plt.xlim(0, 1)
hide_input()
# -
# The accuracy of the decoded estimate increases
# as the number of neurons increases.
model = nengo.Network(label="NEF summary")
with model:
input = nengo.Node(lambda t: t * 2 - 1)
input_probe = nengo.Probe(input)
A = nengo.Ensemble(30, dimensions=1, max_rates=Uniform(80, 100))
nengo.Connection(input, A)
A_spikes = nengo.Probe(A.neurons)
A_probe = nengo.Probe(A, synapse=0.01)
# +
with nengo.Simulator(model) as sim:
sim.run(1)
plt.figure(figsize=(15, 3.5))
plt.subplot(1, 3, 1)
eval_points, activities = tuning_curves(A, sim)
plt.plot(eval_points, activities, lw=2)
plt.xlabel("Input signal")
plt.ylabel("Firing rate (Hz)")
ax = plt.subplot(1, 3, 2)
rasterplot(sim.trange(), sim.data[A_spikes], ax)
plt.xlim(0, 1)
plt.xlabel("Time (s)")
plt.ylabel("Neuron")
plt.subplot(1, 3, 3)
plt.plot(sim.trange(), sim.data[input_probe], label="Input signal")
# Fix legend label typo: "esimate" -> "estimate"
plt.plot(sim.trange(), sim.data[A_probe], label="Decoded estimate")
plt.legend(loc="best")
plt.xlabel("Time (s)")
plt.xlim(0, 1)
hide_input()
# -
# Any smooth signal can be encoded and decoded.
model = nengo.Network(label="NEF summary")
with model:
input = nengo.Node(WhiteSignal(1, high=5), size_out=1)
input_probe = nengo.Probe(input)
A = nengo.Ensemble(30, dimensions=1, max_rates=Uniform(80, 100))
nengo.Connection(input, A)
A_spikes = nengo.Probe(A.neurons)
A_probe = nengo.Probe(A, synapse=0.01)
# +
with nengo.Simulator(model) as sim:
sim.run(1)
plt.figure(figsize=(10, 3.5))
plt.subplot(1, 2, 1)
plt.plot(sim.trange(), sim.data[input_probe], label="Input signal")
# Fix legend label typo: "esimate" -> "estimate"
plt.plot(sim.trange(), sim.data[A_probe], label="Decoded estimate")
plt.legend(loc="best")
plt.xlabel("Time (s)")
plt.xlim(0, 1)
ax = plt.subplot(1, 2, 2)
rasterplot(sim.trange(), sim.data[A_spikes], ax)
plt.xlim(0, 1)
plt.xlabel("Time (s)")
plt.ylabel("Neuron")
hide_input()
# -
# ## Principle 2: Transformation
#
# Encoding and decoding allow us to encode signals over time,
# and decode transformations of those signals.
#
# In fact, we can decode arbitrary transformations of the input signal,
# not just the signal itself (as in the previous example).
#
# Let's decode the square of our white noise input.
model = nengo.Network(label="NEF summary")
with model:
input = nengo.Node(WhiteSignal(1, high=5), size_out=1)
input_probe = nengo.Probe(input, )
A = nengo.Ensemble(30, dimensions=1, max_rates=Uniform(80, 100))
Asquare = nengo.Node(size_in=1)
nengo.Connection(input, A)
nengo.Connection(A, Asquare, function=np.square)
A_spikes = nengo.Probe(A.neurons)
Asquare_probe = nengo.Probe(Asquare, synapse=0.01)
# +
with nengo.Simulator(model) as sim:
sim.run(1)
plt.figure(figsize=(10, 3.5))
plt.subplot(1, 2, 1)
plt.plot(
sim.trange(),
sim.data[input_probe],
label="Input signal")
# Fix legend label typo: "esimate" -> "estimate"
plt.plot(
    sim.trange(),
    sim.data[Asquare_probe],
    label="Decoded estimate")
plt.plot(
sim.trange(),
np.square(sim.data[input_probe]),
label="Input signal squared")
plt.legend(loc="best", fontsize='medium')
plt.xlabel("Time (s)")
plt.xlim(0, 1)
ax = plt.subplot(1, 2, 2)
rasterplot(sim.trange(), sim.data[A_spikes])
plt.xlim(0, 1)
plt.xlabel("Time (s)")
plt.ylabel("Neuron")
hide_input()
# -
# Notice that the spike trains are exactly the same.
# The only difference is how we're interpreting those spikes.
# We told Nengo to compute a new set of decoders
# that estimate the function $x^2$.
#
# In general, the transformation principle
# determines how we can decode spike trains
# to compute linear and nonlinear transformations of signals
# encoded in a population of neurons.
# We can then project those transformed signals
# into another population, and repeat the process.
# Essentially, this provides a means of
# computing the neural connection weights
# to compute an arbitrary function between populations.
#
# Suppose we are representing a sine wave.
model = nengo.Network(label="NEF summary")
with model:
input = nengo.Node(lambda t: np.sin(np.pi * t))
A = nengo.Ensemble(30, dimensions=1, max_rates=Uniform(80, 100))
nengo.Connection(input, A)
A_spikes = nengo.Probe(A.neurons)
A_probe = nengo.Probe(A, synapse=0.01)
# +
with nengo.Simulator(model) as sim:
sim.run(2)
plt.figure(figsize=(10, 3.5))
plt.subplot(1, 2, 1)
plt.plot(sim.trange(), sim.data[A_probe])
plt.title("A")
plt.xlabel("Time (s)")
plt.xlim(0, 2)
ax = plt.subplot(1, 2, 2)
rasterplot(sim.trange(), sim.data[A_spikes], ax)
plt.xlim(0, 2)
plt.title("A")
plt.xlabel("Time (s)")
plt.ylabel("Neuron")
hide_input()
# -
# Linear transformations of that signal
# involve solving for the usual decoders,
# and scaling those decoding weights.
# Let us flip this sine wave upside down
# as it is transmitted between two populations
# (i.e. population A and population -A).
with model:
minusA = nengo.Ensemble(30, dimensions=1, max_rates=Uniform(80, 100))
nengo.Connection(A, minusA, function=lambda x: -x)
minusA_spikes = nengo.Probe(minusA.neurons)
minusA_probe = nengo.Probe(minusA, synapse=0.01)
# +
with nengo.Simulator(model) as sim:
sim.run(2)
plt.figure(figsize=(10, 5))
plt.subplot(2, 2, 1)
plt.plot(sim.trange(), sim.data[A_probe])
plt.title("A")
plt.xticks(())
plt.xlim(0, 2)
plt.subplot(2, 2, 3)
plt.plot(sim.trange(), sim.data[minusA_probe])
plt.title("-A")
plt.xlabel("Time (s)")
plt.xlim(0, 2)
ax = plt.subplot(2, 2, 2)
rasterplot(sim.trange(), sim.data[A_spikes], ax)
plt.xlim(0, 2)
plt.title("A")
plt.xticks(())
plt.ylabel("Neuron")
ax = plt.subplot(2, 2, 4)
rasterplot(sim.trange(), sim.data[minusA_spikes], ax)
plt.xlim(0, 2)
plt.title("-A")
plt.xlabel("Time (s)")
plt.ylabel("Neuron")
hide_input()
# -
# Nonlinear transformations involve
# solving for a new set of decoding weights.
# Let us add a third population connected
# to the second one and use it to compute $(-A)^2$.
with model:
A_squared = nengo.Ensemble(30, dimensions=1, max_rates=Uniform(80, 100))
nengo.Connection(minusA, A_squared, function=lambda x: x ** 2)
A_squared_spikes = nengo.Probe(A_squared.neurons)
A_squared_probe = nengo.Probe(A_squared, synapse=0.02)
# +
with nengo.Simulator(model) as sim:
sim.run(2)
plt.figure(figsize=(10, 6.5))
plt.subplot(3, 2, 1)
plt.plot(sim.trange(), sim.data[A_probe])
plt.axhline(0, color='k')
plt.title("A")
plt.xticks(())
plt.xlim(0, 2)
plt.subplot(3, 2, 3)
plt.plot(sim.trange(), sim.data[minusA_probe])
plt.axhline(0, color='k')
plt.title("-A")
plt.xticks(())
plt.xlim(0, 2)
plt.subplot(3, 2, 5)
plt.plot(sim.trange(), sim.data[A_squared_probe])
plt.axhline(0, color='k')
plt.title("(-A)^2")
plt.xlabel("Time (s)")
plt.xlim(0, 2)
ax = plt.subplot(3, 2, 2)
rasterplot(sim.trange(), sim.data[A_spikes], ax)
plt.xlim(0, 2)
plt.title("A")
plt.xticks(())
plt.ylabel("Neuron")
ax = plt.subplot(3, 2, 4)
rasterplot(sim.trange(), sim.data[minusA_spikes], ax)
plt.xlim(0, 2)
plt.title("-A")
plt.xticks(())
plt.ylabel("Neuron")
ax = plt.subplot(3, 2, 6)
rasterplot(sim.trange(), sim.data[A_squared_spikes], ax)
plt.xlim(0, 2)
plt.title("(-A)^2")
plt.xlabel("Time (s)")
plt.ylabel("Neuron")
hide_input()
# -
# ## Principle 3: Dynamics
#
# So far, we have been considering the values
# represented by ensembles as generic "signals."
# However, if we think of them instead
# as state variables in a dynamical system,
# then we can apply the methods of control theory
# or dynamic systems theory to brain models.
# Nengo automatically translates from standard dynamical systems descriptions
# to descriptions consistent with neural dynamics.
#
# In order to get interesting dynamics,
# we can connect populations recurrently (i.e., to themselves).
#
# Below is a simple harmonic oscillator
# implemented using this third principle.
# It needs a bit of input to get it started.
model = nengo.Network(label="NEF summary")
with model:
input = nengo.Node(lambda t: [1, 0] if t < 0.1 else [0, 0])
oscillator = nengo.Ensemble(200, dimensions=2)
nengo.Connection(input, oscillator)
nengo.Connection(
oscillator, oscillator, transform=[[1, 1], [-1, 1]], synapse=0.1)
oscillator_probe = nengo.Probe(oscillator, synapse=0.02)
# +
with nengo.Simulator(model) as sim:
sim.run(3)
plt.figure(figsize=(10, 3.5))
plt.subplot(1, 2, 1)
plt.plot(sim.trange(), sim.data[oscillator_probe])
plt.ylim(-1.2, 1.2)
plt.xlabel('Time (s)')
plt.subplot(1, 2, 2)
plt.plot(sim.data[oscillator_probe][:, 0], sim.data[oscillator_probe][:, 1])
plt.grid()
plt.axis([-1.2, 1.2, -1.2, 1.2])
plt.xlabel('$x_1$')
plt.ylabel('$x_2$')
hide_input()
| docs/examples/advanced/nef-summary.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Imports
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import gc
import pretty_html_table as phtm
#import sys
# To be able to import from lib directory
#sys.path.append(r'..\lib\\')
#do not show warnings
import warnings
warnings.filterwarnings("ignore")
gc.collect()
# ## Datos crudos
# Load the raw Saber 11 results for all six semesters (2017-1 .. 2019-2).
# NOTE(review): the r'data\...' paths are Windows-style; confirm they resolve
# on the machine running this notebook.
saber11_20171 = pd.read_csv(r'data\Saber_11__2017-1.csv')
saber11_20172 = pd.read_csv(r'data\Saber_11__2017-2.csv')
saber11_20181 = pd.read_csv(r'data\Saber_11__2018-1.csv')
saber11_20182 = pd.read_csv(r'data\Saber_11__2018-2.csv')
saber11_20191 = pd.read_csv(r'data\Saber_11__2019-1.csv')
saber11_20192 = pd.read_csv(r'data\Saber_11__2019-2.csv')
# Collect the per-period DataFrames so batch operations can loop over them.
datos = [saber11_20171,
saber11_20172,
saber11_20181,
saber11_20182,
saber11_20191,
saber11_20192]
# Normalize column names to lowercase in every period (mutates in place).
for peri in datos:
    peri.columns = [i.lower() for i in peri.columns]
# +
variables_cole = [i for i in saber11_20171.columns if 'cole' in i]
variables_estu = [i for i in saber11_20171.columns if 'estu' in i]
variables_fami = [i for i in saber11_20171.columns if 'fami' in i]
key_cole = ['cole_cod_dane_establecimiento',
'cole_cod_dane_sede',
'cole_cod_mcpio_ubicacion',
'cole_cod_depto_ubicacion',
'cole_naturaleza',
'cole_calendario']
key_estu = key_cole + ['fami_estratovivienda','punt_global','estu_horassemanatrabaja','periodo','estu_consecutivo']
# -
# ## Colegios
#Concat de todos los periodos: cada fila es un colegio
key = key_cole + ['cole_nombre_sede', 'cole_mcpio_ubicacion','cole_depto_ubicacion']
colegios_2017_2019 = pd.concat([saber11_20171[key].drop_duplicates(),
saber11_20172[key].drop_duplicates(),
saber11_20181[key].drop_duplicates(),
saber11_20182[key].drop_duplicates(),
saber11_20191[key].drop_duplicates(),
saber11_20192[key].drop_duplicates()])
#Algunos duplicados por un cambio en el nombre
colegios_2017_2019.drop_duplicates(inplace=True)
#Revisar duplicados
colegios_2017_2019[key_cole].drop_duplicates().shape[0] == colegios_2017_2019.shape[0]
#Un solo nombre por colegio, escogido de forma alfabética
colegios_2017_2019_ = pd.pivot_table(data=colegios_2017_2019,
index=key_cole,
aggfunc={'cole_nombre_sede':'min','cole_mcpio_ubicacion':'min','cole_depto_ubicacion':'min'}).reset_index()
colegios_2017_2019_[key_cole].drop_duplicates().shape[0] == colegios_2017_2019_.shape[0]
# ## Estudiantes
#Concat de todos los periodos: cada fila es un estudiante
estudiantes_2017_2019 = pd.concat([saber11_20171[key_estu],
saber11_20172[key_estu],
saber11_20181[key_estu],
saber11_20182[key_estu],
saber11_20191[key_estu],
saber11_20192[key_estu]])
#Null check
estudiantes_2017_2019.isna().sum()
#Fill nulls
estudiantes_2017_2019['fami_estratovivienda'].fillna('AA-Sin info',inplace=True)
estudiantes_2017_2019['estu_horassemanatrabaja'].fillna('AA-Sin info',inplace=True)
# Merge the two Marymount campuses in Atlántico under a single DANE code.
# Series.replace is the vectorized, idiomatic form of the element-wise
# "if x == old then new" apply used before.
estudiantes_2017_2019['cole_cod_dane_establecimiento'] = \
    estudiantes_2017_2019['cole_cod_dane_establecimiento'].replace(308573800031, 308001004209)
estudiantes_2017_2019['cole_cod_dane_sede'] = \
    estudiantes_2017_2019['cole_cod_dane_sede'].replace(308573800031, 308001004209)
# Move the merged campus's municipality code from 8573 to 8001.
mask = (estudiantes_2017_2019['cole_cod_dane_sede'] == 308001004209) & \
       (estudiantes_2017_2019['cole_cod_mcpio_ubicacion'] == 8573)
# .loc avoids pandas chained assignment, which raises SettingWithCopyWarning
# and can silently fail on a copy (the original only "worked" because
# warnings are suppressed at the top of the notebook).
estudiantes_2017_2019.loc[mask, 'cole_cod_mcpio_ubicacion'] = 8001
#Duplicates check
estudiantes_2017_2019[['estu_consecutivo']].drop_duplicates().shape[0] == estudiantes_2017_2019.shape[0]
# ## Cálculos
# + code_folding=[0]
#Método 1: Promedio por institución sobre todos los periodos
#Cálculo del promedio de todos lo periodos por colegio y su sigma
#Promedio
cole_acumulado = pd.pivot_table(data=estudiantes_2017_2019,
index=key_cole,
aggfunc={'punt_global':'mean','estu_consecutivo':'count'}).reset_index()
cole_acumulado.rename(columns={"punt_global": "prom_punt_global","estu_consecutivo": "num_evaluados"},inplace=True)
#Desviación estándar
cole_acumulado1 = pd.pivot_table(data=estudiantes_2017_2019,
index=key_cole,
aggfunc={'punt_global':np.std}).reset_index()
cole_acumulado1.rename(columns={"punt_global": "stdve_punt_global"},inplace=True)
#Resultados
cole_acumulado_final = cole_acumulado.merge(cole_acumulado1[key_cole+['stdve_punt_global']],
how='left',
left_on=key_cole,
right_on=key_cole)
cole_acumulado_final['stdve_punt_global'].fillna(0, inplace=True)
#Criterio de Andes = Promedio - 1.25(Desviaciones estándar)
cole_acumulado_final['criterio_andes'] = cole_acumulado_final['prom_punt_global'] - 1.25*cole_acumulado_final['stdve_punt_global']
#True: Criterio Andes >= 310 , False: Criterio Andes < 310*
#Acá se puso en 295 porque es el valor mínimo de un colegio confirmado. Más adelante se cambia el valor con la condición correcta
# Provisional direct-entry flag (295 here; the real 310 cutoff is applied
# later — see the note above).  A single vectorized comparison replaces the
# init-then-chained-assignment pattern, which triggers
# SettingWithCopyWarning and only appeared to work with warnings suppressed.
cole_acumulado_final['entrada_directa'] = cole_acumulado_final.criterio_andes >= 295
# -
resultados_cole = cole_acumulado_final[(cole_acumulado_final.entrada_directa) & (cole_acumulado_final.cole_calendario == 'B')].sort_values(by='criterio_andes', ascending=False)
# + code_folding=[]
resultados_final = resultados_cole.merge(colegios_2017_2019_,
left_on=key_cole,
right_on=key_cole)
# -
resultados_final[resultados_final.entrada_directa=='No']
resultados_final[resultados_final['cole_nombre_sede'].str.contains("CORALES")]
# + code_folding=[0]
#Confirmados por El Uniandino
confirmados_el_uniandino = [311769001781,
311848001011,
425758800009,
311769003474,
311001105430,
311769000866,
368001000516,
376001026968,
311848000341,
311001087288,
311848000278,
425377800011,
311769000921,
311769003342,
311001019941,
311001045160,
311848002424,
311769000165,
311848000936,
425377000529,
368276000826,
319001002895,
308001073952,
311848000812,
311848002262,
311769001846,
311848000286,
311769000475,
376001002279,
311769003938,
311769004292,
466001005641,
311001019568,
311001010820,
311769002818]
nombres_confirmados_el_uniandino = ['<NAME>',
'COL <NAME>',
'COL CAMPOALEGRE LTDA - SEDE PRINCIPAL',
'COL ANDINO',
'COL LA COLINA',
'FUND NUEVO MARYMOUNT',
'COL NUEVO CAMBRIDGE',
'COLEGIO FREINET',
'GIMN CAMPESTRE',
'GIMN LA MONTAÑA',
'<NAME>',
'COL. <NAME>',
'COL HELVETIA',
'COL LOS NOGALES',
'COL <NAME>',
'FUND GIMN LOS PORTALES',
'COL S<NAME>',
'COL SAN JORGE DE INGLATERRA',
'COL <NAME>',
'COL TILATA - SEDE PRINCIPAL',
'COL LA QUINTA DEL PUENTE',
'COL GIMNASIO CALIBIO',
'COLEGIO ALTAMIRA',
'GIMN FEMENINO',
'COL <NAME>',
'COL SAN TARSICIO',
'GIMN DE LOS CERROS',
'ASPAEN GIMN IRAGUA',
'COL COLOMBO BRITÁNICO',
'COL <NAME>',
'COL SAN MATEO APOSTOL',
'LIC CAMPESTRE DE PEREIRA',
'LIC FRANCES LOUIS PASTEUR',
'COL SAN BARTOLOME LA MERCED',
'COL. INTERNACIONAL DE EDUCACIÓN INTEGRAL - CIEDI LTDA - SEDE PRINCIPAL']
# -
len(confirmados_el_uniandino)
# + code_folding=[0]
#Pretty results
# Names: normalize school-name abbreviations.  Order matters ('COLEGIO'
# must be rewritten before 'COL.').  The original repeated the 'PRINCIPAL'
# replacement twice; once suffices since str.replace removes every
# occurrence in one pass.
for old, new in [('COLEGIO', 'COL'), ('COL.', 'COL'), ('SEDE', ''),
                 ('PRINCIPAL', ''), ('GIMNASIO', 'GIMN'), ('GIMN.', 'GIMN')]:
    resultados_final['cole_nombre_sede'] = \
        resultados_final['cole_nombre_sede'].str.replace(old, new, regex=False)
# Numbers: round the score columns to 2 decimals for display.
for col in ['prom_punt_global', 'stdve_punt_global', 'criterio_andes']:
    resultados_final[col] = resultados_final[col].round(2)
#Booleanos
# .loc replaces chained assignment (df[col][mask] = v), which raises
# SettingWithCopyWarning and can silently fail on a copy; it only appeared
# to work here because warnings are suppressed.
resultados_final['confirmados_el_uniandino'] = 'Sin información'
resultados_final.loc[
    resultados_final.cole_cod_dane_establecimiento.isin(confirmados_el_uniandino),
    'confirmados_el_uniandino'] = 'Entrada directa'
# Capture the boolean mask BEFORE overwriting the column with strings.
mask_directa = resultados_final.entrada_directa.astype(bool)
resultados_final.loc[mask_directa, 'entrada_directa'] = 'Sí'
#Condición correcta: the real cutoff is 310, not the provisional 295
resultados_final.loc[resultados_final.criterio_andes < 310, 'entrada_directa'] = 'No'
# -
pd.set_option('display.max_colwidth', 0)
export = ['cole_nombre_sede','cole_depto_ubicacion','prom_punt_global','stdve_punt_global','entrada_directa']
# +
table_pretty = resultados_final[export+['confirmados_el_uniandino']]
table_pretty.rename(columns={"cole_nombre_sede": "Colegio",
"cole_depto_ubicacion": "Departamento",
"prom_punt_global": "Promedio Saber 11",
"stdve_punt_global": "$\sigma$ Saber 11",
"criterio_andes":"Criterio de Los Andes",
"entrada_directa":"¿Entra según criterio?",
"confirmados_el_uniandino":"Confirmado por El Uniandino"},inplace=True)
table_pretty.index += 1
# -
len(confirmados_el_uniandino) == len(nombres_confirmados_el_uniandino)
# + code_folding=[]
#Html table
<p><table border="0" class="dataframe">
<thead>
<tr style="text-align: right;">
<th style = "background-color: #FFFFFF;font-family: Arial;color: #305496;text-align: left;border-bottom: 2px solid #305496;padding: 0px 20px 0px 0px">Colegio</th>
<th style = "background-color: #FFFFFF;font-family: Arial;color: #305496;text-align: left;border-bottom: 2px solid #305496;padding: 0px 20px 0px 0px">Departamento</th>
<th style = "background-color: #FFFFFF;font-family: Arial;color: #305496;text-align: left;border-bottom: 2px solid #305496;padding: 0px 20px 0px 0px">Promedio Saber 11</th>
<th style = "background-color: #FFFFFF;font-family: Arial;color: #305496;text-align: left;border-bottom: 2px solid #305496;padding: 0px 20px 0px 0px">Sigma Saber 11</th>
<th style = "background-color: #FFFFFF;font-family: Arial;color: #305496;text-align: left;border-bottom: 2px solid #305496;padding: 0px 20px 0px 0px">¿Entra según criterio?</th>
<th style = "background-color: #FFFFFF;font-family: Arial;color: #305496;text-align: left;border-bottom: 2px solid #305496;padding: 0px 20px 0px 0px">Confirmado por El Uniandino</th>
</tr>
</thead>
<tbody>
<tr>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">COL LA QUINTA DEL PUENTE</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">SANTANDER</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">394.46</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">26.03</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sí</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Entrada directa</td>
</tr>
<tr>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">COL NUEVO CAMBRIDGE</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">SANTANDER</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">395.42</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">28.15</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sí</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Entrada directa</td>
</tr>
<tr>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">COL BILINGUE DIANA OESE</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">VALLE</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">393.16</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">26.66</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sí</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">COL PHILADELPHIA INTERNACIONAL -</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">VALLE</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">373.85</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">22.03</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sí</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Entrada directa</td>
</tr>
<tr>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px"><NAME></td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">BOGOTA</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">375.27</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">24.16</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sí</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Entrada directa</td>
</tr>
<tr>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">COL LOS NOGALES</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">BOGOTA</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">371.65</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">23.38</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sí</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Entrada directa</td>
</tr>
<tr>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px"><NAME></td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">BOGOTA</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">374.22</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">26.07</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sí</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Entrada directa</td>
</tr>
<tr>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px"><NAME></td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">BOGOTA</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">370.35</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">24.68</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sí</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Entrada directa</td>
</tr>
<tr>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px"><NAME></td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">BOGOTA</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">368.39</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">24.02</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sí</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">COL CAMPOALEGRE LTDA -</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">CUNDINAMARCA</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">365.04</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">21.79</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sí</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Entrada directa</td>
</tr>
<tr>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">COL LA COLINA</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">BOGOTA</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">372.46</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">29.5</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sí</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Entrada directa</td>
</tr>
<tr>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px"><NAME></td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">BOGOTA</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">366.99</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">26.16</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sí</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Entrada directa</td>
</tr>
<tr>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">COL BILING BUCKINGHAM</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">BOGOTA</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">361.92</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">22.14</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sí</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Entrada directa</td>
</tr>
<tr>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">COL MONTESSORI BRITISH SCHOOL</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">BOGOTA</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">366.9</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">26.88</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sí</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px"><NAME></td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">BOGOTA</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">368.01</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">29.93</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sí</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Entrada directa</td>
</tr>
<tr>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px"><NAME> -</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">BOGOTA</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">362.89</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">26.53</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sí</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">COL FREINET</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">VALLE</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">363.32</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">28.76</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sí</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Entrada directa</td>
</tr>
<tr>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px"><NAME></td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">ANTIOQUIA</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">361.71</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">27.72</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sí</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px"><NAME></td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">VALLE</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">354.41</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">21.89</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sí</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">COL HISPANOAMERICANO</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">VALLE</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">361.03</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">27.54</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sí</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">LIC CAMPESTRE DE PEREIRA</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">RISARALDA</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">357.06</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">24.6</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sí</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Entrada directa</td>
</tr>
<tr>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">GIMN LA MONTAÑA</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">BOGOTA</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">364.34</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">31.22</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sí</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Entrada directa</td>
</tr>
<tr>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px"><NAME></td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">ANTIOQUIA</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">357.11</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">26.05</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sí</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">GIMN DE LOS CERROS</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">BOGOTA</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">361.14</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">30.56</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sí</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Entrada directa</td>
</tr>
<tr>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">FUND NUEVO MARYMOUNT</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">BOGOTA</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">309.52</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">25.61</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sí</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Entrada directa</td>
</tr>
<tr>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px"><NAME></td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">BOGOTA</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">355.07</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">26.22</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sí</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Entrada directa</td>
</tr>
<tr>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">GIMN CAMPESTRE</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">BOGOTA</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">356.33</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">27.41</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sí</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Entrada directa</td>
</tr>
<tr>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">FUND GIMN LOS PORTALES</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">BOGOTA</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">355.66</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">27.19</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sí</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Entrada directa</td>
</tr>
<tr>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px"><NAME> -</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">BOGOTA</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">360.37</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">31.05</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sí</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">COL INTERNACIONAL LA SIERRA -</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">CESAR</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">348.92</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">22.23</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sí</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">COL SAN TARSICIO</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">BOGOTA</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">355.7</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">28.1</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sí</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Entrada directa</td>
</tr>
<tr>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px"><NAME></td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">BOGOTA</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">352.8</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">25.94</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sí</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Entrada directa</td>
</tr>
<tr>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">GIMN CARTAGENA-ASPAEN</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">BOLIVAR</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">362.88</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">34.16</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sí</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">COL BILING CLERMONT -</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">BOGOTA</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">352.31</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">25.77</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sí</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Entrada directa</td>
</tr>
<tr>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">CORP. EDUC. GIMN ALTAIR DE C/GENA.</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">BOLIVAR</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">355.16</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">28.06</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sí</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px"><NAME></td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">CUNDINAMARCA</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">352.43</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">25.95</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sí</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px"><NAME></td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">BOGOTA</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">354.64</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">27.95</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sí</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">COL GIMN CALIBIO</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">CAUCA</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">354.2</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">28.27</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sí</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Entrada directa</td>
</tr>
<tr>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">COL INTERNACIONAL DE EDUCACIÓN INTEGRAL - CIEDI LTDA -</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">BOGOTA</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">353.02</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">27.39</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sí</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Entrada directa</td>
</tr>
<tr>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">COL PANAMERICANO</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">SANTANDER</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">356.16</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">29.95</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sí</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px"><NAME></td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">ATLANTICO</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">354.84</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">29.5</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sí</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Entrada directa</td>
</tr>
<tr>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px"><NAME> CARTAGENA DE INDIAS</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">BOLIVAR</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">356.71</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">31.59</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sí</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px"><NAME></td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">RISARALDA</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">354.12</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">29.93</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sí</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px"><NAME></td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">CUNDINAMARCA</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">350.94</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">27.86</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sí</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px"><NAME></td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">CUNDINAMARCA</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">350.43</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">27.95</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sí</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Entrada directa</td>
</tr>
<tr>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px"><NAME></td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">BOGOTA</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">351.56</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">28.98</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sí</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Entrada directa</td>
</tr>
<tr>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">COL TILATA -</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">CUNDINAMARCA</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">350.94</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">29.39</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sí</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Entrada directa</td>
</tr>
<tr>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">GIMN INGLES</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">QUINDIO</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">352.83</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">31.03</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sí</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">COL BRITANICO DE CARTAGENA</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">BOLIVAR</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">357.33</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">35.79</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sí</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">COL COLOMBO BRITÁNICO</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">VALLE</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">352.3</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">31.96</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sí</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">COL HEBREO UNION -</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">ATLANTICO</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">348.63</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">29.33</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sí</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">COL BERCHMANS</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">VALLE</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">353.23</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">33.05</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sí</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">ASPAEN GIMN IRAGUA</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">BOGOTA</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">349.23</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">29.93</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sí</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Entrada directa</td>
</tr>
<tr>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">GIMN FEMENINO</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">BOGOTA</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">346.04</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">27.47</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sí</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Entrada directa</td>
</tr>
<tr>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px"><NAME> -</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">CUNDINAMARCA</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">337.25</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">21.06</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sí</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
            <td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">CORPORACIÓN CULTURAL COL ALEMÁN - PRINCIPAL</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">VALLE</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">349.83</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">31.24</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sí</td>
<td style = "background-color: #E1FFE3;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">COL EL CAMINO ACADEMY</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">BOGOTA</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">349.27</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">30.97</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sí</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">COL CUMBRES</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">CUNDINAMARCA</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">348.2</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">30.44</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sí</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">COL MARYMOUNT</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">ATLANTICO</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">347.77</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">31.67</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">No</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">ASPAEN LICEO TACURÍ</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">VALLE</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">347.77</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">30.84</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">No</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
<td style = "background-color: #FFDAB9;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">COL ANDINO</td>
<td style = "background-color: #FFDAB9;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">BOGOTA</td>
<td style = "background-color: #FFDAB9;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">344.81</td>
<td style = "background-color: #FFDAB9;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">28.61</td>
<td style = "background-color: #FF8578;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">No</td>
<td style = "background-color: #FF8578;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Entrada directa</td>
</tr>
<tr>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">COL FUND CARDENAL JHON HENRY NEWMAN</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">CUNDINAMARCA</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">343.64</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">27.85</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">No</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">GIMN BRITANICO</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">CUNDINAMARCA</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">342.6</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">28.1</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">No</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
<td style = "background-color: #FFDAB9;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">COL SANTA MARIA</td>
<td style = "background-color: #FFDAB9;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">BOGOTA</td>
<td style = "background-color: #FFDAB9;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">343.97</td>
<td style = "background-color: #FFDAB9;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">29.27</td>
<td style = "background-color: #FF8578;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">No</td>
<td style = "background-color: #FF8578;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Entrada directa</td>
</tr>
<tr>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">COL REAL (ROYAL SCHOOL) -</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">ATLANTICO</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">349.46</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">33.67</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">No</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">GIMN INGLES</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">QUINDIO</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">348.46</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">33.13</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">No</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">COL GIMN VERMONT</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">ANTIOQUIA</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">347.37</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">32.59</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">No</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">COL VICTORIA -</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">BOGOTA</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">340.92</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">27.85</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">No</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
<td style = "background-color: #FFDAB9;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px"><NAME></td>
<td style = "background-color: #FFDAB9;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">BOGOTA</td>
<td style = "background-color: #FFDAB9;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">347.03</td>
<td style = "background-color: #FFDAB9;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">33.45</td>
<td style = "background-color: #FF8578;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">No</td>
<td style = "background-color: #FF8578;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Entrada directa</td>
</tr>
<tr>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px"><NAME> -</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">BOLIVAR</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">347.9</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">34.22</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">No</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px"><NAME> -</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">CUNDINAMARCA</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">337.75</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">26.21</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">No</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px"><NAME></td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">BOGOTA</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">343.01</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">30.71</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">No</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">COL BOLIVAR</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">VALLE</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">346.72</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">33.87</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">No</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">COL <NAME></td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">VALLE</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">340.97</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">29.3</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">No</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">CORPORACION COL CUMBRES</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">ANTIOQUIA</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">344.78</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">32.39</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">No</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">COL DE INGLATERRA (THE ENGLISH SCHOOL) -</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">BOGOTA</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">341.21</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">29.6</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">No</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">COL CAMPESTRE AMERICANO -</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">CAUCA</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">332.21</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">22.51</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">No</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">COL MAYOR DE LOS ANDES -</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">CUNDINAMARCA</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">341.53</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">30.19</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">No</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">COL INTERNACIONAL LOS CAÑAVERALES</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">VALLE</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">338.68</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">28.09</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">No</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">ASPAEN GIMN LOS CORALES</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">ATLANTICO</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">341.53</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">30.59</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">No</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">COL FUND. BILING . DE VALLEDUPAR</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">CESAR</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">348.53</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">36.4</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">No</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">GIMN LOS CAOBOS</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">CUNDINAMARCA</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">339.93</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">30.21</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">No</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">CENT EDUC Y CULT ESPAÑOL REYES CATOLICOS</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">BOGOTA</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">341.81</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">32.49</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">No</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px"><NAME></td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">ATLANTICO</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">337.48</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">29.06</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">No</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px"><NAME></td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">CUNDINAMARCA</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">338.47</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">29.91</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">No</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">COL FUNDACION LIC INGLES</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">RISARALDA</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">346.79</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">36.87</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">No</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px"><NAME></td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">VALLE</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">343.98</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">34.71</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">No</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">COL LOS ANDES</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">CAUCA</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">347.61</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">37.67</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">No</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">COL INTERNACIONAL SEK COLOMBIA - TRINIDAD DEL MONTE</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">CUNDINAMARCA</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">339.43</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">31.13</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">No</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">COL LA ARBOLEDA -</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">VALLE</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">347.4</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">37.91</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">No</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">GIMN FONTANA</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">BOGOTA</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">339.9</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">32.09</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">No</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">GIMN DEL NORTE -</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">BOGOTA</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">338.66</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">31.15</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">No</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">LICEO CAMPESTRE INGLES DEL SUR -</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">VALLE</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">325.0</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">20.3</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">No</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">ASPAEN COL JUANAMBÚ -</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">VALLE</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">346.25</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">37.45</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">No</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">COL BRITANICO INTERNACIONAL</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">ATLANTICO</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">341.08</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">33.32</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">No</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px"><NAME></td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">VALLE</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">342.71</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">35.25</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">No</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px"><NAME>EBREO</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">BOGOTA</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">334.24</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">29.14</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">No</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">ASOCIACION COL GRANADINO</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">CALDAS</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">342.08</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">35.44</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">No</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">COL <NAME></td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">BOGOTA</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">338.77</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">33.77</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">No</td>
<td style = "background-color: #FFFFFF;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">COL ALBANIA</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">LA GUAJIRA</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">341.89</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">37.38</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">No</td>
<td style = "background-color: #D9E1F2;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Sin información</td>
</tr>
<tr>
<td style = "background-color: #FFDAB9;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px"><NAME></td>
<td style = "background-color: #FFDAB9;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">BOGOTA</td>
<td style = "background-color: #FFDAB9;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">341.81</td>
<td style = "background-color: #FFDAB9;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">37.41</td>
<td style = "background-color: #FF8578;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">No</td>
<td style = "background-color: #FF8578;font-family: Arial;text-align: left;padding: 0px 0px 0px 0px">Entrada directa</td>
</tr>
</tbody>
</table></p>
# + code_folding=[0]
# Method 2: average per period-school, then per school.
# Compute each school's mean score per period, then average those period
# means per school and take their standard deviation.

# Per period-school: mean global score and number of evaluated students.
cole_acumulado_periodo = pd.pivot_table(data=estudiantes_2017_2019,
                                        index=key_cole + ['periodo'],
                                        aggfunc={'punt_global': 'mean', 'estu_consecutivo': 'count'}).reset_index()
cole_acumulado_periodo.rename(columns={"punt_global": "prom_punt_global", "estu_consecutivo": "num_evaluados"}, inplace=True)
# Per school: mean of the period means, plus total evaluated students.
cole_acumulado_2 = pd.pivot_table(data=cole_acumulado_periodo,
                                  index=key_cole,
                                  aggfunc={'prom_punt_global': 'mean', 'num_evaluados': 'sum'}).reset_index()
# Per school: standard deviation of the period means.
cole_acumulado_2_1 = pd.pivot_table(data=cole_acumulado_periodo,
                                    index=key_cole,
                                    aggfunc={'prom_punt_global': np.std}).reset_index()
cole_acumulado_2_1.rename(columns={"prom_punt_global": "stdve_punt_global"}, inplace=True)
# Join mean and std; schools with a single period get NaN std -> filled with 0.
cole_acumulado_final_1 = cole_acumulado_2.merge(cole_acumulado_2_1[key_cole + ['stdve_punt_global']],
                                                how='left',
                                                on=key_cole)
cole_acumulado_final_1.fillna(0, inplace=True)
# Andes criterion = mean - 1.25 * standard deviation.
cole_acumulado_final_1['criterio_andes'] = cole_acumulado_final_1['prom_punt_global'] - 1.25*cole_acumulado_final_1['stdve_punt_global']
# True: Andes criterion >= 310, False: < 310.
# BUG FIX: the original used chained indexing
# (df['entrada_directa'][mask] = True), which assigns through a temporary and
# is not guaranteed to modify the frame (pandas SettingWithCopyWarning).
# A vectorized comparison yields the same boolean column directly.
cole_acumulado_final_1['entrada_directa'] = cole_acumulado_final_1['criterio_andes'] >= 310
# -
# Schools that pass the criterion, calendar B only, best criterion first.
resultados_cole_1 = cole_acumulado_final_1[(cole_acumulado_final_1.entrada_directa) & (cole_acumulado_final_1.cole_calendario == 'B')].sort_values(by='criterio_andes', ascending=False)
# Attach the school-attribute columns (department, municipality, name, ...).
resultados_final_1 = resultados_cole_1.merge(colegios_2017_2019_,
                                             on=key_cole)
# Columns to export for the admissions report.
coles = ['cole_depto_ubicacion',
         'cole_mcpio_ubicacion',
         'cole_nombre_sede',
         'cole_calendario',
         'num_evaluados',
         'prom_punt_global',
         'stdve_punt_global',
         'criterio_andes',
         'entrada_directa']
# BUG FIX: the export previously wrote `resultados_final` (a different,
# earlier result) instead of the `resultados_final_1` computed just above.
resultados_final_1[coles].to_csv(r'data\59_colegios.csv')
# ## Insights

# + code_folding=[0]
# Saber11_Histórico_vs_Estrato: score distribution per family stratum
# (national, 2017-2019), with the population share per stratum below.
# BUG FIX: the original used chained indexing with a wrong-length RHS
# (df['col'][mask] = [value]*df.shape[0] — full-frame length for the masked
# rows). Use .loc with a scalar, which broadcasts correctly.
estudiantes_2017_2019.loc[estudiantes_2017_2019['fami_estratovivienda'] == '-', 'fami_estratovivienda'] = 'AA-Sin info'
estudiantes_2017_2019.loc[estudiantes_2017_2019['fami_estratovivienda'] == 'Sin Estrato', 'fami_estratovivienda'] = 'A-Sin Estrato'
data = estudiantes_2017_2019.sort_values(by="fami_estratovivienda")
data['estrato'] = data['fami_estratovivienda']
# Student count per stratum (the 'estrato' copy exists only to be counted).
distribucion = pd.pivot_table(data=data, index=['fami_estratovivienda'], aggfunc={'estrato': 'count'}).reset_index()
fig = plt.figure(figsize=(11, 12))
gs = fig.add_gridspec(4, 4)
ax1 = plt.subplot(gs[0:3, :])   # boxplots (top 3/4 of the figure)
ax3 = plt.subplot(gs[3, :])     # population bars (bottom row)
ax2 = ax1.twinx()               # overlay axis for the cutoff line
sns.boxplot(x="fami_estratovivienda",
            y="punt_global",
            data=data,
            color='lightblue',
            ax=ax1)
ax1.set_title('Saber 11 vs. Estrato - Histórico Nacional 2017-2019', y=1, fontsize=15)
ax1.set_xlabel('', fontsize=14)
ax1.set_ylabel('Promedio del puntaje global en el Saber 11', fontsize=14)
ax1.set_ylim(-10, 500)
ax1.tick_params(axis='both', which='major', labelsize=13)
# Horizontal reference line at the Andes admission cutoff (310).
# NOTE(review): the 8 points assume 8 stratum categories on the x axis.
ax2.plot([310]*8,
         color='r',
         ls='--',
         linewidth=2,
         label='Corte mínimo de Los Andes (310)')
ax2.set_ylim(-10, 500)
ax2.legend(loc="best", fontsize=15)
ax2.tick_params(axis='both', which='major', labelsize=13)
ax3.bar(x=distribucion['fami_estratovivienda'],
        height=distribucion['estrato']/sum(distribucion['estrato']),
        color='mistyrose')
ax3.set_xlabel('Estrato de la familia del estudiante', fontsize=14, y=15)
ax3.set_ylabel('%Población evaluada', fontsize=14)
fig.tight_layout()
plt.savefig(r'C:\Users\Usuario\Pictures\Saber11\saber11_estrato_H.png')
plt.close()
# + code_folding=[0]
# Saber11_B_vs_Estrato: same plot restricted to calendar-B periods
# (first semester of 2017/2018/2019).
# .copy() so relabeling below cannot write back into the source frame.
data = estudiantes_2017_2019[estudiantes_2017_2019.periodo.isin([20191, 20181, 20171])].copy()
# BUG FIX: the original chained indexing assigned a full-frame-length list to
# the masked rows of a slice (SettingWithCopy + length mismatch). Use .loc
# with a scalar value instead.
data.loc[data['fami_estratovivienda'] == '-', 'fami_estratovivienda'] = 'AA-Sin info'
data.loc[data['fami_estratovivienda'] == 'Sin Estrato', 'fami_estratovivienda'] = 'A-Sin Estrato'
data = data.sort_values(by="fami_estratovivienda")
data['estrato'] = data['fami_estratovivienda']
# Student count per stratum.
distribucion = pd.pivot_table(data=data, index=['fami_estratovivienda'], aggfunc={'estrato': 'count'}).reset_index()
fig = plt.figure(figsize=(11, 12))
gs = fig.add_gridspec(4, 4)
ax1 = plt.subplot(gs[0:3, :])   # boxplots
ax3 = plt.subplot(gs[3, :])     # population bars
ax2 = ax1.twinx()               # overlay axis for the cutoff line
sns.boxplot(x="fami_estratovivienda",
            y="punt_global",
            data=data,
            color='lightblue',
            ax=ax1)
ax1.set_title('Saber 11 vs. Estrato - Histórico calendario B 2017-2019', y=1, fontsize=15)
ax1.set_xlabel('', fontsize=14)
ax1.set_ylabel('Promedio del puntaje global en el Saber 11', fontsize=14)
ax1.set_ylim(-10, 500)
ax1.tick_params(axis='both', which='major', labelsize=13)
# Andes admission cutoff reference line.
# NOTE(review): the 8 points assume 8 stratum categories on the x axis.
ax2.plot([310]*8,
         color='r',
         ls='--',
         linewidth=2,
         label='Corte mínimo de Los Andes (310)')
ax2.set_ylim(-10, 500)
ax2.legend(loc="best", fontsize=15)
ax2.tick_params(axis='both', which='major', labelsize=13)
ax3.bar(x=distribucion['fami_estratovivienda'],
        height=distribucion['estrato']/sum(distribucion['estrato']),
        color='mistyrose')
ax3.set_xlabel('Estrato de la familia del estudiante', fontsize=14, y=15)
ax3.set_ylabel('%Población evaluada', fontsize=14)
fig.tight_layout()
plt.savefig(r'C:\Users\Usuario\Pictures\Saber11\saber11_estrato_B.png')
plt.close()
# + code_folding=[0]
# Estrato_Confirmados: stratum distribution of students at schools that clear
# the automatic-admission criterion (criterio_andes >= 310) plus the
# confirmed schools, compared with the national distribution.
top_59 = resultados_final[resultados_final.criterio_andes>=310].cole_cod_dane_establecimiento.drop_duplicates().values
data = estudiantes_2017_2019[(estudiantes_2017_2019.cole_cod_dane_establecimiento.isin(confirmados_el_uniandino+list(top_59)))].sort_values(by='fami_estratovivienda')
#data = data[data.cole_cod_dane_establecimiento.isin([top_59])]
# The 'estrato' copy exists only so the pivot below can count it per stratum.
data['estrato'] = data['fami_estratovivienda']
distribucion = pd.pivot_table(data=data,index=['fami_estratovivienda'],aggfunc={'estrato':'count'}).reset_index()
# National baseline: same count, over every evaluated student.
data_all = estudiantes_2017_2019.sort_values(by='fami_estratovivienda')
data_all['estrato'] = data_all['fami_estratovivienda']
distribucion_all = pd.pivot_table(data=data_all,index=['fami_estratovivienda'],aggfunc={'estrato':'count'}).reset_index()
# Two stacked bar charts: selected schools (top) vs. national (bottom),
# both normalized to population shares.
fig = plt.figure(figsize=(11,6))
ax1 = plt.subplot(211)
ax2 = plt.subplot(212)
ax1.set_title('Estrato de los colegios con admisión automática según criterio + confirmados', y=1, fontsize=15)
ax1.bar(x=distribucion['fami_estratovivienda'],
        height=distribucion['estrato']/sum(distribucion['estrato']),
        color='mistyrose')
ax1.set_ylabel('%Población', fontsize=14)
ax1.set_ylim(0,0.5)
ax2.set_title('Estrato estudiantes de los colegios a nivel nacional',y=1,fontsize=15)
ax2.bar(x=distribucion_all['fami_estratovivienda'],
        height=distribucion_all['estrato']/sum(distribucion_all['estrato']),
        color='mistyrose')
ax2.set_xlabel('Estrato de la familia del estudiante', fontsize=14,y=15)
ax2.set_ylabel('%Población', fontsize=14)
ax2.set_ylim(0,0.5)
plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=0.5)
# NOTE(review): 'estratp_auto.png' looks like a typo for 'estrato_auto.png' -
# confirm before renaming, as other tooling may expect this file name.
plt.savefig(r'C:\Users\Usuario\Pictures\Saber11\estratp_auto.png')
plt.close()
# -

# Number of schools that clear the automatic-admission criterion.
len(top_59)
# + code_folding=[0]
# Regiones_Confirmados: department distribution of students at the
# criterion + confirmed schools, compared with the national distribution.
data = estudiantes_2017_2019[estudiantes_2017_2019.cole_cod_dane_establecimiento.isin(confirmados_el_uniandino+list(top_59))]
# Attach school attributes to get each student's school department.
data = data.merge(colegios_2017_2019_,
                  how='left',
                  left_on=key_cole,
                  right_on=key_cole).sort_values(by='cole_depto_ubicacion')
# The 'depto' copy exists only so the pivot below can count it per department.
data['depto'] = data['cole_depto_ubicacion']
distribucion = pd.pivot_table(data=data,index=['cole_depto_ubicacion'],aggfunc={'depto':'count'}).reset_index()
# National baseline: same count, over every evaluated student.
data_all = estudiantes_2017_2019
data_all = data_all.merge(colegios_2017_2019_,
                          how='left',
                          left_on=key_cole,
                          right_on=key_cole).sort_values(by='cole_depto_ubicacion')
data_all['depto'] = data_all['cole_depto_ubicacion']
distribucion_all = pd.pivot_table(data=data_all,index=['cole_depto_ubicacion'],aggfunc={'depto':'count'}).reset_index()
# Two stacked bar charts: selected schools (top) vs. national (bottom),
# both normalized to population shares.
fig = plt.figure(figsize=(11,6))
ax1 = plt.subplot(211)
ax1.set_title('Departamento de los colegios con admisión automática según criterio + confirmados',y=1,fontsize=15)
ax1.bar(x=distribucion['cole_depto_ubicacion'],
        height=distribucion['depto']/sum(distribucion['depto']),
        color='mistyrose')
ax1.set_ylabel('%Población', fontsize=14)
ax1.set_ylim(0,1)
ax1.tick_params(axis='x', which='major', labelsize=9,rotation=25)
ax2 = plt.subplot(212)
ax2.set_title('Departamento de los colegios a nivel nacional', y=1, fontsize=15)
ax2.bar(x=distribucion_all['cole_depto_ubicacion'],
        height=distribucion_all['depto']/sum(distribucion_all['depto']),
        color='mistyrose')
ax2.set_xlabel('Departamento del colegio del estudiante', fontsize=14,y=15)
ax2.set_ylabel('%Población', fontsize=14)
ax2.tick_params(axis='x', which='major', labelsize=9,rotation=90)
ax2.set_ylim(0,0.3)
plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=0.6)
plt.savefig(r'C:\Users\Usuario\Pictures\Saber11\depto_auto.png')
plt.close()
| criterios_admision.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (Data Science)
# language: python
# name: python3__SAGEMAKER_INTERNAL__arn:aws:sagemaker:us-east-1:081325390199:image/datascience-1.0
# ---
# # Hosting Models on SageMaker and Automate the Workflow
#
# In this module you will:
# - Host a pretrained SKLearn model on SageMaker
# - Enable autoscaling on your endpoint
# - Monitor your model
# - Perform hyperparameter tuning
# - Redeploy a new model to the endpoint
# - Automate the pipeline using the notebook runner toolkit
#
# Let's get started!
#
# ---
# ### 1. Access your model artifact
# First, you should see a `model.tar.gz` file in this repository. Let's get that in your S3 bucket.
# +
import sagemaker
import os

# Upload the pretrained model artifact to the session's default bucket.
sess = sagemaker.Session()

# sagemaker will check to make sure this is a valid tar.gz object
local_model_file = 'model.tar.gz'
bucket = sess.default_bucket()
prefix = 'model-hosting'
s3_path = 's3://{}/{}/'.format(bucket, prefix)

# Upload through the SageMaker SDK rather than shelling out to the AWS CLI:
# no dependency on the CLI being installed/configured, and failures raise
# an exception instead of being silently ignored (the os.system return code
# was never checked).
sess.upload_data(path=local_model_file, bucket=bucket, key_prefix=prefix)
# -
# ### 2. Load your pretrained model artifact into SageMaker
# Now, we know that this model was trained using the SKLearn container within SageMaker. All we need to do to get this into a SageMaker-managed endpoint is set it up as a model. Let's do that here!
# Full S3 URI of the uploaded artifact (bucket prefix + file name).
model_data = f'{s3_path}{local_model_file}'
print(model_data)
# +
# %%writefile train.py
import argparse
import pandas as pd
import numpy as np
import os
from sklearn.metrics import confusion_matrix
from sklearn.neural_network import MLPClassifier
from sklearn.externals import joblib
def model_fn(model_dir):
    """Deserialize and return the fitted model.

    The file name must match the one the training entry point serialized
    ("model.joblib").
    """
    model_path = os.path.join(model_dir, "model.joblib")
    return joblib.load(model_path)
def predict_fn(input_data, model):
    """Return the class-membership probabilities for each input row.

    Returns a numpy array of shape (n_samples, n_classes) taken from the
    model's predict_proba. (The original also called model.predict and
    discarded the result; that dead call is removed.)
    """
    pred_prob = model.predict_proba(input_data)  # a numpy-convertible array
    return np.array(pred_prob)
def parse_args():
    """Parse hyperparameters and SageMaker channel/output paths.

    Defaults for the path arguments come from the SM_* environment
    variables, so calling this outside a SageMaker training container
    raises KeyError unless those variables are set.
    """
    # Hyperparameters are described here. In this simple example we are just including one hyperparameter.
    parser = argparse.ArgumentParser()
    parser.add_argument('--max_leaf_nodes', type=int, default=-1)

    # Sagemaker specific arguments. Defaults are set in the environment variables.
    parser.add_argument('--output-data-dir', type=str, default=os.environ['SM_OUTPUT_DATA_DIR'])
    parser.add_argument('--model-dir', type=str, default=os.environ['SM_MODEL_DIR'])
    parser.add_argument('--train', type=str, default=os.environ['SM_CHANNEL_TRAIN'])
    parser.add_argument('--test', type=str, default = os.environ['SM_CHANNEL_TEST'])

    # hyperparameters for tuning
    parser.add_argument('--batch-size', type=int, default=256)
    parser.add_argument('--lr', type=float, default = 0.001)
    args = parser.parse_args()
    return args
def train(args):
    """Train an MLP classifier on the CSV in args.train and serialize it.

    The training file 'train_set.csv' must contain the target in the
    'truth' column; every other column is treated as a feature.
    """
    train_data = pd.read_csv(os.path.join(args.train, 'train_set.csv'), engine='python')

    # Target is the 'truth' column; all remaining columns are features.
    # BUG FIX: the original sliced train_data.columns[1:len(train_data)],
    # which uses the ROW count as the column-slice bound and silently drops
    # feature columns whenever there are fewer rows than columns.
    train_y = train_data['truth']
    train_X = train_data.drop(columns=['truth'])

    # scikit-learn MLP; batch size and learning rate come from the CLI args
    # so SageMaker hyperparameter tuning can vary them.
    regr = MLPClassifier(random_state=1, max_iter=500, batch_size=args.batch_size,
                         learning_rate_init=args.lr, solver='lbfgs').fit(train_X, train_y)
    # (The unused regr.get_params() call was removed.)

    # Serialize the fitted model under the name model_fn() loads.
    joblib.dump(regr, os.path.join(args.model_dir, "model.joblib"))
    return regr
def accuracy(y_pred, y_true):
    """Print and return the fraction of predictions that match the truth.

    Equivalent to trace(confusion_matrix) / confusion_matrix.sum(), but
    computed directly with numpy, which drops the sklearn dependency for
    this helper.
    """
    y_pred = np.asarray(y_pred)
    y_true = np.asarray(y_true)
    rt = float((y_pred == y_true).mean())
    print ('Accuracy: {}'.format(rt))
    return rt
def test(regr, args):
    """Evaluate the fitted model on the held-out test CSV and print accuracy."""
    test_data = pd.read_csv(os.path.join(args.test, 'test_set.csv'), engine='python')

    # Same layout as the training file: 'truth' is the target, the rest are
    # features. BUG FIX: select by name instead of columns[1:len(test_data)],
    # which wrongly used the row count as the column-slice bound.
    y_true = test_data['truth']
    test_x = test_data.drop(columns=['truth'])
    y_pred = regr.predict(test_x)
    accuracy(y_pred, y_true)
if __name__ == '__main__':
    # Entry point when run as a SageMaker training job: parse CLI/env
    # configuration, train the model, then report test-set accuracy.
    args = parse_args()
    regr = train(args)
    test(regr, args)
# +
from sagemaker.sklearn.model import SKLearnModel

role = sagemaker.get_execution_role()

# Wrap the pretrained artifact as a SageMaker SKLearn model; 'train.py'
# supplies model_fn/predict_fn for the hosting container.
model = SKLearnModel(model_data = model_data,
                     role = role,
                     framework_version = '0.20.0',
                     py_version='py3',
                     entry_point = 'train.py')
# -
# ### 3. Create an Endpoint on SageMaker
# Now, here comes the complex maneuver. Kidding, it's dirt simple. Let's turn your model into a RESTful API!
predictor = model.deploy(1, 'ml.m4.2xlarge')
# +
import sagemaker
from sagemaker.sklearn.model import SKLearnPredictor

sess = sagemaker.Session()

# optional. If your kernel times out, or you need to refresh, here's how you can easily point to an existing endpoint
# NOTE(review): this endpoint name is hard-coded from a previous deploy;
# when re-running end to end, use the name of the endpoint created above.
endpoint_name = 'sagemaker-scikit-learn-2020-10-14-15-12-50-644'
predictor = SKLearnPredictor(endpoint_name = endpoint_name, sagemaker_session = sess)
# -
# Now let's get some predictions from that endpoint.
test_set = pd.read_csv('test_set.csv')
# +
# Separate the ground-truth labels from the feature columns.
y_true = test_set['truth']
test_set.drop('truth', inplace=True, axis=1)
# +
import pandas as pd

# Invoke the endpoint on the whole test set; expect one prediction row
# per input row.
y_pred = pd.DataFrame(predictor.predict(test_set))
assert len(y_pred) == test_set.shape[0]
# -
# ### 4. Enable Autoscaling on your Endpoint
# For the sake of argument, let's say we're happy with this model and want to continue supporting it in prod. Our next step might be to enable autoscaling. Let's do that right here.
# +
import boto3

def get_resource_id(endpoint_name):
    """Build the application-autoscaling resource id for the endpoint's
    first production variant."""
    sm = boto3.client('sagemaker')
    description = sm.describe_endpoint(EndpointName=endpoint_name)
    variant = description['ProductionVariants'][0]['VariantName']
    return 'endpoint/{}/variant/{}'.format(endpoint_name, variant)

resource_id = get_resource_id(endpoint_name)
# -
# +
import boto3

role = sagemaker.get_execution_role()

def set_scaling_policy(resource_id, min_capacity = 1, max_capacity = 8, role = role):
    """Register the endpoint variant as an autoscaling target (1-8 instances).

    Note: `role = role` captures the module-level role at definition time,
    so redefining the module-level `role` later does not affect this default.
    """
    scaling_client = boto3.client('application-autoscaling')
    response = scaling_client.register_scalable_target(
        ServiceNamespace='sagemaker',
        ResourceId=resource_id,
        ScalableDimension='sagemaker:variant:DesiredInstanceCount',
        MinCapacity=min_capacity,
        MaxCapacity=max_capacity,
        RoleARN=role)
    return response

res = set_scaling_policy(resource_id)
# -
# ### 5. Enable Model Monitor on your Endpoint
# Now that you have a model up and running, with autoscaling enabled, let's set up model monitor on that endpoint.
# +
import sagemaker
import os

sess = sagemaker.Session()
bucket = sess.default_bucket()
prefix = 'model-hosting'

# S3 location where Model Monitor will store captured request/response data.
s3_capture_upload_path = 's3://{}/{}/model-monitor'.format(bucket, prefix)

print ('about to set up monitoring for endpoint named {}'.format(endpoint_name))
# -
# Now, let's set up a data capture config.
# +
from sagemaker.model_monitor import DataCaptureConfig

# Capture 50% of requests and responses to the S3 path configured above.
data_capture_config = DataCaptureConfig(
    enable_capture = True,
    sampling_percentage=50,
    destination_s3_uri=s3_capture_upload_path,
    capture_options=["REQUEST", "RESPONSE"],
    csv_content_types=["text/csv"],
    json_content_types=["application/json"])

# Now it is time to apply the new configuration and wait for it to be applied
predictor.update_data_capture_config(data_capture_config=data_capture_config)

sess.wait_for_endpoint(endpoint=endpoint_name)
# -
# Next step here is to pass in our training data, and ask SageMaker to learn baseline thresholds for all of our features.
#
# First, let's make sure the data we used to train our model is stored in S3.
# Upload the training data via the SDK instead of `aws s3 cp` through
# os.system (no CLI dependency; failures raise instead of being ignored).
sess.upload_data(path='train_set.csv', bucket=bucket, key_prefix='{}/train'.format(prefix))
# +
# todo - show them how to get access to this training data
# Input data and output location for Model Monitor's baselining job.
s3_training_data_path = 's3://{}/{}/train/train_set.csv'.format(bucket, prefix)
s3_baseline_results = 's3://{}/{}/model-monitor/baseline-results'.format(bucket, prefix)
# +
from sagemaker.model_monitor import DefaultModelMonitor
from sagemaker.model_monitor.dataset_format import DatasetFormat

my_default_monitor = DefaultModelMonitor(
    role=role,
    instance_count=1,
    instance_type='ml.m5.xlarge',
    volume_size_in_gb=20,
    max_runtime_in_seconds=3600,
)

# Baselining job: SageMaker computes per-feature statistics and suggested
# constraints from the training data.
my_default_monitor.suggest_baseline(
    baseline_dataset=s3_training_data_path,
    # change header to false if not included
    # NOTE(review): train_set.csv is read elsewhere with a 'truth' header
    # column, so it appears to HAVE a header - confirm header=False is
    # really intended here.
    dataset_format=DatasetFormat.csv(header=False),
    output_s3_uri=s3_baseline_results,
    wait=True
)
# -
# If you like, you can download the results from S3 and analyze. In the interest of time, we'll move on to setting up the monitoring schedule.
# +
from sagemaker.model_monitor import CronExpressionGenerator
from time import gmtime, strftime

# NOTE(review): the schedule name says 'bi-hourly' but the cron expression
# below is daily - confirm which cadence is intended.
mon_schedule_name = 'bi-hourly'
s3_report_path = 's3://{}/{}/model-monitor/monitoring-job-results'.format(bucket, prefix)

# Schedule the monitoring job against the baseline statistics/constraints
# computed above; results land under s3_report_path.
my_default_monitor.create_monitoring_schedule(
    monitor_schedule_name=mon_schedule_name,
    endpoint_input=endpoint_name,
    output_s3_uri=s3_report_path,
    statistics=my_default_monitor.baseline_statistics(),
    constraints=my_default_monitor.suggested_constraints(),
    schedule_cron_expression=CronExpressionGenerator.daily(),
    enable_cloudwatch_metrics=True)
# -
# ---
# # Tune your model and re-deploy onto the SageMaker Endpoint
#
# Alright, we made it pretty far already! Now that we have monitoring enabled on this endpoint, let's imagine that something goes awry. We realize that we need a new model hosted on this RESTful API. How are we going to do that?
# First, let's go about getting a new model. Given that the dataset here is pretty small, less than even 500 rows on the training set, why not try out AutoGluon? AutoGluon is a competitive choice here because it will actually augment our data for us. Said another way, Autogluon will make our original dataset larger by using Transformers and masking columns. Pretty cool!
# !mkdir src
# +
# %%writefile src/requirements.txt
autogluon
sagemaker
awscli
boto3
PrettyTable
bokeh
numpy==1.16.1
matplotlib
sagemaker-experiments
# +
# %%writefile src/train.py
import ast
import argparse
import logging
import warnings
import os
import json
import glob
import subprocess
import sys
import boto3
import pickle
import pandas as pd
from collections import Counter
from timeit import default_timer as timer
import time
from smexperiments.experiment import Experiment
from smexperiments.trial import Trial
from smexperiments.trial_component import TrialComponent
from smexperiments.tracker import Tracker
sys.path.insert(0, 'package')
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=DeprecationWarning)
from prettytable import PrettyTable
import autogluon as ag
from autogluon import TabularPrediction as task
from autogluon.task.tabular_prediction import TabularDataset
# ------------------------------------------------------------ #
# Training methods #
# ------------------------------------------------------------ #
def du(path):
    """Disk usage of *path* in human readable form (e.g. '2,1GB')."""
    output = subprocess.check_output(['du', '-sh', path])
    return output.split()[0].decode('utf-8')
def __load_input_data(path: str) -> TabularDataset:
    """
    Load every CSV under *path* into a single TabularDataset.

    :param path: directory containing one or more .csv files
    :return: TabularDataset, or None when no readable csv data is present
    """
    input_data_files = os.listdir(path)
    try:
        input_dfs = [pd.read_csv(f'{path}/{data_file}') for data_file in input_data_files]
        return task.Dataset(df=pd.concat(input_dfs))
    except Exception as e:
        # Narrowed from a bare `except:` (which also swallowed SystemExit and
        # KeyboardInterrupt) and surface the underlying error in the log
        # instead of hiding it entirely.
        print(f'No csv data in {path}! ({e})')
        return None
def train(args):
    """Run AutoGluon TabularPrediction training inside a SageMaker container.

    Loads all CSVs from the training channel, fits task.fit with the
    hyperparameters carried on *args*, prints a fit summary, optionally a
    leaderboard on the test channel, and a summary of /opt/ml/model.
    """
    # NOTE(review): is_distributed / dist_ip_addrs / ngpus_per_trial are
    # computed here but never passed to task.fit below - confirm whether
    # distributed/GPU settings were meant to be forwarded.
    is_distributed = len(args.hosts) > 1
    host_rank = args.hosts.index(args.current_host)
    dist_ip_addrs = args.hosts
    dist_ip_addrs.pop(host_rank)
    ngpus_per_trial = 1 if args.num_gpus > 0 else 0

    # load training and validation data
    print(f'Train files: {os.listdir(args.train)}')
    train_data = __load_input_data(args.train)
    print(f'Label counts: {dict(Counter(train_data[args.label]))}')

    # Every tunable below is surfaced as a CLI argument by parse_args().
    predictor = task.fit(
        train_data=train_data,
        label=args.label,
        output_directory=args.model_dir,
        problem_type=args.problem_type,
        eval_metric=args.eval_metric,
        stopping_metric=args.stopping_metric,
        auto_stack=args.auto_stack, # default: False
        hyperparameter_tune=args.hyperparameter_tune, # default: False
        feature_prune=args.feature_prune, # default: False
        holdout_frac=args.holdout_frac, # default: None
        num_bagging_folds=args.num_bagging_folds, # default: 0
        num_bagging_sets=args.num_bagging_sets, # default: None
        stack_ensemble_levels=args.stack_ensemble_levels, # default: 0
        cache_data=args.cache_data,
        time_limits=args.time_limits,
        num_trials=args.num_trials, # default: None
        search_strategy=args.search_strategy, # default: 'random'
        search_options=args.search_options,
        visualizer=args.visualizer,
        verbosity=args.verbosity
    )

    # Results summary
    predictor.fit_summary(verbosity=1)

    # Leaderboard on optional test data
    if args.test:
        print(f'Test files: {os.listdir(args.test)}')
        test_data = __load_input_data(args.test)
        print('Running model on test data and getting Leaderboard...')
        leaderboard = predictor.leaderboard(dataset=test_data, silent=True)

        def format_for_print(df):
            # Render the leaderboard DataFrame as an ASCII table for the logs.
            table = PrettyTable(list(df.columns))
            for row in df.itertuples():
                table.add_row(row[1:])
            return str(table)

        print(format_for_print(leaderboard), end='\n\n')

    # Files summary: what ends up packaged as the model artifact.
    print(f'Model export summary:')
    print(f"/opt/ml/model/: {os.listdir('/opt/ml/model/')}")
    models_contents = os.listdir('/opt/ml/model/models')
    print(f"/opt/ml/model/models: {models_contents}")
    print(f"/opt/ml/model directory size: {du('/opt/ml/model/')}\n")
# ------------------------------------------------------------ #
# Training execution #
# ------------------------------------------------------------ #
def str2bool(v):
    """Interpret 'yes'/'true'/'t'/'1' (any case) as True; anything else is False."""
    truthy = ('yes', 'true', 't', '1')
    return v.lower() in truthy
def parse_args():
    """Build and parse the SageMaker training-job arguments.

    Defaults for hosts, data channels and the model directory come from the
    SM_* environment variables that SageMaker injects into the training
    container; the remaining flags mirror the keyword arguments of
    AutoGluon's ``task.fit``.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.register('type','bool',str2bool) # add type keyword to registries
    # NOTE(review): argparse applies ``type=list`` to a CLI string by splitting
    # it into characters; '--hosts' and '--dist_ip_addrs' only behave as
    # intended when their defaults are used -- confirm before passing on the CLI.
    parser.add_argument('--hosts', type=list, default=json.loads(os.environ['SM_HOSTS']))
    parser.add_argument('--current-host', type=str, default=os.environ['SM_CURRENT_HOST'])
    parser.add_argument('--num-gpus', type=int, default=os.environ['SM_NUM_GPUS'])
    parser.add_argument('--model-dir', type=str, default=os.environ['SM_MODEL_DIR']) # /opt/ml/model
    parser.add_argument('--train', type=str, default=os.environ['SM_CHANNEL_TRAINING'])
    parser.add_argument('--test', type=str, default='') # /opt/ml/input/data/test
    parser.add_argument('--label', type=str, default='truth',
        help="Name of the column that contains the target variable to predict.")
    parser.add_argument('--problem_type', type=str, default=None,
        help=("Type of prediction problem, i.e. is this a binary/multiclass classification or "
              "regression problem options: 'binary', 'multiclass', 'regression'). "
              "If `problem_type = None`, the prediction problem type is inferred based "
              "on the label-values in provided dataset."))
    parser.add_argument('--eval_metric', type=str, default=None,
        help=("Metric by which predictions will be ultimately evaluated on test data."
              "AutoGluon tunes factors such as hyperparameters, early-stopping, ensemble-weights, etc. "
              "in order to improve this metric on validation data. "
              "If `eval_metric = None`, it is automatically chosen based on `problem_type`. "
              "Defaults to 'accuracy' for binary and multiclass classification and "
              "'root_mean_squared_error' for regression. "
              "Otherwise, options for classification: [ "
              " 'accuracy', 'balanced_accuracy', 'f1', 'f1_macro', 'f1_micro', 'f1_weighted', "
              " 'roc_auc', 'average_precision', 'precision', 'precision_macro', 'precision_micro', 'precision_weighted', "
              " 'recall', 'recall_macro', 'recall_micro', 'recall_weighted', 'log_loss', 'pac_score']. "
              "Options for regression: ['root_mean_squared_error', 'mean_squared_error', "
              "'mean_absolute_error', 'median_absolute_error', 'r2']. "
              "For more information on these options, see `sklearn.metrics`: "
              "https://scikit-learn.org/stable/modules/classes.html#sklearn-metrics-metrics "
              "You can also pass your own evaluation function here as long as it follows formatting of the functions "
              "defined in `autogluon/utils/tabular/metrics/`. "))
    parser.add_argument('--stopping_metric', type=str, default=None,
        help=("Metric which models use to early stop to avoid overfitting. "
              "`stopping_metric` is not used by weighted ensembles, instead weighted ensembles maximize `eval_metric`. "
              "Defaults to `eval_metric` value except when `eval_metric='roc_auc'`, where it defaults to `log_loss`."))
    parser.add_argument('--auto_stack', type='bool', default=False,
        help=("Whether to have AutoGluon automatically attempt to select optimal "
              "num_bagging_folds and stack_ensemble_levels based on data properties. "
              "Note: Overrides num_bagging_folds and stack_ensemble_levels values. "
              "Note: This can increase training time by up to 20x, but can produce much better results. "
              "Note: This can increase inference time by up to 20x."))
    parser.add_argument('--hyperparameter_tune', type='bool', default=False,
        help=("Whether to tune hyperparameters or just use fixed hyperparameter values "
              "for each model. Setting as True will increase `fit()` runtimes."))
    parser.add_argument('--feature_prune', type='bool', default=False,
        help="Whether or not to perform feature selection.")
    parser.add_argument('--holdout_frac', type=float, default=None,
        help=("Fraction of train_data to holdout as tuning data for optimizing hyperparameters "
              "(ignored unless `tuning_data = None`, ignored if `num_bagging_folds != 0`). "
              "Default value is selected based on the number of rows in the training data. "
              "Default values range from 0.2 at 2,500 rows to 0.01 at 250,000 rows. "
              "Default value is doubled if `hyperparameter_tune = True`, up to a maximum of 0.2. "
              "Disabled if `num_bagging_folds >= 2`."))
    parser.add_argument('--num_bagging_folds', type=int, default=0,
        help=("Number of folds used for bagging of models. When `num_bagging_folds = k`, "
              "training time is roughly increased by a factor of `k` (set = 0 to disable bagging). "
              "Disabled by default, but we recommend values between 5-10 to maximize predictive performance. "
              "Increasing num_bagging_folds will result in models with lower bias but that are more prone to overfitting. "
              "Values > 10 may produce diminishing returns, and can even harm overall results due to overfitting. "
              "To further improve predictions, avoid increasing num_bagging_folds much beyond 10 "
              "and instead increase num_bagging_sets. "))
    parser.add_argument('--num_bagging_sets', type=int, default=None,
        help=("Number of repeats of kfold bagging to perform (values must be >= 1). "
              "Total number of models trained during bagging = num_bagging_folds * num_bagging_sets. "
              "Defaults to 1 if time_limits is not specified, otherwise 20 "
              "(always disabled if num_bagging_folds is not specified). "
              "Values greater than 1 will result in superior predictive performance, "
              "especially on smaller problems and with stacking enabled. "
              "Increasing num_bagged_sets reduces the bagged aggregated variance without "
              "increasing the amount each model is overfit."))
    parser.add_argument('--stack_ensemble_levels', type=int, default=0,
        help=("Number of stacking levels to use in stack ensemble. "
              "Roughly increases model training time by factor of `stack_ensemble_levels+1` "
              "(set = 0 to disable stack ensembling). "
              "Disabled by default, but we recommend values between 1-3 to maximize predictive performance. "
              "To prevent overfitting, this argument is ignored unless you have also set `num_bagging_folds >= 2`."))
    # dict-valued flags are parsed from their string form with ast.literal_eval
    parser.add_argument('--hyperparameters', type=lambda s: ast.literal_eval(s), default=None,
        help="Refer to docs: https://autogluon.mxnet.io/api/autogluon.task.html")
    parser.add_argument('--cache_data', type='bool', default=True,
        help=("Whether the predictor returned by this `fit()` call should be able to be further trained "
              "via another future `fit()` call. "
              "When enabled, the training and validation data are saved to disk for future reuse."))
    parser.add_argument('--time_limits', type=int, default=None,
        help=("Approximately how long `fit()` should run for (wallclock time in seconds)."
              "If not specified, `fit()` will run until all models have completed training, "
              "but will not repeatedly bag models unless `num_bagging_sets` is specified."))
    parser.add_argument('--num_trials', type=int, default=None,
        help=("Maximal number of different hyperparameter settings of each "
              "model type to evaluate during HPO. (only matters if "
              "hyperparameter_tune = True). If both `time_limits` and "
              "`num_trials` are specified, `time_limits` takes precedent."))
    parser.add_argument('--search_strategy', type=str, default='random',
        help=("Which hyperparameter search algorithm to use. "
              "Options include: 'random' (random search), 'skopt' "
              "(SKopt Bayesian optimization), 'grid' (grid search), "
              "'hyperband' (Hyperband), 'rl' (reinforcement learner)"))
    parser.add_argument('--search_options', type=lambda s: ast.literal_eval(s), default=None,
        help="Auxiliary keyword arguments to pass to the searcher that performs hyperparameter optimization.")
    parser.add_argument('--nthreads_per_trial', type=int, default=None,
        help="How many CPUs to use in each training run of an individual model. This is automatically determined by AutoGluon when left as None (based on available compute).")
    parser.add_argument('--ngpus_per_trial', type=int, default=None,
        help="How many GPUs to use in each trial (ie. single training run of a model). This is automatically determined by AutoGluon when left as None.")
    parser.add_argument('--dist_ip_addrs', type=list, default=None,
        help="List of IP addresses corresponding to remote workers, in order to leverage distributed computation.")
    parser.add_argument('--visualizer', type=str, default='none',
        help=("How to visualize the neural network training progress during `fit()`. "
              "Options: ['mxboard', 'tensorboard', 'none']."))
    parser.add_argument('--verbosity', type=int, default=2,
        help=("Verbosity levels range from 0 to 4 and control how much information is printed during fit(). "
              "Higher levels correspond to more detailed print statements (you can set verbosity = 0 to suppress warnings). "
              "If using logging, you can alternatively control amount of information printed via `logger.setLevel(L)`, "
              "where `L` ranges from 0 to 50 (Note: higher values of `L` correspond to fewer print statements, "
              "opposite of verbosity levels"))
    parser.add_argument('--debug', type='bool', default=False,
        help=("Whether to set logging level to DEBUG"))
    parser.add_argument('--feature_importance', type='bool', default=True)
    return parser.parse_args()
def set_experiment_config(experiment_basename=None):
    """Create a SageMaker Experiment and return its generated name.

    Optionally takes a base name for the experiment; otherwise 'autogluon'
    plus a timestamp is used. Has a hard dependency on boto3 (and on the
    sagemaker-experiments SDK for ``Experiment``, imported elsewhere).

    :param experiment_basename: optional prefix for the experiment name
    :return: the created experiment name, or '' on failure
    """
    now = int(time.time())
    if experiment_basename:
        experiment_name = '{}-autogluon-{}'.format(experiment_basename, now)
    else:
        experiment_name = 'autogluon-{}'.format(now)
    # Fix: narrowed the original bare ``except:`` clauses -- they also caught
    # KeyboardInterrupt/SystemExit, which should never be swallowed here.
    try:
        client = boto3.Session().client('sagemaker')
    except Exception:
        print('You need to install boto3 to create an experiment. Try pip install --upgrade boto3')
        return ''
    try:
        Experiment.create(experiment_name=experiment_name,
                          description="Running AutoGluon Tabular with SageMaker Experiments",
                          sagemaker_boto_client=client)
        print('Created an experiment named {}, you should be able to see this in SageMaker Studio right now.'.format(experiment_name))
    except Exception:
        print('Could not create the experiment. Is your basename properly configured? Also try installing the sagemaker experiments SDK with pip install sagemaker-experiments.')
        return ''
    return experiment_name
if __name__ == "__main__":
    # Entry point when run as a SageMaker training script.
    start = timer()
    args = parse_args()
    # Print SageMaker args
    print('\n====== args ======')
    for k,v in vars(args).items():
        print(f'{k}, type: {type(v)}, value: {v}')
    print()
    train()
    # Package inference code with model export so the serving container
    # finds it under /opt/ml/model/code/
    subprocess.call('mkdir /opt/ml/model/code'.split())
    subprocess.call('cp /opt/ml/code/inference.py /opt/ml/model/code/'.split())
    elapsed_time = round(timer()-start,3)
    print(f'Elapsed time: {elapsed_time} seconds')
    print('===== Training Completed =====')
# +
from sagemaker.mxnet.estimator import MXNet
from sagemaker import get_execution_role
role = get_execution_role()
# MXNet framework estimator used as a managed container to run src/train.py
estimator = MXNet(source_dir = 'src',
                  entry_point = 'train.py',
                  role=role,
                  framework_version = '1.7.0',
                  py_version = 'py3',
                  instance_count=1,
                  instance_type='ml.m5.2xlarge',
                  volume_size=100)
s3_path = 's3://sagemaker-us-east-1-181880743555/model-hosting/test_set.csv'
# wait=False returns immediately; the training job continues asynchronously
estimator.fit(s3_path, wait=False)
# +
# from sagemaker.sklearn.estimator import SKLearn
# from sagemaker import get_execution_role
# script_path = 'train.py'
# # first, let's get the estimator defined
# est = SKLearn(entry_point=script_path,
# instance_type="ml.c4.xlarge",
# instance_count = 1,
# role=role,
# sagemaker_session=sess,
# py_version = 'py3',
# framework_version = '0.20.0')
# # then, let's set up the tuning framework
# from sagemaker.tuner import IntegerParameter, CategoricalParameter, ContinuousParameter, HyperparameterTuner
# hyperparameter_ranges = {'lr': ContinuousParameter(0.00001, 0.001),
# 'batch-size': IntegerParameter(25, 300)}
# +
# objective_metric_name = 'Accuracy'
# objective_type = 'Maximize'
# metric_definitions = [{'Name': 'Accuracy',
# 'Regex': 'Accuracy: ([0-9\\.]+)'}]
# +
# tuner = HyperparameterTuner(est,
# objective_metric_name,
# hyperparameter_ranges,
# metric_definitions,
# max_jobs=20,
# max_parallel_jobs=3,
# objective_type=objective_type)
# +
# msg = 'aws s3 cp test_set.csv s3://{}/{}/ && aws s3 cp train_set.csv s3://{}/{}/'.format(bucket, prefix, bucket, prefix)
# os.system(msg)
# +
# # may complain about not wanting headers
# inputs = {'train': 's3://{}/{}/train_set.csv'.format(bucket, prefix),
# 'test': 's3://{}/{}/test_set.csv'.format(bucket, prefix)}
# +
# tuner.fit(inputs)
# -
# ### Redeploy to existing SageMaker Endpoint
# +
from sagemaker.tuner import HyperparameterTuner
# Re-attach to a previously completed hyperparameter tuning job by name
job_name = 'sagemaker-scikit-lea-201014-1830'
tuner = HyperparameterTuner.attach(job_name)
# -
# Build a deployable SageMaker Model from the best training job of the tuner
best_estimator = tuner.best_estimator()
model = best_estimator.create_model()
model_name = model.name
# +
import boto3
import random
import time
import datetime
def create_model(model, now):
    """Register *model* with SageMaker under a timestamped name.

    :param model: a sagemaker Model object (provides .name, .image_uri, .model_data)
    :param now: string suffix used to make the model name unique
    :return: the boto3 ``create_model`` response dict
    """
    sm_client = boto3.client('sagemaker')
    # Fix: removed unused local ``x = random.randint(1, 100)`` left over from
    # an earlier naming scheme.
    model_name = '{}-{}'.format(model.name, now)
    # NOTE(review): 'ContainerHostname': 'string' looks like a placeholder copied
    # from the API docs -- confirm whether a real hostname is needed here.
    response = sm_client.create_model(ModelName=model_name,
                                      PrimaryContainer={'ContainerHostname': 'string','Image': model.image_uri, 'ModelDataUrl': model.model_data},
                                      ExecutionRoleArn= 'arn:aws:iam::181880743555:role/service-role/AmazonSageMaker-ExecutionRole-20200929T125134')
    return response
def get_endpoint_config(model_name, now):
    """Create a single-variant endpoint configuration and return its name.

    :param model_name: name of an already-registered SageMaker model
    :param now: string suffix used to make the config name unique
    :return: the new endpoint configuration name
    """
    sm_client = boto3.client('sagemaker')
    config_name = 'ec-{}-{}'.format(model_name, now)
    variant = {'VariantName': 'v-{}'.format(model_name),
               'ModelName': model_name,
               'InitialInstanceCount': 1,
               'InstanceType':'ml.m5.large'}
    sm_client.create_endpoint_config(EndpointConfigName=config_name,
                                     ProductionVariants=[variant])
    return config_name
def update_endpoint(model_name, endpoint_name, now):
    """Point an existing SageMaker endpoint at a new model.

    Creates a fresh endpoint config for *model_name*, then tears down
    anything attached to the live endpoint that would block the update
    (autoscaling policy, model-monitoring schedules) before calling
    ``update_endpoint``.

    :param model_name: name of the newly registered model
    :param endpoint_name: name of the live endpoint to update
    :param now: string suffix used for unique config naming
    :return: the boto3 ``update_endpoint`` response dict
    """
    sm_client = boto3.client('sagemaker')
    endpoint_config = get_endpoint_config(model_name, now)
    # deregister a scaling policy
    # NOTE(review): get_resource_id is defined elsewhere in this project --
    # confirm it returns 'endpoint/<name>/variant/<variant>' as expected here.
    resource_id = get_resource_id(endpoint_name)
    client = boto3.client('application-autoscaling')
    # Fix: narrowed the original bare ``except:`` clauses -- best-effort
    # cleanup should not swallow KeyboardInterrupt/SystemExit.
    try:
        response = client.deregister_scalable_target(ServiceNamespace='sagemaker',
                                                     ResourceId=resource_id,
                                                     ScalableDimension='sagemaker:variant:DesiredInstanceCount')
    except Exception:
        print('no autoscaling policy to deregister, continuing')
    # get monitoring schedules
    try:
        response = sm_client.list_monitoring_schedules(EndpointName=endpoint_name,
                                                       MaxResults=10,
                                                       StatusEquals='Scheduled')
        # delete monitoring schedules
        for each in response['MonitoringScheduleSummaries']:
            name = each['MonitoringScheduleName']
            response = sm_client.delete_monitoring_schedule(MonitoringScheduleName=name)
    except Exception:
        print('already deleted the monitoring schedules')
    response = sm_client.update_endpoint(EndpointName=endpoint_name,
                                         EndpointConfigName=endpoint_config)
    return response
# NOTE(review): this keeps only the microsecond part of the timestamp as a
# pseudo-unique suffix -- confirm name collisions are acceptable.
now = str(datetime.datetime.now()).split('.')[-1]
endpoint_name = 'sagemaker-scikit-learn-2020-10-14-15-12-50-644'
# Register the tuned model, then swap the live endpoint over to it
create_model(model, now)
update_endpoint(model_name, endpoint_name, now)
# -
# ---
# # Automate with Notebook Runner
# Now we're able to monitor new endpoints, we want the ability to automate this whole flow so that we can do it rapidly. As it so happens, a simple and fast way of doing that is using SageMaker processing jobs, CloudWatch, and Lambda. Luckily we can import all of the infrastructure we need using a simple toolkit, which we'll step through here.
#
# GitHub notes are right here: https://github.com/aws-samples/sagemaker-run-notebook
# +
# todo - make sure they have the right execution role here, add cfn all access, then a trust relationship, then inlines to allow create stack, plus codebuild create project and start build
# +
# # !wget https://github.com/aws-samples/sagemaker-run-notebook/releases/download/v0.15.0/sagemaker_run_notebook-0.15.0.tar.gz
# +
# # !pip install sagemaker_run_notebook-0.15.0.tar.gz
# +
# # !run-notebook create-infrastructure --update
# -
# %%writefile requirements.txt
awscli
boto3
sagemaker
pandas
sklearn
# +
# # !run-notebook create-container --requirements requirements.txt
# +
# # !wget https://github.com/aws-samples/sagemaker-run-notebook/releases/download/v0.15.0/install-run-notebook.sh
# -
# Next, __you need to open a system terminal on Studio, cd into the directory where we just downloaded `install-run-notebook.sh`, and run the command `bash install-run-notebook.sh`.__ This will run for a few minutes, then prompt you to refresh your web browser. Do that, and you'll see a new Jupyter Widget!
# After restarting your Studio page, click on the spaceship widget on the top lefthand side of your Studio domain view. Make sure you're actually looking at an ipython notebook while you do this.
#
# Using the widget is super simple. Paste in your execution role, which you can find by running `sagemaker.get_execution_role()` locally. Then paste in your ECR image repository, which you can find by opening up the ECR page in the AWS console. It should default to `notebook-runner`, so you can just paste that in directly.
#
# Then click the big blue `run now` button, and __this entire notebook is going to run on a SageMaker processing job.__
# Before you do that, you'll want to comment-out those last few cells you ran to install this toolkit and get the infrastructure up and running.
# If you want, you can parameterize this entire notebook using Papermill. Read more about how to do that with the following resources:
# - Blog post: https://aws.amazon.com/blogs/machine-learning/scheduling-jupyter-notebooks-on-sagemaker-ephemeral-instances/
# - GitHub repository: https://github.com/aws-samples/sagemaker-run-notebook
| Starter Notebooks/MLOps and Hosting/Hosting Models on SageMaker.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Modified Sentiment Analysis Code
# +
import random
from nltk.corpus import movie_reviews
from nltk.corpus import stopwords
from nltk import FreqDist
from nltk import NaiveBayesClassifier
from nltk.classify import accuracy
import builtins
# When run under line_profiler/kernprof, ``profile`` is injected into builtins;
# otherwise fall back to a no-op decorator so the @profile-decorated functions
# still work.
try:
    profile = builtins.profile
except AttributeError:
    def profile(func):
        return func
@profile
def label_docs():
    """Collect (word-list, category) pairs for every movie review, shuffled
    with a fixed seed for reproducibility."""
    docs = []
    for cat in movie_reviews.categories():
        for fid in movie_reviews.fileids(cat):
            docs.append((list(movie_reviews.words(fid)), cat))
    random.seed(42)
    random.shuffle(docs)
    return docs
@profile
def isStopWord(word):
    """True if *word* is in the module-level stop-word set ``sw`` or is a
    single character."""
    if word in sw:
        return True
    return len(word) == 1
@profile
def filter_corpus():
    """Lower-case the whole movie-review corpus and drop stop words,
    printing the word counts before and after filtering."""
    review_words = movie_reviews.words()
    print("# Review Words", len(review_words))
    kept = []
    for w in review_words:
        lowered = w.lower()
        if not isStopWord(lowered):
            kept.append(lowered)
    print("# After filter", len(kept))
    return kept
@profile
def select_word_features(corpus):
    """Return the top 2% most frequent words of *corpus* as feature words.

    Fix: the previous version sliced ``list(words.keys())[:N]``, which on
    modern NLTK yields the first N words in *insertion* order, not the N most
    frequent; ``most_common`` gives the actually intended frequency ranking.
    """
    words = FreqDist(corpus)
    N = int(.02 * len(words))
    return [word for word, _count in words.most_common(N)]
@profile
def doc_features(doc):
    """Map each selected feature word to its count in *doc* (stop words
    excluded); relies on the module-level ``word_features`` list."""
    doc_words = FreqDist(w for w in doc if not isStopWord(w))
    return {'count (%s)' % word: doc_words.get(word, 0)
            for word in word_features}
@profile
def make_features(docs):
    """Convert (word-list, category) pairs into (feature-dict, category)
    pairs suitable for the NLTK classifier."""
    featuresets = []
    for words, category in docs:
        featuresets.append((doc_features(words), category))
    return featuresets
@profile
def split_data(sets):
    """Hold out the first 200 feature sets for testing; train on the rest.
    Returns (train_set, test_set)."""
    test_set = sets[:200]
    train_set = sets[200:]
    return train_set, test_set
if __name__ == "__main__":
    # End-to-end Naive Bayes sentiment pipeline on the movie-review corpus.
    labeled_docs = label_docs()
    sw = set(stopwords.words('english'))          # global stop-word set used by isStopWord
    filtered = filter_corpus()
    word_features = select_word_features(filtered)  # global used by doc_features
    featuresets = make_features(labeled_docs)
    train_set, test_set = split_data(featuresets)
    classifier = NaiveBayesClassifier.train(train_set)
    print("Accuracy", accuracy(classifier, test_set))
    # NOTE(review): show_most_informative_features prints its table and returns
    # None, so this also prints a trailing "None" -- presumably intentional here.
    print(classifier.show_most_informative_features())
# -
| tests/practice/pda_ch-12_Modified Sentiment Analysis Code.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Resit Assignment part A
#
# **Deadline: Friday, November 13, 2020 before 17:00**
#
# - Please name your files:
# * ASSIGNMENT-RESIT-A.ipynb
# * utils.py (from part B)
# * raw_text_to_coll.py (from part B)
#
# Please name your zip file as follows: RESIT-ASSIGNMENT.zip and upload it via Canvas (Resit Assignment).
# - Please submit your assignment on Canvas: Resit Assignment
# - If you have **questions** about this topic
# - [in the week of the 2nd of November] please contact **Pia (<EMAIL>)**
# - [in the week of the 9th of November] please contact **Marten (<EMAIL>)**
#
# Questions and answers will be collected in [this Q&A document](https://docs.google.com/document/d/1Yf2lE6HdApz4wSgNpxWL_nnVcXED1YNW8Rg__wCKcvs/edit?usp=sharing),
# so please check if your question has already been answered.
#
# All of the covered chapters are important to this assignment. However, please pay special attention to:
# - Chapter 10 - Dictionaries
# - Chapter 11 - Functions and scope
# * Chapter 14 - Reading and writing text files
# * Chapter 15 - Off to analyzing text
# - Chapter 17 - Data Formats II (JSON)
# - Chapter 19 - More about Natural Language Processing Tools (spaCy)
#
#
# In this assignment:
# * we are going to process the texts in ../Data/Dreams/*txt
# * for each file, we are going to determine:
# * the number of characters
# * the number of sentences
# * the number of words
# * the longest word
# * the longest sentence
# ## Note
# This notebook should be placed in the same folder as the other Assignments!
# ## Loading spaCy
# Please make sure that spaCy is installed on your computer
import spacy
# Please make sure you can load the English spaCy model:
nlp = spacy.load('en_core_web_sm')
# ## Exercise 1: get paths
# Define a function called **get_paths** that has the following parameter:
# * **input_folder**: a string
#
# The function:
# * stores all paths to .txt files in the *input_folder* in a list
# * returns a list of strings, i.e., each string is a file path
# +
# your code here
# -
# Please test your function using the following function call
paths = get_paths(input_folder='../Data/Dreams')
print(paths)
# ## Exercise 2: load text
# Define a function called **load_text** that has the following parameter:
# * **txt_path**: a string
#
#
# The function:
# * opens the **txt_path** for reading and loads the contents of the file as a string
# * returns a string, i.e., the content of the file
# +
# your code here
# -
# ## Exercise 3: return the longest
# Define a function called **return_the_longest** that has the following parameter:
# * **list_of_strings**: a list of strings
#
#
# The function:
# * returns the string with the highest number of characters. If multiple strings have the same length, return one of them.
def return_the_longest(list_of_strings):
    """
    given a list of strings, return the longest string
    if multiple strings have the same length, return one of them.

    :param list list_of_strings: a list of strings
    :rtype: str
    :return: the string with the highest number of characters
    """
    # Fix: the stub had no body; max with key=len returns the first of any
    # length ties, satisfying "return one of them".
    return max(list_of_strings, key=len)
# Please test you function by running the following cell:
# +
a_list_of_strings = ["this", "is", "a", "sentence"]
longest_string = return_the_longest(a_list_of_strings)
error_message = f'the longest string should be "sentence", you provided {longest_string}'
assert longest_string == 'sentence', error_message
# -
# ## Exercise 4: extract statistics
# We are going to use spaCy to extract statistics from Vickie's dreams! Here are a few tips below about how to use spaCy:
# #### tip 1: process text with spaCy
a_text = 'this is one sentence. this is another.'
doc = nlp(a_text)
# #### tip 2: the number of characters is the length of the document
num_chars = len(doc.text)
print(num_chars)
# #### tip 3: loop through the sentences of a document
for sent in doc.sents:
sent = sent.text
print(sent)
# #### tip 4: loop through the words of a document
for token in doc:
word = token.text
print(word)
# Define a function called **extract_statistics** that has the following parameters:
# * **nlp**: the result of calling spacy.load('en_core_web_sm')
# * **txt_path**: path to a txt file, e.g., '../Data/Dreams/vickie8.txt'
#
# The function:
# * loads the content of the file using the function **load_text**
# * processes the content of the file using **nlp(content)** (see tip 1 of this exercise)
#
# The function returns a dictionary with five keys:
# * **num_sents**: the number of sentences in the document
# * **num_chars**: the number of characters in the document
# * **num_tokens**: the number of words in the document
# * **longest_sent**: the longest sentence in the document
# * Please make a list with all the sentences and call the function **return_the_longest** to retrieve the longest sentence
# * **longest_word**: the longest word in the document
# * Please make a list with all the words and call the function **return_the_longest** to retrieve the longest word
#
# Test the function on one of the files from Vickie's dreams.
def extract_statistics(nlp, txt_path):
    """
    given a txt_path
    -use the load_text function to load the text
    -process the text using spaCy

    :param nlp: loaded spaCy model (result of calling spacy.load('en_core_web_sm'))
    :param str txt_path: path to txt file

    :rtype: dict
    :return: a dictionary with the following keys:
        -"num_sents" : the number of sentences
        -"num_chars" : the number of characters
        -"num_tokens" : the number of words
        -"longest_sent" : the longest sentence
        -"longest_word" : the longest word
    """
    # Fix: the stub had no body; implement per the documented contract.
    content = load_text(txt_path)
    doc = nlp(content)
    sentences = [sent.text for sent in doc.sents]
    words = [token.text for token in doc]
    return {
        "num_sents": len(sentences),
        "num_chars": len(doc.text),
        "num_tokens": len(words),
        "longest_sent": return_the_longest(sentences),
        "longest_word": return_the_longest(words),
    }
stats = extract_statistics(nlp, txt_path=paths[0])
stats
# ## Exercise 5: process all txt files
# #### tip 1: how to obtain the basename of a file
import os
basename = os.path.basename('../Data/Dreams/vickie1.txt')[:-4]
print(basename)
# Define a function called **process_all_txt_files** that has the following parameters:
# * **nlp**: the result of calling spacy.load('en_core_web_sm')
# * **input_folder**: a string (we will test it using '../Data/Dreams')
#
# The function:
# * obtains a list of txt paths using the function **get_paths** with **input_folder** as an argument
# * loops through the txt paths one by one
# * for each iteration, the **extract_statistics** function is called with **txt_path** as an argument
#
# The function returns a dictionary:
# * the keys are the basenames of the txt files (see tip 1 of this exercise)
# * the values are the output of calling the function **extract_statistics** for a specific file
#
# Test your function using '../Data/Dreams' as a value for the parameter *input_folder*.
def process_all_txt_files(nlp, input_folder):
    """
    given a folder of txt files
    -process each with the extract_statistics function

    :param nlp: loaded spaCy model (result of calling spacy.load('en_core_web_sm'))
    :param str input_folder: folder containing the txt files

    :rtype: dict
    :return: dictionary mapping:
        -basename -> output of extract_statistics function
    """
    # Fix: the stub had no body; implement per the documented contract.
    basename_to_stats = {}
    for txt_path in get_paths(input_folder):
        basename = os.path.basename(txt_path)[:-4]  # strip the '.txt' suffix
        basename_to_stats[basename] = extract_statistics(nlp, txt_path=txt_path)
    return basename_to_stats
basename_to_stats = process_all_txt_files(nlp, input_folder='../Data/Dreams')
basename_to_stats
# ## Exercise 6: write to disk
# In this exercise, you are going to write our results to our computer.
# Please loop through **basename_to_stats** and create one JSON file for each dream.
#
# * the path is f'{basename}.json', i.e., 'vickie1.json', 'vickie2.json', etc. (please write them to the same folder as this notebook)
# * the content of each JSON file is each value of **basename_to_stats**
import json

# Fix: the loop body was a ``pass`` stub; write one JSON file per dream,
# next to this notebook, named f'{basename}.json'.
for basename, stats in basename_to_stats.items():
    with open(f'{basename}.json', 'w') as outfile:
        json.dump(stats, outfile)
| Assignments/ASSIGNMENT-RESIT-A.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## 实现多元线性回归模型
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
# +
# NOTE(review): load_boston is deprecated and removed in scikit-learn >= 1.2;
# this notebook presumably targets an older version -- confirm before rerunning.
boston = datasets.load_boston()
X = boston.data
y = boston.target
# Drop rows whose target hits the dataset's 50.0 cap before fitting
X = X[y < 50.0]
y = y[y < 50.0]
# -
X.shape
# %run ../playML/model_selection.py
X_train, X_test, y_train, y_test = train_test_split(X, y, seed=666)
X_train
X_test
y_train
y_test
# %run ../playML/LinearRegression.py
reg = LinearRegression()
reg.fit_normal(X_train, y_train)
reg.coef_
reg.interception_
reg.score(X_test, y_test)
| data-science/scikit-learn/03/03-Linear-Regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %load_ext autoreload
# %autoreload 2
# +
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc
import astropy.constants as const
import os, sys, time
import astropy.units as u
from fractions import Fraction
import astropy.constants as const
import astropy.units as u
from astropy.cosmology import z_at_value
from astropy.cosmology import WMAP9 as cosmo
from fractions import Fraction
import hasasia.sensitivity as hassens
import hasasia.sim as hassim
import hasasia.skymap as hassky
# -
rc('text',usetex=True)
rc('font',**{'family':'serif','serif':['Times New Roman'],'size':14})#,'weight':'bold'})
# +
current_path = os.getcwd()
splt_path = current_path.split("/")
top_path_idx = splt_path.index('DetectorDesignSensitivities')
top_directory = "/".join(splt_path[0:top_path_idx+1])
load_directory = top_directory + '/LoadFiles/InstrumentFiles/'
sys.path.insert(0,top_directory + '/Functions')
import StrainandNoise_v4 as SnN
import SNRcalc_v4 as SnC
import HorizonDistance as HD
# -
def t_of_f(M, q, z, f):
    """Leading-order time to coalescence for a binary radiating at GW
    frequency ``f`` (astropy Quantities throughout).

    NOTE(review): ``z`` is accepted but never used here -- kept only so the
    signature matches the sibling functions.
    """
    mass_to_sec = const.G/const.c**3  # converts M = [mass] to M = [sec]
    sym_ratio = q/(1+q)**2            # symmetric mass ratio
    total_mass_sec = M.to('kg')*mass_to_sec
    chirp_mass = sym_ratio**(3/5)*total_mass_sec
    return 5*chirp_mass**(-5/3)*(8*np.pi*f)**(-8/3)
def f_of_t(M, q, z, t):
    """Leading-order GW frequency of a binary a time ``t`` before
    coalescence (astropy Quantities throughout).

    NOTE(review): ``z`` is accepted but never used here -- kept only so the
    signature matches the sibling functions.
    """
    mass_to_sec = const.G/const.c**3  # converts M = [mass] to M = [sec]
    sym_ratio = q/(1+q)**2            # symmetric mass ratio
    total_mass_sec = M.to('kg')*mass_to_sec
    chirp_mass = sym_ratio**(3/5)*total_mass_sec
    return 1./8./np.pi/chirp_mass*(5*chirp_mass/t.to('s'))**(3./8.)
def f_evolve(M,q,z,t_init_source,T_obs):
    """Evolve a binary's GW frequency over an observation of length T_obs.

    Starting a source-frame time ``t_init_source`` before merger, computes the
    start/end frequencies in both the source and detector (instrument) frames
    and the frequency drift over the observation, printing each intermediate.

    Returns [f_init_source, f_init_inst, f_T_obs_source, f_T_obs_inst,
    delf_source, delf_obs].
    """
    m_conv = const.G/const.c**3 #Converts M = [M] to M = [sec]
    eta = q/(1+q)**2
    M_time = M.to('kg')*m_conv
    M_chirp_source = eta**(3/5)*M_time
    #Assumes t_init is in source frame
    f_init_source = 1./8./np.pi/M_chirp_source*\
        (5*M_chirp_source/t_init_source.to('s'))**(3./8.)
    print('f_init_source: ',f_init_source)
    # redshift the frequency into the detector frame
    f_init_inst = f_init_source/(1+z)
    print('f_init_inst: ',f_init_inst)
    # observation time is shorter in the source frame by (1+z)
    T_obs_source = T_obs.to('s')/(1+z)
    print('T_obs_source: ',T_obs_source.to('yr'))
    # frequency at the end of the observation (source frame)
    f_T_obs_source = 1./8./np.pi/M_chirp_source*\
        (5*M_chirp_source/(t_init_source.to('s')-T_obs_source))**(3./8.)
    print('f_end_source: ',f_T_obs_source)
    f_T_obs_inst = f_T_obs_source/(1+z)
    print('f_T_obs_inst: ',f_T_obs_inst)
    delf_source = f_T_obs_source-f_init_source
    print('delf_source: ',delf_source)
    # first-order (linearized) drift estimate for comparison with delf_source
    delf = 1./8./np.pi/M_chirp_source*(5*M_chirp_source/t_init_source.to('s'))**(3./8.)*(3*T_obs_source/8/t_init_source.to('s'))
    print('delf_Jeff: ',delf)
    delf_obs = f_T_obs_inst - f_init_inst
    print('delf_obs: ', delf_obs)
    return [f_init_source,f_init_inst,f_T_obs_source,f_T_obs_inst,delf_source,delf_obs]
# +
# ESA LISA instrument model: arm length, mission duration and noise knobs.
L = 2.5*u.Gm #armlength in Gm
L = L.to('m')
LISA_T_obs = 4*u.yr
# break frequencies (values given in mHz, converted to Hz quantities)
f_acc_break_low = .4*u.mHz.to('Hz')*u.Hz
f_acc_break_high = 8.*u.mHz.to('Hz')*u.Hz
f_IMS_break = 2.*u.mHz.to('Hz')*u.Hz
A_acc = 3e-15*u.m/u.s/u.s   # acceleration noise amplitude
A_IMS = 10e-12*u.m          # interferometric measurement system noise amplitude
Background = False          # exclude the galactic confusion background
ESA_LISA = SnN.SpaceBased('ESA_LISA',\
        LISA_T_obs,L,A_acc,f_acc_break_low,f_acc_break_high,A_IMS,f_IMS_break,\
        Background=Background)
# +
m1 = 36*u.M_sun
m2 = 29*u.M_sun
q = m1/m2
M = m1+m2
chi1=0.0
chi2=0.0
inc = 0.0
DL = 411.5*u.Mpc
z = z_at_value(cosmo.luminosity_distance,DL)
source = SnN.BlackHoleBinary(M,q,chi1,chi2,z,inc)
T_obs = 4*u.yr
t_init = 30*u.yr
f_init = 0.018*u.Hz
'''print('t_merge: ',t_of_f(M,q,z,f_init).to('yr'))
print('f_rest: ',f_of_t(M,q,z,source.T_obs/(1+z)))
print(t_of_f(2e9*u.M_sun,1.0,0.0,8e-9*u.Hz).to('yr'))
print(f_of_t(1e6*u.M_sun,1.0,3.0,5.*u.yr))'''
f_init = f_of_t(M,q,z,t_init)/(1+z)
print('f_init: ', f_init)
print('t_init: ', t_of_f(M,q,z,f_init*(1+z)).to('yr'))
# -
#Vars = [M,q,chi1,chi2,z]
# Four sample binaries spanning very different masses and redshifts
M = [1e6,65.0,1e4,1e5]*u.M_sun
q = [1.0,1.0,1.0,1.0]
x1 = [0.95,0.0,-0.95,0.0]
x2 = [0.95,0.0,-0.95,0.0]
z = [3.0,0.093,20.0,1.0]
inc = 0.0 #Doesn't really work...
f_init = ESA_LISA.f_opt
T_obs = ESA_LISA.T_obs
# Accumulate per-source frequency evolution over the mission
t_init_source = []
f_init_source = []
f_init_inst = []
f_T_obs_source = []
f_T_obs_inst = []
delf_source = []
delf_obs = []
for indx in range(len(M)):
    #random_t_init_source = np.random.uniform(0,100)*u.yr
    random_t_init_source = 4*u.yr
    tmp = f_evolve(M[indx],q[indx],z[indx],random_t_init_source,T_obs)
    t_init_source.append(random_t_init_source)
    f_init_source.append(tmp[0])
    f_init_inst.append(tmp[1])
    f_T_obs_source.append(tmp[2])
    f_T_obs_inst.append(tmp[3])
    delf_source.append(tmp[4])
    delf_obs.append(tmp[5])
    print('')
# Build the corresponding source objects for SNR/strain calculations
source_1 = SnN.BlackHoleBinary(M[0],q[0],x1[0],x2[0],z[0],inc,instrument=ESA_LISA)
source_2 = SnN.BlackHoleBinary(M[1],q[1],x1[1],x2[1],z[1],inc,instrument=ESA_LISA)
source_3 = SnN.BlackHoleBinary(M[2],q[2],x1[2],x2[2],z[2],inc,instrument=ESA_LISA)
source_4 = SnN.BlackHoleBinary(M[3],q[3],x1[3],x2[3],z[3],inc,instrument=ESA_LISA)
# +
# Monochromatic SNR from the project helper for each source, followed by a
# hand-rolled cross-check: h_gw * sqrt(T_obs / S_n(f)) evaluated at the
# noise-curve bin closest to the detector's optimal frequency.
print(SnC.calcMonoSNR(source_1,ESA_LISA))
print('')
print(SnC.calcMonoSNR(source_2,ESA_LISA))
print('')
print(SnC.calcMonoSNR(source_3,ESA_LISA))
print('')
print(SnC.calcMonoSNR(source_4,ESA_LISA))

# index of the noise-curve frequency bin nearest to f_opt
indxfgw = np.abs(ESA_LISA.fT-ESA_LISA.f_opt).argmin()
print('')
print(source_1.h_gw*np.sqrt(ESA_LISA.T_obs.to('s')/ESA_LISA.S_n_f[indxfgw]))
print(source_2.h_gw*np.sqrt(ESA_LISA.T_obs.to('s')/ESA_LISA.S_n_f[indxfgw]))
print(source_3.h_gw*np.sqrt(ESA_LISA.T_obs.to('s')/ESA_LISA.S_n_f[indxfgw]))
print(source_4.h_gw*np.sqrt(ESA_LISA.T_obs.to('s')/ESA_LISA.S_n_f[indxfgw]))
# +
# Plot 1: characteristic strain of each source vs. the LISA noise curve.
# Vertical lines mark each source's start (dashed) and end (dotted)
# frequencies in the INSTRUMENT (observer) frame.
colors = ['c','k','r','b','y']
plt.figure(figsize=(10,5))
plt.loglog(ESA_LISA.fT,ESA_LISA.h_n_f,color=colors[0])
plt.loglog(source_1.f,SnN.Get_CharStrain(source_1),color=colors[1])
plt.loglog(source_2.f,SnN.Get_CharStrain(source_2),color=colors[2])
plt.loglog(source_3.f,SnN.Get_CharStrain(source_3),color=colors[3])
plt.loglog(source_4.f,SnN.Get_CharStrain(source_4),color=colors[4])
plt.axvline(x=f_init.value,color=colors[0],linestyle=':')
# zip stops after the 4 sources even though colors has 5 entries
# (colors[0] is reserved for the noise curve)
for freq_init,freq_end,i in zip(f_init_inst,f_T_obs_inst,range(len(colors))):
    plt.axvline(x=freq_init.value, color=colors[i+1],linestyle='--')
    plt.axvline(x=freq_end.value,color=colors[i+1],linestyle=':')
plt.show()
# +
# Plot 2: same as Plot 1 but with the start/end frequencies taken in the
# SOURCE (rest) frame.
colors = ['c','k','r','b','y']
plt.figure(figsize=(10,5))
plt.loglog(ESA_LISA.fT,ESA_LISA.h_n_f,color=colors[0])
plt.loglog(source_1.f,SnN.Get_CharStrain(source_1),color=colors[1])
plt.loglog(source_2.f,SnN.Get_CharStrain(source_2),color=colors[2])
plt.loglog(source_3.f,SnN.Get_CharStrain(source_3),color=colors[3])
plt.loglog(source_4.f,SnN.Get_CharStrain(source_4),color=colors[4])
plt.axvline(x=f_init.value,color=colors[0],linestyle=':')
for freq_init,freq_end,i in zip(f_init_source,f_T_obs_source,range(len(colors))):
    plt.axvline(x=freq_init.value, color=colors[i+1],linestyle='--')
    plt.axvline(x=freq_end.value,color=colors[i+1],linestyle=':')
plt.show()
# +
# Plot 3: same comparison using the frequencies stored on the
# BlackHoleBinary objects themselves (f_init / f_T_obs attributes),
# to cross-check the manual f_evolve results above.
colors = ['c','k','r','b','y']
plt.figure(figsize=(10,5))
plt.loglog(ESA_LISA.fT,ESA_LISA.h_n_f,color=colors[0])
plt.loglog(source_1.f,SnN.Get_CharStrain(source_1),color=colors[1])
plt.axvline(x=source_1.f_init.value, color=colors[1],linestyle='--')
plt.axvline(x=source_1.f_T_obs.value,color=colors[1],linestyle=':')
plt.loglog(source_2.f,SnN.Get_CharStrain(source_2),color=colors[2])
plt.axvline(x=source_2.f_init.value, color=colors[2],linestyle='--')
plt.axvline(x=source_2.f_T_obs.value,color=colors[2],linestyle=':')
plt.loglog(source_3.f,SnN.Get_CharStrain(source_3),color=colors[3])
plt.axvline(x=source_3.f_init.value, color=colors[3],linestyle='--')
plt.axvline(x=source_3.f_T_obs.value,color=colors[3],linestyle=':')
plt.loglog(source_4.f,SnN.Get_CharStrain(source_4),color=colors[4])
plt.axvline(x=source_4.f_init.value, color=colors[4],linestyle='--')
plt.axvline(x=source_4.f_T_obs.value,color=colors[4],linestyle=':')
plt.axvline(x=f_init.value,color=colors[0],linestyle=':')
plt.show()
# -
# -
| Simulations/Testing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#Model: LSTM bidirectional + Dropout
#Word Embedding: Pre-Trained (https://devmount.github.io/GermanWordEmbeddings/)
#Dataset: 3
#based on https://github.com/keras-team/keras/blob/master/examples/pretrained_word_embeddings.py
# +
from __future__ import print_function
import os
import sys
import numpy as np
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
from keras.layers import Dense, Input, GlobalMaxPooling1D
from keras.layers import Conv1D, MaxPooling1D, Embedding
from keras.models import Model
from keras.initializers import Constant
# -
BASE_DIR = ''
GLOVE_DIR = BASE_DIR  # directory holding the pre-trained German word vectors
TEXT_DATA_DIR = './dataset3/'  # one sub-directory per class label
MAX_SEQUENCE_LENGTH = 1000  # texts are padded/truncated to this many tokens
MAX_NUM_WORDS = 20000  # vocabulary cap for the tokenizer / embedding matrix
EMBEDDING_DIM = 300  # dimensionality of the pre-trained word vectors
VALIDATION_SPLIT = 0.2  # fraction of samples held out for validation
# +
print('Indexing word vectors.')

# Map each word to its pre-trained embedding vector.
embeddings_index = {}
# BUG FIX: the embedding file is UTF-8; opening it with the platform default
# encoding breaks on Windows (cp1252) for German umlauts — be explicit.
with open(os.path.join(GLOVE_DIR, 'glovegerman.txt'), encoding='utf-8') as f:
    for line in f:
        # each line is: <word> <dim_1> ... <dim_EMBEDDING_DIM>
        word, coefs = line.split(maxsplit=1)
        # np.fromstring(..., sep=' ') is deprecated; parse via split instead
        coefs = np.array(coefs.split(), dtype='f')
        embeddings_index[word] = coefs

print('Found %s word vectors.' % len(embeddings_index))
# +
# second, prepare text samples and their labels
print('Processing text dataset')

texts = []  # list of text samples
labels_index = {}  # dictionary mapping label name to numeric id
labels = []  # list of label ids
# Each sub-directory of TEXT_DATA_DIR is one class; every file inside it is
# one sample of that class.
for name in sorted(os.listdir(TEXT_DATA_DIR)):
    path = os.path.join(TEXT_DATA_DIR, name)
    if os.path.isdir(path):
        label_id = len(labels_index)
        labels_index[name] = label_id
        for fname in sorted(os.listdir(path)):
            # BUG FIX: the original guarded this body with `if fname==fname:`
            # (a leftover from the keras example's `fname.isdigit()` filter),
            # which is always true — the vacuous check has been removed.
            fpath = os.path.join(path, fname)
            print(fname)
            args = {} if sys.version_info < (3,) else {'encoding': 'utf-8'}
            with open(fpath, **args) as f:
                t = f.read()
                i = t.find('\n\n')  # skip header
                if 0 < i:
                    t = t[i:]
                texts.append(t)
            labels.append(label_id)

print('Found %s texts.' % len(texts))
# +
# finally, vectorize the text samples into a 2D integer tensor
# The tokenizer keeps only the MAX_NUM_WORDS most frequent words; shorter
# sequences are padded with zeros up to MAX_SEQUENCE_LENGTH.
tokenizer = Tokenizer(num_words=MAX_NUM_WORDS)
tokenizer.fit_on_texts(texts)
sequences = tokenizer.texts_to_sequences(texts)

word_index = tokenizer.word_index
print('Found %s unique tokens.' % len(word_index))

data = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)

labels = to_categorical(np.asarray(labels))  # one-hot encode the class ids
print('Shape of data tensor:', data.shape)
print('Shape of label tensor:', labels.shape)
# +
# split the data into a training set and a validation set
# Shuffle first so the split is not biased by the per-class directory order.
indices = np.arange(data.shape[0])
np.random.shuffle(indices)
data = data[indices]
labels = labels[indices]
num_validation_samples = int(VALIDATION_SPLIT * data.shape[0])

# NOTE(review): if num_validation_samples ever evaluates to 0, data[:-0] is
# an empty slice and the training set vanishes — fine for
# VALIDATION_SPLIT=0.2 on a non-tiny dataset, but confirm if the split
# fraction or dataset size changes.
x_train = data[:-num_validation_samples]
y_train = labels[:-num_validation_samples]
x_val = data[-num_validation_samples:]
y_val = labels[-num_validation_samples:]
# -
print('Preparing embedding matrix.')

# prepare embedding matrix
# Row i holds the pre-trained vector for the word with tokenizer index i;
# words without a pre-trained vector stay all-zero (as does row 0, which
# pad_sequences reserves for padding).
num_words = min(MAX_NUM_WORDS, len(word_index) + 1)
embedding_matrix = np.zeros((num_words, EMBEDDING_DIM))
for word, i in word_index.items():
    if i >= MAX_NUM_WORDS:
        continue
    embedding_vector = embeddings_index.get(word)
    if embedding_vector is not None:
        # words not found in embedding index will be all-zeros.
        embedding_matrix[i] = embedding_vector

# load pre-trained word embeddings into an Embedding layer
# note that we set trainable = False so as to keep the embeddings fixed
embedding_layer = Embedding(num_words,
                            EMBEDDING_DIM,
                            embeddings_initializer=Constant(embedding_matrix),
                            input_length=MAX_SEQUENCE_LENGTH,
                            trainable=False)
# +
print('Training model.')

from keras.models import Sequential
from keras.layers import Dense, Activation, Embedding, LSTM, Flatten, Bidirectional, Dropout
import keras

# Bidirectional LSTM over the frozen pre-trained embeddings, with dropout
# before the classification head.
sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
embedded_sequences = embedding_layer(sequence_input)
x = Bidirectional(LSTM(64))(embedded_sequences)
x = Dropout(0.25)(x)
# BUG FIX: labels are one-hot encoded, mutually exclusive classes, so the
# output layer must be softmax trained with categorical cross-entropy.
# The original sigmoid + binary_crossentropy pair treats every class as an
# independent binary problem and reports misleadingly high accuracy.
preds = Dense(len(labels_index), activation='softmax')(x)
# -

model = Model(sequence_input, preds)
print(model.summary())

# +
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

model.fit(x_train, y_train,
          batch_size=64,
          epochs=10,
          validation_data=(x_val, y_val))
# -
| Keras LSTM-bidirectional+dropout (GE)(DS3).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import re

# Sample (anonymised) IDS outbreak ticket used as the fixture for the
# cleanup logic in the next cell; the leading newline, separators and
# repeated correlation lines are deliberately part of the data.
test_string ='''
source ip: 10.16.90.249
source hostname: android-ba50a4497de455a
source port: 55198
source mac address: 50:2e:5c:f0:f6:98
system name :
user name:
location :
sep , sms status :
field sales user ( yes / no) :
dsw event log:
-------------------------------------------------------------------------------------------------------------
=========================
event data
=========================
related events:
event id: 84682727
event summary: internal outbreak for 137/udp
occurrence count: 505
event count: 1
host and connection information
source ip: 10.16.90.249
source hostname: android-ba50a4497de455a
source port: 55198
source mac address: 50:2e:5c:f0:f6:98
destination hostname: [no entry]
destination port: 137
connection directionality: internal
protocol: udp
device information
device ip: 80.71.06.702
device name: company-european-asa.company.com-1
log time: 2016-09-26 at 08:23:55 utc
action: blocked
cvss score: -1
scwx event processing information
sherlock rule id (sle): 537074
inspector rule id: 186739
inspector event id: 639601949
ontology id: 200020003203009162
event type id: 200020003203009062
agent id: 103761
event detail:
sep 26 08:23:55 80.71.06.702 %asa-4-106023: deny udp src inside:10.16.90.249/55198 dst noris:172.16.58.3/137 by access-group "acl_inside" [0x30e3d92a, 0x0]
[correlation_data]
sep 26 04:23:54 172.16.31.10 dhcpd[23598]: dhcpack on 10.16.90.249 to 50:2e:5c:f0:f6:98 (android-ba50a4497de455a) via eth2 relay 10.16.88.2 lease-duration 691200 (renew)
sep 26 08:23:55 80.71.06.702 %asa-4-106023: deny udp src inside:10.16.90.249/55198 dst noris:172.16.31.10/137 by access-group "acl_inside" [0x30e3d92a, 0x0]
[correlation_data]
sep 26 04:23:54 172.16.31.10 dhcpd[23598]: dhcpack on 10.16.90.249 to 50:2e:5c:f0:f6:98 (android-ba50a4497de455a) via eth2 relay 10.16.88.2 lease-duration 691200 (renew)
sep 26 08:23:55 80.71.06.702 %asa-4-106023: deny udp src inside:10.16.90.249/55198 dst noris:192.168.3.11/137 by access-group "acl_inside" [0x30e3d92a, 0x0]
[correlation_data]
sep 26 04:23:54 172.16.31.10 dhcpd[23598]: dhcpack on 10.16.90.249 to 50:2e:5c:f0:f6:98 (android-ba50a4497de455a) via eth2 relay 10.16.88.2 lease-duration 691200 (renew)
sep 26 08:23:55 80.71.06.702 %asa-4-106023: deny udp src inside:10.16.90.249/55198 dst noris:172.16.58.3/137 by access-group "acl_inside" [0x30e3d92a, 0x0]
[correlation_data]
sep 26 04:23:54 172.16.31.10 dhcpd[23598]: dhcpack on 10.16.90.249 to 50:2e:5c:f0:f6:98 (android-ba50a4497de455a) via eth2 relay 10.16.88.2 lease-duration 691200 (renew)
sep 26 08:23:55 80.71.06.702 %asa-4-106023: deny udp src inside:10.16.90.249/55198 dst noris:192.168.3.11/137 by access-group "acl_inside" [0x30e3d92a, 0x0]
[correlation_data]
sep 26 04:23:54 172.16.31.10 dhcpd[23598]: dhcpack on 10.16.90.249 to 50:2e:5c:f0:f6:98 (android-ba50a4497de455a) via eth2 relay 10.16.88.2 lease-duration 691200 (renew)
sep 26 08:23:55 80.71.06.702 %asa-4-106023: deny udp src inside:10.16.90.249/55198 dst noris:172.16.58.3/137 by access-group "acl_inside" [0x30e3d92a, 0x0]
[correlation_data]
sep 26 04:23:54 172.16.31.10 dhcpd[23598]: dhcpack on 10.16.90.249 to 50:2e:5c:f0:f6:98 (android-ba50a4497de455a) via eth2 relay 10.16.88.2 lease-duration 691200 (renew)
sep 26 08:23:55 80.71.06.702 %asa-4-106023: deny udp src inside:10.16.90.249/55198 dst noris:172.16.17.32/137 by access-group "acl_inside" [0x30e3d92a, 0x0]
[correlation_data]
sep 26 04:23:54 172.16.31.10 dhcpd[23598]: dhcpack on 10.16.90.249 to 50:2e:5c:f0:f6:98 (android-ba50a4497de455a) via eth2 relay 10.16.88.2 lease-duration 691200 (renew)
sep 26 08:23:55 80.71.06.702 %asa-4-106023: deny udp src inside:10.16.90.249/55198 dst noris:192.168.3.11/137 by access-group "acl_inside" [0x30e3d92a, 0x0]
[correlation_data]
sep 26 04:23:54 172.16.31.10 dhcpd[23598]: dhcpack on 10.16.90.249 to 50:2e:5c:f0:f6:98 (android-ba50a4497de455a) via eth2 relay 10.16.88.2 lease-duration 691200 (renew)
sep 26 08:23:55 80.71.06.702 %asa-4-106023: deny udp src inside:10.16.90.249/55198 dst noris:172.16.17.32/137 by access-group "acl_inside" [0x30e3d92a, 0x0]
[correlation_data]
sep 26 04:23:54 192.168.127.1220 dhcpd[23598]: dhcpack on 10.16.90.249 to 50:2e:5c:f0:f6:98 (android-ba50a4497de455a) via eth2 relay 10.16.88.2 lease-duration 691200 (renew)
sep 26 08:23:55 80.71.06.702 %asa-4-106023: deny udp src inside:10.16.90.249/55198 dst noris:172.16.17.32/137 by access-group "acl_inside" [0x30e3d92a, 0x0]
[correlation_data]
sep 26 04:23:54 172.16.31.10 dhcpd[23598]: dhcpack on 10.16.90.249 to 50:2e:5c:f0:f6:98 (android-ba50a4497de455a) via eth2 relay 10.16.88.2 lease-duration 691200 (renew)
sep 26 08:23:55 80.71.06.702 %asa-4-106023: deny udp src inside:10.16.90.249/55198 dst noris:172.16.31.10/137 by access-group "acl_inside" [0x30e3d92a, 0x0]
[correlation_data]
sep 26 04:23:54 172.16.31.10 dhcpd[23598]: dhcpack on 10.16.90.249 to 50:2e:5c:f0:f6:98 (android-ba50a4497de455a) via eth2 relay 10.16.88.2 lease-duration 691200 (renew)
sep 26 08:23:55 80.71.06.702 %asa-4-106023: deny udp src inside:10.16.90.249/55198 dst noris:192.168.3.11/137 by access-group "acl_inside" [0x30e3d92a, 0x0]
[correlation_data]
sep 26 04:23:54 172.16.31.10 dhcpd[23598]: dhcpack on 10.16.90.249 to 50:2e:5c:f0:f6:98 (android-ba50a4497de455a) via eth2 relay 10.16.88.2 lease-duration 691200 (renew)
sep 26 08:23:55 80.71.06.702 %asa-4-106023: deny udp src inside:10.16.90.249/55198 dst noris:172.16.58.3/137 by access-group "acl_inside" [0x30e3d92a, 0x0]
[correlation_data]
sep 26 04:23:54 172.16.31.10 dhcpd[23598]: dhcpack on 10.16.90.249 to 50:2e:5c:f0:f6:98 (android-ba50a4497de455a) via eth2 relay 10.16.88.2 lease-duration 691200 (renew)
sep 26 08:23:55 80.71.06.702 %asa-4-106023: deny udp src inside:10.16.90.249/55198 dst noris:192.168.127.12/137 by access-group "acl_inside" [0x30e3d92a, 0x0]
[correlation_data]
sep 26 04:23:54 172.16.31.10 dhcpd[23598]: dhcpack on 10.16.90.249 to 50:2e:5c:f0:f6:98 (android-ba50a4497de455a) via eth2 relay 10.16.88.2 lease-duration 691200 (renew)
sep 26 08:23:55 80.71.06.702 %asa-4-106023: deny udp src inside:10.16.90.249/55198 dst noris:192.168.127.12/137 by access-group "acl_inside" [0x30e3d92a, 0x0]
[correlation_data]
sep 26 04:23:54 172.16.31.10 dhcpd[23598]: dhcpack on 10.16.90.249 to 50:2e:5c:f0:f6:98 (android-ba50a4497de455a) via eth2 relay 10.16.88.2 lease-duration 691200 (renew)
sep 26 08:23:55 80.71.06.702 %asa-4-106023: deny udp src inside:10.16.90.249/55198 dst noris:172.16.31.10/137 by access-group "acl_inside" [0x30e3d92a, 0x0]
[correlation_data]
sep 26 04:23:54 172.16.31.10 dhcpd[23598]: dhcpack on 10.16.90.249 to 50:2e:5c:f0:f6:98 (android-ba50a4497de455a) via eth2 relay 10.16.88.2 lease-duration 691200 (renew)
ascii packet(s):
[no entry]
hex packet(s):
[no entry]
'''
#Duplication removal
from collections import Counter
def remov_duplicates(input):
    """Remove duplicate space-separated words, keeping first-occurrence order.

    Parameters
    ----------
    input : str
        A single-space-delimited string of words.

    Returns
    -------
    str
        The words of ``input`` with repeats dropped (first occurrence kept),
        re-joined with single spaces.
    """
    # dict.fromkeys de-duplicates while preserving insertion order
    # (guaranteed since Python 3.7), matching the previous Counter-based
    # behaviour. The original also ran a per-word "".join() loop that was a
    # no-op on strings — removed.
    return " ".join(dict.fromkeys(input.split(" ")))
#remov_duplicates(item)

#security log cleanup
# BUG FIX: `item` was never defined before this cell, so `print(len(item))`
# raised a NameError. Run the cleanup against the sample ticket above.
item = test_string
print(len(item))
words=set(['source','ip','hostname','mac','events','yes / no'])
word_cleanup=r'\b(?:{})\b'.format('|'.join(words))
# Flatten newlines, then strip numbers/timestamps, Excel export artifacts
# (_x000D_), punctuation, and boilerplate field names before de-duplicating.
item = item.replace('\n', ' ').replace('\r', '')
item=re.sub(r'((:)?\s?\d+(.|:)?)+', '', item)
item=re.sub('(_x000D_|_x_|_x|x_)', '', item)
item=re.sub(r'(\[|\]|(\-)+|(\=)+|\%|\,|\"|\:|\(|\))?','',item)
item = re.sub(word_cleanup,'',item)
print('after initial cleanup: ',item,end='\n')
item=remov_duplicates(item)
print('after duplicates removal: ',item,end='\n')
print(len(item))
# -
import pandas as pd

# Load the raw tickets; the free text to clean lives in the 'Description' column.
df=pd.read_excel('input_data.xlsx')
#using the cleanup logic in dataframe
# BUG FIX: the original loop cleaned a local copy of each cell and never
# wrote it back into the DataFrame, so clean_data.csv contained the raw
# text. The cleaned value is now assigned back via df.at. The regex for
# the boilerplate words is also built once, outside the loop.
words = set(['source','ip','hostname','mac','events','yes / no'])
word_cleanup = r'\b(?:{})\b'.format('|'.join(words))
for idx, item in df['Description'].items():
    # only tickets that start with the 'source ip' header need cleaning
    if str(item).find('source ip')==0:
        item = item.replace('\n', ' ').replace('\r', '')
        item = re.sub(r'((:)?\s?\d+(.|:)?)+', '', item)
        item = re.sub('(_x000D_|_x_|_x|x_)', '', item)
        item = re.sub(r'(\[|\]|(\-)+|(\=)+|\%|\,|\"|\:|\(|\))?','',item)
        item = re.sub(word_cleanup,'',item)
        item = remov_duplicates(item)
        df.at[idx, 'Description'] = item
df.to_csv('clean_data.csv',index=False)
| notebooks/security_log_cleanup.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from mpcontribs.client import Client
from pymatgen import Structure, MPRester
# NOTE(review): newer pymatgen exposes these as pymatgen.core.Structure and
# pymatgen.ext.matproj.MPRester — confirm the pinned pymatgen version.

name = 'esters'  # MPContribs project slug
client = Client()  # reads the MPContribs API key from the environment
mpr = MPRester()   # reads the Materials Project API key from the environment

# **Retrieve and update project info**

# client.projects.update_entry(pk=name, project={'long_title': 'Improved c-axis parameter for BiSe'}).result()
client.get_project(name).pretty()

# **Create contribution**

# Match a local CONTCAR structure to a Materials Project entry and attach it
# to a single public contribution.
path = '/Users/patrick/gitrepos/mp/MPContribs/mpcontribs-data/CONTCAR'
structure = Structure.from_file(path)
mpids = mpr.find_structure(structure)  # assumes at least one match — TODO confirm
contributions = [{
    'project': name, 'identifier': mpids[0], 'is_public': True,
    'structures': [structure]
}]

# **Submit contribution**

# Wipe any previous contributions for this project before re-submitting.
client.delete_contributions(name)
client.submit_contributions(contributions)

# **Query contribution**

# Fetch the id of the (single) contribution, then retrieve its structure.
query = {"project": name, "_fields": ["id"]}
resp = client.contributions.get_entries(**query).result()
cid = resp["data"][0]["id"]
sid = client.get_contribution(cid)["structures"][0]["id"]
client.get_structure(sid)
| mpcontribs-portal/notebooks/portal.mpcontribs.org/esters.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Projet Tree Grenoble
import csv

# Read the Grenoble street-tree inventory; data[0] is the header row, every
# other entry is one tree.
with open('/home/dataplus-01/Documents/Modules_Socle/Python/Projet_arbre/data/trees.csv', newline="") as f:
    reader=csv.reader(f)
    data=list(reader)

print(data[0:2],sep="/n")

for e in data[0:2]:
    print(e[1],sep="/n")

# Extract the plantation year of every tree; -1 is the sentinel for a
# missing value (empty string in the CSV).
data_ANNEEDEPLANTATION=[]
ind=data[0].index('ANNEEDEPLANTATION')
for e in data[1:]:
#    print(e[ind],sep="/n")
    if e[ind]!="":
        data_ANNEEDEPLANTATION.append(int(e[ind]))
    else:
        data_ANNEEDEPLANTATION.append(-1)
# Afficher les 50 premières lignes et les 50 dernières lignes
print(data[:50],sep="/n")
print(data[-50:],sep="/n")

# Combien d’arbres sont recensés dans ce jeu de données ?
len(data)-1  # minus the header row

# Pour combien d’arbres manque-t-il l’information concernant la date de plantation ?
# check the count of "" in data_ANNEEDEPLANTATION
# (missing years were stored above as the sentinel -1)
count = data_ANNEEDEPLANTATION.count(-1)
print('Count of missing data:', count)

# Combien d’arbres ont été plantés l’année de votre naissance ?
# check the count of "1990" in data_ANNEEDEPLANTATION
count = data_ANNEEDEPLANTATION.count(1990)
print('Count of tree planted in 1990:', count)

# Quelle est la plus ancienne année de plantation recensée dans ce dataset ? La plus récente ?
# Drop the -1 sentinels before taking the minimum; max() is unaffected by the
# -1 entries as long as at least one real year is present.
int_data_ANNEEDEPLANTATION = [int(annee) for annee in data_ANNEEDEPLANTATION if annee != -1]
plus_ancien=min(int_data_ANNEEDEPLANTATION)
plus_recent=max(data_ANNEEDEPLANTATION)
print(plus_ancien)
print(plus_recent)
# Combien d’arbres ont été plantés année par année (ex : 1987 : 771, 1988 : 266, etc…) ?
# PERF: count every year in one O(n) pass with Counter instead of
# re-scanning the whole list once per year (O(n * number_of_years)).
# Counter returns 0 for absent years, so the printed output is identical.
from collections import Counter

compte_par_annee = Counter(data_ANNEEDEPLANTATION)
data_ANNEEDEPLANTATION_count=[]
for annee in range(plus_ancien,(plus_recent+1)):
    count = compte_par_annee[annee]
    print(annee, " : ", count)
    data_ANNEEDEPLANTATION_count.append(count)

# Combien d’arbres ont été plantés en moyenne chaque année ?
from statistics import mean
round(mean(data_ANNEEDEPLANTATION_count),0)
# +
# Stocker conjointement l’année de plantation et le nombre d’arbres plantés
# dans un tuple. Les tuples seront stockés dans une liste (ex : [ (‘1987’, 771),
# (‘1988’, 266), ….. ] )
liste_annee_nbr=[]
for annee in range(plus_ancien,(plus_recent+1)):
    count = data_ANNEEDEPLANTATION.count(annee)
    tuple_annee_nbr=(str(annee),count)
    liste_annee_nbr.append(tuple_annee_nbr)
print(liste_annee_nbr)
# -

# Quel Maire a planté le plus d’arbres à Grenoble ?
# - <NAME> 49 - 59
# - <NAME> 59 - 65
# - <NAME> 65 - 83
# - <NAME> 83 - 95
# - <NAME> 95 - 14
# - <NAME> 14 - WIP
# Sum the per-year counts over each mayor's term; the slice bounds are
# hard-coded offsets into liste_annee_nbr, which starts at plus_ancien.
LM,AM,HD,AC,MD,EP=0,0,0,0,0,0
for e in liste_annee_nbr[0:10]:
    LM+=e[1]
for e in liste_annee_nbr[10:16]:
    AM+=e[1]
for e in liste_annee_nbr[16:34]:
    HD+=e[1]
for e in liste_annee_nbr[34:46]:
    AC+=e[1]
for e in liste_annee_nbr[46:65]:
    MD+=e[1]
for e in liste_annee_nbr[65:68]:
    EP+=e[1]
print(LM,AM,HD,AC,MD,EP)
print(LM+AM+HD+AC+MD+EP)  # sanity check against the total number of trees
# Récupérez maintenant l’information concernant le genre botanique et la
# stocker conjointement avec l’année de plantation dans un tuple. Les tuples
# seront stockés dans une liste (ex : [ (‘1987’, ‘Acer), (‘1988’, ‘Acerifolia), ….. ])
# NOTE(review): e[19] is assumed to be the ANNEEDEPLANTATION column — confirm
# against the CSV header instead of hard-coding the index.
data_GENRE_BOTA=[]
data_annee_nbr=[]
ind=data[0].index('GENRE_BOTA')
for e in data[1:]:
#    print(e[ind],sep="/n")
    data_GENRE_BOTA.append(e[ind])
    tuple_annee_genre=(str(e[19]),e[ind])
    data_annee_nbr.append(tuple_annee_genre)
print(data_annee_nbr)

# Pour combien d'arbres manque-t-il l’information concernant le genre botanique ?
nb_manq=0
for e in data_GENRE_BOTA:
    if e=="":
        nb_manq+=1
print(nb_manq)

int_data_ANNEEDEPLANTATION

# Utilisez Matplotlib pour tracer l’histogramme représentant le nombre d’arbres plantés par année.
import matplotlib.pyplot
# one histogram bin per year between the oldest and newest plantation years
matplotlib.pyplot.hist(int_data_ANNEEDEPLANTATION,plus_recent-plus_ancien)

# +
#Triez les tuples (année, genre_botanique) en fonction de l’année de plantation par ordre décroissant.
# Drop pairs with a missing year or genus, then sort by year string, most
# recent first (lexicographic compare is safe since years are 4 digits).
data_annee_nbr_clean=[]
for e in data_annee_nbr:
    if not(e[0]=="" or e[1]==""):
        data_annee_nbr_clean.append(e)
data_annee_nbr_clean.sort(key=lambda a: a[0],reverse=True)
data_annee_nbr_clean
| notebooks/arbres.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Pandas practice
import pandas as pd
# # `Series` objects
# The `pandas` library contains these useful data structures:
# * `Series` objects, that we will discuss now. A `Series` object is 1D array, similar to a column in a spreadsheet (with a column name and row labels).
# * `DataFrame` objects. This is a 2D table, similar to a spreadsheet (with column names and row labels).
# * `Panel` objects. You can see a `Panel` as a dictionary of `DataFrame`s. These are less used, so we will not discuss them here.
# ## Creating a `Series`
# Let's start by creating our first `Series` object!
s = pd.Series([2,-1,3,5])
s
# ## Similar to a 1D `ndarray`
# `Series` objects behave much like one-dimensional NumPy `ndarray`s, and you can often pass them as parameters to NumPy functions:
import numpy as np
np.exp(s)
# Arithmetic operations on `Series` are also possible, and they apply *elementwise*, just like for `ndarray`s:
s + [1000,2000,3000,4000]
# Similar to NumPy, if you add a single number to a `Series`, that number is added to all items in the `Series`. This is called *broadcasting*:
s + 1000
# The same is true for all binary operations such as `*` or `/`, and even conditional operations:
s < 0
# ## Index labels
# Each item in a `Series` object has a unique identifier called the *index label*. By default, it is simply the rank of the item in the `Series` (starting at `0`) but you can also set the index labels manually:
s2 = pd.Series([68, 83, 112, 68], index=["alice", "bob", "charles", "darwin"])
s2
# You can then use the `Series` just like a `dict`:
s2["bob"]
# You can still access the items by integer location, like in a regular array:
s2[1]
# To make it clear when you are accessing by label or by integer location, it is recommended to always use the `loc` attribute when accessing by label, and the `iloc` attribute when accessing by integer location:
s2.loc["bob"]
s2.iloc[1]
# Slicing a `Series` also slices the index labels:
s2.iloc[1:3]
# This can lead to unexpected results when using the default numeric labels, so be careful:
surprise = pd.Series([1000, 1001, 1002, 1003])
surprise
surprise_slice = surprise[2:]
surprise_slice
# Oh look! The first element has index label `2`. The element with index label `0` is absent from the slice:
try:
surprise_slice[0]
except KeyError as e:
print("Key error:", e)
# But remember that you can access elements by integer location using the `iloc` attribute. This illustrates another reason why it's always better to use `loc` and `iloc` to access `Series` objects:
surprise_slice.iloc[0]
# ## Init from `dict`
# You can create a `Series` object from a `dict`. The keys will be used as index labels:
weights = {"alice": 68, "bob": 83, "colin": 86, "darwin": 68}
s3 = pd.Series(weights)
s3
# You can control which elements you want to include in the `Series` and in what order by explicitly specifying the desired `index`:
s4 = pd.Series(weights, index = ["colin", "alice"])
s4
# ## Automatic alignment
# When an operation involves multiple `Series` objects, `pandas` automatically aligns items by matching index labels.
# +
print(s2.keys())
print(s3.keys())
s2 + s3
# -
# The resulting `Series` contains the union of index labels from `s2` and `s3`. Since `"colin"` is missing from `s2` and `"charles"` is missing from `s3`, these items have a `NaN` result value. (ie. Not-a-Number means *missing*).
#
# Automatic alignment is very handy when working with data that may come from various sources with varying structure and missing items. But if you forget to set the right index labels, you can have surprising results:
# +
s5 = pd.Series([1000,1000,1000,1000])  # default integer index 0..3, while s2 uses string labels
print("s2 =", s2.values)
print("s5 =", s5.values)
s2 + s5
# -
# Pandas could not align the `Series`, since their labels do not match at all, hence the full `NaN` result.
# ## Init with a scalar
# You can also initialize a `Series` object using a scalar and a list of index labels: all items will be set to the scalar.
meaning = pd.Series(42, ["life", "universe", "everything"])
meaning
# ## `Series` name
# A `Series` can have a `name`:
s6 = pd.Series([83, 68], index=["bob", "alice"], name="weights")
s6
# ## Plotting a `Series`
# Pandas makes it easy to plot `Series` data using matplotlib (for more details on matplotlib, check out the [matplotlib tutorial](tools_matplotlib.ipynb)). Just import matplotlib and call the `plot()` method:
# %matplotlib inline
import matplotlib.pyplot as plt
temperatures = [4.4,5.1,6.1,6.2,6.1,6.1,5.7,5.2,4.7,4.1,3.9,3.5]  # 12 hourly readings, reused by the time-series section below
s7 = pd.Series(temperatures, name="Temperature")
s7.plot()
plt.show()
# There are *many* options for plotting your data. It is not necessary to list them all here: if you need a particular type of plot (histograms, pie charts, etc.), just look for it in the excellent [Visualization](http://pandas.pydata.org/pandas-docs/stable/visualization.html) section of pandas' documentation, and look at the example code.
# # Handling time
# Many datasets have timestamps, and pandas is awesome at manipulating such data:
# * it can represent periods (such as 2016Q3) and frequencies (such as "monthly"),
# * it can convert periods to actual timestamps, and *vice versa*,
# * it can resample data and aggregate values any way you like,
# * it can handle timezones.
#
# ## Time range
# Let's start by creating a time series using `pd.date_range()`. This returns a `DatetimeIndex` containing one datetime per hour for 12 hours starting on October 29th 2016 at 5:30pm.
# NOTE(review): in pandas >= 2.2 the hourly alias is "h"; "H" is deprecated — confirm the target pandas version.
dates = pd.date_range('2016/10/29 5:30pm', periods=12, freq='H')  # 12 hourly timestamps
dates
# This `DatetimeIndex` may be used as an index in a `Series`:
temp_series = pd.Series(temperatures, dates)
temp_series
# Let's plot this series:
# +
temp_series.plot(kind="bar")
plt.grid(True)
plt.show()
# -
# ## Resampling
# Pandas lets us resample a time series very simply. Just call the `resample()` method and specify a new frequency:
temp_series_freq_2H = temp_series.resample("2H")
temp_series_freq_2H
# The resampling operation is actually a deferred operation, which is why we did not get a `Series` object, but a `DatetimeIndexResampler` object instead. To actually perform the resampling operation, we can simply call the `mean()` method: Pandas will compute the mean of every pair of consecutive hours:
temp_series_freq_2H = temp_series_freq_2H.mean()
# Let's plot the result:
temp_series_freq_2H.plot(kind="bar")
plt.show()
# Note how the values have automatically been aggregated into 2-hour periods. If we look at the 6-8pm period, for example, we had a value of `5.1` at 6:30pm, and `6.1` at 7:30pm. After resampling, we just have one value of `5.6`, which is the mean of `5.1` and `6.1`. Rather than computing the mean, we could have used any other aggregation function, for example we can decide to keep the minimum value of each period:
temp_series_freq_2H = temp_series.resample("2H").min()
temp_series_freq_2H
# Or, equivalently, we could use the `apply()` method instead:
temp_series_freq_2H = temp_series.resample("2H").apply(np.min)
temp_series_freq_2H
# ## Upsampling and interpolation
# This was an example of downsampling. We can also upsample (ie. increase the frequency), but this creates holes in our data:
temp_series_freq_15min = temp_series.resample("15Min").mean()  # upsampling: new 15-min slots are NaN
temp_series_freq_15min.head(n=10) # `head` displays the top n values
# One solution is to fill the gaps by interpolating. We just call the `interpolate()` method. The default is to use linear interpolation, but we can also select another method, such as cubic interpolation:
temp_series_freq_15min = temp_series.resample("15Min").interpolate(method="cubic")
temp_series_freq_15min.head(n=10)
temp_series.plot(label="Period: 1 hour")
temp_series_freq_15min.plot(label="Period: 15 minutes")
plt.legend()
plt.show()
# ## Timezones
# By default datetimes are *naive*: they are not aware of timezones, so 2016-10-30 02:30 might mean October 30th 2016 at 2:30am in Paris or in New York. We can make datetimes timezone *aware* by calling the `tz_localize()` method:
temp_series_ny = temp_series.tz_localize("America/New_York")  # attach a timezone without shifting the clock values
temp_series_ny
# Note that `-04:00` is now appended to all the datetimes. This means that these datetimes refer to [UTC](https://en.wikipedia.org/wiki/Coordinated_Universal_Time) - 4 hours.
#
# We can convert these datetimes to Paris time like this:
temp_series_paris = temp_series_ny.tz_convert("Europe/Paris")
temp_series_paris
# You may have noticed that the UTC offset changes from `+02:00` to `+01:00`: this is because France switches to winter time at 3am that particular night (time goes back to 2am). Notice that 2:30am occurs twice! Let's go back to a naive representation (if you log some data hourly using local time, without storing the timezone, you might get something like this):
temp_series_paris_naive = temp_series_paris.tz_localize(None)  # drop tz info, keeping local clock values
temp_series_paris_naive
# Now `02:30` is really ambiguous. If we try to localize these naive datetimes to the Paris timezone, we get an error:
try:
    temp_series_paris_naive.tz_localize("Europe/Paris")
except Exception as e:
    print(type(e))
    print(e)
# Fortunately using the `ambiguous` argument we can tell pandas to infer the right DST (Daylight Saving Time) based on the order of the ambiguous timestamps:
temp_series_paris_naive.tz_localize("Europe/Paris", ambiguous="infer")
# ## Periods
# The `pd.period_range()` function returns a `PeriodIndex` instead of a `DatetimeIndex`. For example, let's get all quarters in 2016 and 2017:
quarters = pd.period_range('2016Q1', periods=8, freq='Q')  # 8 quarterly periods: 2016Q1 .. 2017Q4
quarters
# Adding a number `N` to a `PeriodIndex` shifts the periods by `N` times the `PeriodIndex`'s frequency:
quarters + 3
# The `asfreq()` method lets us change the frequency of the `PeriodIndex`. All periods are lengthened or shortened accordingly. For example, let's convert all the quarterly periods to monthly periods (zooming in):
quarters.asfreq("M")
# By default, the `asfreq` zooms on the end of each period. We can tell it to zoom on the start of each period instead:
quarters.asfreq("M", how="start")
# And we can zoom out:
quarters.asfreq("A")
# Of course we can create a `Series` with a `PeriodIndex`:
quarterly_revenue = pd.Series([300, 320, 290, 390, 320, 360, 310, 410], index = quarters)
quarterly_revenue
quarterly_revenue.plot(kind="line")
plt.show()
# We can convert periods to timestamps by calling `to_timestamp`. By default this will give us the first day of each period, but by setting `how` and `freq`, we can get the last hour of each period:
last_hours = quarterly_revenue.to_timestamp(how="end", freq="H")
last_hours
# And back to periods by calling `to_period`:
last_hours.to_period()
# Pandas also provides many other time-related functions that we recommend you check out in the [documentation](http://pandas.pydata.org/pandas-docs/stable/timeseries.html). To whet your appetite, here is one way to get the last business day of each month in 2016, at 9am:
months_2016 = pd.period_range("2016", periods=12, freq="M")
one_day_after_last_days = months_2016.asfreq("D") + 1  # first day of the following month
last_bdays = one_day_after_last_days.to_timestamp() - pd.tseries.offsets.BDay()  # step back one business day
last_bdays.to_period("H") + 9  # 9am on the last business day of each month
# # `DataFrame` objects
# A DataFrame object represents a spreadsheet, with cell values, column names and row index labels. You can define expressions to compute columns based on other columns, create pivot-tables, group rows, draw graphs, etc. You can see `DataFrame`s as dictionaries of `Series`.
#
# ## Creating a `DataFrame`
# You can create a DataFrame by passing a dictionary of `Series` objects:
people_dict = {
    "weight": pd.Series([68, 83, 112], index=["alice", "bob", "charles"]),
    "birthyear": pd.Series([1984, 1985, 1992], index=["bob", "alice", "charles"], name="year"),
    "children": pd.Series([0, 3], index=["charles", "bob"]),
    "hobby": pd.Series(["Biking", "Dancing"], index=["alice", "bob"]),
}
people = pd.DataFrame(people_dict)
people
# A few things to note:
# * the `Series` were automatically aligned based on their index,
# * missing values are represented as `NaN`,
# * `Series` names are ignored (the name `"year"` was dropped),
# * `DataFrame`s are displayed nicely in Jupyter notebooks, woohoo!
# You can access columns pretty much as you would expect. They are returned as `Series` objects:
people["birthyear"]
# You can also get multiple columns at once:
people[["birthyear", "hobby"]]
# If you pass a list of columns and/or index row labels to the `DataFrame` constructor, it will guarantee that these columns and/or rows will exist, in that order, and no other column/row will exist. For example:
d2 = pd.DataFrame(
        people_dict,
        columns=["birthyear", "weight", "height"],  # "height" does not exist in people_dict, so it is all NaN
        index=["bob", "alice", "eugene"]  # "eugene" is unknown, so his row is all NaN
     )
d2
# Another convenient way to create a `DataFrame` is to pass all the values to the constructor as an `ndarray`, or a list of lists, and specify the column names and row index labels separately:
# Build a DataFrame from a list of lists, specifying the column names and row
# index labels separately:
values = [
            [1985, np.nan, "Biking",   68],
            [1984, 3,      "Dancing",  83],
            [1992, 0,      np.nan,    112]
         ]
d3 = pd.DataFrame(
        values,
        columns=["birthyear", "children", "hobby", "weight"],
        index=["alice", "bob", "charles"]
     )
d3
# To specify missing values, you can either use `np.nan` or NumPy's masked arrays:
# (the `np.object` alias was deprecated in NumPy 1.20 and removed in 1.24, so we
# use the builtin `object` type instead)
masked_array = np.ma.asarray(values, dtype=object)
masked_array[(0, 2), (1, 2)] = np.ma.masked  # mask alice's children and charles's hobby
d3 = pd.DataFrame(
        masked_array,
        columns=["birthyear", "children", "hobby", "weight"],
        index=["alice", "bob", "charles"]
     )
d3
# Instead of an `ndarray`, you can also pass a `DataFrame` object:
d4 = pd.DataFrame(
         d3,
         columns=["hobby", "children"],  # keep only these columns...
         index=["alice", "bob"]  # ...and only these rows
     )
d4
# It is also possible to create a `DataFrame` with a dictionary (or list) of dictionaries (or list):
people = pd.DataFrame({
    "birthyear": {"alice":1985, "bob": 1984, "charles": 1992},
    "hobby": {"alice":"Biking", "bob": "Dancing"},
    "weight": {"alice":68, "bob": 83, "charles": 112},
    "children": {"bob": 3, "charles": 0}
})
people
# ## Multi-indexing
# If all columns are tuples of the same size, then they are understood as a multi-index. The same goes for row index labels. For example:
d5 = pd.DataFrame(
  {
    ("public", "birthyear"):
        {("Paris","alice"):1985, ("Paris","bob"): 1984, ("London","charles"): 1992},
    ("public", "hobby"):
        {("Paris","alice"):"Biking", ("Paris","bob"): "Dancing"},
    ("private", "weight"):
        {("Paris","alice"):68, ("Paris","bob"): 83, ("London","charles"): 112},
    ("private", "children"):
        {("Paris", "alice"):np.nan, ("Paris","bob"): 3, ("London","charles"): 0}
  }
)
d5
# You can now get a `DataFrame` containing all the `"public"` columns very simply:
d5["public"]
d5["public", "hobby"]  # Same result as d5["public"]["hobby"]
# ## Dropping a level
# Let's look at `d5` again:
d5
# There are two levels of columns, and two levels of indices. We can drop a column level by calling `droplevel()` (the same goes for indices):
d5.columns = d5.columns.droplevel(level = 0)  # drop the public/private level, keeping the inner names
d5
# ## Transposing
# You can swap columns and indices using the `T` attribute:
d6 = d5.T
d6
# ## Stacking and unstacking levels
# Calling the `stack()` method will push the lowest column level after the lowest index:
d7 = d6.stack()
d7
# Note that many `NaN` values appeared. This makes sense because many new combinations did not exist before (eg. there was no `bob` in `London`).
#
# Calling `unstack()` will do the reverse, once again creating many `NaN` values.
d8 = d7.unstack()
d8
# If we call `unstack` again, we end up with a `Series` object:
d9 = d8.unstack()
d9
# The `stack()` and `unstack()` methods let you select the `level` to stack/unstack. You can even stack/unstack multiple levels at once:
d10 = d9.unstack(level = (0,1))
d10
# ## Most methods return modified copies
# As you may have noticed, the `stack()` and `unstack()` methods do not modify the object they apply to. Instead, they work on a copy and return that copy. This is true of most methods in pandas.
# ## Accessing rows
# Let's go back to the `people` `DataFrame`:
people
# The `loc` attribute lets you access rows instead of columns. The result is a `Series` object in which the `DataFrame`'s column names are mapped to row index labels:
people.loc["charles"]
# You can also access rows by integer location using the `iloc` attribute:
people.iloc[2]
# You can also get a slice of rows, and this returns a `DataFrame` object:
people.iloc[1:3]
# Finally, you can pass a boolean array to get the matching rows:
people[np.array([True, False, True])]
# This is most useful when combined with boolean expressions:
people[people["birthyear"] < 1990]
# ## Adding and removing columns
# You can generally treat `DataFrame` objects like dictionaries of `Series`, so the following work fine:
people
# +
people["age"] = 2018 - people["birthyear"] # adds a new column "age"
people["over 30"] = people["age"] > 30 # adds another column "over 30"
birthyears = people.pop("birthyear")  # pop() removes the column and returns it
del people["children"]
people
# -
birthyears
# When you add a new column, it must have the same number of rows. Missing rows are filled with NaN, and extra rows are ignored:
people["pets"] = pd.Series({"bob": 0, "charles": 5, "eugene":1}) # alice is missing, eugene is ignored
people
# When adding a new column, it is added at the end (on the right) by default. You can also insert a column anywhere else using the `insert()` method:
people.insert(1, "height", [172, 181, 185])
people
# ## Assigning new columns
# You can also create new columns by calling the `assign()` method. Note that this returns a new `DataFrame` object, the original is not modified:
people.assign(
    body_mass_index = people["weight"] / (people["height"] / 100) ** 2,
    has_pets = people["pets"] > 0
)
# Note that you cannot access columns created within the same assignment:
try:
    people.assign(
        body_mass_index = people["weight"] / (people["height"] / 100) ** 2,
        overweight = people["body_mass_index"] > 25  # fails: "body_mass_index" is not in people yet
    )
except KeyError as e:
    print("Key error:", e)
# The solution is to split this assignment in two consecutive assignments:
d6 = people.assign(body_mass_index = people["weight"] / (people["height"] / 100) ** 2)
d6.assign(overweight = d6["body_mass_index"] > 25)
# Having to create a temporary variable `d6` is not very convenient. You may want to just chain the assignment calls, but it does not work because the `people` object is not actually modified by the first assignment:
try:
    (people
         .assign(body_mass_index = people["weight"] / (people["height"] / 100) ** 2)
         .assign(overweight = people["body_mass_index"] > 25)
    )
except KeyError as e:
    print("Key error:", e)
# But fear not, there is a simple solution. You can pass a function to the `assign()` method (typically a `lambda` function), and this function will be called with the `DataFrame` as a parameter:
(people
     .assign(body_mass_index = lambda df: df["weight"] / (df["height"] / 100) ** 2)
     .assign(overweight = lambda df: df["body_mass_index"] > 25)
)
# Problem solved!
# Problem solved!
# ## Evaluating an expression
# A great feature supported by pandas is expression evaluation. This relies on the `numexpr` library which must be installed.
people.eval("weight / (height/100) ** 2 > 25")  # requires the numexpr library
# Assignment expressions are also supported. Let's set `inplace=True` to directly modify the `DataFrame` rather than getting a modified copy:
people.eval("body_mass_index = weight / (height/100) ** 2", inplace=True)
people
# You can use a local or global variable in an expression by prefixing it with `'@'`:
overweight_threshold = 30
people.eval("overweight = body_mass_index > @overweight_threshold", inplace=True)
people
# ## Querying a `DataFrame`
# The `query()` method lets you filter a `DataFrame` based on a query expression:
people.query("age > 30 and pets == 0")
# ## Sorting a `DataFrame`
# You can sort a `DataFrame` by calling its `sort_index` method. By default it sorts the rows by their index label, in ascending order, but let's reverse the order:
people.sort_index(ascending=False)
# Note that `sort_index` returned a sorted *copy* of the `DataFrame`. To modify `people` directly, we can set the `inplace` argument to `True`. Also, we can sort the columns instead of the rows by setting `axis=1`:
people.sort_index(axis=1, inplace=True)
people
# To sort the `DataFrame` by the values instead of the labels, we can use `sort_values` and specify the column to sort by:
people.sort_values(by="age", inplace=True)
people
# ## Plotting a `DataFrame`
# Just like for `Series`, pandas makes it easy to draw nice graphs based on a `DataFrame`.
#
# For example, it is trivial to create a line plot from a `DataFrame`'s data by calling its `plot` method:
people.plot(kind = "line", x = "body_mass_index", y = ["height", "weight"])
plt.show()
# You can pass extra arguments supported by matplotlib's functions. For example, we can create scatterplot and pass it a list of sizes using the `s` argument of matplotlib's `scatter()` function:
people.plot(kind = "scatter", x = "height", y = "weight", s=[40, 120, 200])
plt.show()
# Again, there are way too many options to list here: the best option is to scroll through the [Visualization](http://pandas.pydata.org/pandas-docs/stable/visualization.html) page in pandas' documentation, find the plot you are interested in and look at the example code.
# ## Operations on `DataFrame`s
# Although `DataFrame`s do not try to mimic NumPy arrays, there are a few similarities. Let's create a `DataFrame` to demonstrate this:
grades_array = np.array([[8,8,9],[10,9,9],[4, 8, 2], [9, 10, 10]])
grades = pd.DataFrame(grades_array, columns=["sep", "oct", "nov"], index=["alice","bob","charles","darwin"])
grades
# You can apply NumPy mathematical functions on a `DataFrame`: the function is applied to all values:
np.sqrt(grades)
# Similarly, adding a single value to a `DataFrame` will add that value to all elements in the `DataFrame`. This is called *broadcasting*:
grades + 1
# Of course, the same is true for all other binary operations, including arithmetic (`*`,`/`,`**`...) and conditional (`>`, `==`...) operations:
grades >= 5
# Aggregation operations, such as computing the `max`, the `sum` or the `mean` of a `DataFrame`, apply to each column, and you get back a `Series` object:
grades.mean()
# The `all` method is also an aggregation operation: it checks whether all values are `True` or not. Let's see during which months all students got a grade greater than `5`:
(grades > 5).all()
# Most of these functions take an optional `axis` parameter which lets you specify along which axis of the `DataFrame` you want the operation executed. The default is `axis=0`, meaning that the operation is executed vertically (on each column). You can set `axis=1` to execute the operation horizontally (on each row). For example, let's find out which students had all grades greater than `5`:
(grades > 5).all(axis = 1)
# The `any` method returns `True` if any value is True. Let's see who got at least one grade 10:
(grades == 10).any(axis = 1)
# If you add a `Series` object to a `DataFrame` (or execute any other binary operation), pandas attempts to broadcast the operation to all *rows* in the `DataFrame`. This only works if the `Series` has the same size as the `DataFrame`s rows. For example, let's subtract the `mean` of the `DataFrame` (a `Series` object) from the `DataFrame`:
grades - grades.mean() # equivalent to: grades - [7.75, 8.75, 7.50]
# We subtracted `7.75` from all September grades, `8.75` from October grades and `7.50` from November grades. It is equivalent to subtracting this `DataFrame`:
pd.DataFrame([[7.75, 8.75, 7.50]]*4, index=grades.index, columns=grades.columns)
# If you want to subtract the global mean from every grade, here is one way to do it:
grades - grades.values.mean() # subtracts the global mean (8.00) from all grades
# ## Automatic alignment
# Similar to `Series`, when operating on multiple `DataFrame`s, pandas automatically aligns them by row index label, but also by column names. Let's create a `DataFrame` with bonus points for each person from October to December:
bonus_array = np.array([[0,np.nan,2],[np.nan,1,0],[0, 1, 0], [3, 3, 0]])
bonus_points = pd.DataFrame(bonus_array, columns=["oct", "nov", "dec"], index=["bob","colin", "darwin", "charles"])
bonus_points
grades + bonus_points  # rows and columns are aligned by label; anything missing on either side becomes NaN
# Looks like the addition worked in some cases but way too many elements are now empty. That's because when aligning the `DataFrame`s, some columns and rows were only present on one side, and thus they were considered missing on the other side (`NaN`). Then adding `NaN` to a number results in `NaN`, hence the result.
#
# ## Handling missing data
# Dealing with missing data is a frequent task when working with real life data. Pandas offers a few tools to handle missing data.
#
# Let's try to fix the problem above. For example, we can decide that missing data should result in a zero, instead of `NaN`. We can replace all `NaN` values with any value using the `fillna()` method:
(grades + bonus_points).fillna(0)
# It's a bit unfair that we're setting grades to zero in September, though. Perhaps we should decide that missing grades are missing grades, but missing bonus points should be replaced by zeros:
fixed_bonus_points = bonus_points.fillna(0)
fixed_bonus_points.insert(0, "sep", 0)  # everyone starts with 0 bonus points in September
fixed_bonus_points.loc["alice"] = 0  # alice was missing from bonus_points entirely
grades + fixed_bonus_points
# That's much better: although we made up some data, we have not been too unfair.
#
# Another way to handle missing data is to interpolate. Let's look at the `bonus_points` `DataFrame` again:
bonus_points
# Now let's call the `interpolate` method. By default, it interpolates vertically (`axis=0`), so let's tell it to interpolate horizontally (`axis=1`).
bonus_points.interpolate(axis=1)
# Bob had 0 bonus points in October, and 2 in December. When we interpolate for November, we get the mean: 1 bonus point. Colin had 1 bonus point in November, but we do not know how many bonus points he had in September, so we cannot interpolate, this is why there is still a missing value in October after interpolation. To fix this, we can set the September bonus points to 0 before interpolation.
better_bonus_points = bonus_points.copy()
better_bonus_points.insert(0, "sep", 0)
better_bonus_points.loc["alice"] = 0
better_bonus_points = better_bonus_points.interpolate(axis=1)
better_bonus_points
# Great, now we have reasonable bonus points everywhere. Let's find out the final grades:
grades + better_bonus_points
# It is slightly annoying that the September column ends up on the right. This is because the `DataFrame`s we are adding do not have the exact same columns (the `grades` `DataFrame` is missing the `"dec"` column), so to make things predictable, pandas orders the final columns alphabetically. To fix this, we can simply add the missing column before adding:
grades["dec"] = np.nan
final_grades = grades + better_bonus_points
final_grades
# There's not much we can do about December and Colin: it's bad enough that we are making up bonus points, but we can't reasonably make up grades (well I guess some teachers probably do). So let's call the `dropna()` method to get rid of rows that are full of `NaN`s:
final_grades_clean = final_grades.dropna(how="all")
final_grades_clean
# Now let's remove columns that are full of `NaN`s by setting the `axis` argument to `1`:
final_grades_clean = final_grades_clean.dropna(axis=1, how="all")
final_grades_clean
# ## Aggregating with `groupby`
# Similar to the SQL language, pandas allows grouping your data into groups to run calculations over each group.
#
# First, let's add some extra data about each person so we can group them, and let's go back to the `final_grades` `DataFrame` so we can see how `NaN` values are handled:
final_grades["hobby"] = ["Biking", "Dancing", np.nan, "Dancing", "Biking"]
final_grades
# Now let's group data in this `DataFrame` by hobby:
grouped_grades = final_grades.groupby("hobby")
grouped_grades  # a lazy GroupBy object: nothing is computed until an aggregation is requested
# We are ready to compute the average grade per hobby:
grouped_grades.mean()
# That was easy! Note that the `NaN` values have simply been skipped when computing the means.
# ## Pivot tables
# Pandas supports spreadsheet-like [pivot tables](https://en.wikipedia.org/wiki/Pivot_table) that allow quick data summarization. To illustrate this, let's create a simple `DataFrame`:
bonus_points
more_grades = final_grades_clean.stack().reset_index()  # long format: one row per (name, month) pair
more_grades.columns = ["name", "month", "grade"]
more_grades["bonus"] = [np.nan, np.nan, np.nan, 0, np.nan, 2, 3, 3, 0, 0, 1, 0]
more_grades
# Now we can call the `pd.pivot_table()` function for this `DataFrame`, asking to group by the `name` column. By default, `pivot_table()` computes the mean of each numeric column:
pd.pivot_table(more_grades, index="name")
# We can change the aggregation function by setting the `aggfunc` argument, and we can also specify the list of columns whose values will be aggregated:
pd.pivot_table(more_grades, index="name", values=["grade","bonus"], aggfunc=np.max)
# We can also specify the `columns` to aggregate over horizontally, and request the grand totals for each row and column by setting `margins=True`:
pd.pivot_table(more_grades, index="name", values="grade", columns="month", margins=True)
# Finally, we can specify multiple index or column names, and pandas will create multi-level indices:
pd.pivot_table(more_grades, index=("name", "month"), margins=True)
# ## Overview functions
# When dealing with large `DataFrames`, it is useful to get a quick overview of its content. Pandas offers a few functions for this. First, let's create a large `DataFrame` with a mix of numeric values, missing values and text values. Notice how Jupyter displays only the corners of the `DataFrame`:
much_data = np.fromfunction(lambda x,y: (x+y*y)%17*11, (10000, 26))  # deterministic synthetic values
large_df = pd.DataFrame(much_data, columns=list("ABCDEFGHIJKLMNOPQRSTUVWXYZ"))
large_df[large_df % 16 == 0] = np.nan  # sprinkle in some missing values
large_df.insert(3,"some_text", "Blabla")  # and a text column
large_df
# The `head()` method returns the top 5 rows:
large_df.head()
# Of course there's also a `tail()` function to view the bottom 5 rows. You can pass the number of rows you want:
large_df.tail(n=2)
# The `info()` method prints out a summary of each column's contents:
large_df.info()
# Finally, the `describe()` method gives a nice overview of the main aggregated values over each column:
# * `count`: number of non-null (not NaN) values
# * `mean`: mean of non-null values
# * `std`: [standard deviation](https://en.wikipedia.org/wiki/Standard_deviation) of non-null values
# * `min`: minimum of non-null values
# * `25%`, `50%`, `75%`: 25th, 50th and 75th [percentile](https://en.wikipedia.org/wiki/Percentile) of non-null values
# * `max`: maximum of non-null values
large_df.describe()
# # Saving & loading
# Pandas can save `DataFrame`s to various backends, including file formats such as CSV, Excel, JSON, HTML and HDF5, or to a SQL database. Let's create a `DataFrame` to demonstrate this:
my_df = pd.DataFrame(
    [["Biking", 68.5, 1985, np.nan], ["Dancing", 83.1, 1984, 3]],
    columns=["hobby","weight","birthyear","children"],
    index=["alice", "bob"]
)
my_df
# ## Saving
# Let's save it to CSV, HTML and JSON:
my_df.to_csv("my_df.csv")
my_df.to_html("my_df.html")
my_df.to_json("my_df.json")
# Done! Let's take a peek at what was saved:
for filename in ("my_df.csv", "my_df.html", "my_df.json"):
    print("#", filename)
    with open(filename, "rt") as f:
        print(f.read())
        print()
# Note that the index is saved as the first column (with no name) in a CSV file, as `<th>` tags in HTML and as keys in JSON.
#
# Saving to other formats works very similarly, but some formats require extra libraries to be installed. For example, saving to Excel requires the openpyxl library:
try:
    my_df.to_excel("my_df.xlsx", sheet_name='People')
except ImportError as e:  # openpyxl not installed: report and carry on
    print(e)
# ## Loading
# Now let's load our CSV file back into a `DataFrame`:
my_df_loaded = pd.read_csv("my_df.csv", index_col=0)  # index_col=0 restores the saved index
my_df_loaded
# As you might guess, there are similar `read_json`, `read_html`, `read_excel` functions as well. We can also read data straight from the Internet. For example, let's load all U.S. cities from [simplemaps.com](http://simplemaps.com/):
us_cities = None
try:
    csv_url = "http://simplemaps.com/files/cities.csv"
    us_cities = pd.read_csv(csv_url, index_col=0)
    us_cities = us_cities.head()
except IOError as e:  # tolerate network failures so the notebook still runs offline
    print(e)
us_cities
# There are more options available, in particular regarding datetime format. Check out the [documentation](http://pandas.pydata.org/pandas-docs/stable/io.html) for more details.
# # Combining `DataFrame`s
#
# ## SQL-like joins
# One powerful feature of pandas is its ability to perform SQL-like joins on `DataFrame`s. Various types of joins are supported: inner joins, left/right outer joins and full joins. To illustrate this, let's start by creating a couple of simple `DataFrame`s:
city_loc = pd.DataFrame(
    [
        ["CA", "San Francisco", 37.781334, -122.416728],
        ["NY", "New York", 40.705649, -74.008344],
        ["FL", "Miami", 25.791100, -80.320733],
        ["OH", "Cleveland", 41.473508, -81.739791],
        ["UT", "Salt Lake City", 40.755851, -111.896657]
    ], columns=["state", "city", "lat", "lng"])
city_loc
city_pop = pd.DataFrame(
    [
        [808976, "San Francisco", "California"],
        [8363710, "New York", "New-York"],
        [413201, "Miami", "Florida"],
        [2242193, "Houston", "Texas"]
    ], index=[3,4,5,6], columns=["population", "city", "state"])
city_pop
# Now let's join these `DataFrame`s using the `merge()` function:
pd.merge(left=city_loc, right=city_pop, on="city")
# Note that both `DataFrame`s have a column named `state`, so in the result they got renamed to `state_x` and `state_y`.
#
# Also, note that Cleveland, Salt Lake City and Houston were dropped because they don't exist in *both* `DataFrame`s. This is the equivalent of a SQL `INNER JOIN`. If you want a `FULL OUTER JOIN`, where no city gets dropped and `NaN` values are added, you must specify `how="outer"`:
all_cities = pd.merge(left=city_loc, right=city_pop, on="city", how="outer")
all_cities
# Of course `LEFT OUTER JOIN` is also available by setting `how="left"`: only the cities present in the left `DataFrame` end up in the result. Similarly, with `how="right"` only cities in the right `DataFrame` appear in the result. For example:
pd.merge(left=city_loc, right=city_pop, on="city", how="right")
# If the key to join on is actually in one (or both) `DataFrame`'s index, you must use `left_index=True` and/or `right_index=True`. If the key column names differ, you must use `left_on` and `right_on`. For example:
city_pop2 = city_pop.copy()
city_pop2.columns = ["population", "name", "state"]  # rename "city" to "name" to demonstrate left_on/right_on
pd.merge(left=city_loc, right=city_pop2, left_on="city", right_on="name")
# ## Concatenation
# Rather than joining `DataFrame`s, we may just want to concatenate them. That's what `concat()` is for:
result_concat = pd.concat([city_loc, city_pop])
result_concat
# Note that this operation aligned the data horizontally (by columns) but not vertically (by rows). In this example, we end up with multiple rows having the same index (eg. 3). Pandas handles this rather gracefully:
result_concat.loc[3]
# Or you can tell pandas to just ignore the index:
pd.concat([city_loc, city_pop], ignore_index=True)
# Notice that when a column does not exist in a `DataFrame`, it acts as if it was filled with `NaN` values. If we set `join="inner"`, then only columns that exist in *both* `DataFrame`s are returned:
pd.concat([city_loc, city_pop], join="inner")
# You can concatenate `DataFrame`s horizontally instead of vertically by setting `axis=1`:
pd.concat([city_loc, city_pop], axis=1)
# In this case it really does not make much sense because the indices do not align well (eg. Cleveland and San Francisco end up on the same row, because they shared the index label `3`). So let's reindex the `DataFrame`s by city name before concatenating:
pd.concat([city_loc.set_index("city"), city_pop.set_index("city")], axis=1)
# This looks a lot like a `FULL OUTER JOIN`, except that the `state` columns were not renamed to `state_x` and `state_y`, and the `city` column is now the index.
# The `append()` method is a useful shorthand for concatenating `DataFrame`s vertically:
city_loc.append(city_pop)
# As always in pandas, the `append()` method does *not* actually modify `city_loc`: it works on a copy and returns the modified copy.
# # Categories
# It is quite frequent to have values that represent categories, for example `1` for female and `2` for male, or `"A"` for Good, `"B"` for Average, `"C"` for Bad. These categorical values can be hard to read and cumbersome to handle, but fortunately pandas makes it easy. To illustrate this, let's take the `city_pop` `DataFrame` we created earlier, and add a column that represents a category:
# Work on a copy so city_pop keeps its original columns:
city_eco = city_pop.copy()
city_eco["eco_code"] = [17, 17, 34, 20]
city_eco
# Right now the `eco_code` column is full of apparently meaningless codes. Let's fix that. First, we will create a new categorical column based on the `eco_code`s:
city_eco["economy"] = city_eco["eco_code"].astype('category')
city_eco["economy"].cat.categories
# Now we can give each category a meaningful name. (Assigning directly to
# `cat.categories` was deprecated in pandas 1.3 and removed in 2.0, so we use
# `rename_categories()` instead; categories are matched in their sorted order: 17, 20, 34.)
city_eco["economy"] = city_eco["economy"].cat.rename_categories(["Finance", "Energy", "Tourism"])
city_eco
# Note that categorical values are sorted according to their categorical order, *not* their alphabetical order:
city_eco.sort_values(by="economy", ascending=False)
| 01_basics_pandas.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="iGkZa4BK9dqo"
# # GammaRegressor with StandardScaler and PolynomialFeatures
# + [markdown] id="INm2ARiE9dqv"
# This code template is for the regression analysis using Gamma Regression via data rescaling technique StandardScaler and feature transformation technique PolynomialFeatures.
# + [markdown] id="KmyZtYF39dqw"
# ### Required Packages
# + colab={"base_uri": "https://localhost:8080/", "height": 374} id="N4Rv73nOuN1C" outputId="73760cab-9879-4eea-f128-928318dbfc2c"
# !pip install scikit-learn
# + id="O-XKOQMS9dqx"
import warnings
import numpy as np
import pandas as pd
import seaborn as se
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder,PolynomialFeatures, StandardScaler
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
from sklearn.linear_model import GammaRegressor
warnings.filterwarnings('ignore')  # silence all warnings to keep notebook output clean
# + [markdown] id="gnZZdF0Q9dqz"
# ### Initialization
#
# Filepath of CSV file
# + id="oUX6q5b99dq0"
#filepath: location of the input CSV -- must be filled in before running this template
file_path= ""
# + [markdown] id="lBetrXuN9dq0"
# List of features which are required for model training.
# + id="bvgikwbX9dq1"
#x_values: column names used as model inputs (empty in this template by default)
features=[]
# + [markdown] id="qYKXD8gu9dq2"
# Target feature for prediction.
# + id="6SkMudaw9dq3"
#y_value: name of the target column
target=''
# + [markdown] id="SaD5H-zB9dq4"
# ### Data Fetching
#
# Pandas is an open-source, BSD-licensed library providing high-performance, easy-to-use data manipulation and data analysis tools.
#
# We will use the pandas library to read the CSV file using its storage path, and the head function to display the initial rows.
# + id="EpW34yUm9dq4" outputId="f966808a-e73a-4587-c839-70fcfcb3323c"
df=pd.read_csv(file_path)  # load the whole dataset into a DataFrame
df.head()  # preview the first five rows
# + [markdown] id="MZ7BUiW_9dq6"
# ### Feature Selections
#
# It is the process of reducing the number of input variables when developing a predictive model. Used to reduce the number of input variables to both reduce the computational cost of modelling and, in some cases, to improve the performance of the model.
#
# We will assign all the required input features to X and target/outcome to Y.
# + id="0J0bHRjt9dq6"
X=df[features]  # model inputs
Y=df[target]    # prediction target
# + [markdown] id="5yVBz06Y9dq7"
# ### Data Preprocessing
#
# Since the majority of the machine learning models in the Sklearn library doesn't handle string category data and Null value, we have to explicitly remove or replace null values. The below snippet have functions, which removes the null value if any exists. And convert the string classes data in the datasets by encoding them to integer classes.
#
# + id="38Z1pH8a9dq7"
def NullClearner(df):
    """Fill missing values in a pandas Series, in place.

    Numeric columns are filled with the column mean; all other Series
    (strings/categories) with the most frequent value. Non-Series inputs
    are returned untouched so the function can be mapped over mixed data.

    Parameters:
        df: a pandas Series (or any other object, returned as-is).

    Returns:
        The same object, with NaNs filled when it is a Series.
    """
    if isinstance(df, pd.Series):
        if pd.api.types.is_numeric_dtype(df):
            # mean-fill for ANY numeric dtype (float32/int32/... too,
            # not just the previously hard-coded float64/int64)
            df.fillna(df.mean(), inplace=True)
        else:
            # non-numeric columns fall back to the most frequent value
            df.fillna(df.mode()[0], inplace=True)
    return df
def EncodeX(df):
    """One-hot encode every categorical/string column via dummy variables."""
    encoded = pd.get_dummies(df)
    return encoded
# + [markdown] id="_JZVwNT19dq8"
# Calling preprocessing functions on the feature and target set.
#
# + id="DXSAyhvE9dq8" outputId="13cd3d48-6941-4d13-d33d-f2c53b0dd8bf"
x=X.columns.to_list()
# fill NaNs column by column, then one-hot encode any string columns
for i in x:
    X[i]=NullClearner(X[i])
X=EncodeX(X)
Y=NullClearner(Y)
X.head()
# + [markdown] id="wpUafOKB9dq9"
# #### Correlation Map
#
# In order to check the correlation between the features, we will plot a correlation matrix. It is effective in summarizing a large amount of data where the goal is to see patterns.
# + id="Ir7a_WRU9dq-" outputId="27fc4ab4-f9e7-4e70-ee9b-dbbe904d6708"
f,ax = plt.subplots(figsize=(18, 18))
matrix = np.triu(X.corr())  # upper-triangle mask so each feature pair is shown only once
se.heatmap(X.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax, mask=matrix)
plt.show()
# + [markdown] id="LtmtLn_t9dq-"
# ### Data Splitting
#
# The train-test split is a procedure for evaluating the performance of an algorithm. The procedure involves taking a dataset and dividing it into two subsets. The first subset is utilized to fit/train the model. The second subset is used for prediction. The main motive is to estimate the performance of the model on new data.
# + id="treZmG_89dq_"
# fixed random_state makes the 80/20 split reproducible
x_train,x_test,y_train,y_test=train_test_split(X,Y,test_size=0.2,random_state=123)
# + [markdown] id="P1V6Kqzf9dq_"
# ### Model
#
# Generalized Linear Model with a Gamma distribution.
#
# This regressor uses the ‘log’ link function.
#
# For Ref.
# https://scikit-learn.org/stable/modules/linear_model.html#generalized-linear-regression
# + [markdown] id="KkpT7-METQuk"
# ## Data Rescaling
#
# Standardize features by removing the mean and scaling to unit variance
#
# The standard score of a sample x is calculated as:
#
# z = (x - u) / s
#
# where u is the mean of the training samples or zero if with_mean=False, and s is the standard deviation of the training samples or one if with_std=False.
#
# <a href="https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html">More about StandardScaler</a>
# + [markdown] id="mYOHL7De9drA"
# #### Feature Transformation:
# Generate polynomial and interaction features.
#
# Generate a new feature matrix consisting of all polynomial combinations of the features with degree less than or equal to the specified degree.
#
# For more reference:-
# https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.PolynomialFeatures.html
# + id="YjcrmNGJ9drA" colab={"base_uri": "https://localhost:8080/"} outputId="9ea36ae7-2466-4e0d-bd16-4c1801fb615f"
# pipeline: standardize -> polynomial feature expansion (default degree=2) -> Gamma GLM
model = make_pipeline(StandardScaler(), PolynomialFeatures(),GammaRegressor())
model.fit(x_train,y_train)
# + [markdown] id="XeI8NwVQ9drA"
#
# We will use the trained model to make a prediction on the test set.Then use the predicted value for measuring the accuracy of our model.
#
# score: The score function returns the coefficient of determination R2 of the prediction.
#
# + id="gmKlwNUl9drB" outputId="cdfa473a-deec-4f12-b893-417d1b830848"
# GammaRegressor.score reports D^2 (deviance-based), expressed here as a percentage
print("Accuracy score {:.2f} %\n".format(model.score(x_test,y_test)*100))
# + [markdown] id="GRtApNR69drB"
# > **r2_score**: The **r2_score** function computes the percentage variablility explained by our model, either the fraction or the count of correct predictions.
#
# > **mae**: The **mean abosolute error** function calculates the amount of total error(absolute average distance between the real data and the predicted data) by our model.
#
# > **mse**: The **mean squared error** function squares the error(penalizes the model for large errors) by our model.
# + id="VAgblgNB9drB" outputId="6c0131f7-b1e3-4ec3-9900-51249b120927"
y_pred=model.predict(x_test)
print("R2 Score: {:.2f} %".format(r2_score(y_test,y_pred)*100))
print("Mean Absolute Error {:.2f}".format(mean_absolute_error(y_test,y_pred)))
print("Mean Squared Error {:.2f}".format(mean_squared_error(y_test,y_pred)))
# + [markdown] id="9yiwhW8j9drC"
# #### Prediction Plot
#
# First, we make use of a plot to plot the actual observations, with x_train on the x-axis and y_train on the y-axis.
# For the regression line, we will use x_train on the x-axis and then the predictions of the x_train observations on the y-axis.
# + id="vQ3yjYMG9drC" outputId="75235d34-0cb1-4eef-8787-826d0abdf0a6"
# overlay actual vs predicted for the first 20 test records
plt.figure(figsize=(14,10))
plt.plot(range(20),y_test[0:20], color = "green")
plt.plot(range(20),model.predict(x_test[0:20]), color = "red")
plt.legend(["Actual","prediction"])
plt.title("Predicted vs True Value")
plt.xlabel("Record number")
plt.ylabel(target)
plt.show()
# + [markdown] id="l1Wu5fomgmu0"
# **creator: <NAME>, GitHub: [profile](https://github.com/viratchowdary21)**
#
| Regression/Linear Models/GammaRegressor_StandardScaler_PolynomialFeatures.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import json
import numpy as np
import pandas as pd
import pyproj
import matplotlib.pyplot as plt
from matplotlib import gridspec
from matplotlib import cm
from pysheds.grid import Grid
from matplotlib import colors
import seaborn as sns
import warnings
from swmmtoolbox import swmmtoolbox
from matplotlib.lines import Line2D
warnings.filterwarnings('ignore')
sns.set()
# %matplotlib inline
# +
# Compare the outlet hydrograph, its variance and its peak for different
# numbers of controllers (k), against the uncontrolled baseline (k=0).
ks = (1, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 30, 35)
sns.set_palette('spring', len(ks))
fig = plt.figure(figsize=(14, 6))
gs = gridspec.GridSpec(2, 2)
ax0 = plt.subplot(gs[:, 0])
ax1 = plt.subplot(gs[0, 1])
ax2 = plt.subplot(gs[1, 1])
output_dir = '../data/out'
var = {}
maxes = {}
# uncontrolled baseline (k=0), drawn as a dashed black reference line
outfall = swmmtoolbox.extract('../data/out/uncontrolled_16.87mm.out',
                              'system,Flow_leaving_outfalls,11').iloc[:,0]
outfall.name = 'k=0'
outfall.plot(ax=ax0, color='k', linestyle='--', zorder=11, linewidth=2)
var[0] = outfall.var()
maxes[0] = outfall.max()
for k in ks:
    outfall = swmmtoolbox.extract('../data/out/linear_k{0}_50pct_phi50_16.87mm.out'.format(k),
                              'system,Flow_leaving_outfalls,11').iloc[:,0]
    outfall.name = 'k={0}'.format(k)
    outfall.plot(ax=ax0, zorder=int(k), alpha=1, linewidth=2)
    var[k] = outfall.var()
    maxes[k] = outfall.max()
var = pd.Series(var).sort_index(ascending=False)
maxes = pd.Series(maxes).sort_index(ascending=False)
color_dict = dict(zip(ks, sns.color_palette('spring', len(ks))))
color_dict.update({0 : [0.0,0.0,0.0]})
# renamed from `colors` to avoid shadowing `matplotlib.colors` imported above
bar_colors = [color_dict[i] for i in var.index]
var.index = var.index.astype(str)
# pandas removed the deprecated `colors=` plotting kwarg; the supported name is `color=`
var.plot(ax=ax1, kind='bar', color=bar_colors, rot=0)
maxes.index = maxes.index.astype(str)
maxes.plot(ax=ax2, kind='bar', color=bar_colors, rot=0)
ax0.set_xlabel('Time', size=13)
ax2.set_xlabel('Number of controllers (k)', size=13)
ax0.legend(fontsize=12)
ax0.get_xaxis().set_ticklabels([])
ax1.get_xaxis().set_ticklabels([])
ax0.set_ylabel('Outlet discharge $(m^3/s)$', size=13)
ax0.minorticks_off()
ax0.set_title('Effect of number of controllers on hydrograph', size=14)
ax1.set_title('Flashiness (hydrograph variance)', size=14)
ax2.set_title('Peak discharge', size=14)
ax1.set_ylabel(r'Hydrograph variance $(m^3/s)^2$', size=13)
ax2.set_ylabel(r'Peak discharge ($m^3/s$)', size=13)
plt.tight_layout()
#plt.savefig('../img/hydrograph_num_controllers_vert.png', bbox_inches='tight', dpi=200)
# +
# Compare hydrograph variance for optimized ("linear") controller placements
# against randomized ("naive") placements, for varying k.
ks = (1, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 30, 35)
sns.set_palette('inferno', len(ks))
fig = plt.figure(figsize=(12, 3.6))
gs = gridspec.GridSpec(1, 2, width_ratios=[1, 2.7])
ax0 = plt.subplot(gs[0])
ax1 = plt.subplot(gs[1])
output_dir = '../data/out'
var_l = {}
var_n = []
var_n_labels = []
# uncontrolled baseline appears in both panels as k=0
outfall = swmmtoolbox.extract('../data/out/uncontrolled_16.87mm.out',
                              'system,Flow_leaving_outfalls,11').iloc[:,0]
outfall.name = 'k=0'
var_l[0] = outfall.var()
var_n.append(outfall.var())
var_n_labels.append(0)
for k in ks:
    outfall = swmmtoolbox.extract('../data/out/linear_k{0}_50pct_phi50_16.87mm.out'.format(k),
                              'system,Flow_leaving_outfalls,11').iloc[:,0]
    outfall.name = 'k={0}'.format(k)
    var_l[k] = outfall.var()
# collect every randomized ("naive") run matching this storm from the output dir
for fn in os.listdir(output_dir):
    if (('naive') in fn) and ('50pct' in fn) and ('16.87mm' in fn) and (not 'under' in fn):
        outfall = swmmtoolbox.extract(os.path.join(output_dir, fn), 'system,Flow_leaving_outfalls,11').iloc[:,0]
        k = int(fn.split('_k')[1].split('_')[0])
        outfall.name = 'k={0}'.format(k)
        var_n.append(outfall.var())
        var_n_labels.append(k)
var_l = pd.Series(var_l).sort_index(ascending=False)
var_n = pd.Series(var_n, index=var_n_labels).sort_index(ascending=False)
color_dict = dict(zip(var_l.index, sns.color_palette('spring_r', len(var_l.index))))
color_dict.update({0 : [0.0,0.0,0.0]})
# renamed from `colors` to avoid shadowing `matplotlib.colors` imported above
bar_colors = [color_dict[i] for i in var_l.index]
# pandas removed the deprecated `colors=` plotting kwarg; the supported name is `color=`
var_l.plot(ax=ax0, kind='bar', color=bar_colors, rot=90)
color_dict = dict(zip(var_n.index, sns.color_palette('cool', len(var_n.index))))
color_dict.update({0 : [0.0,0.0,0.0]})
bar_colors = [color_dict[i] for i in var_n.index]
var_n.plot(ax=ax1, kind='bar', color=bar_colors, rot=90)
ax0.set_title('Flashiness (optimized)', size=14)
ax1.set_title('Flashiness (randomized)', size=14)
ax0.set_ylabel(r'Hydrograph variance $(m^3/s)^2$', size=12)
ax0.set_xlabel('Number of controllers (k)', size=12, labelpad=8)
ax1.set_xlabel('Number of controllers (k)', size=12, labelpad=8)
ax1.yaxis.set_ticklabels([])
plt.tight_layout()
#plt.savefig('../img/num_controllers_comparison_var.png', bbox_inches='tight', dpi=200)
# +
# Same comparison as above, but for peak discharge and the 1.5 in design storm.
ks = (1, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 30, 35)
sns.set_palette('inferno', len(ks))
fig = plt.figure(figsize=(12, 3.6))
gs = gridspec.GridSpec(1, 2, width_ratios=[1, 2.7])
ax0 = plt.subplot(gs[0])
ax1 = plt.subplot(gs[1])
output_dir = '../data/out'
max_l = {}
max_n = []
max_n_labels = []
# uncontrolled baseline appears in both panels as k=0
outfall = swmmtoolbox.extract('../data/out/uncontrolled_diff_1.5in.out',
                              'system,Flow_leaving_outfalls,11').iloc[:,0]
outfall.name = 'k=0'
max_l[0] = outfall.max()
max_n.append(outfall.max())
max_n_labels.append(0)
for k in ks:
    outfall = swmmtoolbox.extract('../data/out/linear_k{0}_50pct_phi10_diff_1.5in.out'.format(k),
                              'system,Flow_leaving_outfalls,11').iloc[:,0]
    outfall.name = 'k={0}'.format(k)
    max_l[k] = outfall.max()
# collect every randomized ("naive") run matching this storm from the output dir
for fn in os.listdir(output_dir):
    if (('naive') in fn) and ('50pct' in fn) and ('1.5in' in fn):
        outfall = swmmtoolbox.extract(os.path.join(output_dir, fn), 'system,Flow_leaving_outfalls,11').iloc[:,0]
        k = int(fn.split('_k')[1].split('_')[0])
        outfall.name = 'k={0}'.format(k)
        max_n.append(outfall.max())
        max_n_labels.append(k)
max_l = pd.Series(max_l).sort_index(ascending=False)
max_n = pd.Series(max_n, index=max_n_labels).sort_index(ascending=False)
color_dict = dict(zip(max_l.index, sns.color_palette('spring_r', len(max_l.index))))
color_dict.update({0 : [0.0,0.0,0.0]})
# renamed from `colors` to avoid shadowing `matplotlib.colors` imported above
bar_colors = [color_dict[i] for i in max_l.index]
# pandas removed the deprecated `colors=` plotting kwarg; the supported name is `color=`
max_l.plot(ax=ax0, kind='bar', color=bar_colors, rot=90)
color_dict = dict(zip(max_n.index, sns.color_palette('cool', len(max_n.index))))
color_dict.update({0 : [0.0,0.0,0.0]})
bar_colors = [color_dict[i] for i in max_n.index]
max_n.plot(ax=ax1, kind='bar', color=bar_colors, rot=90)
ax0.set_title('Peak discharge (optimized)', size=14)
ax1.set_title('Peak discharge (randomized)', size=14)
ax0.set_ylabel(r'Peak discharge ($m^3/s$)', size=12)
ax0.set_xlabel('Number of controllers (k)', size=12, labelpad=8)
ax1.set_xlabel('Number of controllers (k)', size=12, labelpad=8)
ax1.yaxis.set_ticklabels([])
plt.tight_layout()
plt.savefig('../img/num_controllers_comparison_max.png', bbox_inches='tight', dpi=200)
# -
| notebooks/FIG_num_controllers.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="nX_W0YPqzyRc" colab_type="text"
# # Analysis of App Profiles for the App Store and Google Play Markets
#
# My aim in this project is to find mobile app profiles that are profitable for the App Store and Google Play markets. My job is to enable mobile app developers to make data-driven decisions with respect to the kind of apps they build.
#
# I will suppose that the main source of revenue consists of in-app ads. This means that the revenue for any given app is mostly influenced by the number of users that use the app.
#
# My goal for this project is to analyze data to help developers understand what kinds of apps are likely to attract more users.
# + [markdown] id="nj3qhQSnzyRg" colab_type="text"
# ## Opening and Exploring the Data
#
# As of January 2020, there were approximately 2 million iOS apps available on the App Store, and 2.1 million Android apps on Google Play.
#
# Collecting data for over four million apps requires a significant amount of time and money, so we'll try to analyze a sample of data instead. To avoid spending resources with collecting new data ourselves, we should first try to see whether we can find any relevant existing data at no cost. Luckily, these are two data sets that seem suitable for our purpose:
#
# - [A data set](https://www.kaggle.com/lava18/google-play-store-apps) containing data about approximately ten thousand Android apps from Google Play. You can download the data set directly from [this link](https://dq-content.s3.amazonaws.com/350/googleplaystore.csv).
# - [A data set](https://www.kaggle.com/ramamet4/app-store-apple-data-set-10k-apps) containing data about approximately seven thousand iOS apps from the App Store. You can download the data set directly from [this link](https://dq-content.s3.amazonaws.com/350/AppleStore.csv).
#
# Let's start by opening the two data sets and then continue with exploring the data.
# + id="ZNzXBSALzyRl" colab_type="code" colab={}
from csv import reader
# Google Play dataset (CSV expected alongside this notebook)
with open("googleplaystore.csv") as f:
    android_data = list(reader(f))
# Apple Store dataset
with open("AppleStore.csv") as f:
    ios_data = list(reader(f))
# split off the header row from the data rows for each dataset
android_cols = android_data[0]
android_data = android_data[1:]
ios_cols = ios_data[0]
ios_data = ios_data[1:]
# + [markdown] id="aVNUM6G2zyRr" colab_type="text"
# To make it easier to explore the two data sets, I'll first write a function named `explore_data()` that I can use repeatedly to explore rows in a more readable way. I'll also add an option for our function to show the number of rows and columns for any data set.
# + id="SiW2GuwezyRs" colab_type="code" colab={}
def explore_data(dataset, start, end, rows_and_columns=False):
    """Print the rows dataset[start:end], optionally followed by the dataset's dimensions."""
    for record in dataset[start:end]:
        print(record)
        print('\n')  # blank separator line after each row
    if rows_and_columns:
        print('Number of rows:', len(dataset))
        print('Number of columns:', len(dataset[0]))
# + [markdown] id="vTlm-FjczyRv" colab_type="text"
# Now let's take a look at the first few rows of each dataset.
# + id="FoMhUn5SzyRw" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 642} outputId="bde2d770-9092-4397-bb85-01a88a097fce"
# preview the first few rows of each dataset, with dimensions
n_rows = 3
print("First {} rows of the".format(n_rows), "Google Play Store", "dataset\n")
print(android_cols)
print("\n")
explore_data(android_data, 0, n_rows, True)
print("\n")
print("\n")
print("First {} rows of the".format(n_rows), "Apple Store", "dataset\n")
print(ios_cols)
print("\n")
explore_data(ios_data, 0, n_rows, True)
# + [markdown] id="UE3vWssczyR0" colab_type="text"
# The Google Play data set has 10841 apps and 13 columns. At a quick glance, the columns that might be useful for the purpose of our analysis are `'App'`, `'Category'`, `'Reviews'`, `'Installs'`, `'Type'`, `'Price'`, and `'Genres'`.
#
# We have 7197 iOS apps in this data set, and the columns that seem interesting are: `'track_name'`, `'currency'`, `'price'`, `'rating_count_tot'`, `'rating_count_ver'`, and `'prime_genre'`. Not all column names are self-explanatory in this case, but details about each column can be found in the data set [documentation](https://www.kaggle.com/ramamet4/app-store-apple-data-set-10k-apps/home).
# + [markdown] id="XEkJ769CzyR6" colab_type="text"
# ## Data Cleaning
#
# Now we need to process the data to make some analysis.
#
# ### Deleting Wrong Data
#
# I build a function called `clean_dataset()` to analyze row by row the data set and print an error when there is a missing element and eventually remove the row.
# + id="FrEFsxP_zyR7" colab_type="code" colab={}
def clean_dataset(data, clean=False):
    """Report rows whose length differs from the header row; optionally delete them in place.

    Parameters:
        data: list of rows; data[0] is taken as the reference (header-length) row.
        clean: when True, malformed rows are removed from `data` in place.
    """
    expected_len = len(data[0])
    bad_indices = []
    # first pass only detects, so we never mutate the list while iterating it
    # (the old `del data[idx]` inside the loop skipped the row after each deletion)
    for idx, row in enumerate(data):
        if len(row) != expected_len:
            # report the row's position (previously the row's length was printed by mistake)
            print("Row number", idx, "contains missing values\n")
            bad_indices.append(idx)
            if clean:
                print("Removed bad row\n")
    if clean:
        # delete from the back so earlier indices remain valid
        for idx in reversed(bad_indices):
            del data[idx]
    if not bad_indices:
        print("No bad rows!\n")
# + [markdown] id="lt_Y-spPzyR-" colab_type="text"
# Now I'm looking on the entire data sets to see if there are some missing values.
#
#
# + id="egbI7oCjzyR_" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 218} outputId="c556537c-d207-443c-ca41-1507969a742d"
# remove malformed rows (wrong column count) from both datasets, in place
print("Google Play Store", "dataset\n")
clean_dataset(android_data, True)
print("\n")
print("Apple Store", "dataset\n")
clean_dataset(ios_data, True)
# + [markdown] id="ogtsjo0kzySC" colab_type="text"
# ### Removing Duplicate Entries
#
# Let's build a function called `duplicate_in_dataset()` to analyze row by row the data set and tell us if there are some duplicate rows.
# + id="auCVio7ZzySD" colab_type="code" colab={}
def duplicate_in_dataset(data, NAME_COL=0):
    """Print how many rows repeat an app name already seen, plus a few examples.

    Parameters:
        data: list of rows.
        NAME_COL: index of the app-name column within each row.
    """
    duplicate_apps = []
    # a set gives O(1) membership tests; the previous list scan was O(n^2)
    # over ~10k apps. Printed output is unchanged.
    seen = set()
    for app in data:
        name = app[NAME_COL]
        if name in seen:
            duplicate_apps.append(name)
        else:
            seen.add(name)
    print("Number of duplicate apps:", len(duplicate_apps))
    print("\n")
    print("Examples of duplicate apps:", duplicate_apps[:15])
# + [markdown] id="Wk_wRbcyzySI" colab_type="text"
# For the *Google Play Store* the column that contains the name of the APP is *App* at index 0.
#
# For the *Apple Store* the column that contains the name of the APP is *track_name* at index 1.
# + id="IsGRT84FzySK" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 272} outputId="8a3e4202-d23a-46d3-f0ff-44fb940a48b4"
# name column: index 0 ('App') for Google Play, index 1 ('track_name') for the App Store
print("Google Play Store", "dataset\n")
duplicate_in_dataset(android_data, 0)
print("\n")
print("Apple Store", "dataset\n")
duplicate_in_dataset(ios_data, 1)
# + [markdown] id="t3T74HO0zySQ" colab_type="text"
# We don't want to count certain apps more than once when we analyze data, so we need to remove the duplicate entries and keep only one entry per app. One thing we could do is remove the duplicate rows randomly, but we could probably find a better way.
#
# The main difference happens on the number of reviews. The different numbers show that the data was collected at different times. We can use this to build a criterion for keeping rows. We won't remove rows randomly, but rather we'll keep the rows that have the highest number of reviews because the higher the number of reviews, the more reliable the ratings.
#
# To do that, we will:
#
# - Create a dictionary where each key is a unique app name, and the value is the highest number of reviews of that app
# - Use the dictionary to create a new data set, which will have only one entry per app (and we only select the apps with the highest number of reviews)
#
# Let's build a function `remove_duplicates()` to do this.
# + id="hH8yXBRpzySS" colab_type="code" colab={}
def remove_duplicates(data, NAME_COL, RATING_COL):
    """Keep one row per app name: the row with the highest rating count.

    Ties keep the first row seen; insertion order of first appearances
    is preserved in the returned list.
    """
    best = {}
    for row in data:
        name = row[NAME_COL]
        rating = float(row[RATING_COL])
        current = best.get(name)
        # keep the row only if it beats the stored rating count
        if current is None or rating > float(current[RATING_COL]):
            best[name] = row
    return list(best.values())
# + [markdown] id="nFzR0RTwzySV" colab_type="text"
# For the *Google Play Store* the column that contains the number of ratings of the APP is *Reviews* at index 3.
#
# For the *Apple Store* the column that contains the number of ratings of the APP is *rating_count_tot* at index 5.
# + id="bDYqAIqNzySW" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 185} outputId="d11e1bfd-a054-4544-e10e-77bdb1b0aa8c"
# rating-count column: index 3 ('Reviews') for Google Play, index 5 ('rating_count_tot') for iOS
print("Google Play Store", "dataset\n")
android_clean = remove_duplicates(android_data, 0, 3)
print("Removed:", len(android_data) - len(android_clean),"rows\n")
print("\n")
print("Apple Store", "dataset\n")
ios_clean = remove_duplicates(ios_data, 1, 5)
print("Removed:", len(ios_data) - len(ios_clean),"rows\n")
# + [markdown] id="ioLk2sUSzySa" colab_type="text"
# ### Removing Non-English Apps
#
# If you explore the data sets enough, you'll notice the names of some of the apps suggest they are not directed toward an English-speaking audience.
#
# We're not interested in keeping these kind of apps, so we'll remove them. One way to go about this is to remove each app whose name contains a symbol that is not commonly used in English text — English text usually includes letters from the English alphabet, numbers composed of digits from 0 to 9, punctuation marks (., !, ?, ;, etc.), and other symbols (+, *, /, etc.).
#
# All these characters that are specific to English texts are encoded using the ASCII standard. Each ASCII character has a corresponding number between 0 and 127 associated with it, and we can take advantage of that to build a function `normal_string()` that checks an app name and tells us whether it contains more non-ASCII characters than a fixed threshold.
#
# We built this function below, and we use the built-in `ord()` function to find out the corresponding encoding number of each character.
# + id="jxPW_xoVzySb" colab_type="code" colab={}
def normal_string(string, LIMIT=3):
    """Return False once the string contains more than LIMIT non-ASCII characters.

    Names with up to LIMIT non-ASCII characters (emoji, trademark signs, ...)
    still count as "normal" English names.
    """
    non_ascii = 0
    for ch in string:
        if ord(ch) > 127:  # outside the ASCII range => non-English character
            non_ascii += 1
            if non_ascii > LIMIT:
                return False
    return True
# + [markdown] id="I-II4VNuzySh" colab_type="text"
# Check the output of the function for some examples:
# + id="L2GadvB6zySi" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 84} outputId="ac32dbe4-fc9e-4201-db36-745465b01f16"
# sanity-check the filter: ASCII names and names with a few symbols pass, mostly-CJK names fail
print(normal_string("Instagram"))
print(normal_string("爱奇艺PPS -《欢乐颂2》电视剧热播"))
print(normal_string("Docs To Go™ Free Office Suite"))
print(normal_string("Instachat 😜"))
# + [markdown] id="PdNcNamgzySl" colab_type="text"
# Let's build a function called `english_dataset()` to create a new dataset containing only English apps using the `normal_string()` function.
# + id="9C_eTjFezySm" colab_type="code" colab={}
def english_dataset(data, NAME_COL=0):
    """Return the rows whose app name passes the normal_string() ASCII filter."""
    return [row for row in data if normal_string(row[NAME_COL])]
# + [markdown] id="m9v22wUqzySp" colab_type="text"
# The function is still not perfect, and very few non-English apps might get past our filter, but this seems good enough at this point in our analysis — we shouldn't spend too much time on optimization at this point.
#
# Use the above new function `english_dataset()` to create new datasets.
# + id="eICu3tPYzySq" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 185} outputId="fe14dac4-743d-4b12-a978-ce57cd99ec6b"
# drop apps whose names look non-English from both cleaned datasets
print("Google Play Store", "dataset\n")
android_clean_english = english_dataset(android_clean, 0)
print("Removed:", len(android_clean) - len(android_clean_english),"non-English apps\n")
print("\n")
print("Apple Store", "dataset\n")
ios_clean_english = english_dataset(ios_clean, 1)
print("Removed:", len(ios_clean) - len(ios_clean_english),"non-English apps\n")
# + [markdown] id="ubJDzVmYzySt" colab_type="text"
# ### Isolating the Free Apps
#
# As we mentioned in the introduction, we only study apps that are free to download and install, and our main source of revenue consists of in-app ads. Our data sets contain both free and non-free apps, and we'll need to isolate only the free apps for our analysis. Below, we isolate the free apps for both our data sets.
#
# Let's build a function called `free_dataset()` to create a new dataset containing only free apps.
# + id="Q4G3kvKDzySv" colab_type="code" colab={}
def free_dataset(data, PRICE_COL):
    """Return only the rows whose price field marks the app as free ("0" or "0.0")."""
    free_rows = []
    for row in data:
        price = row[PRICE_COL]
        if price in ("0.0", "0"):
            free_rows.append(row)
    return free_rows
# + [markdown] id="MEOLV7glzySy" colab_type="text"
# For the *Google Play Store* the column that contains the price of the APP is *Price* at index 7.
#
# For the *Apple Store* the column that contains the price of the APP is *price* at index 4.
# + id="j2KnyyQLzySz" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 185} outputId="6c9b128f-2a6a-4522-fe0c-723331963450"
# price column: index 7 ('Price') for Google Play, index 4 ('price') for the App Store
print("Google Play Store", "dataset\n")
android_final = free_dataset(android_clean_english, 7)
print("Removed:", len(android_clean_english) - len(android_final),"paid apps\n")
print("\n")
print("Apple Store", "dataset\n")
ios_final = free_dataset(ios_clean_english, 4)
print("Removed:", len(ios_clean_english) - len(ios_final),"paid apps\n")
# + [markdown] id="4Uo4zeGdzyS2" colab_type="text"
# Final datasets:
# + id="gz5H5Hv7zyS2" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 507} outputId="0fa7acf6-690e-464a-f399-52fb6a2b4985"
print("First {} rows of the".format(n_rows), "Google Play Store", "dataset\n")
explore_data(android_final, 0, n_rows, True)
print("\n")
print("First {} rows of the".format(n_rows), "Apple Store", "dataset\n")
explore_data(ios_final, 0, n_rows, True)
# + [markdown] id="sOPAojtFzyS4" colab_type="text"
# ## Data Analysis
#
# + [markdown] id="aaAqbsA2Dtk7" colab_type="text"
# ### Most Common Apps by Genre
#
# As I mentioned in the introduction, my aim is to determine the kinds of apps that are likely to attract more users because our revenue is highly influenced by the number of people using the apps.
#
# To minimize risks and overhead, the validation strategy for an app idea is comprised of three steps:
#
# 1. Build a minimal Android version of the app, and add it to Google Play.
# 2. If the app has a good response from users, develop it further.
# 3. If the app is profitable after six months, also build an iOS version of the app and add it to the App Store.
#
# Because the end goal is to add the app on both the App Store and Google Play, we need to find app profiles that are successful on both markets. For instance, a profile that might work well for both markets might be a productivity app that makes use of gamification.
#
# Let's begin the analysis by getting a sense of the most common genres for each market. For this, we'll build a frequency table for the `prime_genre` column of the App Store data set, and the `Genres` and `Category` columns of the Google Play data set.
#
# I'll build two functions we can use to analyze the frequency tables:
#
# - One function to generate frequency tables that show percentages
# - Another function that we can use to display the percentages in a descending order
# + id="zTEHShVEzyS6" colab_type="code" colab={}
def freq_table(dataset, index):
    """Return a {value: percentage} frequency table for column `index`.

    Values are percentages of the whole dataset (0-100), matching the
    stated intent ("frequency tables that show percentages") and the
    percentage figures quoted in the analysis below; the previous
    version returned raw counts instead.
    """
    counts = {}
    for row in dataset:
        val = row[index]
        counts[val] = counts.get(val, 0) + 1
    total = len(dataset)
    # convert raw counts to percentage shares (no division when dataset is empty)
    return {val: (cnt / total) * 100 for val, cnt in counts.items()}
def display_table(dataset, index):
    """Print the frequency table of column *index*, most frequent first.

    Ties on the count are broken by descending key, matching a reverse
    sort of (count, key) tuples.
    """
    table = freq_table(dataset, index)
    ordered = sorted(table.items(), key=lambda item: (item[1], item[0]), reverse=True)
    for key, count in ordered:
        print(key, ':', count)
# + [markdown] id="QqaJpm9qDC9Y" colab_type="text"
# We start by examining the frequency table for the `prime_genre` column of the App Store data set.
# + id="jc1EmHq-zyS9" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 403} outputId="1ca7be5b-bdd0-480a-d1d8-1b546389e6b7"
# Raw frequency counts for the App Store's prime_genre column (index 11).
display_table(ios_final, 11)
# + [markdown] id="XpGySRstDIJx" colab_type="text"
# We can see that among the free English apps, more than half (58.16%) are games. Entertainment apps are close to 8%, followed by photo and video apps, which are close to 5%. Only 3.66% of the apps are designed for education, followed by social networking apps which account for 3.29% of the apps in our data set.
#
# The general impression is that App Store (at least the part containing free English apps) is dominated by apps that are designed for fun (games, entertainment, photo and video, social networking, sports, music, etc.), while apps with practical purposes (education, shopping, utilities, productivity, lifestyle, etc.) are more rare. However, the fact that fun apps are the most numerous doesn't also imply that they also have the greatest number of users — the demand might not be the same as the offer.
#
# Let's continue by examining the `Genres` and `Category` columns of the Google Play data set (two columns which seem to be related).
# + id="V-pUsqpxzyTA" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 571} outputId="e93ce24c-6acd-46a4-f7f3-844937f8b252"
# Raw frequency counts for Google Play's Category column (index 1).
display_table(android_final, 1) # Category
# + [markdown] id="fCjmPqbkDQZD" colab_type="text"
# The landscape seems significantly different on Google Play: there are not that many apps designed for fun, and it seems that a good number of apps are designed for practical purposes (family, tools, business, lifestyle, productivity, etc.). However, if we investigate this further, we can see that the family category (which accounts for almost 19% of the apps) means mostly games for kids.
#
#
# Even so, practical apps seem to have a better representation on Google Play compared to App Store. This picture is also confirmed by the frequency table we see for the `Genres` column:
# + id="zXyPnGArzyTC" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="4f67c657-403a-45de-a037-6a45faf9c68c"
# Raw frequency counts for Google Play's more granular Genres column (index 9).
display_table(android_final, 9) # Genres
# + [markdown] id="kqdqkNr9DbfE" colab_type="text"
# The difference between the `Genres` and the `Category` columns is not crystal clear, but one thing we can notice is that the `Genres` column is much more granular (it has more categories). We're only looking for the bigger picture at the moment, so we'll only work with the `Category` column moving forward.
#
# Up to this point, we found that the App Store is dominated by apps designed for fun, while Google Play shows a more balanced landscape of both practical and for-fun apps. Now we'd like to get an idea about the kind of apps that have most users.
# + [markdown] id="4itFZkw4DgM_" colab_type="text"
# ### Most Popular Apps by Genre on the App Store
#
# One way to find out what genres are the most popular (have the most users) is to calculate the average number of installs for each app genre. For the Google Play data set, we can find this information in the `Installs` column, but for the App Store data set this information is missing. As a workaround, we'll take the total number of user ratings as a proxy, which we can find in the `rating_count_tot` app.
#
# Below, we calculate the average number of user ratings per app genre on the App Store:
# + id="kGAYQ7O3zyTF" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 403} outputId="32776e0f-ff26-4bef-d471-0455bee5735f"
# Average number of user ratings (rating_count_tot, column 5) per App Store
# genre, used as a proxy for install counts.
ios_genre_freq_table = freq_table(ios_final, 11)
for genre in ios_genre_freq_table:
    ratings = [float(row[5]) for row in ios_final if row[11] == genre]
    print(genre, ":", round(sum(ratings) / len(ratings)))
# + [markdown] id="VVlrDyBpD-H1" colab_type="text"
# On average, navigation apps have the highest number of user reviews, but this figure is heavily influenced by Waze and Google Maps, which have close to half a million user reviews together:
# + colab_type="code" id="rdNDNVbVdw4X" outputId="4d9f8744-a164-4ac0-a56c-ba8eb5ecf6f9" colab={"base_uri": "https://localhost:8080/", "height": 118}
# Every navigation app with its user-rating count, to show how a few giants
# dominate the genre average.
for app in ios_final:
    if app[-5] != 'Navigation':
        continue
    print(app[1], ':', app[5]) # print name and number of ratings
# + [markdown] colab_type="text" id="55kQqN_qdw4Z"
# The same pattern applies to social networking apps, where the average number is heavily influenced by a few giants like Facebook, Pinterest, Skype, etc. Same applies to music apps, where a few big players like Pandora, Spotify, and Shazam heavily influence the average number.
#
# Our aim is to find popular genres, but navigation, social networking or music apps might seem more popular than they really are. The average number of ratings seem to be skewed by very few apps which have hundreds of thousands of user ratings, while the other apps may struggle to get past the 10,000 threshold. We could get a better picture by removing these extremely popular apps for each genre and then rework the averages, but we'll leave this level of detail for later.
#
# Reference apps have 74,942 user ratings on average, but it's actually the Bible and Dictionary.com which skew up the average rating:
# + colab_type="code" id="T2H7TxY6dw4a" outputId="7c81bbba-365c-47bc-e8cf-64beec498733" colab={"base_uri": "https://localhost:8080/", "height": 319}
# Every reference app with its user-rating count.
for app in ios_final:
    if app[-5] != 'Reference':
        continue
    print(app[1], ':', app[5])
# + [markdown] colab_type="text" id="RClnbNGSdw4d"
# However, this niche seems to show some potential. One thing we could do is take another popular book and turn it into an app where we could add different features besides the raw version of the book. This might include daily quotes from the book, an audio version of the book, quizzes about the book, etc. On top of that, we could also embed a dictionary within the app, so users don't need to exit our app to look up words in an external app.
#
# This idea seems to fit well with the fact that the App Store is dominated by for-fun apps. This suggests the market might be a bit saturated with for-fun apps, which means a practical app might have more of a chance to stand out among the huge number of apps on the App Store.
#
# Other genres that seem popular include weather, book, food and drink, or finance. The book genre seem to overlap a bit with the app idea we described above, but the other genres don't seem too interesting to us:
#
# - Weather apps — people generally don't spend too much time in-app, and the chances of making profit from in-app adds are low. Also, getting reliable live weather data may require us to connect our apps to non-free APIs.
#
# - Food and drink — examples here include Starbucks, Dunkin' Donuts, McDonald's, etc. So making a popular food and drink app requires actual cooking and a delivery service, which is outside the scope of our company.
#
# - Finance apps — these apps involve banking, paying bills, money transfer, etc. Building a finance app requires domain knowledge, and we don't want to hire a finance expert just to build an app.
#
# Now let's analyze the Google Play market a bit.
# + [markdown] id="fkw_5EcKEM2p" colab_type="text"
# ### Most Popular Apps by Genre on Google Play
#
# For the Google Play market, we actually have data about the number of installs, so we should be able to get a clearer picture about genre popularity. However, the install numbers don't seem precise enough — we can see that most values are open-ended (100+, 1,000+, 5,000+, etc.):
# + colab_type="code" id="k9XN-3Oxdw4e" outputId="4c059bef-c6a1-4c8b-8be2-a65f8fb32676" colab={"base_uri": "https://localhost:8080/", "height": 370}
# Frequency of each open-ended install tier (100+, 1,000+, ...) on Google Play.
display_table(android_final, 5) # the Installs columns
# + [markdown] colab_type="text" id="MS8oEkvcdw4g"
# One problem with this data is that it is not precise. For instance, we don't know whether an app with 100,000+ installs has 100,000 installs, 200,000, or 350,000. However, we don't need very precise data for our purposes — we only want to get an idea which app genres attract the most users, and we don't need perfect precision with respect to the number of users.
#
# We're going to leave the numbers as they are, which means that we'll consider that an app with 100,000+ installs has 100,000 installs, and an app with 1,000,000+ installs has 1,000,000 installs, and so on.
#
# To perform computations, however, we'll need to convert each install number to `float` — this means that we need to remove the commas and the plus characters, otherwise the conversion will fail and raise an error. We'll do this directly in the loop below, where we also compute the average number of installs for each genre (category).
# + colab_type="code" outputId="68579b74-d620-445b-eccb-8c66726673a7" id="RUs1BELB42qM" colab={"base_uri": "https://localhost:8080/", "height": 571}
# Average install count per Google Play category; the comma/plus characters of
# the open-ended tiers ("100,000+") are stripped before conversion to float.
android_category_freq_table = freq_table(android_final, 1)
for category in android_category_freq_table:
    installs = [
        float(row[5].replace(",", "").replace("+", ""))
        for row in android_final
        if row[1] == category
    ]
    print(category, ":", round(sum(installs) / len(installs)))
# + [markdown] colab_type="text" id="jmJZ_ZNAdw4m"
# On average, communication apps have the most installs: 38,456,119. This number is heavily skewed up by a few apps that have over one billion installs (WhatsApp, Facebook Messenger, Skype, Google Chrome, Gmail, and Hangouts), and a few others with over 100 and 500 million installs:
# + colab_type="code" id="yDgqLB3xdw4p" outputId="8b5839ac-aac6-4396-85ff-4a816a258717" colab={"base_uri": "https://localhost:8080/", "height": 470}
# Communication apps in the very top install tiers (100M+), which skew the
# category average upward.
for app in android_final:
    if app[1] == 'COMMUNICATION' and app[5] in ('1,000,000,000+', '500,000,000+', '100,000,000+'):
        print(app[0], ':', app[5])
# + [markdown] colab_type="text" id="GkB0Ewlsdw47"
# If we removed all the communication apps that have over 100 million installs, the average would be reduced roughly ten times:
# + colab_type="code" id="GVS8X24bdw48" outputId="0eaeadd0-c078-4f0b-fba5-a21dc4fb7d34" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Mean installs for communication apps once the 100M+ giants are excluded;
# the bare final expression is the cell's displayed output.
under_100_m = []
for app in android_final:
    if app[1] != 'COMMUNICATION':
        continue
    installs = float(app[5].replace(',', '').replace('+', ''))
    if installs < 100000000:
        under_100_m.append(installs)
sum(under_100_m) / len(under_100_m)
# + [markdown] colab_type="text" id="VGf0n6cydw5A"
# We see the same pattern for the video players category, which is the runner-up with 24,727,872 installs. The market is dominated by apps like Youtube, Google Play Movies & TV, or MX Player. The pattern is repeated for social apps (where we have giants like Facebook, Instagram, Google+, etc.), photography apps (Google Photos and other popular photo editors), or productivity apps (Microsoft Word, Dropbox, Google Calendar, Evernote, etc.).
#
# Again, the main concern is that these app genres might seem more popular than they really are. Moreover, these niches seem to be dominated by a few giants who are hard to compete against.
#
# The game genre seems pretty popular, but previously we found out this part of the market seems a bit saturated, so we'd like to come up with a different app recommendation if possible.
#
# The books and reference genre looks fairly popular as well, with an average number of installs of 8,767,811. It's interesting to explore this in more depth, since we found this genre has some potential to work well on the App Store, and our aim is to recommend an app genre that shows potential for being profitable on both the App Store and Google Play.
#
# Let's take a look at some of the apps from this genre and their number of installs:
# + colab_type="code" id="EsjrE2Nudw5B" outputId="bc245ebd-ea99-4aaa-9f67-4fc94050f24b" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# Every books & reference app with its install tier.
for app in android_final:
    if app[1] != 'BOOKS_AND_REFERENCE':
        continue
    print(app[0], ':', app[5])
# + [markdown] colab_type="text" id="j4vZcL-Udw5G"
# The book and reference genre includes a variety of apps: software for processing and reading ebooks, various collections of libraries, dictionaries, tutorials on programming or languages, etc. It seems there's still a small number of extremely popular apps that skew the average:
# + colab_type="code" id="sPYrMGhKdw5H" outputId="95ec4ea1-8b98-422f-ec55-2dd7ad72cb20" colab={"base_uri": "https://localhost:8080/", "height": 101}
# Books & reference apps in the very top install tiers (100M+).
for app in android_final:
    if app[1] == 'BOOKS_AND_REFERENCE' and app[5] in ('1,000,000,000+', '500,000,000+', '100,000,000+'):
        print(app[0], ':', app[5])
# + [markdown] colab_type="text" id="jGuGD7ODdw5K"
# However, it looks like there are only a few very popular apps, so this market still shows potential. Let's try to get some app ideas based on the kind of apps that are somewhere in the middle in terms of popularity (between 1,000,000 and 100,000,000 downloads):
# + colab_type="code" id="s9pL6QCddw5K" outputId="c6b8fa6c-42a1-410a-b87c-c0d93dd3f0a8" colab={"base_uri": "https://localhost:8080/", "height": 823}
# Books & reference apps of middling popularity (1M-50M install tiers).
for app in android_final:
    if app[1] == 'BOOKS_AND_REFERENCE' and app[5] in ('1,000,000+', '5,000,000+', '10,000,000+', '50,000,000+'):
        print(app[0], ':', app[5])
# + [markdown] colab_type="text" id="MnArt0Mbdw5M"
# This niche seems to be dominated by software for processing and reading ebooks, as well as various collections of libraries and dictionaries, so it's probably not a good idea to build similar apps since there'll be some significant competition.
#
# We also notice there are quite a few apps built around the book Quran, which suggests that building an app around a popular book can be profitable. It seems that taking a popular book (perhaps a more recent book) and turning it into an app could be profitable for both the Google Play and the App Store markets.
#
# However, it looks like the market is already full of libraries, so we need to add some special features besides the raw version of the book. This might include daily quotes from the book, an audio version of the book, quizzes on the book, a forum where people can discuss the book, etc.
# + [markdown] id="VZcWb5Q7EvQp" colab_type="text"
# ## Conclusions
#
# In this project, we analyzed data about the App Store and Google Play mobile apps with the goal of recommending an app profile that can be profitable for both markets.
#
# We concluded that taking a popular book (perhaps a more recent book) and turning it into an app could be profitable for both the Google Play and the App Store markets. The markets are already full of libraries, so we need to add some special features besides the raw version of the book. This might include daily quotes from the book, an audio version of the book, quizzes on the book, a forum where people can discuss the book, etc.
| Analysis_of_App_Profiles_for_the_App_Store_and_Google_Play_Markets.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# + deletable=true editable=true
from classMVA import MVA
import numpy as np
import matplotlib.pyplot as plt
# + [markdown] deletable=true editable=true
# # DATOS REALES. IMAGEN POR SATÉLITE
# + [markdown] deletable=true editable=true
#
# # 1. VALIDACIÓN SIN REGULARIZACIÓN PARA PCA
# + [markdown] deletable=true editable=true
# En primer lugar cargamos los datos:
# + deletable=true editable=true
# Download the Statlog (Landsat Satellite) training set from the UCI
# repository; each text line holds 36 integer features plus a class label.
import urllib2
from pyspark.mllib.regression import LabeledPoint
response = urllib2.urlopen("https://archive.ics.uci.edu/ml/machine-learning-databases/statlog/satimage/sat.trn")
textdata = response.read()
# One raw text line per sample; the trailing empty element is dropped later.
datos = textdata.split('\n')
# + [markdown] deletable=true editable=true
# ## 1.1 PCA CON SCIKIT LEARN
# + [markdown] deletable=true editable=true
# Creamos un numpy array con las variables y normalizamos eliminando la media
# + deletable=true editable=true
# Build a samples x features array, dropping each line's trailing class label
# and the final empty line, then center the data by subtracting the mean.
data_1_sk = np.array(map(lambda x: np.array(map(int, x.split(' ')[:-1])), datos[:-1]))
media = np.mean(data_1_sk, axis=0)
X_pca = data_1_sk - media
# + [markdown] deletable=true editable=true
# Con PCA de ScikitLearn extraemos 15 componentes y los proyectamos
# + deletable=true editable=true
# Reference solution: fit scikit-learn's PCA with 15 components and project
# the centered data onto them.
from sklearn.decomposition import PCA
nprojections = 15
pca = PCA(n_components=nprojections)
pca.fit(X_pca)
X_projected_sk = pca.transform(X_pca)
# + [markdown] deletable=true editable=true
# ## 1.2 PCA CON LA TOOLBOX
# + [markdown] deletable=true editable=true
# Creamos un RDD de Labeled Points para PCA de la Toolbox y entrenamos el modelo para 15 componentes
# + deletable=true editable=true
# Parallelize the raw text into an RDD of LabeledPoint (label = last field),
# then keep only the feature vectors for unsupervised PCA.
RDD = sc.parallelize(textdata.split('\n')[:-1]).map(lambda line: line.split(" "))
RDD_labeled=RDD.map(lambda x: LabeledPoint(x[-1],x[0:-1]))
RDD_PCA = RDD_labeled.map(lambda x: x.features)
# + deletable=true editable=true
print RDD_PCA.count()
# + deletable=true editable=true
# Fit the toolbox's PCA (no regularization) with the same 15 projections.
PCA_model = MVA('PCA','none','None',1e-5,nprojections,1e-8,1e-5,100,30)
PCA_model.fit(RDD_PCA)
# + [markdown] deletable=true editable=true
# ## 1.3 VISUALIZACIÓN
# + [markdown] deletable=true editable=true
# Dibujamos los vectores de proyección para las dos soluciones: en azul para PCA y en rojo para la Toolbox
# + deletable=true editable=true
# Projection vectors side by side: the first nine scikit-learn components in
# blue (fig1) and the first nine toolbox components in red (fig2), each drawn
# on a 3x3 grid of subplots.
fig1 = plt.figure()
for comp_idx in range(9):
    axes = fig1.add_subplot(3, 3, comp_idx + 1)
    axes.plot(pca.components_[comp_idx,], 'b-')
fig2 = plt.figure()
for comp_idx in range(9):
    axes = fig2.add_subplot(3, 3, comp_idx + 1)
    axes.plot(PCA_model._U[comp_idx,], 'r-')
plt.show()
# + [markdown] deletable=true editable=true
# ## 1.4 CÁLCULO DE MSE
# + deletable=true editable=true
# Reconstruction error for scikit-learn's PCA: for each number of projections,
# regress every original (centered) feature onto the projected data and sum
# the per-feature mean squared errors.
from sklearn.linear_model import LinearRegression
mses_sk = list()
n_targets = 36
for np2 in range(nprojections):
    mse = 0
    X_components_sk = X_projected_sk[:,:np2+1]
    for k in range(n_targets):
        targets = X_pca[:,k]
        # Fit a linear model (no intercept: the data is centered)
        lm = LinearRegression(fit_intercept=False)
        lm.fit(X_components_sk, targets)
        predictions = lm.predict(X_components_sk)
        MSE_iter = np.mean((targets-predictions)**2)
        mse += MSE_iter
    print 'MSE para ' + str(np2+1) + ' proyecciones: ' + str(mse)
    mses_sk.append(mse)
# + deletable=true editable=true
# Same reconstruction-error computation for the toolbox PCA projections,
# collecting and re-centering the data driven through the Spark model.
from sklearn.linear_model import LinearRegression
X_projected_tb = np.array(PCA_model.predict(RDD_PCA).collect())
X_pca = RDD_PCA.collect()
media = np.mean(X_pca, axis=0)
X_pca = X_pca-media
mses_tb = list()
n_targets = 36
for np2 in range(nprojections):
    mse = 0
    X_components_tb = X_projected_tb[:,:np2+1]
    for k in range(n_targets):
        targets = X_pca[:,k]
        # Fit a linear model (no intercept: the data is centered)
        lm = LinearRegression(fit_intercept=False)
        lm.fit(X_components_tb, targets)
        predictions = lm.predict(X_components_tb)
        MSE_iter = np.mean((targets-predictions)**2)
        mse += MSE_iter
    print 'MSE para ' + str(np2+1) + ' proyecciones: ' + str(mse)
    mses_tb.append(mse)
# + deletable=true editable=true
# Residual approximation error vs number of projections, scikit-learn vs toolbox.
plt.figure()
plt.plot(map(lambda x: x+1, range(nprojections)), mses_sk, 'g^', label='Scikit Learn' )
# NOTE(review): plt.hold was removed in matplotlib 2.x -- confirm this runs on
# an old matplotlib, otherwise this line raises.
plt.hold(True)
plt.plot(map(lambda x: x+1, range(nprojections)), mses_tb, 'b', label='Toolbox' )
plt.legend(loc = 1)
plt.xlabel('Number of projections')
plt.ylabel('Residual approximation error')
plt.show()
# + [markdown] deletable=true editable=true
# # 2. PCA VS OPLS (SIN REGULARIZAR)
# + [markdown] deletable=true editable=true
# Volvemos a cargar los datos.
#
# De los 7 tipos de clases que puede haber, en los datos la clase 6 nunca aparece por lo que vamos a eliminarla.
#
# + deletable=true editable=true
import urllib2
from pyspark.mllib.regression import LabeledPoint
def filter_6 (x):
    # Remap class label 7.0 onto 6.0: label 6 never occurs in this dataset,
    # so the remap keeps the label set contiguous. All other labels pass through.
    return 6.0 if x == 7.0 else x
# Reload the dataset and build LabeledPoints, remapping label 7 -> 6.
response = urllib2.urlopen("https://archive.ics.uci.edu/ml/machine-learning-databases/statlog/satimage/sat.trn")
textdata = response.read()
RDD = sc.parallelize(textdata.split('\n')[:-1]).map(lambda line: line.split(" "))
RDD_labeled=RDD.map(lambda x: LabeledPoint(x[-1],x[0:-1]))
RDD_labeled=RDD_labeled.map(lambda x: LabeledPoint(filter_6(x.label), x.features))
# + [markdown] deletable=true editable=true
# Nos quedamos con el RDD para PCA
# + deletable=true editable=true
# Feature-only RDD for the unsupervised PCA model.
RDD_PCA = RDD_labeled.map(lambda x: x.features)
# + [markdown] deletable=true editable=true
# ## 2.1 ENTRENAMOS LOS MODELOS
# + [markdown] deletable=true editable=true
# Entrenamos el modelo OPLS
# + deletable=true editable=true
# Fit the supervised OPLS model with 5 projections on the labeled RDD.
nprojections=5
OPLS_model = MVA('OPLS','none','norm',1e-5,nprojections,1e-8,1e-5,100,30)
OPLS_model.fit(RDD_labeled)
# + [markdown] deletable=true editable=true
# Entrenamos el modelo PCA
# + deletable=true editable=true
# Fit PCA with 15 projections on the unlabeled features for comparison.
nprojections=15
PCA_model = MVA('PCA','none','None',1e-5,nprojections,1e-8,1e-5,100,30)
PCA_model.fit(RDD_PCA)
# + [markdown] deletable=true editable=true
# ## 2.2 Calculamos el mse con |X-W^TX|^2 PARA OPLS Y PCA
# + deletable=true editable=true
# Input-reconstruction error |X - W^T X|^2 for the OPLS projections: regress
# each centered input feature onto the first np2+1 projected components.
from sklearn.linear_model import LinearRegression
RDD_new_feat=RDD_labeled.map(lambda x: x.features)
X_projected_tb_OPLS = np.array(OPLS_model.predict(RDD_new_feat).collect())
X_OPLS = RDD_new_feat.collect()
media = np.mean(X_OPLS, axis=0)
X_OPLS = X_OPLS-media
nprojections=5
mses_tb_opls = list()
n_targets = 36
for np2 in range(nprojections):
    mse = 0
    X_components_tb_opls = X_projected_tb_OPLS[:,:np2+1]
    for k in range(n_targets):
        targets = X_OPLS[:,k]
        # Fit a linear model (no intercept: the data is centered)
        lm = LinearRegression(fit_intercept=False)
        lm.fit(X_components_tb_opls, targets)
        predictions = lm.predict(X_components_tb_opls)
        MSE_iter = np.mean((targets-predictions)**2)
        mse += MSE_iter
    print 'MSE para ' + str(np2+1) + ' proyecciones: ' + str(mse)
    mses_tb_opls.append(mse)
# + deletable=true editable=true
# Input-reconstruction error for the PCA projections, same scheme as OPLS above.
from sklearn.linear_model import LinearRegression
X_projected_tb = np.array(PCA_model.predict(RDD_PCA).collect())
X_pca = RDD_PCA.collect()
media = np.mean(X_pca, axis=0)
X_pca = X_pca-media
nprojections=15
mses_tb = list()
n_targets = 36
for np2 in range(nprojections):
    mse = 0
    X_components_tb = X_projected_tb[:,:np2+1]
    for k in range(n_targets):
        targets = X_pca[:,k]
        # Fit a linear model (no intercept: the data is centered)
        lm = LinearRegression(fit_intercept=False)
        lm.fit(X_components_tb, targets)
        predictions = lm.predict(X_components_tb)
        MSE_iter = np.mean((targets-predictions)**2)
        mse += MSE_iter
    print 'MSE para ' + str(np2+1) + ' proyecciones: ' + str(mse)
    mses_tb.append(mse)
# + [markdown] deletable=true editable=true
# Como podemos comprobar...
# + deletable=true editable=true
# Reconstruction error vs number of projections: OPLS (5) against PCA (15).
plt.figure()
plt.plot(map(lambda x: x+1, range(5)), mses_tb_opls, 'g*', label='OPLS' )
# NOTE(review): plt.hold was removed in matplotlib 2.x -- confirm environment.
plt.hold(True)
plt.plot(map(lambda x: x+1, range(15)), mses_tb, 'b', label='PCA' )
plt.legend(loc = 1)
plt.xlabel('Number of projections')
plt.ylabel('Residual approximation error')
plt.show()
# + [markdown] deletable=true editable=true
# ## 2.3 Calculamos el mse con |Y-W^TUX|^2
# + deletable=true editable=true
# Label-prediction error |Y - W^T U X|^2 for PCA: labels are one-hot encoded
# over the six classes, centered like the features, and each label column is
# regressed onto the first np2+1 projected components.
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import label_binarize
from pyspark.mllib.linalg import Vectors
X_projected_tb = np.array(PCA_model.predict(RDD_PCA).collect())
X_pca = RDD_PCA.collect()
media = np.mean(X_pca, axis=0)
X_pca = X_pca-media
set_classes = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
Y_pca_label=RDD_labeled.map(lambda x: Vectors.dense(label_binarize([x.label], classes=set_classes).flatten()))
Y_pca=np.array(Y_pca_label.collect())
media_y=np.mean(Y_pca,axis=0)
Y_pca=Y_pca-media_y
#print Y_pca.shape()
#print Y_pca
mses_tb_pca = list()
nprojections=15
n_targets = 6
for np2 in range(nprojections):
    mse = 0
    X_components_tb = X_projected_tb[:,:np2+1]
    for k in range(n_targets):
        targets =Y_pca[:,k]
        # Fit a linear model (no intercept: the data is centered)
        lm = LinearRegression(fit_intercept=False)
        lm.fit(X_components_tb, targets)
        predictions = lm.predict(X_components_tb)
        MSE_iter = np.mean((targets-predictions)**2)
        mse += MSE_iter
    print 'MSE para ' + str(np2+1) + ' proyecciones: ' + str(mse)
    mses_tb_pca.append(mse)
# + deletable=true editable=true
# Label-prediction error for OPLS, reusing the centered one-hot Y_pca built in
# the PCA cell above.
RDD_new_feat=RDD_labeled.map(lambda x: x.features)
X_projected_tb_OPLS = np.array(OPLS_model.predict(RDD_new_feat).collect())
nprojections=5
mses_tb_opls = list()
n_targets = 6
for np2 in range(nprojections):
    mse = 0
    X_components_tb_opls = X_projected_tb_OPLS[:,:np2+1]
    for k in range(n_targets):
        targets = Y_pca[:,k]
        # Fit a linear model (no intercept: the data is centered)
        lm = LinearRegression(fit_intercept=False)
        lm.fit(X_components_tb_opls, targets)
        predictions = lm.predict(X_components_tb_opls)
        MSE_iter = np.mean((targets-predictions)**2)
        mse += MSE_iter
    print 'MSE para ' + str(np2+1) + ' proyecciones: ' + str(mse)
    mses_tb_opls.append(mse)
# + deletable=true editable=true
# Label-prediction error vs number of projections: OPLS against PCA.
plt.figure()
plt.plot(map(lambda x: x+1, range(5)), mses_tb_opls, 'g', label='OPLS' )
# NOTE(review): plt.hold was removed in matplotlib 2.x -- confirm environment.
plt.hold(True)
plt.plot(map(lambda x: x+1, range(15)), mses_tb_pca, 'b', label='PCA' )
plt.legend(loc = 1)
plt.xlabel('Number of projections')
plt.ylabel('Residual approximation error')
plt.show()
# + [markdown] deletable=true editable=true
# # 3. OPLS REGULARIZADO
# + deletable=true editable=true
# Reload the UCI satimage training set for the regularized-OPLS experiment.
import urllib2
from pyspark.mllib.regression import LabeledPoint
response = urllib2.urlopen("https://archive.ics.uci.edu/ml/machine-learning-databases/statlog/satimage/sat.trn")
textdata = response.read()
RDD = sc.parallelize(textdata.split('\n')[:-1]).map(lambda line: line.split(" "))
RDD_labeled=RDD.map(lambda x: LabeledPoint(x[-1],x[0:-1]))
# + deletable=true editable=true
def filter_6 (x):
    # Fold class label 7.0 into 6.0 (class 6 is absent from this dataset);
    # every other label is returned unchanged.
    return 6.0 if x == 7.0 else x
# Apply the label remap and rebuild the LabeledPoint RDD from (features, label)
# pairs; `label` collects the distinct remapped classes.
rdd_label= RDD_labeled.map(lambda x: x.label)
rdd_label_new=rdd_label.map(lambda x: filter_6(x))
label=rdd_label_new.distinct().collect()
new=RDD_labeled.map(lambda x: x.features).zip(rdd_label_new)
RDD_labeled_new=new.map(lambda x: LabeledPoint(x[1],x[0]))
# + deletable=true editable=true
# Three l1 regularization strengths to compare, weakest to strongest.
reg1 = 0.00001
reg2 = 0.1
reg3 = 0.99
term_reg = [reg1, reg2, reg3]
# + deletable=true editable=true
# Fit an l1-regularized OPLS (6 projections) for each strength and keep the
# resulting projection matrices for the sparsity comparison below.
nprojections=6
U_opls_reg=list()
for i in range(len(term_reg)):
    OPLS_model = MVA('OPLS','l1','norm',1e-5,nprojections,term_reg[i],1e-5,100,30)
    OPLS_model.fit(RDD_labeled_new)
    U_opls_reg.append(OPLS_model._U)
# + deletable=true editable=true
# Plot |U(k)| for the first 5 projection vectors under each regularization
# strength; one figure per projection, one colored curve per strength.
import matplotlib.pyplot as plt
for i in range(5):
    plt.figure()
    #ax1 = fig1.add_subplot(331)
    #ax1.plot(pca.components_[0,], 'b-')
    # TODO: fix (original note said "arreglar")
    plt.plot(abs(U_opls_reg[0].T[:,i]),'b',label=term_reg[0])
    # NOTE(review): plt.hold was removed in matplotlib 2.x -- confirm environment.
    plt.hold(True)
    plt.plot(abs(U_opls_reg[1].T[:,i]),'r',label=term_reg[1])
    plt.hold(True)
    plt.plot(abs(U_opls_reg[2].T[:,i]),'g',label=term_reg[2])
    plt.legend(loc = 1)
    plt.xlabel('k', fontsize=14)
    plt.ylabel('U(k)', fontsize=14)
    plt.show()
# + [markdown] deletable=true editable=true
# # DATOS SINTÉTICOS
# + [markdown] deletable=true editable=true
# # 1. PCA VS OPLS
# + [markdown] deletable=true editable=true
# Se van a crear unos datos sintéticos se podrá modificar en la siguiente celda de ajuste:
# + deletable=true editable=true
# Synthetic-data configuration.
import numpy as numpy
#Number of samples
# NOTE(review): `np` here shadows the `numpy as np` alias imported earlier in
# the notebook, which is why later cells spell out `numpy` -- consider renaming.
np=1050000
#Number of inputs
nx=10
#Number of outputs
ny=5
#Variance of X (per-feature scale factors, decreasing by decades)
VarX=numpy.array([1, 1e-1, 1e-2,1e-3,1e-4,1e-5,1e-6,1e-7,1e-8,1e-9])
# + deletable=true editable=true
# Draw X from a scaled standard normal and build targets Y = X W for a random
# linear map W. NOTE(review): multiplying randn by VarX scales the standard
# deviation, not the variance as the config comment suggests -- confirm intent.
X = numpy.random.randn(np,nx) * VarX
W=numpy.random.randn(nx,ny)
Y=X.dot(W)
# + deletable=true editable=true
# Ship the synthetic matrices to Spark as (target-row, feature-row) pairs.
RDD_X = sc.parallelize(X.tolist())
RDD_Y = sc.parallelize(Y.tolist())
RDD_labeled = RDD_Y.zip(RDD_X)
# + [markdown] deletable=true editable=true
# # 2. ESCALABILIDAD
# + deletable=true editable=true
# Sampling fractions for the scalability runs: 1%, 10%, and the full dataset.
samples = numpy.asarray((0.01, 0.1, 1))
# + deletable=true editable=true
# Subsample the data at 1%, 10% and 100% for the scalability experiment
# (sampling without replacement, fixed seed 80).
#RDD for OPLS
RDD_1=RDD_labeled.sample(False, samples[0], 80)
RDD_2=RDD_labeled.sample(False, samples[1], 80)
RDD_3=RDD_labeled
ESC_RDD_OPLS=[RDD_1,RDD_2,RDD_3]
#RDD for PCA (feature-only versions of the same three sample sizes)
RDD_PCA=RDD_labeled.map(lambda x: x[1])
RDD_1=RDD_PCA.sample(False, samples[0], 80)
RDD_2=RDD_PCA.sample(False, samples[1], 80)
RDD_3=RDD_PCA
ESC_RDD_PCA=[RDD_1,RDD_2,RDD_3]
# + deletable=true editable=true
# Time the PCA fit at each sample size (wall clock via time.localtime).
from classMVA import MVA
import time
for i in range(3):
    # NOTE(review): 'hola' looks like leftover debug output -- consider removing.
    print 'hola'
    prueba = MVA('PCA','l1','None',1e-5,10,0.01,1e-3,100,30)
    print 'INIT TIME FOR ' + str(i+1) + ' samples ' + str(time.localtime( time.time() ))
    RDD_PCA_ESC=ESC_RDD_PCA[i]
    #print RDD_PCA_ESC.count()
    prueba.fit(RDD_PCA_ESC)
    print 'END TIME FOR RDD ' + str(i+1) + ' samples ' + str(time.localtime( time.time() ))
# + deletable=true editable=true
# Time the OPLS fit at each sample size, mirroring the PCA timing loop.
from classMVA import MVA
import time
for i in range(3):
    prueba = MVA('OPLS','l1','norm',1e-5,10,0.01,1e-3,100,30)
    print 'INIT TIME FOR ' + str(i+1) + ' samples ' + str(time.localtime( time.time() ))
    RDD_OPLS_ESC=ESC_RDD_OPLS[i]
    prueba.fit(RDD_OPLS_ESC)
    print 'END TIME FOR RDD ' + str(i+1) + ' samples ' + str(time.localtime( time.time() ))
# + deletable=true editable=true
# Bar chart of the measured fit times (seconds, transcribed from the timing
# runs above) for PCA vs OPLS at each sample percentage.
import numpy as np
import matplotlib.pyplot as plt
N = 3
timePca = (124,140,211)
timeOpls=(99,153,140)
ind = np.arange(N)  # the x locations for the groups
width = 0.25       # the width of the bars
fig, ax = plt.subplots()
rects1 = ax.bar(ind, timePca, width, color='r')
rects2 = ax.bar(ind + width, timeOpls, width, color='y')
ax.set_ylabel('Time in seconds')
ax.set_xlabel('Percentage of input data')
ax.set_title('Scalability of the data')
ax.set_xticks(ind+ width / 2)
ax.set_xticklabels(('1%','10%','100%'))
ax.legend((rects1[0], rects2[0]), ('PCA', 'OPLS'),loc=2)
plt.show()
# + deletable=true editable=true
| User_guide-JAG.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Product Details
# 1. Product ID
# 2. Name of Product
# 3. Price of Product
# 4. Quantity
# +
# Bill a single product purchase against the text-file inventory; the sale is
# timestamped with time.ctime() when written to Sales.txt below.
import time

# Read the inventory; each line is "id,name,price,quantity".
# Idiom fix: use a context manager instead of manual open()/close() so the
# file is closed even if read() raises.
with open('Inventory.txt', 'r') as fd:
    products = fd.read().split('\n')

# Customer and order details.
ui_username = input("Enter your Name: ")
ui_phone = input("Enter your Phone No: ")
ui_mail = input("Enter your Mail: ")
ui_prod_id = input("Enter product ID: ")
ui_prod_qn = input("Enter product Quantity: ")

updated_product_lst = []
# Going through each product detail
for product in products:
prod_details = product.split(',')
if(prod_details[0] == ui_prod_id):
# Checking if product exists or not
if (int(ui_prod_qn) <= int(prod_details[3])):
# If we're having enough quantity
print("-----------------------------")
print("Product Name : ", prod_details[1])
print("Price : ", prod_details[2])
print("Quantity : ", ui_prod_qn)
print("-----------------------------")
print("Billing Amount : ", int(ui_prod_qn) * int(prod_details[2]))
print("-----------------------------")
# Updating Inventory list
prod_details[3] = str(int(prod_details[3]) - int(ui_prod_qn))
# Generating Sales in Sales.txt
fd = open("Sales.txt",'a')
sales_detail = ui_username +","+ ui_phone +","+ ui_mail +","+prod_details[1] +","+ ui_prod_id +","+ ui_prod_qn +","+ str(int(ui_prod_qn) * int(prod_details[2]))+","+time.ctime()+ "\n"
fd.write(sales_detail)
fd.close()
else:
# If we're not having enough quantity
print("Sorry, We're not having enought quantity.")
print("We're having only",prod_details[3],'quantity.')
print("Would you like to purchase it?")
ch = input("Press Y/N: ")
if (ch == 'Y' or ch == 'y'):
# If you want to purchase with remaining quantity
print("-----------------------------")
print("Product Name : ", prod_details[1])
print("Price : ", prod_details[2])
print("Quantity : ", prod_details[3])
print("-----------------------------")
print("Billing Amount : ", int(prod_details[3]) * int(prod_details[2]))
print("-----------------------------")
# Generating Sales in Sales.txt
fd = open("Sales.txt",'a')
sales_detail = ui_username +","+ ui_phone +","+ ui_mail +","+prod_details[1] +","+ ui_prod_id +","+ prod_details[3] +","+ str(int(prod_details[3]) * int(prod_details[2]))+","+time.ctime()+ "\n"
fd.write(sales_detail)
fd.close()
# Updating Inventory list
prod_details[3] = '0'
else:
print("Thanks")
# Updating my Inventory List
updated_product_lst.append(prod_details)
lst = []
# Updating my Inventory String
for i in updated_product_lst:
prod = i[0] +","+ i[1] +","+ i[2] +","+ i[3] + '\n'
lst.append(prod)
# Removing Last \n from the list
lst[-1] = lst[-1][:-1]
# Updating Inventory File
fd = open('Inventory.txt','w')
for i in lst:
fd.write(i)
fd.close()
print("Inventory Updated")
# -
| 3. Inventory Management System with Files/6. Conclusion/Inventory Management System - Conclusion.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # <font color='blue'>Data Science Academy - Python Fundamentos - Capítulo 3</font>
#
# ## Download: http://github.com/dsacademybr
# ## Exercícios - Loops e Condiconais - Solução
# Exercise 1 - Ask the user for the day of the week. If the day is Sunday or
# Saturday, print "Hoje é dia de descanso" (rest day), otherwise print
# "Você precisa trabalhar!" (you need to work).
dia = input('Digite o dia da semana: ')
if dia == 'Domingo' or dia == 'Sábado':
    print("Hoje é dia de descanso")
else:
    print("Você precisa trabalhar!")
# Exercise 2 - Create a list of 5 fruits and check whether 'Morango'
# (strawberry) is part of the list.
lista = ['Laranja', 'Maça', 'Abacaxi', 'Uva', 'Morango']
for fruta in lista:
    if fruta == 'Morango':
        print("Morango faz parte da lista de frutas")
# Exercise 3 - Create a 4-element tuple, multiply each element by 2 and
# collect the results in a list.
tup1 = (1, 2, 3, 4)
lst1 = []
for i in tup1:
    novo_valor = i * 2
    lst1.append(novo_valor)
print(lst1)
# Exercise 4 - Print the even numbers between 100 and 150.
for i in range(100, 151, 2):
    print(i)
# Exercise 5 - Create a variable temperatura with value 40. While it is
# greater than 35, print the temperatures.
temperatura = 40
while temperatura > 35:
    print(temperatura)
    temperatura = temperatura - 1
# Exercise 6 - Create contador = 0. While it is below 100, print the values,
# but stop the loop as soon as 23 is reached.
contador = 0
while contador < 100:
    if contador == 23:
        break
    print(contador)
    contador += 1
# Exercise 7 - Starting from 4, while the value is <= 20, append only even
# values to an (initially empty) list and print it.
numeros = list()
i = 4
while (i <= 20):
    numeros.append(i)
    i = i+2
print(numeros)
# Exercise 8 - Turn the result of this range into a list: range(5, 45, 2).
nums = range(5, 45, 2)
print(list(nums))
# Exercise 9 - Corrected version of a buggy snippet (the 3 errors are fixed).
temperatura = float(input('Qual a temperatura? '))
if temperatura > 30:
    print('Vista roupas leves.')
else:
    print('Busque seus casacos.')
# +
# Exercise 10 - Count how many times the letter "r" appears in the quote
# below (Machado de Assis) and print the count using a placeholder.
frase = "É melhor, muito melhor, contentar-se com a realidade; se ela não é tão brilhante como os sonhos, tem pelo menos a vantagem de existir."
count = 0
for caracter in frase:
    if caracter == 'r':
        count += 1
print("O caracter r aparece %s vezes na frase." %(count))
# -
# # Fim
# ### Obrigado - Data Science Academy - <a href="http://facebook.com/dsacademybr">facebook.com/dsacademybr</a>
| Cap03/Notebooks/DSA-Python-Cap03-Exercicios-Loops-Condiconais-Solucao.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pymongo
from scrape_mars import scrape
####################function
def mars_insert(page):
    """Insert one scraped-page dictionary into mars_news_db.page_data."""
    # Connect to the local MongoDB instance; database and collection are
    # created lazily on first insert.
    client = pymongo.MongoClient('mongodb://localhost:27017')
    client.mars_news_db.page_data.insert_one(page)
    return None
####################function
def mars_search():
    """Return the most recently inserted record from mars_news_db.page_data."""
    client = pymongo.MongoClient('mongodb://localhost:27017')
    collection = client.mars_news_db.page_data
    # Newest record first: sort on _id descending and take the single result.
    latest = collection.find().sort('_id', -1).limit(1)[0]
    return latest
# Fetch and inspect the latest scraped record.
# (Renamed from `dict`, which shadowed the builtin.)
latest = mars_search()
latest['news_title']

# The scratch cells below had several defects, fixed here:
#   * `print(len(record)` was missing its closing parenthesis (SyntaxError),
#   * `count_documents(collection)` is not a free function — it is a method:
#     collection.count_documents({}),
#   * `collection` was never defined at module scope (it was local to the
#     helper functions above).
conn = 'mongodb://localhost:27017'
client = pymongo.MongoClient(conn)
collection = client.mars_news_db.page_data

record_count = collection.count_documents({})
if record_count == 0:
    # Nothing stored yet — the scraper needs to run first.
    print('call scrape')
else:
    record = collection.find().sort('_id', -1).limit(1)[0]
    print(record)
| mission_to_mars/write_read_mars_db.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
# based on sentence transformer from here: https://github.com/UKPLab/sentence-transformers/blob/master/examples/applications/clustering/kmeans.py
import pandas as pd

# Load the CrisisLogger transcripts and reshape them like a classification
# dataset: a constant LABEL_COLUMN (0) plus the transcript text.
df = pd.read_csv('Data/CrisisLogger/crisislogger.csv')
new_df=df.drop(columns='upload_id').rename(columns={'transcriptions':'DATA_COLUMN'})
new_df.insert(0, 'LABEL_COLUMN', 0)
# The clustering below works directly on the raw transcript strings.
corpus = new_df['DATA_COLUMN'].to_numpy()
# +
from sentence_transformers import SentenceTransformer
from sklearn.cluster import AgglomerativeClustering
import numpy as np

# Pretrained models are described at https://www.sbert.net/docs/pretrained_models.html
# embedder = SentenceTransformer('all-MiniLM-L6-v2')
embedder = SentenceTransformer('all-distilroberta-v1')
# One embedding vector per transcript.
corpus_embeddings = embedder.encode(corpus)
# +
# Normalize the embeddings to unit length (row-wise L2) so that Euclidean
# distances between them relate monotonically to cosine distances.
corpus_embeddings = corpus_embeddings / np.linalg.norm(corpus_embeddings, axis=1, keepdims=True)

# Agglomerative clustering; n_clusters=None plus distance_threshold lets the
# dendrogram cut decide how many clusters emerge.
clustering_model = AgglomerativeClustering(n_clusters=None, distance_threshold=1.5) #, affinity='cosine', linkage='average', distance_threshold=0.4)
clustering_model.fit(corpus_embeddings)
cluster_assignment = clustering_model.labels_
# +
# Group the corpus sentences by their assigned cluster id.
# dict.setdefault replaces the manual "key not present" check.
clustered_sentences = {}
for sentence_id, cluster_id in enumerate(cluster_assignment):
    clustered_sentences.setdefault(cluster_id, []).append(corpus[sentence_id])
# -
print("Total clusters =", len(clustered_sentences))
# For each cluster, print its size and its first sentence as a sample.
for i, cluster in clustered_sentences.items():
    print("Cluster ", i+1, ", size =", len(cluster))
    print(cluster[0])
    print("")
| Agglomerative clusters - sentence-xformers sklearn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import syft as sy
from syft import grid as gr
import torch as th
from syft.lib import lib_ast

# Spin up an in-process "virtual machine" worker and get a client handle to it.
bob = sy.VirtualMachine(name="Bob")
client = bob.get_client()

# Remote tensor ops executed through the client handle.
out = client.torch.ones(2,3)
y = out + out
# Presumably dropping the local handle also frees the remote tensor — the
# store is inspected right after to verify. TODO confirm.
del y
bob.store

# A second, independent VM.
_alice = sy.VirtualMachine(name="Alice")
alice = _alice.get_client()
p = alice.torch.zeros(3,4)
| examples/experimental/trask/Dev.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 0.6.1
# language: julia
# name: julia-0.6
# ---
using GraphViz

# Add 5 worker processes for the distributed run.
addprocs(5);

include("../daggen.jl")
using DagScheduler

# Start from a clean MemPool scratch directory so stale cached data cannot
# interfere with the timing below.
isdir(".mempool") && rm(".mempool"; recursive=true)
runenv = RunEnv();

# Build a deep DAG of 6^4 unit-weight tasks and visualise its plan.
dag1 = gen_straight_dag(ones(Int, 6^4))
Graph(Dagger.show_plan(dag1))

# Time the scheduled execution, then tear everything down.
@time rundag(runenv, dag1)
cleanup(runenv)
@everywhere MemPool.cleanup()
rmprocs(workers())
| test/notebooks/deep_dag.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Preprocessing
#
# [](https://colab.research.google.com/github/deepset-ai/haystack/blob/master/tutorials/Tutorial8_Preprocessing.ipynb)
#
# Haystack includes a suite of tools to extract text from different file types, normalize white space
# and split text into smaller pieces to optimize retrieval.
# These data preprocessing steps can have a big impact on the system's performance, and effective handling of data is key to getting the most out of Haystack.
# + [markdown] pycharm={"name": "#%% md\n"}
# Ultimately, Haystack expects data to be provided as a list of documents in the following dictionary format:
# ``` python
# docs = [
# {
# 'text': DOCUMENT_TEXT_HERE,
# 'meta': {'name': DOCUMENT_NAME, ...}
# }, ...
# ]
# ```
# + [markdown] pycharm={"name": "#%% md\n"}
# This tutorial will show you all the tools that Haystack provides to help you cast your data into this format.
# + pycharm={"name": "#%%\n"}
# Let's start by installing Haystack
# Install the latest release of Haystack in your own environment
# #! pip install farm-haystack
# Install the latest master of Haystack
# !pip install grpcio-tools==1.34.1
# !pip install git+https://github.com/deepset-ai/haystack.git
# !wget --no-check-certificate https://dl.xpdfreader.com/xpdf-tools-linux-4.03.tar.gz
# !tar -xvf xpdf-tools-linux-4.03.tar.gz && sudo cp xpdf-tools-linux-4.03/bin64/pdftotext /usr/local/bin
# + pycharm={"name": "#%%\n"}
# Here are the imports we need
from haystack.file_converter.txt import TextConverter
from haystack.file_converter.pdf import PDFToTextConverter
from haystack.file_converter.docx import DocxToTextConverter
from haystack.preprocessor.utils import convert_files_to_dicts, fetch_archive_from_http
from haystack.preprocessor.preprocessor import PreProcessor
# + pycharm={"name": "#%%\n"}
# Download and unpack the sample files used throughout this tutorial.
doc_dir = "data/preprocessing_tutorial"
s3_url = "https://s3.eu-central-1.amazonaws.com/deepset.ai-farm-qa/datasets/documents/preprocessing_tutorial.zip"
fetch_archive_from_http(url=s3_url, output_dir=doc_dir)
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Converters
#
# Haystack's converter classes are designed to help you turn files on your computer into the documents
# that can be processed by the Haystack pipeline.
# There are file converters for txt, pdf, docx files as well as a converter that is powered by Apache Tika.
# + pycharm={"name": "#%%\n"}
# Examples of the individual file converters; each call returns one document.
converter = TextConverter(remove_numeric_tables=True, valid_languages=["en"])
doc_txt = converter.convert(file_path="data/preprocessing_tutorial/classics.txt", meta=None)

converter = PDFToTextConverter(remove_numeric_tables=True, valid_languages=["en"])
doc_pdf = converter.convert(file_path="data/preprocessing_tutorial/bert.pdf", meta=None)

converter = DocxToTextConverter(remove_numeric_tables=True, valid_languages=["en"])
doc_docx = converter.convert(file_path="data/preprocessing_tutorial/heavy_metal.docx", meta=None)
# + pycharm={"name": "#%%\n"}
# Convenience helper: applies the right converter to every file in a directory.
all_docs = convert_files_to_dicts(dir_path="data/preprocessing_tutorial")
# + [markdown] pycharm={"name": "#%% md\n"}
# ## PreProcessor
#
# The PreProcessor class is designed to help you clean text and split text into sensible units.
# File splitting can have a very significant impact on the system's performance and is absolutely mandatory for Dense Passage Retrieval models.
# In general, we recommend you split the text from your files into small documents of around 100 words for dense retrieval methods
# and no more than 10,000 words for sparse methods.
# Have a look at the [Preprocessing](https://haystack.deepset.ai/docs/latest/preprocessingmd)
# and [Optimization](https://haystack.deepset.ai/docs/latest/optimizationmd) pages on our website for more details.
# + pycharm={"name": "#%%\n"}
# Default PreProcessor usage: cleans consecutive whitespace and splits one
# large document into ~100-word documents without breaking mid-sentence.
# The single input document ends up split into several smaller ones.
preprocessor = PreProcessor(
    clean_empty_lines=True,
    clean_whitespace=True,
    clean_header_footer=False,
    split_by="word",
    split_length=100,
    split_respect_sentence_boundary=True
)
docs_default = preprocessor.process(doc_txt)
print(f"n_docs_input: 1\nn_docs_output: {len(docs_default)}")
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Cleaning
#
# - `clean_empty_lines` will normalize 3 or more consecutive empty lines to be just a two empty lines
# - `clean_whitespace` will remove any whitespace at the beginning or end of each line in the text
# - `clean_header_footer` will remove any long header or footer texts that are repeated on each page
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Splitting
# By default, the PreProcessor will respect sentence boundaries, meaning that documents will not start or end
# midway through a sentence.
# This will help reduce the possibility of answer phrases being split between two documents.
# This feature can be turned off by setting `split_respect_sentence_boundary=False`.
# + pycharm={"name": "#%%\n"}
# Compare the end of the first split document with and without
# split_respect_sentence_boundary.
preprocessor_nrsb = PreProcessor(split_respect_sentence_boundary=False)
docs_nrsb = preprocessor_nrsb.process(doc_txt)

print("RESPECTING SENTENCE BOUNDARY")
end_text = docs_default[0]["text"][-50:]
print("End of document: \"..." + end_text + "\"")
print()
print("NOT RESPECTING SENTENCE BOUNDARY")
end_text_nrsb = docs_nrsb[0]["text"][-50:]
print("End of document: \"..." + end_text_nrsb + "\"")
# + [markdown] pycharm={"name": "#%% md\n"}
# A commonly used strategy to split long documents, especially in the field of Question Answering,
# is the sliding window approach. If `split_length=10` and `split_overlap=3`, your documents will look like this:
#
# - doc1 = words[0:10]
# - doc2 = words[7:17]
# - doc3 = words[14:24]
# - ...
#
# You can use this strategy by following the code below.
# + pycharm={"name": "#%%\n"}
# Sliding-window splitting: 10-word documents overlapping by 3 words.
preprocessor_sliding_window = PreProcessor(
    split_overlap=3,
    split_length=10,
    split_respect_sentence_boundary=False
)
docs_sliding_window = preprocessor_sliding_window.process(doc_txt)

# Show the overlap between consecutive documents.
doc1 = docs_sliding_window[0]["text"][:200]
doc2 = docs_sliding_window[1]["text"][:100]
doc3 = docs_sliding_window[2]["text"][:100]

print("Document 1: \"" + doc1 + "...\"")
print("Document 2: \"" + doc2 + "...\"")
print("Document 3: \"" + doc3 + "...\"")
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Bringing it all together
# + pycharm={"name": "#%%\n"}
# Full pipeline: convert every file in the directory, preprocess each
# resulting document, then flatten the per-file lists into one list.
all_docs = convert_files_to_dicts(dir_path="data/preprocessing_tutorial")
preprocessor = PreProcessor(
    clean_empty_lines=True,
    clean_whitespace=True,
    clean_header_footer=False,
    split_by="word",
    split_length=100,
    split_respect_sentence_boundary=True
)
nested_docs = [preprocessor.process(d) for d in all_docs]
docs = [d for x in nested_docs for d in x]
print(f"n_files_input: {len(all_docs)}\nn_docs_output: {len(docs)}")
# + [markdown] pycharm={"name": "#%% md\n"}
# ## About us
#
# This [Haystack](https://github.com/deepset-ai/haystack/) notebook was made with love by [deepset](https://deepset.ai/) in Berlin, Germany
#
# We bring NLP to the industry via open source!
# Our focus: Industry specific language models & large scale QA systems.
#
# Some of our other work:
# - [German BERT](https://deepset.ai/german-bert)
# - [GermanQuAD and GermanDPR](https://deepset.ai/germanquad)
# - [FARM](https://github.com/deepset-ai/FARM)
#
# Get in touch:
# [Twitter](https://twitter.com/deepset_ai) | [LinkedIn](https://www.linkedin.com/company/deepset-ai/) | [Slack](https://haystack.deepset.ai/community/join) | [GitHub Discussions](https://github.com/deepset-ai/haystack/discussions) | [Website](https://deepset.ai)
#
# By the way: [we're hiring!](https://apply.workable.com/deepset/)
#
| tutorials/Tutorial8_Preprocessing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import gradio as gr
def greet(name):
    """Return the greeting shown in the Gradio output textbox for *name*."""
    return "".join(("Hello ", name, "!!"))
# Wire the function into a one-textbox-in, one-textbox-out Gradio UI
# and start the local web server.
iface = gr.Interface(fn=greet, inputs="text", outputs="text")
iface.launch()
# -
# Relaunch the interface (e.g. after editing the function in the notebook).
iface.launch()
| gradio/example_gradio.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.10 64-bit
# name: python3
# ---
# +
import pandas as pd

# Load one smart-watch recording and apply the project's preprocessing.
df = pd.read_csv("data/0001.csv")
from utils.utils import preprocess_watch_data
df = preprocess_watch_data(df)
import matplotlib.pyplot as plt
# Gyroscope traces: x/y/z against time, sharing both axes for comparability.
fig, (ax1,ax2,ax3) = plt.subplots(3,sharex=True,sharey=True)
import seaborn as sns
fig.set_size_inches(8.5,11)
sns.lineplot(ax=ax1,data=df,x='gyr_t',y='gyr_x')
sns.lineplot(ax=ax2,data=df,x='gyr_t',y='gyr_y')
sns.lineplot(ax=ax3,data=df,x='gyr_t',y='gyr_z')
# Value distributions of the same three channels.
fig, (ax1,ax2,ax3) = plt.subplots(3,sharex=True,sharey=True)
fig.set_size_inches(8.5,11)
sns.histplot(ax=ax1,data=df,x="gyr_x")
sns.histplot(ax=ax2,data=df,x="gyr_y")
sns.histplot(ax=ax3,data=df,x="gyr_z")
# +
from pytransform3d.rotations import quaternion_integrate
import numpy as np

# Angular-velocity array with rotation only about the y axis:
# columns are (0, gyr_y, 0).
o = pd.concat([pd.DataFrame(np.zeros_like(df["gyr_y"])),df["gyr_y"],pd.DataFrame(np.zeros_like(df["gyr_y"]))],axis=1) # angular velocity
o
# Integrate the angular velocities into orientation quaternions; keep only
# the first 100 samples, then plot/save them with the project helper.
Q = quaternion_integrate(np.array(o))
Q = Q[:100]
from utils.utils import plot
plot(Q,len(Q),save=True)
# -
| main.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import seaborn as sns

# Survey responses from a questionnaire; one row per respondent.
Data = pd.read_csv('NIGERIA_DEMOGRAPHICS_DATA.csv')
Data.head()
Data.columns.values
# The column headers are full question sentences — rename them to short,
# analysis-friendly names below.
# Map each verbose questionnaire header to a short column name.
Data.rename(columns = {'Are you a Nigerian?': 'Nigerian Citizen',
                'Age Group': 'Age',
                'Are you married?': 'Married',
                'Do you have kids?': 'Parenting' ,
                'Where do you live?': 'State of Residence',
                'What part of your state do you live in?': 'Area of Residence',
                'Are you Happy with your job?': 'Job Satisfaction',
                'Is this your primary source of income?': 'Primary Income(Job)',
                'What is your steady/constant income range in a month?': 'Monthly Income(Job)',
                'Do you think it is enough for you and your family?': 'Income sufficiency',
                'What are your other sources of income? ': 'Secondary income',
                'Do you pay your tax?': 'Tax Payment',
                'What is your largest form of expense': 'Major Monthly Expense',
                'If there was one thing you really wanted to own, what would that be?': 'Aspiring Possession',
                'How much do you think you earn from all other sources of income in a month': 'Cumulative Secondary Income',
                'On an average, how much do you think you spend in a month?': 'Average Monthly Expenditure',
                'Do you think Nigeria can get better?': 'Is Nigeria Hopeful',
                'In a short sentence, describe your government': 'Thought about the government',
                'Please fill your email if you want to be notified once this research is completed.': 'E-mails'
               }, inplace= True
           )
Data.head()
Data.info()
Data.shape
# Timestamp and E-mails have little or no bearing on the analysis — drop them.
Data.drop(['Timestamp','E-mails'], axis = 1, inplace = True)
Data.isnull().sum()
# From the above result, it is discovered that some column values are missing. There are various methods of fixing this. Since we do not have the luxury of datasets, we will be mindful of the method we opt for.
#
# <li>For the 'Nigerian Citizen', we will be replacing the missing values with the most frequent in the column.</li>
# <li>The 'Occupation', 'Secondary income','Cumulative Secondary Income' and 'Thought about the government' columns will be filled with 'No', 'None', 0 and 'No commment' respectively.</li>
# This is because they all have just one missing value and we will choose to almost reflect them with a null input.
Data.describe(include='all')
# ### Fill up all the missing values
# Each column has at most one missing value; impute with answers close to
# "no response": the most frequent value for citizenship, 'No'/'None'/0 for
# the income-related fields, and forward-fill for the free-text opinion.
Data.loc[ :,('Nigerian Citizen')] = Data['Nigerian Citizen'].fillna('Yes')
Data.loc[:,('Occupation')]=Data['Occupation'].fillna('No')
Data.loc[:,('Secondary income')] = Data['Secondary income'].fillna('None')
Data.loc[:,('Cumulative Secondary Income')]= Data['Cumulative Secondary Income'].fillna(0)
Data.loc[:,('Thought about the government')]= Data['Thought about the government'].fillna(method='pad')
Data.info()
# ## How much do most respondents earn in a month?
data=Data['Monthly Income(Job)'].value_counts()
data
Data['Age'].value_counts()
Data['Occupation'].value_counts()
# %matplotlib inline
import matplotlib.pyplot as plt
# Bar chart of respondents per income bracket.
data.plot(kind='bar',)
plt.title('Monthly Income Rate')
plt.ylabel('Count')
plt.xlabel('Income Range')
plt.xticks(rotation = 90)
plt.show()
Data.columns
# ## Replace each monthly-income bracket with its midpoint (in naira).
# Bug fix: '110,000 – 200,000' was previously mapped to 15500 — the midpoint
# of the 11,000–20,000 bracket — instead of its own midpoint 155000, which
# silently merged two very different income groups. A single dict-based
# replace also removes the ten chained .replace() calls.
income_midpoints = {
    'Below 5000': 2500,
    '5000 – 10,000': 7500,
    '11,000 – 20,000': 15500,
    '21,000 – 50,000': 35500,
    '51,000 – 80,000': 65500,
    '81,000 – 100,000': 90500,
    '110,000 – 200,000': 155000,  # was 15500 (typo)
    '201,000 – 300,000': 250500,
    '301,000 – 500,000': 400500,
    '500,000 – above': 600000,    # open-ended bracket: representative value
}
Data['Monthly Income(Job)'] = Data['Monthly Income(Job)'].replace(income_midpoints)
Data['Monthly Income(Job)'].head(5)
Data.head()
# ## Replace each age bracket with its (rounded) midpoint.
age_midpoints = {
    '18 – 25': 22,
    '26 – 35': 31,
    '36 – 45': 41,
    '46 – 55': 51,
    '56 – 65': 61,
}
Data['Age'] = Data['Age'].replace(age_midpoints)
Data['Age'].head(5)
# ### Derive the overall monthly income and what is left after all expenses.
Data['Grand Monthly Income'] = Data['Monthly Income(Job)'] + Data['Cumulative Secondary Income']
Data['Balance(Monthly)'] = Data['Grand Monthly Income'] - Data['Average Monthly Expenditure']
Data.head()
Data['Balance(Monthly)'].value_counts()
# Inspect the extreme balances — the rows matched below look like
# data-entry outliers rather than plausible answers.
Data['Balance(Monthly)'].max()
outlier = Data['Balance(Monthly)'] == 9999995500.0
Data[outlier]
Data['Balance(Monthly)'].min()
underlier = Data['Balance(Monthly)'] == -39999400000.0
Data[underlier]
# # Data Visualization
# Count plots for the main categorical answers.
plt.rcParams['figure.figsize'] = (10, 5)
sns.countplot(Data['Job Satisfaction'], palette = 'pink')
plt.title('People Opinions about Job Satisfaction', fontsize = 20)
plt.show()
plt.rcParams['figure.figsize'] = (10, 5)
sns.countplot(Data['Area of Residence'], palette = 'pink')
plt.title('Area of Residence', fontsize = 20)
plt.show()
Data.columns
plt.rcParams['figure.figsize'] = (10, 5)
sns.countplot(Data['Age'], palette = 'pink')
plt.title('Age', fontsize = 20)
plt.show()
plt.rcParams['figure.figsize'] = (10, 5)
sns.countplot(Data['Tax Payment'], palette = 'pink')
plt.title('Tax Payment', fontsize = 20)
plt.show()
plt.rcParams['figure.figsize'] = (10, 5)
sns.countplot(Data['Is Nigeria Hopeful'], palette = 'pink')
plt.title('Is Nigeria Hopeful', fontsize = 20)
plt.show()
# The states need a wider figure and a plain bar chart — there are many.
plt.rcParams['figure.figsize'] = (30, 10)
Data['State of Residence'].value_counts().plot(kind = 'bar')
plt.title('State of Residence', fontsize = 20)
plt.show()
| AI SATURDAY - DS PROJECT.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import Select
import time
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
import re
URL = 'https://www.pakwheels.com/'
# +
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities

# Launch a local Chrome session and open the listings site.
# NOTE(review): executable_path is hard-coded to one machine; consider
# webdriver_manager (imported above) for portability.
options = webdriver.ChromeOptions()
driver = webdriver.Chrome(executable_path='/home/daniyal214/Desktop/Data Science/chromedriver',
                          options = options)
driver.get(URL)
# +
# driver.find_element_by_xpath('//*[@id="om-lightbox-modern-optin-wrap"]/a').click()
# -
# Open the "browse by make" dropdown (note: the site's element id is "browes_make_id").
driver.find_element_by_xpath('//*[@id="browes_make_id"]').click()
# # Toyota
driver.find_element_by_link_text('Toyota').click()
# +
# featured = driver.find_elements_by_css_selector('.classified-listing.featured-listing')
# cards = driver.find_elements_by_css_selector('.classified-listing ')
# +
# 1578-100
# +
# brand = []
# year = []
# fuelType = []
# transmission = []
# cc = []
# km_driven = []
# price = []
# description = []
# url = []
# -
import json  # fix: json.loads is used below, but `import json` only appeared
             # in a later cell, so a top-to-bottom run raised NameError here.

# Walk up to 1500 result pages, harvesting the JSON-LD blob embedded in each
# listing card, until the "Next" link disappears.
# NOTE(review): the accumulator lists (brand, year, ...) are expected to exist
# already — their initialisations above are commented out.
for j in range(1500):
    time.sleep(10)  # let the page and its lazy-loaded cards render
    cards = driver.find_elements_by_css_selector('.classified-listing ')
    for i in cards:
        try:
            lib = i.find_element_by_tag_name('script').get_attribute('innerHTML')
            item = json.loads(lib.replace('\n', ''))
            brand.append(item['brand']['name'])
            year.append(item['modelDate'])
            fuelType.append(item['fuelType'])
            transmission.append(item['vehicleTransmission'])
            cc.append(item['vehicleEngine']['engineDisplacement'])
            km_driven.append(item['mileageFromOdometer'])
            price.append(item['offers']['price'])
            url.append(item['offers']['url'])
            description.append(item['name'])
        except NoSuchElementException:
            # Cards without an embedded script tag (ads etc.) are skipped.
            pass
    try:
        time.sleep(3)
        element = driver.find_element_by_link_text('Next ›')
        actions = ActionChains(driver)
        actions.move_to_element(element).perform()
        time.sleep(5)
        element.click()
    except NoSuchElementException:
        # No "Next" link: last result page reached.
        break
len(brand)
# Assemble the scraped columns into a DataFrame and persist it.
toyota = pd.DataFrame({'Make':brand, 'Model_Year':year, 'Fuel_Type':fuelType,
                       'Transmission':transmission, 'Engine_Capacity':cc, 'Kms_Driven':km_driven,
                       'Price':price, 'Url':url, 'Description':description})
toyota.head()
# Re-visited pages can yield duplicate listings; drop exact duplicates first.
toyota.drop_duplicates(inplace=True)
toyota.to_csv('Toyota2', index=False)
toyota.shape
# +
# toyota['Description'][0]
# +
# import re
# +
# s = toyota['Description'][0]
# +
# for i in toyota['Description']:
# print(re.findall(r'([a-zA-Z ]*)', i))
# -
# # Daihatsu
driver.find_element_by_link_text('Daihatsu').click()
# +
#62
# -
100-61
# +
# brand = []
# year = []
# fuelType = []
# transmission = []
# cc = []
# km_driven = []
# price = []
# description = []
# url = []
# +
import json

# Same JSON-LD card scrape as the Toyota loop, limited to 39 result pages.
# The accumulator lists (brand, year, ...) are shared across the make
# sections — their re-initialisations above are commented out on purpose.
for j in range(39):
    time.sleep(10)
    cards = driver.find_elements_by_css_selector('.classified-listing ')
    for i in cards:
        try:
            lib = i.find_element_by_tag_name('script').get_attribute('innerHTML')
            item = json.loads(lib.replace('\n', ''))
            brand.append(item['brand']['name'])
            year.append(item['modelDate'])
            fuelType.append(item['fuelType'])
            transmission.append(item['vehicleTransmission'])
            cc.append(item['vehicleEngine']['engineDisplacement'])
            km_driven.append(item['mileageFromOdometer'])
            price.append(item['offers']['price'])
            url.append(item['offers']['url'])
            description.append(item['name'])
        except NoSuchElementException:
            pass
    try:
        time.sleep(3)
        element = driver.find_element_by_link_text('Next ›')
        actions = ActionChains(driver)
        actions.move_to_element(element).perform()
        time.sleep(5)
        element.click()
    except NoSuchElementException:
        break
# -
# # MG
# Open the MG listings, then walk the result pages collecting the
# JSON metadata embedded in each listing card's <script> tag.
driver.find_element_by_link_text('MG').click()
# +
# brand = []
# year = []
# fuelType = []
# transmission = []
# cc = []
# km_driven = []
# price = []
# description = []
# url = []
# +
import json
for _page in range(98):
    time.sleep(10)  # give the listing page time to render
    for card in driver.find_elements_by_css_selector('.classified-listing '):
        try:
            raw = card.find_element_by_tag_name('script').get_attribute('innerHTML')
        except NoSuchElementException:
            continue  # card carries no embedded metadata script
        item = json.loads(raw.replace('\n', ''))
        brand.append(item['brand']['name'])
        year.append(item['modelDate'])
        fuelType.append(item['fuelType'])
        transmission.append(item['vehicleTransmission'])
        cc.append(item['vehicleEngine']['engineDisplacement'])
        km_driven.append(item['mileageFromOdometer'])
        price.append(item['offers']['price'])
        url.append(item['offers']['url'])
        description.append(item['name'])
    try:
        time.sleep(3)
        next_link = driver.find_element_by_link_text('Next ›')
        ActionChains(driver).move_to_element(next_link).perform()
        time.sleep(5)
        next_link.click()
    except NoSuchElementException:
        break  # no "Next" link -- last page reached
# -
# Assemble the scraped columns into a frame, drop duplicate listings,
# and persist the result to disk.
mg = pd.DataFrame({
    'Make': brand,
    'Model_Year': year,
    'Fuel_Type': fuelType,
    'Transmission': transmission,
    'Engine_Capacity': cc,
    'Kms_Driven': km_driven,
    'Price': price,
    'Url': url,
    'Description': description,
})
mg.head()
mg.drop_duplicates(inplace=True)
mg.to_csv('MG', index=False)
mg.shape
# # <NAME>
# Open this brand's listings, then walk the result pages collecting the
# JSON metadata embedded in each listing card's <script> tag.
driver.find_element_by_link_text('<NAME>').click()
# +
# brand = []
# year = []
# fuelType = []
# transmission = []
# cc = []
# km_driven = []
# price = []
# description = []
# url = []
# +
import json
for _page in range(100):
    time.sleep(10)  # give the listing page time to render
    for card in driver.find_elements_by_css_selector('.classified-listing '):
        try:
            raw = card.find_element_by_tag_name('script').get_attribute('innerHTML')
        except NoSuchElementException:
            continue  # card carries no embedded metadata script
        item = json.loads(raw.replace('\n', ''))
        brand.append(item['brand']['name'])
        year.append(item['modelDate'])
        fuelType.append(item['fuelType'])
        transmission.append(item['vehicleTransmission'])
        cc.append(item['vehicleEngine']['engineDisplacement'])
        km_driven.append(item['mileageFromOdometer'])
        price.append(item['offers']['price'])
        url.append(item['offers']['url'])
        description.append(item['name'])
    try:
        time.sleep(3)
        next_link = driver.find_element_by_link_text('Next ›')
        ActionChains(driver).move_to_element(next_link).perform()
        time.sleep(5)
        next_link.click()
    except NoSuchElementException:
        break  # no "Next" link -- last page reached
# -
# Assemble the Mercedes-Benz listings into a frame, drop duplicate
# rows, and persist the result to disk.
mercedes = pd.DataFrame({'Make': brand, 'Model_Year': year, 'Fuel_Type': fuelType,
                         'Transmission': transmission, 'Engine_Capacity': cc, 'Kms_Driven': km_driven,
                         'Price': price, 'Url': url, 'Description': description})
mercedes.head()
mercedes.drop_duplicates(inplace=True)
# BUG FIX: the original wrote this frame to a file named 'Hyundai' (stale
# copy/paste from another brand section), silently overwriting that export.
mercedes.to_csv('Mercedes_Benz', index=False)
mercedes.shape
# # BMW
# Open the BMW listings, then walk the result pages collecting the
# JSON metadata embedded in each listing card's <script> tag.
driver.find_element_by_link_text('BMW').click()
# +
# brand = []
# year = []
# fuelType = []
# transmission = []
# cc = []
# km_driven = []
# price = []
# description = []
# url = []
# +
import json
for _page in range(100):
    time.sleep(10)  # give the listing page time to render
    for card in driver.find_elements_by_css_selector('.classified-listing '):
        try:
            raw = card.find_element_by_tag_name('script').get_attribute('innerHTML')
        except NoSuchElementException:
            continue  # card carries no embedded metadata script
        item = json.loads(raw.replace('\n', ''))
        brand.append(item['brand']['name'])
        year.append(item['modelDate'])
        fuelType.append(item['fuelType'])
        transmission.append(item['vehicleTransmission'])
        cc.append(item['vehicleEngine']['engineDisplacement'])
        km_driven.append(item['mileageFromOdometer'])
        price.append(item['offers']['price'])
        url.append(item['offers']['url'])
        description.append(item['name'])
    try:
        time.sleep(3)
        next_link = driver.find_element_by_link_text('Next ›')
        ActionChains(driver).move_to_element(next_link).perform()
        time.sleep(5)
        next_link.click()
    except NoSuchElementException:
        break  # no "Next" link -- last page reached
# -
# Assemble the scraped columns into a frame, drop duplicate listings,
# and persist the result to disk.
bmw = pd.DataFrame({
    'Make': brand,
    'Model_Year': year,
    'Fuel_Type': fuelType,
    'Transmission': transmission,
    'Engine_Capacity': cc,
    'Kms_Driven': km_driven,
    'Price': price,
    'Url': url,
    'Description': description,
})
bmw.head()
bmw.drop_duplicates(inplace=True)
bmw.to_csv('BMW', index=False)
bmw.shape
# # Chevrolet
# Open the Chevrolet listings, then walk the result pages collecting the
# JSON metadata embedded in each listing card's <script> tag.
driver.find_element_by_link_text('Chevrolet').click()
# +
# brand = []
# year = []
# fuelType = []
# transmission = []
# cc = []
# km_driven = []
# price = []
# description = []
# url = []
# +
import json
for _page in range(100):
    time.sleep(10)  # give the listing page time to render
    for card in driver.find_elements_by_css_selector('.classified-listing '):
        try:
            raw = card.find_element_by_tag_name('script').get_attribute('innerHTML')
        except NoSuchElementException:
            continue  # card carries no embedded metadata script
        item = json.loads(raw.replace('\n', ''))
        brand.append(item['brand']['name'])
        year.append(item['modelDate'])
        fuelType.append(item['fuelType'])
        transmission.append(item['vehicleTransmission'])
        cc.append(item['vehicleEngine']['engineDisplacement'])
        km_driven.append(item['mileageFromOdometer'])
        price.append(item['offers']['price'])
        url.append(item['offers']['url'])
        description.append(item['name'])
    try:
        time.sleep(3)
        next_link = driver.find_element_by_link_text('Next ›')
        ActionChains(driver).move_to_element(next_link).perform()
        time.sleep(5)
        next_link.click()
    except NoSuchElementException:
        break  # no "Next" link -- last page reached
# -
# Assemble the scraped columns into a frame, drop duplicate listings,
# and persist the result to disk.
chevrolet = pd.DataFrame({
    'Make': brand,
    'Model_Year': year,
    'Fuel_Type': fuelType,
    'Transmission': transmission,
    'Engine_Capacity': cc,
    'Kms_Driven': km_driven,
    'Price': price,
    'Url': url,
    'Description': description,
})
chevrolet.head()
chevrolet.drop_duplicates(inplace=True)
chevrolet.to_csv('Chevrolet', index=False)
chevrolet.shape
# # Jeep
# Open the Jeep listings, then walk the result pages collecting the
# JSON metadata embedded in each listing card's <script> tag.
driver.find_element_by_link_text('Jeep').click()
# +
# brand = []
# year = []
# fuelType = []
# transmission = []
# cc = []
# km_driven = []
# price = []
# description = []
# url = []
# +
import json
for _page in range(98):
    time.sleep(10)  # give the listing page time to render
    for card in driver.find_elements_by_css_selector('.classified-listing '):
        try:
            raw = card.find_element_by_tag_name('script').get_attribute('innerHTML')
        except NoSuchElementException:
            continue  # card carries no embedded metadata script
        item = json.loads(raw.replace('\n', ''))
        brand.append(item['brand']['name'])
        year.append(item['modelDate'])
        fuelType.append(item['fuelType'])
        transmission.append(item['vehicleTransmission'])
        cc.append(item['vehicleEngine']['engineDisplacement'])
        km_driven.append(item['mileageFromOdometer'])
        price.append(item['offers']['price'])
        url.append(item['offers']['url'])
        description.append(item['name'])
    try:
        time.sleep(3)
        next_link = driver.find_element_by_link_text('Next ›')
        ActionChains(driver).move_to_element(next_link).perform()
        time.sleep(5)
        next_link.click()
    except NoSuchElementException:
        break  # no "Next" link -- last page reached
# -
# Assemble the scraped columns into a frame, drop duplicate listings,
# and persist the result to disk.
jeep = pd.DataFrame({
    'Make': brand,
    'Model_Year': year,
    'Fuel_Type': fuelType,
    'Transmission': transmission,
    'Engine_Capacity': cc,
    'Kms_Driven': km_driven,
    'Price': price,
    'Url': url,
    'Description': description,
})
jeep.head()
jeep.drop_duplicates(inplace=True)
jeep.to_csv('Jeep', index=False)
jeep.shape
| Web-Scrape-Notebooks/webscrape(Toyota,Daihatsu,MG,Mercedes_Benz,BMW,Chevrolet,Jeep).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # INS Navigation: Basics
#
# ## References
#
# - [VectorNav Tech](https://www.vectornav.com/support/library/imu-and-ins)
# %matplotlib inline
import numpy as np # matrix manipulations
from matplotlib import pyplot as plt
import pylab
pylab.rcParams['figure.figsize'] = (10.0, 10.0)
import ins_nav
from ins_nav.errors import calc_errors
# ## Errors
#
# Error comes from many places:
#
# $$
# error_{bias} = 0.5 * 9.81 * bias * time^2 \\
# error_{misalignment} = 0.5 * 9.81 * sin(miss) * time^2 \\
# error_{ARW} = 0.5 * 9.81 * sin(arw*\sqrt{time/3600}) * time^2 \\
# $$
# Error-budget inputs for a single evaluation.
bias = 0.125 # accelerometer bias [g]
miss = 0.1 # axis misalignment [deg]
arw = 0.1 # angle random walk [deg/sqrt(hour)]
t = 10 # integration time [sec]
# calc_errors returns the position error (meters, per the prints below)
# contributed by each source after t seconds: bias, misalignment, ARW,
# following the formulas in the markdown cell above.
be,me,ae = calc_errors(bias, miss, arw, t)
print("Error after {} sec".format(t))
print("---------------------------------------------------------")
print("Error from bias: {:.1f} m".format(be))
print("Error from axis misalignment: {:.1f} m".format(me))
print("Error from ARW: {:.1f} m".format(ae))
def run():
    """Plot how each INS error source grows over the first 100 seconds."""
    bias = 0.03  # [g]
    miss = 0.1   # [deg]
    arw = 0.1    # [deg/sqrt(hour)]
    # Evaluate every error source once per second, converting m -> km.
    per_second = [calc_errors(bias, miss, arw, t) for t in range(100)]
    sbias = [b / 1000 for b, _, _ in per_second]
    smiss = [m / 1000 for _, m, _ in per_second]
    sarw = [a / 1000 for _, _, a in per_second]
    # One stacked subplot per error source.
    for row, (series, label) in enumerate(
            ((sbias, "Bias"), (smiss, "Missalignment"), (sarw, "ARW")), start=1):
        plt.subplot(3, 1, row)
        plt.plot(series, label=label)
        plt.grid(True)
        plt.ylabel("km")
        plt.legend()
run()
| docs/basic_errors.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# +
import tensorflow_data_validation as tfdv
import pandas as pd
import datetime
from tensorflow_data_validation.utils import slicing_util
from typing import List, Optional, Text, Union, Dict, Iterable, Mapping
from tensorflow_metadata.proto.v0 import schema_pb2
# -
# Report the TFDV version in use.
tfdv.__version__
# ## Configure environment settings
# GCS locations: the template schema/stats shipped with the drift monitor,
# and the destination where the tuned schema will be written.
base_schema_path = 'gs://mlops-dev-workspace/drift-monitor/templates/schema/schema.pbtxt'
baseline_stats_path = 'gs://mlops-dev-workspace/drift-monitor/templates/baseline_stats/stats.pbtxt'
schema_path = 'gs://mlops-dev-workspace/drift-monitor/schema/schema.pbtxt'
# Load the base schema and render it for inspection before tuning.
schema = tfdv.load_schema_text(base_schema_path)
tfdv.display_schema(schema)
# ### Fine tune the schema
# +
# Do your stuff
#for feature in schema.feature:
# print(feature.name)
#path = tfdv.FeaturePath("Wilderness_Area")
# +
#tfdv.set_domain(schema, 'Elevation', schema_pb2.FloatDomain(name='Elevation', min=1000, max=3000))
# -
tfdv.display_schema(schema)
# ### Save the updated schema
# +
#tfdv.get_feature(schema, 'Wilderness_Area').skew_comparator.infinity_norm.threshold = 0.001
# -
# Persist the (possibly tuned) schema for the validation job to consume.
tfdv.write_schema_text(schema, schema_path)
# ## Run the job
# Parameters passed to run.py below (see the shell cell that follows).
project = 'mlops-dev-env'
log_table = 'data_validation.test1'  # request-response log table
model = 'covertype_tf'
version = 'v3'
start_time = '2020-05-25T16:00:00'  # time range of logs to validate
end_time = '2020-05-25T22:00:00'
output_path = 'gs://mlops-dev-workspace/drift-monitor/output/test'
baseline_stats_file = 'gs://mlops-dev-workspace/drift-monitor/baseline_stats/stats.pbtxt'
time_window = '60m'  # presumably the per-slice stats window -- confirm in run.py
# !python ../run.py \
# --project={project} \
# --request_response_log_table={log_table} \
# --model={model} \
# --version={version}\
# --start_time={start_time} \
# --end_time={end_time} \
# --output_path={output_path} \
# --schema_file={schema_path} \
# --baseline_stats_file={baseline_stats_file} \
# --time_window={time_window}
# ## Analyze results
# BUG FIX: the original called tfdv.load_statistics(stats_path) before
# stats_path was assigned, which raises NameError when the cells are run
# top-to-bottom. Define both artifact paths first, then load.
anomalies_path = output_path + '/' + 'anomalies.pbtxt'
stats_path = output_path + '/' + 'stats.pb'
# !gsutil ls {anomalies_path}
# !gsutil ls {stats_path}
# Load the statistics produced by the job and list the slice names.
stats = tfdv.load_statistics(stats_path)
for dataset in stats.datasets:
    print(dataset.name)
# Load and render the detected anomalies alongside the statistics.
anomalies = tfdv.load_anomalies_text(anomalies_path)
tfdv.display_anomalies(anomalies)
tfdv.visualize_statistics(stats)
# Restrict the view to the "All Examples" slice.
all_examples_stats = tfdv.get_slice_stats(stats, "All Examples")
tfdv.visualize_statistics(all_examples_stats)
anomaly_list = list(anomalies.anomaly_info)
anomaly_list
| utilities/runner.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Model complexity vs accuracy - empirical analysis
#
# This notebook is intended as a moderate stress test for the DSX infrastructure. Notebook generates a known function, adds random noise to it and runs an ML algorithm on a wild goose chase asking it to fit and predict based on this data.
#
# Right now I am running this against 10 Million points. To increase the complexity, you can do two things
# - Increase the number of points (direct hit)
# - Increase the complexity of the function (indirect)
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
# Create a linear stream of `10`million points between `-50` and `50`.
# 100 units of range at 1e-5 spacing -> 10 million samples on [-50, 50).
x = np.arange(-50,50,0.00001)
x.shape
# Create random noise of same dimension
bias = np.random.standard_normal(x.shape)
# ### Define the function
# Modulated cosine (cubed, scaled by a normalized parabola) plus 5x noise.
y2 = np.cos(x)**3 * (x**2/max(x)) + bias*5
# ### Train test split
# 70/30 split; no random_state is fixed, so splits vary run to run.
x_train, x_test, y_train, y_test = train_test_split(x,y2, test_size=0.3)
x_train.shape
# Plotting algorithms cannot work with millions of points, so you downsample just for plotting
stepper = int(x_train.shape[0]/1000)
stepper
# Quick look at a downsampled view of the raw data.
fig, ax = plt.subplots(1,1, figsize=(13,8))
ax.scatter(x[::stepper],y2[::stepper], marker='d')
ax.set_title('Distribution of training points')
# ### Curve fitting
# Let us define a function that will try to fit against the training data. It starts with lower order and sequentially increases the complexity of the model. The hope is, somewhere here is the sweet spot of low bias and variance. We will find it empirically
def greedy_fitter(x_train, y_train, x_test, y_test, max_order=25):
    """Fit polynomials of increasing order and record train/test error.

    For each order from 1 to ``max_order`` a least-squares polynomial is
    fitted to ``(x_train, y_train)``; predictions and RMSE are recorded
    against both splits so the bias/variance sweet spot can be located
    empirically. Per-order timing and RMSE are printed as the sweep runs.

    Returns a tuple
    ``(train_predictions, train_rmse, test_predictions, test_rmse)``,
    each a list with one entry per order.
    """
    import time

    def _rmse(y_true, y_pred):
        # Root mean squared error; numerically equivalent to
        # np.sqrt(sklearn.metrics.mean_squared_error(y_true, y_pred)).
        return np.sqrt(np.mean((np.asarray(y_true) - np.asarray(y_pred)) ** 2))

    train_predictions = []
    train_rmse = []
    test_predictions = []
    test_rmse = []
    for order in range(1, max_order + 1):
        t1 = time.time()
        coeff = np.polyfit(x_train, y_train, deg=order)
        # np.polyval evaluates the highest-order-first coefficients via
        # Horner's rule, replacing the original hand-rolled power loops
        # (which duplicated the same evaluation for train and test).
        y_predict = np.polyval(coeff, x_train)
        train_predictions.append(y_predict)
        current_train_rmse = _rmse(y_train, y_predict)
        train_rmse.append(current_train_rmse)
        y_predict_test = np.polyval(coeff, x_test)
        test_predictions.append(y_predict_test)
        current_test_rmse = _rmse(y_test, y_predict_test)
        test_rmse.append(current_test_rmse)
        t2 = time.time()
        elapsed = round(t2-t1, 3)
        print("Elapsed: " + str(elapsed) + \
              "s Order: " + str(order) + \
              " Train RMSE: " + str(round(current_train_rmse, 4)) + \
              " Test RMSE: " + str(round(current_test_rmse, 4)))
    return (train_predictions, train_rmse, test_predictions, test_rmse)
# Run the model. Change the `max_order` to higher or lower if you wish
# %%time
# Sweep polynomial orders 1..50; greedy_fitter prints per-order timing/RMSE.
complexity=50
train_predictions, train_rmse, test_predictions, test_rmse = greedy_fitter(
    x_train, y_train, x_test, y_test, max_order=complexity)
# ## Plot results
# How well did the models fit against training data?
#
# ### Training results
# %%time
fig, axes = plt.subplots(1,1, figsize=(15,15))
axes.scatter(x_train[::stepper], y_train[::stepper],
             label='Original data', color='gray', marker='x')
order=1
# NOTE(review): predictions are sliced with [:stepper] (the first `stepper`
# points) while the raw data uses [::stepper] (every stepper-th point) --
# possibly intended to be [::stepper]; confirm before trusting the overlay.
for p, r in zip(train_predictions, train_rmse):
    axes.scatter(x_train[:stepper], p[:stepper],
                 label='O: ' + str(order) + " RMSE: " + str(round(r,2)),
                 marker='.')
    order+=1
axes.legend(loc=0)
axes.set_title('Performance against training data')
# ### Test results
# %%time
fig, axes = plt.subplots(1,1, figsize=(15,15))
axes.scatter(x_test[::stepper], y_test[::stepper],
             label='Test data', color='gray', marker='x')
order=1
for p, r in zip(test_predictions, test_rmse):
    axes.scatter(x_test[:stepper], p[:stepper],
                 label='O: ' + str(order) + " RMSE: " + str(round(r,2)),
                 marker='.')
    order+=1
axes.legend(loc=0)
axes.set_title('Performance against test data')
# ### Bias vs Variance
# Test RMSE as a function of model order.
ax = plt.plot(np.arange(1,complexity+1),test_rmse)
plt.title('Bias vs Complexity'); plt.xlabel('Order of polynomial'); plt.ylabel('Test RMSE')
# Disable the axis offset so tick labels show absolute RMSE values.
ax[0].axes.get_yaxis().get_major_formatter().set_useOffset(False)
plt.savefig('Model efficiency.png')
# #### CPU usage during curve fitting
# 
| ml/curve_fitting_model-complexity-vs-accuracy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
from astropy.table import Table
import matplotlib.pyplot as plt
import numpy as np
# DASCH photometry for HAT-P-11; this ASCII product has its header on
# row 1 and data starting at row 3.
table = Table.read('data/HAT-P-11_ATLAS2_919509646_0000.txt', format='ascii', header_start=1, data_start=3)
# Table column definitions [here](http://dasch.rc.fas.harvard.edu/database.php#variables)
from astropy.time import Time
# Observation epochs; the 'Date' column is a Julian Date (format='jd').
t = Time(table['Date'], format='jd')
# +
from astropy.timeseries import TimeSeries, aggregate_downsample
import astropy.units as u
# Light curve of the locally calibrated magnitude.
ts = TimeSeries(table[['magcal_local']], time=t)
plt.figure(figsize=(4, 3))
# Median magnitude in one-year bins (nanmedian ignores missing values).
binned = aggregate_downsample(ts, time_bin_size=1*u.year, aggregate_func=np.nanmedian)
# Keep only measurements with magnitude error below 1.
mask = table['magcal_local_error'] < 1
plt.scatter(t.decimalyear[mask], table['magcal_local'][mask], color='silver', marker=',', s=1)
# Reference line: median of the yearly bins excluding the last 50 bins
# (presumably to avoid edge effects at the end of the series -- confirm).
plt.axhline(np.median(binned['magcal_local'][:-50]), ls='--', color='k')
# Overlay the yearly medians in red.
plt.scatter(Time(binned['time_bin_start'], format='jd').decimalyear, binned['magcal_local'], color='r', zorder=10, s=5)
ax = plt.gca()
# Cosmetics: hide top/right spines; invert y so brighter is up.
for s in ['right', 'top']:
    ax.spines[s].set_visible(False)
ax.invert_yaxis()
plt.ylim([10.5, 9.5])
ax.set(xlabel='Year', ylabel='DASCH Magnitude')
plt.savefig('plots/dasch.pdf', bbox_inches='tight')
| dasch.ipynb |