code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .cpp
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: C++14
// language: C++14
// name: xcpp14
// ---
// we need to specify the path and explicitly load the CAF library
#pragma cling add_library_path("/usr/local/lib")
#pragma cling add_include_path("/usr/local/include")
#pragma cling load("/usr/local/lib/libcaf_core.so")
// # Testing
// +
// CAF_TEST_NO_MAIN is required for this notebook
#define CAF_TEST_NO_MAIN
// CAF_SUITE specifies the name of the test_suite
// You can divide your tests into multiple test suites
// For this notebook we will use only one suite
#define CAF_SUITE playground_suite
// -
// Necessary header file inclusion
#include <caf/test/dsl.hpp>
#include <caf/test/unit_test.hpp>
#include <caf/test/unit_test_impl.hpp>
// ## Most basic test case
// +
namespace n1 {
// Empty fixture: the divide test needs no shared setup/teardown state.
struct fixture {};
CAF_TEST_FIXTURE_SCOPE(math_tests, fixture)
// Minimal test case demonstrating CAF_CHECK vs CAF_REQUIRE semantics.
CAF_TEST(divide) {
CAF_CHECK(1 / 1 == 0); // this would fail
CAF_CHECK(2 / 2 == 1); // this would pass
// CAF_REQUIRE(3 + 3 == 5); // this would fail and stop test execution [uncomment to try]
CAF_CHECK(4 - 4 == 0); // passes; would become unreachable if the CAF_REQUIRE above were enabled and failed
}
CAF_TEST_FIXTURE_SCOPE_END()
}
// -
// Above is a simple fixture that contains one test case only i.e. **divide**.
//
// We have used few macros such as CAF_CHECK and CAF_REQUIRE to validate our assertions. The main
// difference between CAF_REQUIRE and CAF_CHECK is that even if CAF_CHECK fails the control flow will
// continue, however failure of assertion by CAF_REQUIRE will stop the test execution.
// ## Testing Actors
// A simple example of a test case is shown below. This example shows that you can create the actor system in your fixture, spawn actors and send messages to them. In other words, below code is not very different from your regular program however here we are using the macros such as CAF_CHECK and have arranged them as test cases.
// +
namespace n2 {
// Fails the current test with a rendered CAF error message; expects `sys` in scope.
#define ERROR_HANDLER [&](caf::error &err) { CAF_FAIL(sys.render(err)); }
// Fixture owning a full actor system plus a scoped actor used to send
// requests and receive replies synchronously inside test cases.
struct actor_fixture {
caf::actor_system_config cfg;
caf::actor_system sys;
caf::scoped_actor self;
actor_fixture()
: sys(cfg),
self(sys) {}
~actor_fixture() {}
};
// Stateless actor behavior: replies to (int, int) with their sum.
caf::behavior adder(caf::event_based_actor *self) {
return {
[=](int x, int y) -> int {
return x+y;
}
};
}
CAF_TEST_FIXTURE_SCOPE(actor_tests, actor_fixture)
// Spawns the adder and blocks on the reply via request(...).receive(...).
CAF_TEST(simple_actor_test) {
auto adder_actor = sys.spawn(adder);
self->request(adder_actor, caf::infinite, 3, 4).receive([=](int r){
CAF_CHECK(r == 7);
}, ERROR_HANDLER);
}
CAF_TEST_FIXTURE_SCOPE_END()
}
// -
// While the above example works, very soon you would start to face following problems -
//
// * Lot of boilerplate
// * Above is a simple example of one actor, if you are unit testing one actor it would work however the reality
// is that you would have your actor invoking another actor. Writing code to validate that behavior is not so easy.
// * You primary goal would be to check the interaction between the actors and not necessarily the scheduling on
// multiple threads and/or the asynchronous nature of it.
// So how do we write the tests in more declarative and synchronous manner ?
// ### Test Coordinator
// CAF provides an implementation of coordinator (called test_coordinator) that you supply to the scheduler. This coordinator is specifically designed for testing as it does not perform/schedule your actors on multiple thread and provide you the means to run it.
// There is also a fixture class called *test_coordinator_fixture* that is provided to hide the details and boilerplate for setting up the scheduler with **test_coordinator**.
// +
namespace n3 {
// Compile-time atom used as the message tag for both actors below.
using an_atom =
caf::atom_constant<caf::atom("an_atom")>;
// Replies "pong" to any an_atom message.
caf::behavior ping(caf::event_based_actor* self) {
return {
[=](an_atom) -> std::string {
return "pong";
}
};
}
// Replies "pang" or "ping" depending on the bool flag it receives.
caf::behavior pong(caf::event_based_actor* self) {
return {
[=](an_atom, bool pang) -> std::string {
return pang ? "pang" : "ping";
}
};
}
// Inherits sys/self and the run()/expect() machinery from the CAF test fixture.
struct ping_pong_fixture : test_coordinator_fixture<> {
};
CAF_TEST_FIXTURE_SCOPE(ping_pong_tests, ping_pong_fixture)
// The expect() calls below drive the test_coordinator deterministically:
// each one schedules and validates exactly one in-flight message.
CAF_TEST(ping_should_return_pong) {
auto ping_actor = sys.spawn(ping);
self->send(ping_actor, an_atom::value);
// check if we sent it correctly
expect((an_atom), from(self).to(ping_actor).with(an_atom::value));
// check the response we will get back
expect((std::string), from(ping_actor).to(self).with("pong"));
}
CAF_TEST(pong_should_return_ping_or_pang) {
auto pong_actor = sys.spawn(pong);
// check if we pass true that it should return pang
self->send(pong_actor, an_atom::value, true);
// check if we sent it correctly
expect((an_atom, bool), from(self).to(pong_actor).with(an_atom::value, true));
// check the response we will get back
expect((std::string), from(pong_actor).to(self).with("pang"));
// check if we pass false that it should return ping
self->send(pong_actor, an_atom::value, false);
// check if we sent it correctly
expect((an_atom, bool), from(self).to(pong_actor).with(an_atom::value, false));
// check the response we will get back
expect((std::string), from(pong_actor).to(self).with("ping"));
}
CAF_TEST_FIXTURE_SCOPE_END()
}
// -
// Above shows an excellent way to declarative testing of your actors.
//
//
// What happens behind the scenes is that **expect** macro schedules the run using the test_coordinator. Now there will
// be scenarios where before you get to test your actor implementation you may want to set them up. That setup would require sending some messages.
//
// The next example showcases the pattern you can use for such test cases.
// +
namespace n4 {
// Here we have a stateful actor that requires that
// you call its third method only after the first and second methods
// have been called
//
// FIX: both flags are now explicitly initialized to false. The state
// struct is default-constructed inside the stateful_actor, so leaving
// the bools uninitialized would give them indeterminate values and the
// string handler below could read garbage.
struct SomeActorInfo{
bool invoked_first_method = false;
bool invoked_second_method = false;
};
// Three handlers: the int and float handlers set their respective state
// flags; the string handler returns 0 on success or -1 when the required
// call order was violated.
caf::behavior make_some_actor(caf::stateful_actor<SomeActorInfo> *self) {
return {
[=](int x) -> std::string {
self->state.invoked_first_method = true;
return "invoked method with int";
},
[=](float y) -> std::string {
self->state.invoked_second_method = true;
return "invoked method with float";
},
[=](std::string) -> int {
// -1 signals that the first/second methods were not both invoked yet
if (!self->state.invoked_first_method ||
    !self->state.invoked_second_method) {
return -1;
}
return 0;
}
};
}
// Fixture providing sys/self plus the run()/expect() helpers.
struct some_actor_fixture : test_coordinator_fixture<> {
};
CAF_TEST_FIXTURE_SCOPE(some_actor_tests, some_actor_fixture)
CAF_TEST(some_actor_test_3rd_method) {
auto some_actor = sys.spawn(make_some_actor);
// in this test case we are only interested in
// testing the third method
//
// However the first and second method needs to be invoked
// as well so that they can update the proper state.
// we will send the anonymous request to the first and second
// method of some_actor
caf::anon_send(some_actor, 3);
caf::anon_send(some_actor, 0.3f); // float literal instead of a C-style cast
// we now invoke the 'run' method
// this method is implemented in test_coordinator_fixture and since
// we are inheriting from it we have access to it
//
// What run does is that it process all the messages of the actors
// In this example we used anon_send and therefore we would have 2 messages
// scheduled
auto num_of_messages_processed = run();
std::cout << "Messages Processed - " << num_of_messages_processed << std::endl;
// now we can finally test the method that we wanted to test
self->send(some_actor, "hey");
expect((std::string), from(self).to(some_actor).with("hey"));
expect((int), from(some_actor).to(self).with(0));
}
CAF_TEST_FIXTURE_SCOPE_END()
}
// -
// What is important to note and understand is the usage of **anon_send** in the above example.
//
// In the testcase i.e. some_actor_test_3rd_method we are using a "scoped_actor" (self) and the reply back to it is not scheduled
// the same way as is for **scheduled_actor** and hence invocation of **run** would have processed only 2 messages and not 4 messages.
//
// Since we were interested in getting the reply back from the first and second method of some_actor, it was okay to use anon_send.
// This is to run the test suite from the notebook.
// In your actual program you would not need to do
// this as you would simply run the executable. Make sure
// to not define CAF_TEST_NO_MAIN
//
// FIX: the original took c_str() of a temporary std::string, which is
// destroyed at the end of the full expression, so `tn` dangled before
// caf::test::main ever ran (undefined behavior). A static buffer keeps
// argv[0] alive for the duration of the call.
static char suite_name[] = "tests";
char* tn = suite_name;
caf::test::main(1, &tn);
| notebooks/testing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 範例 : 計程車費率預測
# https://www.kaggle.com/c/new-york-city-taxi-fare-prediction
# # [教學目標]
# - 使用並觀察特徵組合, 在計程車費率預測競賽的影響
# # [範例重點]
# - 增加精度差與緯度差兩個特徵, 觀察線性迴歸與梯度提升樹的預測結果有什麼影響 (In[4], Out[4], In[5], Out[5])
# - 再增加座標距離特徵, 觀察線性迴歸與梯度提升樹的預測結果有什麼影響 (In[6], Out[6], In[7], Out[7])
# +
# All preparation before starting the feature engineering
import pandas as pd
import numpy as np
import datetime
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import GradientBoostingRegressor
# NOTE(review): assumes the taxi CSV exists at this relative path — verify
data_path = '../../data/'
df = pd.read_csv(data_path + 'taxi_data1.csv')
# Separate the regression target (fare) from the features
train_Y = df['fare_amount']
df = df.drop(['fare_amount'] , axis=1)
df.head()
# -
# Time-feature decomposition using pandas datetime accessors.
# Parse the pickup timestamp once with the vectorized pd.to_datetime,
# then extract each calendar component via the .dt accessor instead of
# the original per-row apply(strftime) + astype('int64') round trips,
# which were both slow and indirect (int -> str -> int).
df['pickup_datetime'] = pd.to_datetime(df['pickup_datetime'], format='%Y-%m-%d %H:%M:%S UTC')
df['pickup_year'] = df['pickup_datetime'].dt.year.astype('int64')
df['pickup_month'] = df['pickup_datetime'].dt.month.astype('int64')
df['pickup_day'] = df['pickup_datetime'].dt.day.astype('int64')
df['pickup_hour'] = df['pickup_datetime'].dt.hour.astype('int64')
df['pickup_minute'] = df['pickup_datetime'].dt.minute.astype('int64')
df['pickup_second'] = df['pickup_datetime'].dt.second.astype('int64')
df.head()
# Evaluate the features with linear regression / gradient-boosted trees respectively
df = df.drop(['pickup_datetime'] , axis=1)
scaler = MinMaxScaler()
train_X = scaler.fit_transform(df.astype(np.float64))
Linear = LinearRegression()
print(f'Linear Reg Score : {cross_val_score(Linear, train_X, train_Y, cv=5).mean()}')
GDBT = GradientBoostingRegressor()
print(f'Gradient Boosting Reg Score : {cross_val_score(GDBT, train_X, train_Y, cv=5).mean()}')
# Add two new features: longitude difference and latitude difference
df['longitude_diff'] = df['dropoff_longitude'] - df['pickup_longitude']
df['latitude_diff'] = df['dropoff_latitude'] - df['pickup_latitude']
df[['longitude_diff', 'latitude_diff', 'pickup_longitude', 'pickup_latitude', 'dropoff_longitude', 'dropoff_latitude']].head()
# Result: the coordinate differences alone give a large accuracy boost
train_X = scaler.fit_transform(df.astype(np.float64))
print(f'Linear Reg Score : {cross_val_score(Linear, train_X, train_Y, cv=5).mean()}')
print(f'Gradient Boosting Reg Score : {cross_val_score(GDBT, train_X, train_Y, cv=5).mean()}')
# Add a 2-D (Euclidean) coordinate-distance feature
df['distance_2D'] = (df['longitude_diff']**2 + df['latitude_diff']**2)**0.5
df[['distance_2D', 'longitude_diff', 'latitude_diff']].head()
# Result: adding the distance improves accuracy again (including linear regression)
train_X = scaler.fit_transform(df.astype(np.float64))
print(f'Linear Reg Score : {cross_val_score(Linear, train_X, train_Y, cv=5).mean()}')
print(f'Gradient Boosting Reg Score : {cross_val_score(GDBT, train_X, train_Y, cv=5).mean()}')
# # 作業1
# * 參考今日教材,試著使用經緯度一圈的長度比這一概念,組合出一個新特徵,再觀察原特徵加上新特徵是否提升了正確率?
#
# # 作業2
# * 試著只使用新特徵估計目標值(忽略原特徵),效果跟作業1的結果比較起來效果如何?
| 2nd-ML100Days/homework/D-026/Day_026_Feature_Combination.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # lecture 5
#
# [visualgo](https://visualgo.net/en)
# # Searching
# # Linear (Sequential) Search
#
# - pro: sorting doesn't matter
# - con: inefficient
test_list = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
# +
def linear_search(the_list, target):
    """Scan the_list from the front for target.

    Prints and returns the number of comparisons made when target is
    first found, or returns None if it is absent.

    FIX: the original kept scanning the whole list after a match and
    returned nothing usable; we now stop at the first hit and report
    the step count to the caller.
    """
    count = 0
    for item in the_list:
        count += 1
        if item == target:
            print(f"Found {target} in {count} steps")
            return count
    return None
# -
linear_search(test_list, 3)
# # Binary Search
#
# - pro: more efficient
# - con: sort dependent
# +
import time
def binary_search(the_list, target):
    """Repeatedly halve the (sorted) search window looking for target.

    Prints and returns the number of iterations used to find target, or
    returns None if it is not present. The search works on slices, so
    original indices are lost; only the step count is reported.

    FIXES vs the original:
    - the midpoint is excluded when narrowing to the right half; keeping
      it meant the window could stop shrinking and loop forever,
    - the loop terminates (returning None) when the window empties, so a
      missing target no longer hangs,
    - the time.sleep(1) demo artifact is removed.
    """
    count = 0
    while the_list:
        count += 1
        midpoint_index = len(the_list) // 2
        print(the_list, midpoint_index)
        if the_list[midpoint_index] == target:
            print(f"Found {target} in {count} steps")
            return count
        if the_list[midpoint_index] > target:
            the_list = the_list[:midpoint_index]
        else:
            the_list = the_list[midpoint_index + 1:]
    return None
# +
binary_search(test_list, 13)
# -
# ### Bubble Sort
#
# - brute force
# 
#
# - best case O(n)
# - worst case O($n^2$)
# +
import time
def bubble_sort(the_list):
    """Sort the_list in place using bubble sort.

    Keeps the early-exit optimization: stop as soon as a full pass makes
    no swap (best case O(n) on already-sorted input).

    FIXES vs the original:
    - time.sleep(.5) and the per-comparison tracing prints (demo
      artifacts) are removed,
    - the inner scan shrinks by one each pass, since after i passes the
      last i items are already in their final positions,
    - the builtin name `next` is no longer shadowed.
    """
    high_idx = len(the_list) - 1
    for i in range(high_idx):
        changed = False
        for j in range(high_idx - i):
            if the_list[j] > the_list[j + 1]:
                # tuple swap of adjacent out-of-order items
                the_list[j], the_list[j + 1] = the_list[j + 1], the_list[j]
                changed = True
        if not changed:
            break
# -
unsorted_list = [9, 7, 3, 1, 6, 2, 0, 8, 4, 5]
# +
bubble_sort(unsorted_list)
# -
def q_sort(array):
    """Sort the array by using quicksort."""
    if len(array) <= 1:
        # Base case: zero or one elements are already sorted.
        return array
    # Partition around the last element as pivot.
    less = []
    equal = []
    greater = []
    pivot = array[-1]
    for x in array:
        if x < pivot:
            less.append(x)
        elif x == pivot:
            equal.append(x)
        elif x > pivot:
            greater.append(x)
    # Recursively sort both sides, then join with + (equal keeps all
    # pivot-valued elements in the middle, not just the pivot itself).
    curr_arr = q_sort(less) + equal + q_sort(greater)
    print("less_arr", less, "equl_arr",equal,"greater_arr", greater)
    return curr_arr
q_sort(unsorted_list)
# # Big O
# +
from math import log
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
plt.style.use('bmh')
# Set up runtime comparisons: 1000 sample points on [1, 10]
n = np.linspace(1,10,1000)
labels = ['Constant','Logarithmic','Linear','Log Linear','Quadratic','Exponential']
# One growth curve per label, evaluated element-wise over n
big_o = [np.ones(n.shape),np.log(n),n,n*np.log(n),n**2,2**n]
# Plot setup
plt.figure(figsize=(12,10))
plt.ylim(0,50)  # clip the y-axis so the exponential curve doesn't dwarf the rest
for i in range(len(big_o)):
    plt.plot(n,big_o[i],label = labels[i])
plt.legend(loc=0)
plt.ylabel('Relative Runtime')
plt.xlabel('n')
# -
# [snowball](http://localhost:8888/view/snowball.jpeg)
# +
def sum1(n):
    """Take an input of n and return the sum of the numbers from 0 to n."""
    total = 0
    value = 0
    while value <= n:
        total += value
        value += 1
    return total
# O(n) — touches every value from 0 through n once
# -
def sum2(n):
    """
    Take an input of n and return the sum of the numbers from 0 to n.

    Uses the closed-form Gauss formula. FIX: floor division (//) keeps
    the result an exact int — n*(n+1) is always even, so nothing is
    lost, whereas the original true division (/) returned a float and
    silently loses precision for large n.
    """
    return (n*(n+1))//2
# O(1)
# %timeit sum1(5000)
# %timeit sum2(5000)
import this
def quadratic(n):
    """Demonstrate an O(n**2) nested loop; does no work and returns None.

    FIX: the original inner loop had no statement in its body (only a
    comment), which is a SyntaxError; `pass` makes the cell runnable.
    Rebinding the parameter `n` as the outer loop variable is kept from
    the original — range(n) is evaluated before the rebinding happens.
    """
    for n in range(n):
        for m in range(n):
            # O(n**2): the inner loop runs up to n times per outer iteration
            pass
| learn/05week/code/.ipynb_checkpoints/lect-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/aditya270520/100Daysofpython/blob/main/user_input_to_number.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/"} id="fyMnf9q21uMl" outputId="81e76ef5-1a60-464a-817d-60e354d749d7"
# Read an integer and a float from the user and print their product.
int_text = input("Give me an integer number: ")
int_num = int(int_text)  # raises ValueError if the text is not a valid integer
float_text = input("Give me a float number: ")
float_num = float(float_text)  # raises ValueError on non-numeric input
result = int_num * float_num  # int * float promotes the result to float
print("Your result is: ", result)
| user_input_to_number.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Lab: Titanic Survival Exploration with Decision Trees
# ## Getting Started
# In this lab, you will see how decision trees work by implementing a decision tree in sklearn.
#
# We'll start by loading the dataset and displaying some of its rows.
# +
# Import libraries necessary for this project
import numpy as np
import pandas as pd
from IPython.display import display # Allows the use of display() for DataFrames
# Pretty display for notebooks
# %matplotlib inline
# Set a random seed
import random
random.seed(42)
# Load the dataset
# NOTE(review): assumes titanic_data.csv sits next to the notebook — verify path
in_file = 'titanic_data.csv'
full_data = pd.read_csv(in_file)
# Print the first few entries of the RMS Titanic data
display(full_data.head())
# -
# Recall that these are the various features present for each passenger on the ship:
# - **Survived**: Outcome of survival (0 = No; 1 = Yes)
# - **Pclass**: Socio-economic class (1 = Upper class; 2 = Middle class; 3 = Lower class)
# - **Name**: Name of passenger
# - **Sex**: Sex of the passenger
# - **Age**: Age of the passenger (Some entries contain `NaN`)
# - **SibSp**: Number of siblings and spouses of the passenger aboard
# - **Parch**: Number of parents and children of the passenger aboard
# - **Ticket**: Ticket number of the passenger
# - **Fare**: Fare paid by the passenger
# - **Cabin** Cabin number of the passenger (Some entries contain `NaN`)
# - **Embarked**: Port of embarkation of the passenger (C = Cherbourg; Q = Queenstown; S = Southampton)
#
# Since we're interested in the outcome of survival for each passenger or crew member, we can remove the **Survived** feature from this dataset and store it as its own separate variable `outcomes`. We will use these outcomes as our prediction targets.
# Run the code cell below to remove **Survived** as a feature of the dataset and store it in `outcomes`.
# +
# Store the 'Survived' feature in a new variable and remove it from the dataset
# `outcomes` becomes the prediction target; `features_raw` keeps the inputs
outcomes = full_data['Survived']
features_raw = full_data.drop('Survived', axis = 1)
# Show the new dataset with 'Survived' removed
display(features_raw.head())
# -
# The very same sample of the RMS Titanic data now shows the **Survived** feature removed from the DataFrame. Note that `data` (the passenger data) and `outcomes` (the outcomes of survival) are now *paired*. That means for any passenger `data.loc[i]`, they have the survival outcome `outcomes[i]`.
#
# ## Preprocessing the data
#
# Now, let's do some data preprocessing. First, we'll remove the names of the passengers, and then one-hot encode the features.
#
# **Question:** Why would it be a terrible idea to one-hot encode the data without removing the names?
# (Answer: every passenger's name is unique, so one-hot encoding the Name column would create one column per passenger and lead to severe overfitting.)
# Inspect the raw feature values for the first passenger
features_raw.loc[0]
# +
# Removing the names
# (names are unique per row, so encoding them would only add noise/overfitting)
features_no_names = features_raw.drop(['Name'], axis=1)
# One-hot encoding
features = pd.get_dummies(features_no_names)
# -
# And now we'll fill in any blanks with zeroes.
features = features.fillna(0.0)
display(features.head())
# ## (TODO) Training the model
#
# Now we're ready to train a model in sklearn. First, let's split the data into training and testing sets. Then we'll train the model on the training set.
# Hold out 20% of the passengers for testing; fixed seed for reproducibility
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(features, outcomes, test_size=0.2, random_state=42)
# +
# Import the classifier from sklearn
from sklearn.tree import DecisionTreeClassifier
# TODO: Define the classifier, and fit it to the data
# Default hyperparameters: the tree grows until leaves are pure (prone to overfitting)
model = DecisionTreeClassifier()
model.fit(X_train, y_train)
# -
# ## Testing the model
# Now, let's see how our model does, let's calculate the accuracy over both the training and the testing set.
# +
# Making predictions
y_train_pred = model.predict(X_train)
y_test_pred = model.predict(X_test)
# Calculate the accuracy
from sklearn.metrics import accuracy_score
train_accuracy = accuracy_score(y_train, y_train_pred)
test_accuracy = accuracy_score(y_test, y_test_pred)
# A large train/test gap here indicates overfitting
print('The training accuracy is', train_accuracy)
print('The test accuracy is', test_accuracy)
# -
# # Exercise: Improving the model
#
# Ok, high training accuracy and a lower testing accuracy. We may be overfitting a bit.
#
# So now it's your turn to shine! Train a new model, and try to specify some parameters in order to improve the testing accuracy, such as:
# - `max_depth`
# - `min_samples_leaf`
# - `min_samples_split`
#
# You can use your intuition, trial and error, or even better, feel free to use Grid Search!
#
# **Challenge:** Try to get to 85% accuracy on the testing set. If you'd like a hint, take a look at the solutions notebook next.
# +
# TODO: Train the model
# min_samples_leaf=6 regularizes the tree: each leaf must cover at least
# six passengers, which curbs the overfitting observed above
model = DecisionTreeClassifier(min_samples_leaf=6)
model.fit(X_train, y_train)
# TODO: Make predictions
y_train_pred = model.predict(X_train)
y_test_pred = model.predict(X_test)
# TODO: Calculate the accuracy
train_accuracy = accuracy_score(y_train, y_train_pred)
test_accuracy = accuracy_score(y_test, y_test_pred)
print('The training accuracy is', train_accuracy)
print('The test accuracy is', test_accuracy)
# -
| projects/titanic_survival_exploration/titanic_survival_exploration.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/LucilleKaleha/Solution-for-the-Womxn-in-Big-Data-South-Africa-Competition/blob/master/Solution_for_the_Womxn_in_Big_Data_South_Africa_competition.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="fEGztEl3U2P5" colab_type="text"
# ## [<NAME> ](https://www.linkedin.com/in/lucillekaleha/): **Solution for the Womxn in Big Data South Africa competition**
#
#
# ---
# *Thanks to Zindi, Women in Big Data, HERE Technologies and Microsoft for this challenge and opportunity to improve livelihoods using Data Science*
#
# ### Challenges faced:
# - There was no correlation between local cross validation and the leaderboard, so it was challenging to know how good a model was and whether it was overfitting
# - It was very challenging to get new data from the recommended HERE and XYZ apis.
#
# ### Approach used:
# - Focused more on bulding models rather than feature engineering
# - As location was an important feature, i reverse geocoded the coordinates to get locations for each latitude and longitude using the reversegeocoding python library
# - As there was no single model that yielded good results, i opted to train several models so that they can cancel each others errors and generalize well
# - Because using all the data yielded unsatisfactory results, I opted to train each model with 70% of the data
# - To ensure that all the data has been used for training, i used different random states to split the data
# - Finally to generalise the ensembled models; I averaged, blended and retrained the models using the test data as training data and predictions as the target
#
# ### Some small caveats:
# - I realised that using different versions of catboost regressor yielded different results, so i maximised on this and used two versions of catboost.
# - At some point in the notebook you will have to restart the kernel.
# - Setting the random states(seed) did help for reproducability, but some models dont have the random state parameter, so there is some bias/randomness that cannot be accounted for. So predictions will differ by a small margin whenever you run the notebook.
#
#
#
# + id="WqAO45Wq7Y_g" colab_type="code" outputId="7776b683-e96f-4636-ac28-ebe7e592c0fa" colab={"base_uri": "https://localhost:8080/", "height": 791}
# Installing the necessary libraries
#
# !pip install vecstack # For stacking models
# !pip install catboost==0.20.2 # This version of catboost yielded better results with certain random states
# !pip install reverse_geocoder # Used to get location of a place, given coordinates
# + id="67UJh-5jkRL6" colab_type="code" colab={}
# Importing the necessary libraries
#
import pandas as pd
import numpy as np
import requests
from io import StringIO
import reverse_geocoder as rg
from sklearn.model_selection import train_test_split
from sklearn.svm import SVR, NuSVR
from sklearn.neighbors import KNeighborsRegressor
from xgboost import XGBRegressor, XGBRFRegressor
from sklearn.linear_model import LinearRegression, Ridge, Lasso, BayesianRidge
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import RandomForestRegressor, StackingRegressor,HistGradientBoostingRegressor, ExtraTreesRegressor
from sklearn.metrics import mean_squared_error
from lightgbm import LGBMRegressor
from vecstack import stacking
from vecstack import StackingTransformer
from catboost import CatBoostRegressor
import warnings
warnings.filterwarnings('ignore')
# + [markdown] id="QOSZr0a113UC" colab_type="text"
# ### Loading and cleaning data
# + id="wRMvzJcBfUKJ" colab_type="code" colab={}
# Created links to shared files via google drive
#
train = 'https://drive.google.com/file/d/13GpeDjiVR1aRHpkAZc7EeH_cKf52q7qE/view?usp=sharing'
test = 'https://drive.google.com/file/d/17JoUvCmpFXXFbgbZ9Ki3Xqh9qcl7UV8c/view?usp=sharing'
submission = 'https://drive.google.com/file/d/1GN1lSsLU43kQaZtThc4dP60mz8ztwDsL/view?usp=sharing'
dictionary = 'https://drive.google.com/file/d/1lAZnQFsBkPo8TNHYbq5mt2SpSMrG57WR/view?usp=sharing'
# Created a function to read a csv file shared via google and return a dataframe
#
def read_csv(url):
    """Download a Google-Drive-shared CSV and return it as a DataFrame."""
    # Rewrite the share link into a direct-download link using the embedded file id
    url = 'https://drive.google.com/uc?export=download&id=' + url.split('/')[-2]
    csv_raw = requests.get(url).text
    csv = StringIO(csv_raw)
    df = pd.read_csv(csv)
    return df
# Creating submission, training, testing and variable definition dataframes
# (note: the URL names are rebound to DataFrames here)
#
sub = read_csv(submission)
train = read_csv(train)
test = read_csv(test)
submission = read_csv(submission)
dictionary = read_csv(dictionary)
# Splitting the target variable from the train dataframe
#
target = train.target
# Aligning the training and testing datasets
# (inner join keeps only the columns present in both frames)
train, test = train.align(test, join = 'inner', axis = 1)
# Including a separator column to be used to split the dataframes after combining them
#
train['separator'] = 0
test['separator'] = 1
# Combining the test and train dataframes, so that feature engineering can be done on the go
#
comb = pd.concat([train, test])
# Separating the training and testing dataframes from the combined dataframe
#
train = comb[comb.separator == 0]
test = comb[comb.separator == 1]
# Dropping the separator column as it has served its purpose
#
train.drop('separator', axis = 1, inplace = True)
test.drop('separator', axis = 1, inplace = True)
train['target'] = target
# + [markdown] id="BQFw7hoL18qU" colab_type="text"
# ### Catboost Predictions
#
# + id="Q-QpiT7rlX49" colab_type="code" colab={}
# Splitting the data into training and testing dataframes
#
X = train.drop(['ward', 'ADM4_PCODE', 'target'], axis = 1) # Predictors
y = target # Target
tes = test.drop(['ward', 'ADM4_PCODE'], axis = 1) # Testing data
# Splitting the training dataset to 70%, and setting the random state to 90
#
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 90)
# Making predictions
# NOTE(review): only X_train (70% of the data) is used to fit, by design —
# see the approach notes at the top; X_test/y_test are intentionally unused here
predictions_cat = CatBoostRegressor(logging_level='Silent').fit(X_train, y_train).predict(tes)
# + [markdown] id="Ph8CUGE92Kmp" colab_type="text"
# ### Sklearn Stacking Regressor Predictions
# + id="J9aXdPYEe-YK" colab_type="code" colab={}
# Using two different stacked ensembles to make predictions using the sklearn stacking regressor
#
X = train.drop(['ward', 'ADM4_PCODE', 'target'], axis = 1)
y = target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 90)
tes = test.drop(['ward', 'ADM4_PCODE'], axis = 1)
# First ensemble: a broad mix of boosted-tree, linear, kernel and neighbour models
estimators_1 = [
    ('xgb', XGBRegressor(objective ='reg:squarederror')),
    ('lr', LinearRegression()),
    ('rf', RandomForestRegressor()),
    ('lgb', LGBMRegressor()),
    ('svr', SVR()),
    ('lasso', Lasso()),
    ('kneiba', KNeighborsRegressor()),
    ('cat', CatBoostRegressor(logging_level='Silent'))
]
# CatBoost acts as the meta-learner combining the base-model predictions
predictions_sreg = StackingRegressor(estimators=estimators_1, final_estimator=CatBoostRegressor(logging_level='Silent')).fit(X_train, y_train).predict(tes)
# Second ensemble: alternative implementations of similar model families
estimators_2 = [
    ('XBRF', XGBRFRegressor(objective ='reg:squarederror')),
    ('Bayesian', BayesianRidge()),
    ('ExtraTrees', ExtraTreesRegressor()),
    ('HistGradient', HistGradientBoostingRegressor()),
    ('NuSVR', NuSVR()),
    ('Ridge', Ridge()),
    ('KNeiba', KNeighborsRegressor()),
    ('cat', CatBoostRegressor(logging_level='Silent'))
]
predictions_sreg_2 = StackingRegressor(estimators=estimators_2, final_estimator=CatBoostRegressor(logging_level='Silent')).fit(X_train, y_train).predict(tes)
# + [markdown] id="aUYyZQn82PZF" colab_type="text"
# ### Vecstack Predictions
# + id="L74m8Rx1e-VN" colab_type="code" colab={}
# Using two different stacked ensembles to make predictions using the vecstack stacking regressor
#
X = train.drop(['ward', 'ADM4_PCODE', 'target'], axis = 1)
y = target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 90)
tes = test.drop(['ward', 'ADM4_PCODE'], axis = 1)
# Base learners for the first stack (same mix as the sklearn version above)
estimators_1 = [
    ('xgb', XGBRegressor(objective ='reg:squarederror')),
    ('lr', LinearRegression()),
    ('rf', RandomForestRegressor()),
    ('lgb', LGBMRegressor()),
    ('svr', SVR()),
    ('lasso', Lasso()),
    ('kneiba', KNeighborsRegressor()),
    ('cat', CatBoostRegressor(logging_level='Silent'))
]
# Fit the stacking transformer, train a CatBoost meta-model on the stacked
# base predictions, then predict on the transformed test set
stack = StackingTransformer(estimators_1, regression=True, verbose=0, metric =mean_squared_error, shuffle=True)
stack = stack.fit(X_train, y_train)
S_train = stack.transform(X_train)
final_estimator = CatBoostRegressor(logging_level='Silent')
final_estimator = final_estimator.fit(S_train, y_train)
S_tes = stack.transform(tes)
predictions_vecstack = final_estimator.predict(S_tes)
# Second stack with the alternative base-learner set
estimators_2 = [
    ('XBRF', XGBRFRegressor(objective ='reg:squarederror')),
    ('Bayesian', BayesianRidge()),
    ('ExtraTrees', ExtraTreesRegressor()),
    ('HistGradient', HistGradientBoostingRegressor()),
    ('NuSVR', NuSVR()),
    ('Ridge', Ridge()),
    ('KNeiba', KNeighborsRegressor()),
    ('cat', CatBoostRegressor(logging_level='Silent'))
]
stack = StackingTransformer(estimators_2, regression=True, verbose=0, metric =mean_squared_error, shuffle=True)
stack = stack.fit(X_train, y_train)
S_train = stack.transform(X_train)
final_estimator = CatBoostRegressor(logging_level='Silent')
final_estimator = final_estimator.fit(S_train, y_train)
S_tes = stack.transform(tes)
predictions_vecstack_2 = final_estimator.predict(S_tes)
# + [markdown] id="_Est6Jhsi6Oa" colab_type="text"
# ### Feature Engineering
# + id="tNKqShSb_agx" colab_type="code" colab={}
# Created links to shared files via google drive
# NOTE(review): this cell re-downloads and re-prepares the raw data,
# duplicating the loading cell above, so feature engineering starts from
# a clean combined frame
#
train = 'https://drive.google.com/file/d/13GpeDjiVR1aRHpkAZc7EeH_cKf52q7qE/view?usp=sharing'
test = 'https://drive.google.com/file/d/17JoUvCmpFXXFbgbZ9Ki3Xqh9qcl7UV8c/view?usp=sharing'
submission = 'https://drive.google.com/file/d/1GN1lSsLU43kQaZtThc4dP60mz8ztwDsL/view?usp=sharing'
dictionary = 'https://drive.google.com/file/d/1lAZnQFsBkPo8TNHYbq5mt2SpSMrG57WR/view?usp=sharing'
# Created a function to read a csv file shared via google and return a dataframe
#
def read_csv(url):
    """Download a Google-Drive-shared CSV and return it as a DataFrame."""
    url = 'https://drive.google.com/uc?export=download&id=' + url.split('/')[-2]
    csv_raw = requests.get(url).text
    csv = StringIO(csv_raw)
    df = pd.read_csv(csv)
    return df
# Creating submission, training, testing and variable definition dataframes
#
sub = read_csv(submission)
train = read_csv(train)
test = read_csv(test)
submission = read_csv(submission)
dictionary = read_csv(dictionary)
# Splitting the target variable from the train dataframe
#
target = train.target
# Aligning the training and testing datasets
train, test = train.align(test, join = 'inner', axis = 1)
# Including a separator column to be used to split the dataframes after combining them
#
train['separator'] = 0
test['separator'] = 1
# Combining the test and train dataframes, so that feature engineering can be done on the go
#
comb = pd.concat([train, test])
# + id="-WHQcFpI_aWc" colab_type="code" colab={}
# # Reverse geocoding coordinates to locations
# #
# name = []
# for i in range(len(comb)):
# location = rg.search([(x, y) for x, y in zip(comb.lat, comb.lon)][i])
# name.append(location[0].get('name'))
# # Adding the geocoded locations to the combined dataframe
# comb['name'] = name
# # Creating a csv file of the combined dataframe
# comb.to_csv('women_comb.csv')
# + id="26jJrCIJhd0w" colab_type="code" colab={}
# Loading the combined created csv
#
def read_csv(url):
    # NOTE: this redefines the earlier read_csv helper. Unlike that version it
    # returns a StringIO buffer rather than a DataFrame, so the caller can pass
    # extra arguments (e.g. index_col) to pd.read_csv itself.
    url = 'https://drive.google.com/uc?export=download&id=' + url.split('/')[-2]
    csv_raw = requests.get(url).text
    csv = StringIO(csv_raw)
    return csv
comb_link = 'https://drive.google.com/file/d/1lglzdXOnAlQntIYK-RdYJv8DtckV6xtm/view?usp=sharing'
# Re-load the pre-geocoded combined frame (built once by the commented-out
# reverse-geocoding cell above); column 0 holds the saved index.
comb = pd.read_csv(read_csv(comb_link), index_col = 0)
# Drop the extra admin-region columns added by the geocoder; only `name` is kept.
comb.drop(['admin1', 'admin2'], axis = 1, inplace = True)
# Creating a column of how many times a location is represented in the dataset
# (frequency encoding of the geocoded place name)
freq_cols = ['name']
for col in freq_cols:
    fq_encode = comb[col].value_counts().to_dict()
    comb[col+'_fq_enc'] = comb[col].map(fq_encode)
# One hot encoding the location column
#
comb = pd.get_dummies(comb, columns = ['name'], drop_first=True)
# + id="AFvrcOJRo024" colab_type="code" colab={}
# Generating more features
#
# Average number of people per household in each ward.
comb['Household_Size'] = comb['total_individuals']/comb['total_households']
# NOTE(review): if car_00 + car_01 is ever 0 this produces inf/NaN — presumably
# nonzero in this dataset; confirm before relying on it.
comb['psa_car1_car_2'] = comb.psa_00/(comb.car_00 + comb.car_01)
# Sum of absolute coordinates — a crude location-magnitude feature.
comb['latlon'] = abs(comb.lat) + abs(comb.lon)
# + id="ZwUa1wWbktmd" colab_type="code" colab={}
# Separating the train and test dataframes from the combined dataframe
#
# .copy() gives each split its own underlying data, so the in-place drops below
# operate on real frames rather than views of `comb` (avoids pandas'
# SettingWithCopyWarning and the risk of the drop silently not sticking).
train = comb[comb.separator == 0].copy()
test = comb[comb.separator == 1].copy()
train.drop('separator', axis = 1, inplace = True)
test.drop('separator', axis = 1, inplace = True)
# Re-attach the target (it was removed from `train` by the earlier align()).
train['target'] = target
# + id="5eee1e4hktft" colab_type="code" colab={}
# Training the data with the new features and making predictions
#
# Drop identifier columns (ward, ADM4_PCODE) and the target from the feature matrix.
X = train.drop(['ward', 'ADM4_PCODE', 'target'], axis = 1)
y = target
tes = test.drop(['ward', 'ADM4_PCODE'], axis = 1)
# Fit CatBoost on the full training set and predict on the test features.
predictions_feats = CatBoostRegressor(logging_level='Silent', random_state=101).fit(X, y).predict(tes)
# + [markdown] id="Rcm5vTJO1Kpb" colab_type="text"
# ### Averaging, Blending and Retraining
# + id="kPTGn86mktdP" colab_type="code" colab={}
# Averaging the two stacked predictions from sklearn and vecstack in the ratio of 9:1
#
predictions_vecstack = [x*0.9 + y*0.1 for x, y in zip(predictions_vecstack, predictions_vecstack_2)]
predictions_sreg = [x*0.9 + y*0.1 for x, y in zip(predictions_sreg, predictions_sreg_2)]
# Blending the two ensemble models and the catboost single model
#
stack = [x*0.3 + y*0.7 for x, y in zip(predictions_vecstack, predictions_sreg)]
stack_2 = [x*0.9 + y*0.1 for x, y in zip(stack, predictions_cat)]
stack_3 = [x*0.7 + y*0.3 for x, y in zip(stack_2, predictions_feats)]
# Retraining the models using the test data as training data and the predictions as the target
# (pseudo-labelling: the blended test predictions act as soft labels, and the
# re-fit models smooth the final output)
X = tes.copy()
y = stack_3
ridge = Ridge()
ridge.fit(X, y)
preds_ridge = ridge.predict(X)
cat = CatBoostRegressor(verbose = False)
cat.fit(X, y)
preds_cat = cat.predict(X)
# Blending the two trained models 50/50
#
blended_1 = [x*0.5 +y*0.5 for x, y in zip(preds_ridge, preds_cat)]
# Retraining the models using the same pseudo-labelling approach but with
# different blending weights, to produce a second, slightly different blend.
#
stack = [x*0.4 + y*0.6 for x, y in zip(predictions_vecstack, predictions_sreg)]
stack_2 = [x*0.8 + y*0.2 for x, y in zip(stack, predictions_cat)]
stack_3 = [x*0.65 + y*0.35 for x, y in zip(stack_2, predictions_feats)]
X = tes.copy()
y = stack_3
ridge = Ridge()
ridge.fit(X, y)
preds_ridge = ridge.predict(X)
cat = CatBoostRegressor(verbose = False)
cat.fit(X, y)
preds_cat = cat.predict(X)
blended_2 = [x*0.5 +y*0.5 for x, y in zip(preds_ridge, preds_cat)]
# Combine the two pseudo-label blends, weighted heavily toward the first.
blended_3 = [x*0.9 + y*0.1 for x, y in zip(blended_1, blended_2)]
# Further generalising the model by training using the simple Linear regression model
# Complementing it with the catboost model
#
X = tes.copy()
y = blended_3
linear = LinearRegression()
linear.fit(X, y)
preds_linear = linear.predict(X)
cat = CatBoostRegressor(verbose = False)
cat.fit(X, y)
preds_cat = cat.predict(X)
# Blending the two model predictions (weights: 10% linear, 10% catboost, 80% previous blend)
# Creating a predictions file to be used in the next step, as you will have to restart the kernel
#
final_blend_1 = [x*0.1 + y*0.1 + z*0.8 for x, y, z in zip(preds_linear, preds_cat, blended_3)]
sub_df = pd.DataFrame({'ward': test.ward, 'target': final_blend_1})
sub_df.to_csv('final_blend_1.csv', index = False)
# + [markdown] id="X5AZ9X5z3VPT" colab_type="text"
# ### More Ensembles for further regularisation
# ### Train using latest version of catboost
# ### **Restart kernel after installing the latest version of catboost**
# ### *Run the notebook from the cell below after upgrading catboost*
# + id="-9CsLMVFktT7" colab_type="code" outputId="10523b86-db01-47f0-ca05-d86978eee1dc" colab={"base_uri": "https://localhost:8080/", "height": 492}
# Restart kernel after upgrading catboost and run notebook from this cell
# !pip install catboost --upgrade
# + id="DJUBIGdTl_HE" colab_type="code" outputId="6e059efe-9c63-4c38-9a10-796738efcea9" colab={"base_uri": "https://localhost:8080/", "height": 132}
# Restart kernel, and run from below
# + id="44p4hbTp4_NQ" colab_type="code" colab={}
# Importing the necessary libraries
#
import pandas as pd
import numpy as np
import requests
from io import StringIO
import reverse_geocoder as rg
from sklearn.model_selection import train_test_split
from sklearn.svm import SVR, NuSVR
from sklearn.neighbors import KNeighborsRegressor
from xgboost import XGBRegressor, XGBRFRegressor
from sklearn.linear_model import LinearRegression, Ridge, Lasso, BayesianRidge
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import RandomForestRegressor, StackingRegressor,HistGradientBoostingRegressor, ExtraTreesRegressor
from sklearn.metrics import mean_squared_error
from lightgbm import LGBMRegressor
from vecstack import stacking
from vecstack import StackingTransformer
from catboost import CatBoostRegressor
import warnings
warnings.filterwarnings('ignore')
# + id="C89U6IRdktPp" colab_type="code" colab={}
# Google Drive share links for the competition files (same data as earlier in
# the notebook, re-declared because the kernel was restarted).
train = 'https://drive.google.com/file/d/13GpeDjiVR1aRHpkAZc7EeH_cKf52q7qE/view?usp=sharing'
test = 'https://drive.google.com/file/d/17JoUvCmpFXXFbgbZ9Ki3Xqh9qcl7UV8c/view?usp=sharing'
submission = 'https://drive.google.com/file/d/1GN1lSsLU43kQaZtThc4dP60mz8ztwDsL/view?usp=sharing'
dictionary = 'https://drive.google.com/file/d/1lAZnQFsBkPo8TNHYbq5mt2SpSMrG57WR/view?usp=sharing'
# Creating a function to read a csv file shared via google
#
def read_csv(url):
    """Download a CSV shared via a Google Drive link and return it as a DataFrame.

    Parameters
    ----------
    url : str
        Google Drive share link of the form .../file/d/<file_id>/view?...

    Returns
    -------
    pandas.DataFrame
    """
    # Rewrite the share link into a direct-download URL using the file id.
    url = 'https://drive.google.com/uc?export=download&id=' + url.split('/')[-2]
    response = requests.get(url)
    # Fail loudly on HTTP errors instead of silently parsing an HTML error page.
    response.raise_for_status()
    return pd.read_csv(StringIO(response.text))
# Creating submission and training datataframes
#
# Re-create the submission, train and test dataframes after the kernel restart.
sub = read_csv(submission)
train = read_csv(train)
test = read_csv(test)
submission = read_csv(submission)
dictionary = read_csv(dictionary)
# Extract the target before align() drops it (inner join keeps shared columns only).
target = train.target
# Aligning the training and testing datasets
train, test = train.align(test, join = 'inner', axis = 1)
# Marker column: 0 = training row, 1 = test row.
train['separator'] = 0
test['separator'] = 1
comb = pd.concat([train, test])
# .copy() gives each split its own data so the in-place drops below operate on
# real frames rather than views of `comb` (avoids SettingWithCopyWarning).
train = comb[comb.separator == 0].copy()
test = comb[comb.separator == 1].copy()
train.drop('separator', axis = 1, inplace = True)
test.drop('separator', axis = 1, inplace = True)
train['target'] = target
# Blend produced before the restart, saved to disk by the previous section.
final_blend_1 = pd.read_csv('final_blend_1.csv')
# + id="srptKPtI3pfP" colab_type="code" colab={}
# Training models using different random states and the latest catboost
#
X = train.drop(['ward', 'ADM4_PCODE', 'target'], axis = 1)
y = target
tes = test.drop(['ward', 'ADM4_PCODE'], axis = 1)
# Hold out 30%; only the 70% split is used for fitting (the holdout is unused here).
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 29)
predictions_cat_29 = CatBoostRegressor(logging_level='Silent').fit(X_train, y_train).predict(tes)
# + id="9KNf-sMf3pdu" colab_type="code" colab={}
# Same as before with the only difference being the random state
# Using different random states will ensure that all the data has been used in building the model
#
X = train.drop(['ward', 'ADM4_PCODE', 'target'], axis = 1)
y = target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 65)
tes = test.drop(['ward', 'ADM4_PCODE'], axis = 1)
# Base estimators for the stacking ensemble; their out-of-fold predictions feed
# the CatBoost final estimator below.
estimators_1 = [
               ('xgb', XGBRegressor(objective ='reg:squarederror')),
               ('lr', LinearRegression()),
               ('rf', RandomForestRegressor()),
               ('lgb', LGBMRegressor()),
               ('svr', SVR()),
               ('lasso', Lasso()),
               ('kneiba', KNeighborsRegressor()),
               ('cat', CatBoostRegressor(logging_level='Silent'))
]
predictions_sreg_65 = StackingRegressor(estimators=estimators_1, final_estimator=CatBoostRegressor(logging_level='Silent')).fit(X_train, y_train).predict(tes)
# + id="SkuuEvQ63pb1" colab_type="code" colab={}
# Third run: identical stacking ensemble, different random split (random_state=27).
X = train.drop(['ward', 'ADM4_PCODE', 'target'], axis = 1)
y = target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 27)
tes = test.drop(['ward', 'ADM4_PCODE'], axis = 1)
estimators_1 = [
               ('xgb', XGBRegressor(objective ='reg:squarederror')),
               ('lr', LinearRegression()),
               ('rf', RandomForestRegressor()),
               ('lgb', LGBMRegressor()),
               ('svr', SVR()),
               ('lasso', Lasso()),
               ('kneiba', KNeighborsRegressor()),
               ('cat', CatBoostRegressor(logging_level='Silent'))
]
predictions_sreg_27 = StackingRegressor(estimators=estimators_1, final_estimator=CatBoostRegressor(logging_level='Silent')).fit(X_train, y_train).predict(tes)
# + id="1Mf4Neos3pYs" colab_type="code" colab={}
# Further averaging, blending and retraining to generalise well
#
# Average the two stacking runs, then average with the plain CatBoost run.
stack = [x*0.5 + y*0.5 for x, y in zip(predictions_sreg_65, predictions_sreg_27)]
stack_2 = [x*0.5 + y*0.5 for x, y in zip(stack, predictions_cat_29)]
# Pseudo-labelling again: re-fit Ridge and CatBoost on the test features using
# the blended predictions as soft targets, then average their outputs.
X = tes.copy()
y = stack_2
ridge = Ridge()
ridge.fit(X, y)
preds_ridge = ridge.predict(X)
cat = CatBoostRegressor(verbose = False)
cat.fit(X, y)
preds_cat = cat.predict(X)
final_blend_2 = [x*0.5 +y*0.5 for x, y in zip(preds_ridge, preds_cat)]
# + id="dYWARbid3pVl" colab_type="code" colab={}
# Making the final prediction
# (50/50 blend of the pre-restart blend loaded from disk and the post-restart blend)
final_blend_3 = [x*0.5 + y*0.5 for x, y in zip(final_blend_1.target, final_blend_2)]
sub_df = pd.DataFrame({'ward': test.ward, 'target': final_blend_3})
sub_df.to_csv('final_submission.csv', index = False)
| Solution_for_the_Womxn_in_Big_Data_South_Africa_competition.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: dev
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + jupyter={"outputs_hidden": true}
import numpy as np
import pandas as pd
from datetime import datetime, timedelta
import matplotlib.pyplot as plt
# %matplotlib inline
# -
# Fix the RNG seed so the Monte Carlo simulations below are reproducible.
np.random.seed(42)
# # Portfolio Planner
#
# In this activity, you will use the iexfinance api to grab historical data for a 60/40 portfolio using `SPY` to represent the stock portion and `AGG` to represent the bonds.
# + jupyter={"outputs_hidden": true}
from iexfinance.stocks import get_historical_data
import iexfinance as iex
# -
# Reference: http://finance.yahoo.com/q?s=AAPL
# # Data Collection
#
# In this step, you will need to use the IEX api to fetch closing prices for the `SPY` and `AGG` tickers. Save the results as a pandas DataFrame
# + jupyter={"outputs_hidden": true}
# Tickers for the 60/40 portfolio: SPY (stock portion) and AGG (bond portion).
list_of_tickers = ["SPY", "AGG"]
# YOUR CODE HERE
# -
# # Monte Carlo Simulation
#
# In this step, you will run Monte Carlo Simulations for your portfolio to model portfolio performance at different retirement ages.
#
# Complete the following steps:
# 1. Calculate the daily returns for the SPY and AGG closing prices.
# 2. Calculate volatility for both the SPY and AGG closing prices.
# 3. Find the last day's closing price for both stocks and save those as variables.
# 4. Run a Monte Carlo Simulation of at least 500 iterations and generate at least 30 years of closing prices
#
# ### HINTS:
# There are 252 trading days per year, so the number of records to generate for each Monte Carlo run will be 252 days * 30 years
# +
# Calculate the daily roi for the stocks
# YOUR CODE HERE
# + jupyter={"outputs_hidden": true}
# Calculate volatility
# YOUR CODE HERE
# + jupyter={"outputs_hidden": true}
# Save the last day's closing price
# YOUR CODE HERE
# + jupyter={"outputs_hidden": true}
# Setup the Monte Carlo Parameters
# Setup the Monte Carlo Parameters:
# 500 simulated paths, each spanning 30 years of ~252 trading days.
number_simulations = 500
number_records = 252 * 30
# Each completed simulation run is stored as a column of this frame.
monte_carlo = pd.DataFrame()
# -
# Run the Monte Carlo Simulation
# One iteration per simulated 30-year price path.
for x in range(number_simulations):
    # YOUR CODE HERE
    pass  # placeholder: a comment-only body is a SyntaxError, so the starter cell would not run
# +
# Visualize the Simulation
# YOUR CODE HERE
# +
# Select the last row for the cumulative returns (cumulative returns at 30 years)
# YOUR CODE HERE
# +
# Select the last row for the cumulative returns (cumulative returns at 20 years)
# YOUR CODE HERE
# +
# Display the 90% confidence interval for the ending returns
# YOUR CODE HERE
# +
# Visualize the distribution of the ending returns
# YOUR CODE HERE
# -
# ---
# # Retirement Analysis
#
# In this section, you will use the monte carlo model to answer the following retirement planning questions:
#
# 1. What are the expected cumulative returns at 30 years for the 10th, 50th, and 90th percentiles?
# 2. Given an initial investment of `$20,000`, what is the expected portfolio return in dollars at the 10th, 50th, and 90th percentiles?
# 3. Given the current projected annual income from the Plaid analysis, will a 4% withdraw rate from the retirement portfolio meet or exceed that value at the 10th percentile?
# 4. How would a 50% increase in the initial investment amount affect the 4% retirement withdrawal?
# ### What are the expected cumulative returns at 30 years for the 10th, 50th, and 90th percentiles?
# +
# YOUR CODE HERE
# -
# ### Given an initial investment of `$20,000`, what is the expected portfolio return in dollars at the 10th, 50th, and 90th percentiles?
# +
# YOUR CODE HERE
# -
# ### Given the current projected annual income from the Plaid analysis, will a 4% withdraw rate from the retirement portfolio meet or exceed that value at the 10th percentile?
#
# Note: This is effectively saying that 90% of the expected returns will be greater than the return at the 10th percentile, so this can help measure the uncertainty about having enough funds at retirement
# +
# YOUR CODE HERE
# -
# ### How would a 50% increase in the initial investment amount affect the 4% retirement withdrawal?
# +
# YOUR CODE HERE
# -
# ### Optional Challenge
#
# In this section, you will calculate and plot the cumulative returns for the median and 90% confidence intervals. This plot shows the expected cumulative returns for any given day between the first day and the last day of investment.
# +
# YOUR CODE HERE
| Instructions/Starter_Code/portfolio_planner.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Keras tutorial - the Happy House
#
# Welcome to the first assignment of week 2. In this assignment, you will:
# 1. Learn to use Keras, a high-level neural networks API (programming framework), written in Python and capable of running on top of several lower-level frameworks including TensorFlow and CNTK.
# 2. See how you can in a couple of hours build a deep learning algorithm.
#
# Why are we using Keras? Keras was developed to enable deep learning engineers to build and experiment with different models very quickly. Just as TensorFlow is a higher-level framework than Python, Keras is an even higher-level framework and provides additional abstractions. Being able to go from idea to result with the least possible delay is key to finding good models. However, Keras is more restrictive than the lower-level frameworks, so there are some very complex models that you can implement in TensorFlow but not (without more difficulty) in Keras. That being said, Keras will work fine for many common models.
#
# In this exercise, you'll work on the "Happy House" problem, which we'll explain below. Let's load the required packages and solve the problem of the Happy House!
# +
import numpy as np
from keras import layers
from keras.layers import Input, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D
from keras.layers import AveragePooling2D, MaxPooling2D, Dropout, GlobalMaxPooling2D, GlobalAveragePooling2D
from keras.models import Model
from keras.preprocessing import image
from keras.utils import layer_utils
from keras.utils.data_utils import get_file
from keras.applications.imagenet_utils import preprocess_input
import pydot
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
from keras.utils import plot_model
from kt_utils import *
import keras.backend as K
K.set_image_data_format('channels_last')
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
# %matplotlib inline
# -
# **Note**: As you can see, we've imported a lot of functions from Keras. You can use them easily just by calling them directly in the notebook. Ex: `X = Input(...)` or `X = ZeroPadding2D(...)`.
# ## 1 - The Happy House
#
# For your next vacation, you decided to spend a week with five of your friends from school. It is a very convenient house with many things to do nearby. But the most important benefit is that everybody has commited to be happy when they are in the house. So anyone wanting to enter the house must prove their current state of happiness.
#
# <img src="images/happy-house.jpg" style="width:350px;height:270px;">
# <caption><center> <u> <font color='purple'> **Figure 1** </u><font color='purple'> : **the Happy House**</center></caption>
#
#
# As a deep learning expert, to make sure the "Happy" rule is strictly applied, you are going to build an algorithm which that uses pictures from the front door camera to check if the person is happy or not. The door should open only if the person is happy.
#
# You have gathered pictures of your friends and yourself, taken by the front-door camera. The dataset is labbeled.
#
# <img src="images/house-members.png" style="width:550px;height:250px;">
#
# Run the following code to normalize the dataset and learn about its shapes.
# +
#load_dataset()
# +
# Load the raw Happy House arrays (helper from kt_utils).
X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()
# Normalize image vectors: scale pixel values to [0, 1].
X_train = X_train_orig/255.
X_test = X_test_orig/255.
# Reshape: transpose labels so each row is one example's label.
Y_train = Y_train_orig.T
Y_test = Y_test_orig.T
print ("number of training examples = " + str(X_train.shape[0]))
print ("number of test examples = " + str(X_test.shape[0]))
print ("X_train shape: " + str(X_train.shape))
print ("Y_train shape: " + str(Y_train.shape))
print ("X_test shape: " + str(X_test.shape))
print ("Y_test shape: " + str(Y_test.shape))
# -
# **Details of the "Happy" dataset**:
# - Images are of shape (64,64,3)
# - Training: 600 pictures
# - Test: 150 pictures
#
# It is now time to solve the "Happy" Challenge.
# ## 2 - Building a model in Keras
#
# Keras is very good for rapid prototyping. In just a short time you will be able to build a model that achieves outstanding results.
#
# Here is an example of a model in Keras:
#
# ```python
# def model(input_shape):
# # Define the input placeholder as a tensor with shape input_shape. Think of this as your input image!
# X_input = Input(input_shape)
#
# # Zero-Padding: pads the border of X_input with zeroes
# X = ZeroPadding2D((3, 3))(X_input)
#
# # CONV -> BN -> RELU Block applied to X
# X = Conv2D(32, (7, 7), strides = (1, 1), name = 'conv0')(X)
# X = BatchNormalization(axis = 3, name = 'bn0')(X)
# X = Activation('relu')(X)
#
# # MAXPOOL
# X = MaxPooling2D((2, 2), name='max_pool')(X)
#
# # FLATTEN X (means convert it to a vector) + FULLYCONNECTED
# X = Flatten()(X)
# X = Dense(1, activation='sigmoid', name='fc')(X)
#
# # Create model. This creates your Keras model instance, you'll use this instance to train/test the model.
# model = Model(inputs = X_input, outputs = X, name='HappyModel')
#
# return model
# ```
#
# Note that Keras uses a different convention with variable names than we've previously used with numpy and TensorFlow. In particular, rather than creating and assigning a new variable on each step of forward propagation such as `X`, `Z1`, `A1`, `Z2`, `A2`, etc. for the computations for the different layers, in Keras code each line above just reassigns `X` to a new value using `X = ...`. In other words, during each step of forward propagation, we are just writing the latest value in the commputation into the same variable `X`. The only exception was `X_input`, which we kept separate and did not overwrite, since we needed it at the end to create the Keras model instance (`model = Model(inputs = X_input, ...)` above).
#
# **Exercise**: Implement a `HappyModel()`. This assignment is more open-ended than most. We suggest that you start by implementing a model using the architecture we suggest, and run through the rest of this assignment using that as your initial model. But after that, come back and take initiative to try out other model architectures. For example, you might take inspiration from the model above, but then vary the network architecture and hyperparameters however you wish. You can also use other functions such as `AveragePooling2D()`, `GlobalMaxPooling2D()`, `Dropout()`.
#
# **Note**: You have to be careful with your data's shapes. Use what you've learned in the videos to make sure your convolutional, pooling and fully-connected layers are adapted to the volumes you're applying it to.
# +
# GRADED FUNCTION: HappyModel
def HappyModel(input_shape):
    """
    Implementation of the HappyModel.

    Arguments:
    input_shape -- shape of one dataset image, e.g. (64, 64, 3), excluding the batch dimension

    Returns:
    model -- a Model() instance in Keras
    """
    ### START CODE HERE ###
    # Input placeholder for one image.
    inputs = Input(input_shape)

    # ZEROPAD -> CONV -> BATCHNORM -> RELU block.
    net = ZeroPadding2D((3, 3))(inputs)
    net = Conv2D(32, (7, 7), strides=(1, 1), name='Conv0')(net)
    net = BatchNormalization(axis=3, name='BatchNorm0')(net)
    net = Activation('relu')(net)

    # MAXPOOL halves the spatial dimensions.
    net = MaxPooling2D((2, 2), name='max_pool')(net)

    # FLATTEN + single sigmoid unit for binary (happy / not happy) classification.
    net = Flatten()(net)
    net = Dense(1, activation='sigmoid', name='fc1')(net)

    # Build the Keras model instance used for training and evaluation.
    model = Model(inputs=inputs, outputs=net, name='HappyModel')
    ### END CODE HERE ###

    return model
# -
# You have now built a function to describe your model. To train and test this model, there are four steps in Keras:
# 1. Create the model by calling the function above
# 2. Compile the model by calling `model.compile(optimizer = "...", loss = "...", metrics = ["accuracy"])`
# 3. Train the model on train data by calling `model.fit(x = ..., y = ..., epochs = ..., batch_size = ...)`
# 4. Test the model on test data by calling `model.evaluate(x = ..., y = ...)`
#
# If you want to know more about `model.compile()`, `model.fit()`, `model.evaluate()` and their arguments, refer to the official [Keras documentation](https://keras.io/models/model/).
#
# **Exercise**: Implement step 1, i.e. create the model.
### START CODE HERE ### (1 line)
# Step 1: build the model from one example's shape (drop the batch dimension).
happyModel = HappyModel(X_train.shape[1:])
### END CODE HERE ###
# **Exercise**: Implement step 2, i.e. compile the model to configure the learning process. Choose the 3 arguments of `compile()` wisely. Hint: the Happy Challenge is a binary classification problem.
### START CODE HERE ### (1 line)
# Step 2: binary classification -> binary cross-entropy loss with the Adam optimizer.
happyModel.compile('adam', 'binary_crossentropy', metrics=['accuracy'])
### END CODE HERE ###
# **Exercise**: Implement step 3, i.e. train the model. Choose the number of epochs and the batch size.
### START CODE HERE ### (1 line)
# Step 3: train for 40 epochs with mini-batches of 50 examples.
happyModel.fit(X_train, Y_train, epochs=40, batch_size=50)
### END CODE HERE ###
# Note that if you run `fit()` again, the `model` will continue to train with the parameters it has already learnt instead of reinitializing them.
#
# **Exercise**: Implement step 4, i.e. test/evaluate the model.
### START CODE HERE ### (1 line)
# Step 4: evaluate on the held-out test set; returns [loss, accuracy].
preds = happyModel.evaluate(X_test, Y_test, batch_size=32, verbose=1, sample_weight=None)
### END CODE HERE ###
print()
print ("Loss = " + str(preds[0]))
print ("Test Accuracy = " + str(preds[1]))
# If your `happyModel()` function worked, you should have observed much better than random-guessing (50%) accuracy on the train and test sets.
#
# To give you a point of comparison, our model gets around **95% test accuracy in 40 epochs** (and 99% train accuracy) with a mini batch size of 16 and "adam" optimizer. But our model gets decent accuracy after just 2-5 epochs, so if you're comparing different models you can also train a variety of models on just a few epochs and see how they compare.
#
# If you have not yet achieved a very good accuracy (let's say more than 80%), here're some things you can play around with to try to achieve it:
#
# - Try using blocks of CONV->BATCHNORM->RELU such as:
# ```python
# X = Conv2D(32, (3, 3), strides = (1, 1), name = 'conv0')(X)
# X = BatchNormalization(axis = 3, name = 'bn0')(X)
# X = Activation('relu')(X)
# ```
# until your height and width dimensions are quite low and your number of channels quite large (≈32 for example). You are encoding useful information in a volume with a lot of channels. You can then flatten the volume and use a fully-connected layer.
# - You can use MAXPOOL after such blocks. It will help you lower the dimension in height and width.
# - Change your optimizer. We find Adam works well.
# - If the model is struggling to run and you get memory issues, lower your batch_size (12 is usually a good compromise)
# - Run on more epochs, until you see the train accuracy plateauing.
#
# Even if you have achieved a good accuracy, please feel free to keep playing with your model to try to get even better results.
#
# **Note**: If you perform hyperparameter tuning on your model, the test set actually becomes a dev set, and your model might end up overfitting to the test (dev) set. But just for the purpose of this assignment, we won't worry about that here.
#
# ## 3 - Conclusion
#
# Congratulations, you have solved the Happy House challenge!
#
# Now, you just need to link this model to the front-door camera of your house. We unfortunately won't go into the details of how to do that here.
# <font color='blue'>
# **What we would like you to remember from this assignment:**
# - Keras is a tool we recommend for rapid prototyping. It allows you to quickly try out different model architectures. Are there any applications of deep learning to your daily life that you'd like to implement using Keras?
# - Remember how to code a model in Keras and the four steps leading to the evaluation of your model on the test set. Create->Compile->Fit/Train->Evaluate/Test.
# ## 4 - Test with your own image (Optional)
#
# Congratulations on finishing this assignment. You can now take a picture of your face and see if you could enter the Happy House. To do that:
# 1. Click on "File" in the upper bar of this notebook, then click "Open" to go on your Coursera Hub.
# 2. Add your image to this Jupyter Notebook's directory, in the "images" folder
# 3. Write your image's name in the following code
# 4. Run the code and check if the algorithm is right (0 is unhappy, 1 is happy)!
#
# The training/test sets were quite similar; for example, all the pictures were taken against the same background (since a front door camera is always mounted in the same position). This makes the problem easier, but a model trained on this data may or may not work on your own data. But feel free to give it a try!
# +
### START CODE HERE ###
img_path = 'images/my_image.jpg'
### END CODE HERE ###
# Load and resize to the model's 64x64 input resolution.
img = image.load_img(img_path, target_size=(64, 64))
imshow(img)
x = image.img_to_array(img)
# Add a batch dimension: (64, 64, 3) -> (1, 64, 64, 3).
x = np.expand_dims(x, axis=0)
# NOTE(review): training images were scaled by /255., while this path applies
# ImageNet-style preprocess_input — the mismatch may degrade predictions; confirm.
x = preprocess_input(x)
# 0 = unhappy, 1 = happy.
print(happyModel.predict(x))
# -
# ## 5 - Other useful functions in Keras (Optional)
#
# Two other basic features of Keras that you'll find useful are:
# - `model.summary()`: prints the details of your layers in a table with the sizes of its inputs/outputs
# - `plot_model()`: plots your graph in a nice layout. You can even save it as ".png" using SVG() if you'd like to share it on social media ;). It is saved in "File" then "Open..." in the upper bar of the notebook.
#
# Run the following code.
# Print a per-layer table of output shapes and parameter counts.
happyModel.summary()
# Save an architecture diagram to disk and render it inline as SVG.
plot_model(happyModel, to_file='HappyModel.png')
SVG(model_to_dot(happyModel).create(prog='dot', format='svg'))
| Keras Tutorial Happy House.ipynb |
# ---
# jupyter:
# jupytext:
# split_at_heading: true
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#export
import tempfile
from fastai2.basics import *
from fastai2.learner import Callback
from nbdev.showdoc import *
# +
#all_slow
# +
#default_exp callback.captum
# -
# # Captum
# Captum is the Model Interpretation Library from PyTorch as available [here](https://captum.ai)
#
# To use this we need to install the package using
#
# `conda install captum -c pytorch`
#
# or
#
# `pip install captum`
#
# This is a Call back to use Captum.
# +
# export
# Dirty hack as json_clean doesn't support CategoryMap type
from ipykernel import jsonutil
# Keep a reference to the original cleaner so the wrapper can delegate to it.
_json_clean=jsonutil.json_clean
def json_clean(o):
    # Unwrap fastai CategoryMap objects into a plain list before JSON-cleaning;
    # anything else passes through to the original implementation untouched.
    o = list(o.items) if isinstance(o,CategoryMap) else o
    return _json_clean(o)
# Monkey-patch ipykernel so widget payloads containing a CategoryMap serialise.
jsonutil.json_clean = json_clean
# +
#export
from captum.attr import IntegratedGradients,NoiseTunnel,GradientShap,Occlusion
from captum.attr import visualization as viz
from matplotlib.colors import LinearSegmentedColormap
from captum.insights import AttributionVisualizer, Batch
from captum.insights.features import ImageFeature
# -
# In all this notebook, we will use the following data:
from fastai2.vision.all import *
# Download (if needed) and extract the Oxford-IIIT Pets images; list all image files.
path = untar_data(URLs.PETS)/'images'
fnames = get_image_files(path)
def is_cat(x):
    "Label function: in the Pets dataset, cat images have filenames starting with an uppercase letter."
    first_char = x[0]
    return first_char.isupper()
# Build DataLoaders: 20% validation split (seeded), labels from is_cat, items resized to 128px.
dls = ImageDataLoaders.from_name_func(
    path, fnames, valid_pct=0.2, seed=42,
    label_func=is_cat, item_tfms=Resize(128))
from random import randint
# # Gradient Based Attribution
# ## Integrated Gradients Callback
# The Distill Article [here](https://distill.pub/2020/attribution-baselines/) provides a good overview of what baseline image to choose. We can try them one by one.
#export
class IntegratedGradientsCallback(Callback):
    "Integrated Gradient Captum Callback for Resnet Interpretation"
    def __init__(self):
        pass

    def visualize(self, inp, baseline_type='zeros', n_steps=1000, cmap_name='custom blue', colors=None, N=256, methods=['original_image','heat_map'], signs=["all", "positive"], outlier_perc=1):
        """Plot Integrated Gradients attributions for a single raw input item.

        inp           -- a raw item (e.g. an image file path) understood by the Learner's transforms
        baseline_type -- 'zeros' (all-black image) or 'uniform' (random noise) IG baseline
        n_steps       -- number of interpolation steps for the IG path integral
        The remaining arguments are forwarded to Captum's visualisation helper.
        """
        # Run only the item-level transforms to build a decoded (input, label) pair.
        tls = L([TfmdLists(inp, t) for t in L(ifnone(self.dl.tfms, [None]))])
        inp_data = list(zip(*(tls[0], tls[1])))[0]
        return self._visualize(inp_data, n_steps, cmap_name, colors, N, methods, signs, outlier_perc, baseline_type)

    def get_baseline_img(self, img_tensor, baseline_type):
        "Build the IG baseline: an all-zero image or uniform random noise of the same shape."
        if baseline_type == 'zeros': return img_tensor * 0
        if baseline_type == 'uniform': return torch.rand(img_tensor.shape)

    def _visualize(self, inp_data, n_steps=200, cmap_name='custom blue', colors=None, N=256, methods=['original_image','heat_map'], signs=["all", "positive"], outlier_perc=1, baseline_type='zeros'):
        # Lazily create (and cache) the Captum IntegratedGradients wrapper around the model.
        self._integrated_gradients = self._integrated_gradients if hasattr(self, '_integrated_gradients') else IntegratedGradients(self.model)
        dl = self.dls
        dec_data = dl.after_item(inp_data)
        dec_pred = inp_data[1]
        dec_img = dec_data[0]
        # Apply the batch-level transforms on-device to get the encoded input/target.
        enc_inp, enc_preds = dl.after_batch(to_device(dl.before_batch(dec_data), dl.device))
        baseline = self.get_baseline_img(enc_inp, baseline_type).to(dl.device)
        colors = [(0, '#ffffff'), (0.25, '#000000'), (1, '#000000')] if colors is None else colors
        # BUG FIX: n_steps was previously hard-coded to 200 here, silently ignoring
        # the caller-supplied value (visualize() advertises a default of 1000).
        attributions_ig = self._integrated_gradients.attribute(enc_inp, baseline, target=enc_preds, n_steps=n_steps)
        default_cmap = LinearSegmentedColormap.from_list(cmap_name, colors, N=N)
        _ = viz.visualize_image_attr_multiple(np.transpose(attributions_ig.squeeze().cpu().detach().numpy(), (1,2,0)),
                                              np.transpose(dec_img.numpy(), (1,2,0)),
                                              methods=methods,
                                              cmap=default_cmap,
                                              show_colorbar=True,
                                              signs=signs,
                                              outlier_perc=outlier_perc, titles=[f'Original Image - ({dec_pred})', 'IG'])
# Train a resnet34 with the Integrated Gradients callback attached.
learn = cnn_learner(dls, resnet34, metrics=error_rate, cbs=IntegratedGradientsCallback())
learn.fine_tune(1)
# Pick a random image to explain. random.randint is inclusive on BOTH ends, so
# the upper bound must be len(fnames) - 1 to avoid an occasional IndexError.
idx = randint(0, len(fnames) - 1)
learn.integrated_gradients.visualize(fnames[idx], baseline_type='uniform')
# ## Noise Tunnel
#export
class NoiseTunnelCallback(Callback):
    "Captum Callback for Resnet Interpretation"
    def __init__(self):
        pass
    def after_fit(self):
        # Build the attribution objects once, after training finishes.
        self.integrated_gradients = IntegratedGradients(self.model)
        self._noise_tunnel= NoiseTunnel(self.integrated_gradients)
    def visualize(self,inp_data,cmap_name='custom blue',colors=None,N=256,methods=['original_image','heat_map'],signs=["all", "positive"],nt_type='smoothgrad'):
        "Plot NoiseTunnel-smoothed IG attributions for a single item `inp_data`."
        dl = self.dls.test_dl(L(inp_data),with_labels=True, bs=1)
        self.enc_inp,self.enc_preds= dl.one_batch()
        dec_data=dl.decode((self.enc_inp,self.enc_preds))
        self.dec_img,self.dec_pred=dec_data[0][0],dec_data[1][0]
        self.colors = [(0, '#ffffff'),(0.25, '#000000'),(1, '#000000')] if colors is None else colors
        # BUGFIX: use the local test dl's device. `self.dl` is a training-loop attribute
        # (the sibling IG callback also reads the device off its own dl).
        attributions_ig_nt = self._noise_tunnel.attribute(self.enc_inp.to(dl.device), n_samples=1, nt_type=nt_type, target=self.enc_preds)
        default_cmap = LinearSegmentedColormap.from_list(cmap_name,
                                                         self.colors, N=N)
        _ = viz.visualize_image_attr_multiple(np.transpose(attributions_ig_nt.squeeze().cpu().detach().numpy(), (1,2,0)),
                                              np.transpose(self.dec_img.numpy(), (1,2,0)),
                                              methods,signs,
                                              cmap=default_cmap,
                                              show_colorbar=True,titles=[f'Original Image - ({self.dec_pred})', 'Noise Tunnel'])
# Train briefly with the NoiseTunnel callback attached, then visualize one random file.
learn = cnn_learner(dls, resnet34, metrics=error_rate,cbs=NoiseTunnelCallback())
learn.fine_tune(1)
# NOTE(review): random.randint is inclusive — len(fnames) would index out of range; confirm.
idx=randint(0,len(fnames))
learn.noise_tunnel.visualize(fnames[idx], nt_type='smoothgrad')
# # Occlusion
#export
class OcclusionCallback(Callback):
    "Captum Callback for Resnet Interpretation"
    def __init__(self):
        pass
    def after_fit(self):
        # Wrap the trained model once; reused by every later `visualize` call.
        self._occlusion = Occlusion(self.model)
    def _formatted_data_iter(self,dl):
        "Return the first batch of `dl` with normalization undone, moved to `dl.device`."
        normalize_func= next((func for func in dl.after_batch if type(func)==Normalize),noop)
        # The old `while True: ... return` loop only ever ran one iteration; take the batch directly.
        images,labels=next(iter(dl))
        images=normalize_func.decode(images).to(dl.device)
        return images,labels
    def visualize(self,inp_data,cmap_name='custom blue',colors=None,N=256,methods=['original_image','heat_map'],signs=["all", "positive"],strides = (3, 4, 4), sliding_window_shapes=(3,15, 15), outlier_perc=2):
        "Plot occlusion-based attributions for a single item `inp_data`."
        dl = self.dls.test_dl(L(inp_data),with_labels=True, bs=1)
        self.dec_img,self.dec_pred=self._formatted_data_iter(dl)
        attributions_occ = self._occlusion.attribute(self.dec_img,
                                                     strides = strides,
                                                     target=self.dec_pred,
                                                     sliding_window_shapes=sliding_window_shapes,
                                                     baselines=0)
        self.colors = [(0, '#ffffff'),(0.25, '#000000'),(1, '#000000')] if colors is None else colors
        default_cmap = LinearSegmentedColormap.from_list(cmap_name,
                                                         self.colors, N=N)
        _ = viz.visualize_image_attr_multiple(np.transpose(attributions_occ.squeeze().cpu().detach().numpy(), (1,2,0)),
                                              np.transpose(self.dec_img.squeeze().cpu().numpy(), (1,2,0)),methods,signs,
                                              cmap=default_cmap,
                                              show_colorbar=True,
                                              outlier_perc=outlier_perc,titles=[f'Original Image - ({self.dec_pred.cpu().item()})', 'Occlusion']
                                              )
# Train briefly with the Occlusion callback attached, then visualize one random file.
learn = cnn_learner(dls, resnet34, metrics=error_rate,cbs=OcclusionCallback())
learn.fine_tune(1)
# NOTE(review): random.randint is inclusive — len(fnames) would index out of range; confirm.
idx=randint(0,len(fnames))
learn.occlusion.visualize(fnames[idx])
# ## Captum Insights Callback
#export
class CaptumInsightsCallback(Callback):
    "Captum Insights Callback for Image Interpretation"
    def __init__(self): pass
    def _formatted_data_iter(self,dl,normalize_func):
        "Endlessly yield `Batch` objects of de-normalized images (on `dl.device`) and labels."
        batches = iter(dl)
        while True:
            imgs, lbls = next(batches)
            imgs = normalize_func.decode(imgs).to(dl.device)
            yield Batch(inputs=imgs, labels=lbls)
    def visualize(self,inp_data,debug=True):
        "Launch the Captum Insights AttributionVisualizer over `inp_data`."
        zero_baseline = lambda o: o*0
        # Insights needs string class names; bool vocabs (binary tasks) are stringified.
        stringify_vocab = lambda vocab: list(map(str,vocab)) if isinstance(vocab[0],bool) else vocab
        dl = self.dls.test_dl(L(inp_data),with_labels=True, bs=4)
        norm = next((tfm for tfm in dl.after_batch if type(tfm)==Normalize),noop)
        visualizer = AttributionVisualizer(
            models=[self.model],
            score_func=lambda o: torch.nn.functional.softmax(o, 1),
            classes=stringify_vocab(dl.vocab),
            features=[
                ImageFeature(
                    "Image",
                    baseline_transforms=[zero_baseline],
                    input_transforms=[norm],
                )
            ],
            dataset=self._formatted_data_iter(dl,norm)
        )
        visualizer.render(debug=debug)
# Train briefly with the Insights callback attached, then launch the interactive UI over all files.
learn = cnn_learner(dls, resnet34, metrics=error_rate,cbs=CaptumInsightsCallback())
learn.fine_tune(1)
learn.captum_insights.visualize(fnames)
| nbs/73_callback.captum.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Import libraries
import pandas as pd
import numpy as np
import math
import yaml
import geopandas
import json
from bokeh.io import output_notebook, show, output_file
from bokeh.plotting import figure
from bokeh.models import GeoJSONDataSource, LinearColorMapper, ColorBar, NumeralTickFormatter
from bokeh.palettes import brewer
from bokeh.io.doc import curdoc
from bokeh.models import Slider, HoverTool, Select, ColumnDataSource, CustomJS
from bokeh.layouts import widgetbox, row, column
from bokeh.io import output_notebook, show
output_notebook()
# output_file("js_on_change.html")
from bokeh.application import Application
from bokeh.application.handlers import FunctionHandler
# -
# Load the per-neighborhood sales summary (one row per neighborhood per year)
neighborhood_data = pd.read_csv('https://raw.githubusercontent.com/JimKing100/SF_Real_Estate_Live/master/data/neighborhood_data.csv')
neighborhood_data
# Read the geojson map file for Realtor Neighborhoods into a GeoDataframe object
sf = geopandas.read_file('https://raw.githubusercontent.com/JimKing100/SF_Real_Estate_Live/master/data/Realtor%20Neighborhoods.geojson')
sf.dtypes
# Set the Coordinate Reference System (crs) for projections
# EPSG code 4326 is also referred to as WGS84 lat-long projection
# https://github.com/geopandas/geopandas/issues/245
sf = sf.set_crs(epsg=4326)
sf
# Rename columns in geojson map file
sf = sf.rename(columns={'geometry': 'geometry','nbrhood':'neighborhood_name', 'nid': 'subdist_no'}).set_geometry('geometry')
# +
# Change neighborhood id (subdist_no) for correct code for Mount Davidson Manor and for parks
# NOTE(review): '<NAME>' below looks like a redacted literal (presumably 'Mount Davidson Manor'
# per the comment above) — restore the real name before running.
sf.loc[sf['neighborhood_name'] == '<NAME>', 'subdist_no'] = '4n'
sf.loc[sf['neighborhood_name'] == 'Golden Gate Park', 'subdist_no'] = '12a'
sf.loc[sf['neighborhood_name'] == 'Presidio', 'subdist_no'] = '12b'
sf.loc[sf['neighborhood_name'] == 'Lincoln Park', 'subdist_no'] = '12c'
sf.sort_values(by=['subdist_no'])
# -
# Create a function that merges neighborhood data with mapping data and converts it into JSON format for Bokeh Server
# Create a function the returns json_data for the year selected by the user
def json_data(selectedYear):
    "Merge the summary data for `selectedYear` into the map GeoDataFrame and return it as a GeoJSON string for Bokeh."
    # Rows of the neighborhood summary for the requested year only
    year_rows = neighborhood_data[neighborhood_data['year'] == selectedYear]
    # Left-join onto the map so every neighborhood polygon is kept, with or without sales
    merged = pd.merge(sf, year_rows, on='subdist_no', how='left')
    # Neighborhoods with no sales that year get zeroed metrics instead of NaN
    fill_defaults = {'year': selectedYear, 'sale_price_count': 0, 'sale_price_mean': 0,
                     'sale_price_median': 0, 'sf_mean': 0, 'price_sf_mean': 0, 'min_income': 0}
    merged = merged.fillna(value=fill_defaults)
    # Bokeh's GeoJSONDataSource wants a geojson string, not a dict
    return json.dumps(json.loads(merged.to_json()))
# +
# This dictionary contains the formatting for the data in the plots
# This dictionary contains the formatting for the data in the plots
# Each tuple: (dataframe field, colorbar min, colorbar max, numeral tick format, display label)
format_data = [('sale_price_count', 0, 100,'0,0', 'Number of Sales'),
               ('sale_price_mean', 500000, 4000000,'$0,0', 'Average Sales Price'),
               ('sale_price_median', 500000, 4000000, '$0,0', 'Median Sales Price'),
               ('sf_mean', 500, 5000,'0,0', 'Average Square Footage'),
               ('price_sf_mean', 0, 2000,'$0,0', 'Average Price Per Square Foot'),
               ('min_income', 50000, 600000,'$0,0', 'Minimum Income Required')
              ]
#Create a DataFrame object from the dictionary
format_df = pd.DataFrame(format_data, columns = ['field' , 'min_range', 'max_range' , 'format', 'verbage'])
# -
# Define the callback function: update_plot
def update_plot(attr, old, new):
    "Bokeh widget callback: rebuild the plot for the currently selected year and criteria."
    # The input yr is the year selected from the slider
    yr = slider.value
    new_data = json_data(yr)
    # The input cr is the criteria selected from the select box; map its label back to a field name
    cr = select.value
    input_field = format_df.loc[format_df['verbage'] == cr, 'field'].iloc[0]
    # Update the plot based on the changed inputs
    p = make_plot(input_field)
    # Update the layout, clear the old document and display the new document
    layout = column(p, select, slider)
    curdoc().clear()
    curdoc().add_root(layout)
    # Update the shared data source last, so the freshly added plot renders the new year's data
    geosource.geojson = new_data
# Create a plotting function
def make_plot(field_name):
    "Build the choropleth figure for `field_name`, using the shared `geosource`, `palette` and `hover` objects."
    # Pull this field's colorbar range, tick format and display label from the format table
    row = format_df.loc[format_df['field'] == field_name]
    low = row['min_range'].iloc[0]
    high = row['max_range'].iloc[0]
    tick_format = row['format'].iloc[0]
    label = row['verbage'].iloc[0]
    # Linear mapping from field values to the blue palette
    mapper = LinearColorMapper(palette = palette, low = low, high = high)
    bar = ColorBar(color_mapper=mapper, label_standoff=18,
                   formatter=NumeralTickFormatter(format=tick_format),
                   border_line_color=None, location = (0, 0))
    # Bare map: no toolbar, grid or axes
    fig = figure(title = label + ' by Neighborhood for Single Family Homes in SF by Year - 2009 to 2018',
                 plot_height = 650, plot_width = 850,
                 toolbar_location = None)
    fig.xgrid.grid_line_color = None
    fig.ygrid.grid_line_color = None
    fig.axis.visible = False
    # Neighborhood polygons, colored by the selected field
    fig.patches('xs','ys', source = geosource, fill_color = {'field' : field_name, 'transform' : mapper},
                line_color = 'black', line_width = 0.25, fill_alpha = 1)
    fig.add_layout(bar, 'right')
    fig.add_tools(hover)
    return fig
# +
# Input geojson source that contains features for plotting for:
# initial year 2018 and initial criteria sale_price_median
# GeoJSON source for Bokeh, initialised to year 2018; update_plot replaces its .geojson on widget changes
geosource = GeoJSONDataSource(geojson = json_data(2018))
input_field = 'sale_price_median'
# Define a sequential multi-hue color palette.
palette = brewer['Blues'][8]
# Reverse color order so that dark blue marks the highest values
# (the original "obesity" wording was a leftover from the tutorial this was adapted from)
palette = palette[::-1]
# Add hover tool showing all metrics for the hovered neighborhood
hover = HoverTool(tooltips = [ ('Neighborhood','@neighborhood_name'),
                               ('# Sales', '@sale_price_count'),
                               ('Average Price', '$@sale_price_mean{,}'),
                               ('Median Price', '$@sale_price_median{,}'),
                               ('Average SF', '@sf_mean{,}'),
                               ('Price/SF ', '$@price_sf_mean{,}'),
                               ('Income Needed', '$@min_income{,}')])
# Call the plotting function
p = make_plot(input_field)
# Make a slider object: slider (callbacks are wired later inside modify_doc)
slider = Slider(title = 'Year',start = 2009, end = 2018, step = 1, value = 2018)
# slider.on_change('value', update_plot)
# Make a selection object: select
select = Select(title='Select Criteria:', value='Median Sales Price', options=['Median Sales Price', 'Minimum Income Required',
                                                                               'Average Sales Price', 'Average Price Per Square Foot',
                                                                               'Average Square Footage', 'Number of Sales'])
#select.on_change('value', update_plot)
# Make a column layout of widgetbox(slider) and plot, and add it to the current document
# Display the current document
# layout = column(p, widgetbox(select), widgetbox(slider))
layout = column(p, select, slider)
#curdoc().add_root(layout)
#show(layout)
# +
# https://stackoverflow.com/questions/53217654/how-to-get-interactive-bokeh-in-jupyter-notebook
# https://stackoverflow.com/questions/53217654/how-to-get-interactive-bokeh-in-jupyter-notebook
def modify_doc(doc):
    "Bokeh app entry point: attach the layout to the served document and wire the widget callbacks."
    doc.add_root(column(layout))
    slider.on_change('value', update_plot)
    select.on_change('value', update_plot)
# Wrap the function in a Bokeh Application so the interactive app runs inline in the notebook
handler = FunctionHandler(modify_doc)
app = Application(handler)
show(app)
# -
| History/Geopandas and Bokeh Example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from IPython.display import set_matplotlib_formats
# Korean-capable font and proper minus signs for matplotlib on macOS
plt.rc("font", family="AppleGothic")
plt.rc("axes", unicode_minus=False)
set_matplotlib_formats("retina")
# Met Museum open-access object catalog (CSV despite the .txt extension)
df = pd.read_csv("../../MetObjects.txt")
df.columns
# -
df.info()
# Keep only rows with a date value
# NOTE(review): MetObjects usually names this column 'Object Date' — verify 'date' exists in this dump
df = df[df['date'].notnull()]
# Horizontal bar chart of null counts per column
df.isnull().sum().plot.barh(figsize=(8, 9))
df.info()
| _ipynbs/painters by numbers.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Polar and Cilindrical Frame of Reference
# <NAME>
# + [markdown] slideshow={"slide_type": "slide"}
# Consider that we have the position vector $\bf\vec{r}$ of a particle, moving in a circular path indicated in the figure below by a dashed line. This vector $\bf\vec{r(t)}$ is described in a fixed reference frame as:
#
# <span class="notranslate">
# \begin{equation}
# {\bf\hat{r}}(t) = {x}{\bf\hat{i}}+{y}{\bf\hat{j}} + {z}{\bf\hat{k}}
# \end{equation}
# </span>
#
# <img src="../images/polarCoord.png" width=500/>
# + [markdown] slideshow={"slide_type": "slide"}
# Naturally, we could describe all the kinematic variables in the fixed reference frame. But in circular motions, it is convenient to define a basis with a vector in the direction of the position vector $\bf\vec{r}$. So, the vector $\bf\hat{e_R}$ is defined as:
#
# <span class="notranslate">
# \begin{equation}
# {\bf\hat{e_R}} = \frac{\bf\vec{r}}{\Vert{\bf\vec{r} }\Vert}
# \end{equation}
# </span>
#
# The second vector of the basis can be obtained by the cross multiplication between $\bf\hat{k}$ and $\bf\hat{e_R}$:
#
# <span class="notranslate">
# \begin{equation}
# {\bf\hat{e_\theta}} = {\bf\hat{k}} \times {\bf\hat{e_R}}
# \end{equation}
# </span>
#
#
# The third vector of the basis is the conventional ${\bf\hat{k}}$ vector.
#
# <img src="../images/polarCoorderetheta.png" width=500/>
# + [markdown] slideshow={"slide_type": "slide"}
# This basis can be used also for non-circular movements. For a 3D movement, the versor ${\bf\hat{e_R}}$ is obtained by removing the projection of the vector ${\bf\vec{r}}$ onto the versor ${\bf\hat{k}}$:
#
# <span class="notranslate">
# \begin{equation}
# {\bf\hat{e_R}} = \frac{{\bf\vec{r}} - ({\bf\vec{r}}\cdot{\bf\hat{k}})\,{\bf\hat{k}}}{\Vert{\bf\vec{r}} - ({\bf\vec{r}}\cdot{\bf\hat{k}})\,{\bf\hat{k}}\Vert}
# \end{equation}
# </span>
#
# <img src="../images/polarCilindrical.png" width=500/>
#
#
# + [markdown] slideshow={"slide_type": "slide"}
# ## Time-derivative of the versors ${\bf\hat{e_R}}$ and ${\bf\hat{e_\theta}}$
#
# To obtain the expressions of the velocity and acceleration vectors, it is necessary to obtain the expressions of the time-derivative of the vectors ${\bf\hat{e_R}}$ and ${\bf\hat{e_\theta}}$.
#
# This can be done by noting that:
#
# <span class="notranslate">
# \begin{align}
# {\bf\hat{e_R}} &= \cos(\theta){\bf\hat{i}} + \sin(\theta){\bf\hat{j}}\\
# {\bf\hat{e_\theta}} &= -\sin(\theta){\bf\hat{i}} + \cos(\theta){\bf\hat{j}}
# \end{align}
# </span>
#
# Deriving ${\bf\hat{e_R}}$ we obtain:
#
# <span class="notranslate">
# \begin{equation}
# \frac{d{\bf\hat{e_R}}}{dt} = -\sin(\theta)\dot\theta{\bf\hat{i}} + \cos(\theta)\dot\theta{\bf\hat{j}} = \dot{\theta}{\bf\hat{e_\theta}}
# \end{equation}
# </span>
#
# Similarly, we obtain the time-derivative of ${\bf\hat{e_\theta}}$:
#
# <span class="notranslate">
# \begin{equation}
# \frac{d{\bf\hat{e_\theta}}}{dt} = -\cos(\theta)\dot\theta{\bf\hat{i}} - \sin(\theta)\dot\theta{\bf\hat{j}} = -\dot{\theta}{\bf\hat{e_R}}
# \end{equation}
# </span>
#
# + [markdown] slideshow={"slide_type": "slide"}
# ## Position, velocity and acceleration
# + [markdown] slideshow={"slide_type": "slide"}
# ### Position
#
# The position vector $\bf\vec{r}$, from the definition of $\bf\hat{e_R}$, is:
#
# <span class="notranslate">
# \begin{equation}
# {\bf\vec{r}} = R{\bf\hat{e_R}} + z{\bf\hat{k}}
# \end{equation}
# </span>
#
# where $R = \Vert{\bf\vec{r}} - ({\bf\vec{r}}\cdot{\bf\hat{k}})\,{\bf\hat{k}}\Vert$.
# + [markdown] slideshow={"slide_type": "slide"}
# ### Velocity
#
# The velocity vector $\bf\vec{v}$ is obtained by deriving the vector $\bf\vec{r}$:
#
# <span class="notranslate">
# \begin{equation}
# {\bf\vec{v}} = \frac{d(R{\bf\hat{e_R}})}{dt} + \dot{z}{\bf\hat{k}} = \dot{R}{\bf\hat{e_R}}+R\frac{d\bf\hat{e_R}}{dt}=\dot{R}{\bf\hat{e_R}}+R\dot{\theta}{\bf\hat{e_\theta}}+ \dot{z}{\bf\hat{k}}
# \end{equation}
# </span>
# + [markdown] slideshow={"slide_type": "slide"}
# ### Acceleration
#
# The acceleration vector $\bf\vec{a}$ is obtained by deriving the velocity vector:
#
# <span class="notranslate">
# \begin{align}
# {\bf\vec{a}} =& \frac{d(\dot{R}{\bf\hat{e_R}}+R\dot{\theta}{\bf\hat{e_\theta}}+\dot{z}{\bf\hat{k}})}{dt}=\\
# =&\ddot{R}{\bf\hat{e_R}}+\dot{R}\frac{d\bf\hat{e_R}}{dt} + \dot{R}\dot{\theta}{\bf\hat{e_\theta}} + R\ddot{\theta}{\bf\hat{e_\theta}} + R\dot{\theta}\frac{d{\bf\hat{e_\theta}}}{dt} + \ddot{z}{\bf\hat{k}}=\\
# =&\ddot{R}{\bf\hat{e_R}}+\dot{R}\dot{\theta}{\bf\hat{e_\theta}} + \dot{R}\dot{\theta}{\bf\hat{e_\theta}} + R\ddot{\theta}{\bf\hat{e_\theta}} - R\dot{\theta}^2{\bf\hat{e_R}}+ \ddot{z}{\bf\hat{k}} =\\
# =&\ddot{R}{\bf\hat{e_R}}+2\dot{R}\dot{\theta}{\bf\hat{e_\theta}}+ R\ddot{\theta}{\bf\hat{e_\theta}} - {R}\dot{\theta}^2{\bf\hat{e_R}}+ \ddot{z}{\bf\hat{k}} =\\
# =&(\ddot{R}-R\dot{\theta}^2){\bf\hat{e_R}}+(2\dot{R}\dot{\theta} + R\ddot{\theta}){\bf\hat{e_\theta}}+ \ddot{z}{\bf\hat{k}}
# \end{align}
# </span>
#
# - The term $\ddot{R}$ is an acceleration in the radial direction.
#
# - The term $R\ddot{\theta}$ is an angular acceleration.
#
# - The term $\ddot{z}$ is an acceleration in the $\bf\hat{k}$ direction.
#
# - The term $-R\dot{\theta}^2$ is the well known centripetal acceleration.
#
# - The term $2\dot{R}\dot{\theta}$ is known as Coriolis acceleration. This term may be difficult to understand. It appears when there is displacement in the radial and angular directions at the same time.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Important to note
#
# The reader must bear in mind that the use of a different basis to represent the position, velocity or acceleration vectors is only a different representation of the same vector. For example, for the acceleration vector:
#
# <span class="notranslate">
# \begin{equation}
# {\bf\vec{a}} = \ddot{x}{\bf\hat{i}}+ \ddot{y}{\bf\hat{j}} + \ddot{z}{\bf\hat{k}}=(\ddot{R}-R\dot{\theta}^2){\bf\hat{e_R}}+(2\dot{R}\dot{\theta} + R\ddot{\theta}){\bf\hat{e_\theta}}+ \ddot{z}{\bf\hat{k}}=\dot{\Vert\bf\vec{v}\Vert}{\bf\hat{e}_t}+{\Vert\bf\vec{v}\Vert}^2\Vert{\bf\vec{C}} \Vert{\bf\hat{e}_n}
# \end{equation}
# </span>
#
# In which the last equality is the acceleration vector represented in the path-coordinate of the particle (see http://nbviewer.jupyter.org/github/BMClab/bmc/blob/master/notebooks/Time-varying%20frames.ipynb).
# + [markdown] slideshow={"slide_type": "slide"}
# ## Example
#
#
# Consider a particle following the spiral path described below:
# <span class="notranslate">
# \begin{equation}
# {\bf\vec{r}}(t) = (2\sqrt(t)\cos(t)){\bf\hat{i}}+ (2\sqrt(t)\sin(t)){\bf\hat{j}}
# \end{equation}
# </span>
# + slideshow={"slide_type": "slide"}
import numpy as np
import sympy as sym
from sympy.plotting import plot_parametric,plot3d_parametric_line
from sympy.vector import CoordSys3D
import matplotlib.pyplot as plt
# from matplotlib import rc
# rc('text', usetex=True)
sym.init_printing()
# + [markdown] slideshow={"slide_type": "slide"}
# ### Solving numerically
# + slideshow={"slide_type": "slide"}
t = np.linspace(0.01,10,30).reshape(-1,1) # create a time vector and reshape it to a column vector
R = 2*np.sqrt(t)      # radial distance R(t) = 2*sqrt(t)
theta = t             # angle grows linearly with time (theta = t)
rx = R*np.cos(t)
ry = R*np.sin(t)
r = np.hstack((rx, ry)) # creates the position vector by stacking rx and ry horizontally
# + slideshow={"slide_type": "slide"}
e_r = r/np.linalg.norm(r, axis=1, keepdims=True) # defines e_r versor (unit vector along r)
e_theta = np.cross([0,0,1],e_r)[:,0:-1] # defines e_theta versor (k x e_r, then drop the z component)
# + slideshow={"slide_type": "slide"}
# BUGFIX: the sampling interval is t[1]-t[0]; t starts at 0.01, so t[1] alone
# over-estimates the step and biased every finite-difference derivative below.
dt = t[1] - t[0]
Rdot = np.diff(R, axis=0)/dt          # dR/dt by finite differences
thetaDot = np.diff(theta, axis=0)/dt  # dtheta/dt by finite differences
# v = Rdot*e_R + R*thetaDot*e_theta (planar case, zdot = 0)
v = Rdot*e_r[0:-1,:] +R[0:-1]*thetaDot*e_theta[0:-1,:]
# + slideshow={"slide_type": "slide"}
# Second finite-difference derivatives of R and theta
Rddot = np.diff(Rdot, axis=0)/dt
thetaddot = np.diff(thetaDot, axis=0)/dt
# + slideshow={"slide_type": "slide"}
# a = (Rddot - R*thetaDot^2)*e_R + (2*Rdot*thetaDot + R*thetaddot)*e_theta
# BUGFIX: the tangential term previously used Rdot*thetaddot; the formula derived
# above requires R*thetaddot.
a = ((Rddot - R[1:-1]*thetaDot[0:-1]**2)*e_r[1:-1,:]
     + (2*Rdot[0:-1]*thetaDot[0:-1] + R[1:-1]*thetaddot)*e_theta[1:-1,:])
# + slideshow={"slide_type": "slide"}
from matplotlib.patches import FancyArrowPatch
# %matplotlib inline
plt.rcParams['figure.figsize']=10,10
fig = plt.figure()
plt.plot(r[:,0],r[:,1],'.')
ax = fig.add_axes([0,0,1,1])
# Draw the basis versors along the trajectory: e_r in red, e_theta in green
for i in np.arange(len(t)-2):
    vec1 = FancyArrowPatch(r[i,:],r[i,:]+e_r[i,:],mutation_scale=30,color='r', label='e_r')
    vec2 = FancyArrowPatch(r[i,:],r[i,:]+e_theta[i,:],mutation_scale=30,color='g', label='e_theta')
    ax.add_artist(vec1)
    ax.add_artist(vec2)
plt.xlim((-10,10))
plt.ylim((-10,10))
plt.grid()
plt.legend([vec1, vec2],[r'$\vec{e_r}$', r'$\vec{e_{\theta}}$'])
plt.show()
# + slideshow={"slide_type": "slide"}
from matplotlib.patches import FancyArrowPatch
# %matplotlib inline
plt.rcParams['figure.figsize']=10,10
fig = plt.figure()
plt.plot(r[:,0],r[:,1],'.')
ax = fig.add_axes([0,0,1,1])
# Draw the kinematic vectors along the trajectory: velocity in red, acceleration in green
for i in np.arange(len(t)-2):
    vec1 = FancyArrowPatch(r[i,:],r[i,:]+v[i,:],mutation_scale=10,color='r')
    vec2 = FancyArrowPatch(r[i,:],r[i,:]+a[i,:],mutation_scale=10,color='g')
    ax.add_artist(vec1)
    ax.add_artist(vec2)
plt.xlim((-10,10))
plt.ylim((-10,10))
plt.grid()
plt.legend([vec1, vec2],[r'$\vec{v}$', r'$\vec{a}$'])
plt.show()
# + [markdown] slideshow={"slide_type": "skip"}
# ### Solved simbolically (extra reading)
# + slideshow={"slide_type": "skip"}
# Symbolic version of the same problem: fixed coordinate system and time symbol
O = sym.vector.CoordSys3D(' ')
t = sym.symbols('t')
# + slideshow={"slide_type": "skip"}
# Symbolic position vector of the spiral path
r = 2*sym.sqrt(t)*sym.cos(t)*O.i+2*sym.sqrt(t)*sym.sin(t)*O.j
r
# + slideshow={"slide_type": "skip"}
plot_parametric(r.dot(O.i),r.dot(O.j),(t,0,10))
# + slideshow={"slide_type": "skip"}
# e_R: remove the k-projection of r, then normalize
e_r = r - r.dot(O.k)*O.k
e_r = e_r/sym.sqrt(e_r.dot(O.i)**2+e_r.dot(O.j)**2+e_r.dot(O.k)**2)
# + slideshow={"slide_type": "skip"}
e_r
# + slideshow={"slide_type": "skip"}
# e_theta = k x e_R
e_theta = O.k.cross(e_r)
e_theta
# + slideshow={"slide_type": "skip"}
from matplotlib.patches import FancyArrowPatch
plt.rcParams['figure.figsize']=10,10
fig = plt.figure()
ax = fig.add_axes([0, 0, 1, 1])
ax.axis("on")
# Evaluate the symbolic versors numerically at each instant and draw them along the path
time = np.linspace(0,10,30)
for instant in time:
    vt = FancyArrowPatch([float(r.dot(O.i).subs(t,instant)),float(r.dot(O.j).subs(t,instant))],
                         [float(r.dot(O.i).subs(t,instant))+float(e_r.dot(O.i).subs(t,instant)), float(r.dot(O.j).subs(t, instant))+float(e_r.dot(O.j).subs(t,instant))],
                         mutation_scale=20,
                         arrowstyle="->",color="r",label='${{e_r}}$')
    vn = FancyArrowPatch([float(r.dot(O.i).subs(t, instant)),float(r.dot(O.j).subs(t,instant))],
                         [float(r.dot(O.i).subs(t, instant))+float(e_theta.dot(O.i).subs(t, instant)), float(r.dot(O.j).subs(t, instant))+float(e_theta.dot(O.j).subs(t, instant))],
                         mutation_scale=20,
                         arrowstyle="->",color="g",label='${{e_{theta}}}$')
    ax.add_artist(vn)
    ax.add_artist(vt)
plt.xlim((-10,10))
plt.ylim((-10,10))
plt.legend(handles=[vt,vn],fontsize=20)
plt.grid()
plt.show()
# + slideshow={"slide_type": "skip"}
# Radial coordinate R(t); here theta = t, so thetaDot = 1 and thetaddot = 0,
# which is why the factors of thetaDot below appear as literal 1 (and thetaddot as 0)
R = 2*sym.sqrt(t)
# + slideshow={"slide_type": "slide"}
Rdot = sym.diff(R,t)
Rddot = sym.diff(Rdot,t)
Rddot
# + slideshow={"slide_type": "skip"}
# v = Rdot*e_R + R*thetaDot*e_theta, with thetaDot = 1
v = Rdot*e_r + R*e_theta
# + slideshow={"slide_type": "skip"}
v
# + slideshow={"slide_type": "skip"}
# a = (Rddot - R*thetaDot^2)*e_R + (2*Rdot*thetaDot + R*thetaddot)*e_theta, with thetaDot=1, thetaddot=0
a = (Rddot - R)*e_r + (2*Rdot*1+0)*e_theta
# Coriolis component alone: 2*Rdot*thetaDot*e_theta
aCor = 2*Rdot*1*e_theta
aCor
# + slideshow={"slide_type": "skip"}
a
# + slideshow={"slide_type": "slide"}
from matplotlib.patches import FancyArrowPatch
plt.rcParams['figure.figsize'] = 10,10
fig = plt.figure()
ax = fig.add_axes([0, 0, 1, 1])
ax.axis("on")
# Evaluate the symbolic vectors numerically at each instant:
# velocity (red), total acceleration (green) and Coriolis component (blue)
time = np.linspace(0.1,10,30)
for instant in time:
    vt = FancyArrowPatch([float(r.dot(O.i).subs(t,instant)),float(r.dot(O.j).subs(t,instant))],
                         [float(r.dot(O.i).subs(t,instant))+float(v.dot(O.i).subs(t,instant)), float(r.dot(O.j).subs(t, instant))+float(v.dot(O.j).subs(t,instant))],
                         mutation_scale=20,
                         arrowstyle="->",color="r",label='${{v}}$')
    vn = FancyArrowPatch([float(r.dot(O.i).subs(t, instant)),float(r.dot(O.j).subs(t,instant))],
                         [float(r.dot(O.i).subs(t, instant))+float(a.dot(O.i).subs(t, instant)), float(r.dot(O.j).subs(t, instant))+float(a.dot(O.j).subs(t, instant))],
                         mutation_scale=20,
                         arrowstyle="->",color="g",label='${{a}}$')
    vc = FancyArrowPatch([float(r.dot(O.i).subs(t, instant)),float(r.dot(O.j).subs(t,instant))],
                         [float(r.dot(O.i).subs(t, instant))+float(aCor.dot(O.i).subs(t, instant)), float(r.dot(O.j).subs(t, instant))+float(aCor.dot(O.j).subs(t, instant))],
                         mutation_scale=20,
                         arrowstyle="->",color="b",label='${{a_{Cor}}}$')
    ax.add_artist(vn)
    ax.add_artist(vt)
    ax.add_artist(vc)
plt.xlim((-10,10))
plt.ylim((-10,10))
plt.legend(handles=[vt,vn,vc],fontsize=20)
plt.grid()
plt.show()
# + [markdown] slideshow={"slide_type": "slide"}
# ## Problems
#
# 1. Problems from 15.1.1 to 15.1.14 from Ruina and Rudra's book,
# 2. Problems from 18.1.1 to 18.1.8 and 18.1.10 from Ruina and Rudra's book.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Reference
#
# - <NAME>, <NAME> (2019) [Introduction to Statics and Dynamics](http://ruina.tam.cornell.edu/Book/index.html). Oxford University Press.
# -
| notebooks/.ipynb_checkpoints/PolarCoordinates-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# +
import os
# CLASSPATH must point at the local Stanford POS-tagger install so NLTK can launch its JVM.
# NOTE(review): hard-coded Windows path — adjust per machine.
os.environ['CLASSPATH'] = "H:/Relation-Classification/stanford/stanford-postagger-2017-06-09"
from nltk.tokenize.stanford import StanfordTokenizer
# +
# SemEval-2010 task 8 corpus inputs and tokenized outputs
trainFile = './corpus/SemEval2010_task8_training/TRAIN_FILE.TXT'
testFile = './corpus/SemEval2010_task8_testing_keys/TEST_FILE_FULL.TXT'
op_trainFile = "./files/train_attn.txt"
op_testFile = "./files/test_attn.txt"
# -
# +
def createFile(filepath, outputpath):
    """Tokenize SemEval-2010 task 8 sentences and write '<label> <tokens...>' lines to `outputpath`.

    The corpus groups each example as 4 lines (sentence, relation label, comment, blank).
    Entity markers <e1>/</e1>/<e2>/</e2> are replaced by E1_START/E1_END/E2_START/E2_END
    so they survive tokenization as single tokens.
    """
    def clean_tokens(sent_num, tokens):
        # Collapse tokens the tokenizer left with internal whitespace (e.g. '1 1/2')
        # into one underscore-joined token, logging the sentence number for inspection.
        ret = []
        for t in tokens:
            t = t.strip().split()
            if len(t) > 1:
                print(sent_num, t)
            # BUGFIX: always join back to a string; previously single-word tokens were
            # appended as lists, which breaks the " ".join(tokens) below.
            ret.append("_".join(t))
        return ret
    # Build the tokenizer once: it was previously re-created for every sentence (slow — spawns the JVM helper each time)
    tokenizer = StanfordTokenizer()
    with open(filepath) as fIn:
        lines = [line.strip() for line in fIn]
    with open(outputpath, 'w') as fOut:
        for idx in range(0, len(lines), 4):
            sentence_num = lines[idx].split("\t")[0]
            sentence = lines[idx].split("\t")[1][1:-1]   # strip surrounding quotes
            label = lines[idx+1]
            sentence = sentence.replace("<e1>", " E1_START ").replace("</e1>", " E1_END ")
            sentence = sentence.replace("<e2>", " E2_START ").replace("</e2>", " E2_END ")
            #sentence = sentence.replace("<e1>", " _e1_ ").replace("</e1>", " _/e1_ ")
            #sentence = sentence.replace("<e2>", " _e2_ ").replace("</e2>", " _/e2_ ")
            tokens = tokenizer.tokenize(sentence)
            tokens = clean_tokens(sentence_num, tokens)
            fOut.write(" ".join([ label, " ".join(tokens) ]))
            fOut.write("\n")
    print(outputpath, "created")
# +
# Generate both tokenized files
createFile(trainFile, op_trainFile)
createFile(testFile, op_testFile)
print("Train / Test file created")
# +
# 2609 ['1', '1/2']
# 7589 ['1', '1/2']
# 10591 ['1', '1/4']
# 10665 ['6', '1/2']
# Train / Test file created
# -
| 01_create_train_test_attn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="pARL5s_sNHv2"
# # Automatic Differentiation
#
# In machine learning, we *train* models, updating them successively so that they get better and better as they see more and more data. Usually, *getting better* means minimizing a *loss function*, a score that answers the question "how *bad* is our model?" With neural networks, we typically choose loss functions that are differentiable with respect to our parameters.
# Put simply, this means that for each of the model's parameters, we can determine how much *increasing* or *decreasing* it might affect the loss. While the calculations for taking these derivatives are straightforward, requiring only some basic calculus, for complex models, working out the updates by hand can be a pain (and often error-prone).
# + [markdown] id="rpyYGWxGNHv9"
# The autograd package expedites this work by automatically calculating derivatives. And while many other libraries require that we compile a symbolic graph to take automatic derivatives, `autograd` allows us to take derivatives while writing ordinary imperative code. Every time we pass data through our model, `autograd` builds a graph on the fly, tracking which data combined through which operations to produce the output. This graph enables `autograd` to subsequently backpropagate gradients on command. Here *backpropagate* simply means to trace through the compute graph, filling in the partial derivatives with respect to each parameter.
# + id="5XnuKK9iNHv-"
import torch
from torch.autograd import Variable
# + [markdown] id="iOBsPSCzNHv_"
# ## A Simple Example
#
# As a toy example, say that we are interested in differentiating the mapping $y = 2\mathbf{x}^{\top}\mathbf{x}$ with respect to the column vector $\mathbf{x}$. To start, let's create the variable `x` and assign it an initial value.
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="VM0iXJZtNHwA" outputId="d59e329f-5e69-4eb3-c061-51169a69a245"
# Column vector x = [0, 1, 2, 3]^T that will record gradients.
# NOTE(review): torch.autograd.Variable is deprecated; a plain tensor with
# requires_grad=True behaves the same in modern PyTorch — confirm the torch version in use.
x = Variable(torch.arange(4, dtype=torch.float32).reshape((4, 1)), requires_grad=True)
print(x)
# + [markdown] id="MODpmt4wNHwB"
# Once we compute the gradient of ``y`` with respect to ``x``, we will need a place to store it. We can tell a tensor that we plan to store a gradient by the ``requires_grad=True`` keyword.
# + [markdown] id="7YeCcrGTNHwC"
# Now we are going to compute ``y`` and PyTorch will generate a computation graph on the fly. Autograd is reverse automatic differentiation system. Conceptually, autograd records a graph recording all of the operations that created the data as you execute operations, giving you a directed acyclic graph whose leaves are the input tensors and roots are the output tensors. By tracing this graph from roots to leaves, you can automatically compute the gradients using the chain rule.
#
# Note that building the computation graph requires a nontrivial amount of computation. So PyTorch will *only* build the graph when explicitly told to do so. For a tensor to be “recordable”, it must be wrapped with torch.autograd.Variable. The Variable class provides almost the same API as Tensor, but augments it with the ability to interplay with torch.autograd.Function in order to be differentiated automatically. More precisely, a Variable records the history of operations on a Tensor.
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="qlqJO2ANNHwC" outputId="f8161cf1-9c10-40c7-a894-8cecee6fa75b"
# y = 2 * x^T x, a 1x1 tensor (scalar), recorded on the autograd graph
y = 2*torch.mm(x.t(),x)
print(y)
# + [markdown] id="055sbI9iNHwD"
# Since the shape of `x` is (4, 1), `y` is a scalar. Next, we can automatically find the gradient by calling the `backward` function. It should be noted that if `y` is not a scalar, PyTorch will first sum the elements in `y` to get the new variable by default, and then find the gradient of the variable with respect to `x`.
# + id="N_ehb6MENHwE"
# Backpropagate: fills x.grad with dy/dx
y.backward()
# + [markdown] id="ogzOdu12NHwF"
# Since every Variable except for inputs is the result of an operation, each Variable has an associated grad_fn, which is the torch.autograd.Function that is used to compute the backward step. For inputs it is None:
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="YN8SfEMzNHwF" outputId="f7a39e1a-c5fe-4ff3-c662-daf902a5b3d5"
# Leaf inputs have grad_fn None; y's grad_fn records the op that produced it
print("x.grad:", x.grad)
print("x.grad_fn:", x.grad_fn)
print("y.grad_fn:", y.grad_fn)
# + [markdown] id="1a_GAR75NHwG"
# The gradient of the function $y = 2\mathbf{x}^{\top}\mathbf{x}$ with respect to $\mathbf{x}$ should be $4\mathbf{x}$. Now let's verify that the gradient produced is correct.
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="jHvO1YEJNHwG" outputId="d5dcdeab-c21e-4c5f-a6ce-d0109853b2dc"
# Analytic gradient of 2*x^T x is 4x; verify the autograd result matches exactly
print((x.grad - 4*x).norm().item() == 0)
print(x.grad)
# + [markdown] id="HTOkervHNHwH"
# ## Training Mode and Evaluation Mode
#
# `Model` will change the running mode to the evaluation mode on calling `model.eval()` or to the training mode on calling `model.train()`.
#
# In some cases, the same model behaves differently in the training and prediction modes (e.g. when using neural techniques such as dropout and batch normalization). In other cases, some models may store more auxiliary variables to make computing gradients easier. We will cover these differences in detail in later chapters. For now, you do not need to worry about them.
# + [markdown] id="C4tYrWGcNHwH"
# ## Computing the Gradient of Python Control Flow
#
# One benefit of using automatic differentiation is that even if the computational graph of the function contains Python's control flow (such as conditional and loop control), we may still be able to find the gradient of a variable. Consider the following program: It should be emphasized that the number of iterations of the loop (while loop) and the execution of the conditional judgment (if statement) depend on the value of the input `b`.
# + id="83S26YLWNHwI"
def f(a):
    """Piecewise-linear toy function whose control flow depends on the input.

    Doubles `a` until its norm reaches 1000, then scales the result by 100
    when the sum is non-positive. For any input there is a constant g with
    f(a) == g * a, so after backprop a.grad == f(a) / a.
    """
    out = 2 * a
    # Keep doubling until the magnitude is at least 1000.
    while out.norm().item() < 1000:
        out = out + out
    # Amplify the non-positive branch.
    return out if out.sum().item() > 0 else out * 100
# + [markdown] id="Az0abh03NHwI"
# Note that the number of iterations of the while loop and the execution of the conditional statement (if then else) depend on the value of `a`. To compute gradients, we need to `record` the calculation, and then call the `backward` function to calculate the gradient.
# + id="Q-qRa2EkNHwI"
# Random scalar input; requires_grad must be enabled before calling f so the
# dynamically-built control-flow graph is recorded for backprop.
a = torch.randn(size=(1,))
a.requires_grad=True
d = f(a)
d.backward()
# + [markdown] id="iVamDxunNHwI"
# Let's analyze the `f` function defined above. As you can see, it is piecewise linear in its input `a`. In other words, for any `a` there exists some constant such that for a given range `f(a) = g * a`. Consequently `d / a` allows us to verify that the gradient is correct:
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="BwXGi43jNHwJ" outputId="2324c6d5-6709-45c7-9562-a54fa8bcfec0"
# f is piecewise linear, so d = g * a for some constant g; hence a.grad == d/a.
print(a.grad == (d / a))
# + [markdown] id="AEkeomgkNHwJ"
# ## Head gradients and the chain rule
#
# *Caution: This part is tricky and not necessary to understanding subsequent sections. That said, it is needed if you want to build new layers from scratch. You can skip this on a first read.*
#
# Sometimes when we call the backward method, e.g. `y.backward()`, where
# `y` is a function of `x` we are just interested in the derivative of
# `y` with respect to `x`. Mathematicians write this as
# $\frac{dy(x)}{dx}$. At other times, we may be interested in the
# gradient of `z` with respect to `x`, where `z` is a function of `y`,
# which in turn, is a function of `x`. That is, we are interested in
# $\frac{d}{dx} z(y(x))$. Recall that by the chain rule
#
# $$\frac{d}{dx} z(y(x)) = \frac{dz(y)}{dy} \frac{dy(x)}{dx}.$$
#
# So, when ``y`` is part of a larger function ``z`` and we want ``x.grad`` to store $\frac{dz}{dx}$, we can pass in the *head gradient* $\frac{dz}{dy}$ as an input to ``backward()``. The default argument is ``torch.ones_like(y)``. See [Wikipedia](https://en.wikipedia.org/wiki/Chain_rule) for more details.
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="HZvGe9O-NHwJ" outputId="61e933e6-f4e9-4e74-c18d-1d46d9e396d9"
# Head-gradient example: passing dz/dy to backward() makes x.grad accumulate
# (dz/dy) * (dy/dx) elementwise, per the chain rule.
x = Variable(torch.tensor([[0.],[1.],[2.],[3.]]), requires_grad=True)
y = x * 2
z = y * x
# Caller-supplied dz/dy ("head gradient"); note the first entry is an int
# literal — presumably torch promotes the whole tensor to float here.
head_gradient = torch.tensor([[10], [1.], [.1], [.01]])
z.backward(head_gradient)
print(x.grad)
# + [markdown] id="8KD80FSoNHwK"
# ## Summary
#
# * PyTorch provides an `autograd` package to automate the derivation process.
# * PyTorch's `autograd` package can be used to derive general imperative programs.
# * The running modes of PyTorch include the training mode and the evaluation mode.
# + [markdown] id="s0vXkwaoNHwK" endofcell="--"
# ## Exercises
#
# 1. In the control flow example where we calculate the derivative of `d` with respect to `a`, what would happen if we changed the variable `a` to a random vector or matrix. At this point, the result of the calculation `f(a)` is no longer a scalar. What happens to the result? How do we analyze this?
#   - The result would be a vector or matrix. We can analyze it by computing the gradient externally.
#
# 2. Redesign an example of finding the gradient of the control flow. Run and analyze the result.
# - Check the code below
#
# 3. In a second-price auction (such as in eBay or in computational advertising), the winning bidder pays the second-highest price. Compute the gradient of the final price with respect to the winning bidder's bid using `autograd`. What does the result tell you about the mechanism? If you are curious to learn more about second-price auctions, check out this paper by [Edelman, Ostrovski and Schwartz, 2005](https://www.benedelman.org/publications/gsp-060801.pdf).
# -
#
# 4. Why is the second derivative much more expensive to compute than the first derivative?
# - Because of the chain rule, we need to compute $N^2$ elements while we need to compute $N$ elements in the first derivative.
#
# 5. Derive the head gradient relationship for the chain rule. If you get stuck, use the ["Chain rule" article on Wikipedia](https://en.wikipedia.org/wiki/Chain_rule).
# - $\text{head gradient} = \frac{dz}{dy} = \frac{dz}{dx}\frac{dx}{dy}$
#
# 6. Assume $f(x) = \sin(x)$. Plot $f(x)$ and $\frac{df(x)}{dx}$ on a graph, where you computed the latter without any symbolic calculations, i.e. without exploiting that $f'(x) = \cos(x)$.
# - Check the code below
#
# --
# + id="aMYeHC8lSD1m"
def f0(a):
    """Quadratic toy function with input-dependent control flow.

    Computes a*a + a elementwise; when the sum is non-positive the result
    is amplified by 100. On the positive branch d(f0)/da = 2a + 1.
    """
    quad = a * a + a
    # Amplify the non-positive branch (equivalent to the if/else form).
    if quad.sum().item() <= 0:
        quad = quad * 100
    return quad
# + [markdown] id="PMjCEITmOxfJ"
# $d = 2a^2 + a\\\text{a.grad} = 2a + 1$
# + id="LB9ZyUBSMpL4" outputId="17b03c9e-0a38-4e4e-da7c-fac538fab76a" colab={"base_uri": "https://localhost:8080/", "height": 0}
# Sample a random scalar input and differentiate f0 through its control flow.
a = torch.randn(size=(1,))
a.requires_grad=True
d = f0(a)
d.backward()
print(a)
print(d)
print(a.grad)  # 2a + 1, or 100*(2a + 1) on the non-positive branch
# + id="bwVLRMdXM0L_" outputId="fecfa090-a0af-4043-ee68-f4f5a88dba86" colab={"base_uri": "https://localhost:8080/", "height": 334}
import numpy as np
from matplotlib import pyplot as plt
# Plot f(x) = sin(x) and df/dx computed by autograd (no symbolic derivative).
x = np.linspace(-np.pi, np.pi, 1000)
X = torch.tensor(x, requires_grad=True)
# BUG FIX: backward() returns None, so the original `y = ...backward()`
# silently rebound y to None. Call it for its side effect only: it fills
# X.grad with d(sum sin(X))/dX, which is numerically cos(x).
torch.sin(X).sum().backward()
plt.plot(X.detach().numpy(), np.sin(x))       # f(x) = sin(x)
plt.plot(X.detach().numpy(), X.grad.numpy())  # autograd derivative ~ cos(x)
plt.axis('equal')
| Ch04_The_Preliminaries_A_Crashcourse/Automatic_Differentiation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
"""Process results from 230218."""
import os
import argparse
import sys
sys.path.append('..')
from bayescmd.results_handling import kde_plot
from bayescmd.results_handling import scatter_dist_plot
from bayescmd.results_handling import data_import
from bayescmd.results_handling import plot_repeated_outputs
from bayescmd.results_handling import histogram_plot
from bayescmd.results_handling import data_merge_by_batch
from bayescmd.abc import import_actual_data
from bayescmd.abc import priors_creator
from bayescmd.util import findBaseDir
import json
import matplotlib.pyplot as plt
import matplotlib as mpl
from PIL import Image
from io import BytesIO
mpl.rc('figure', dpi=400)
from distutils import dir_util
def TIFF_exporter(fig, fname, fig_dir='.', extra_artists=()):
    """Save a matplotlib figure as a TIFF file via an in-memory PNG.

    The figure is rendered to PNG in memory and re-encoded to TIFF with PIL,
    since matplotlib backends do not all write TIFF directly.

    Parameters
    ----------
    fig : matplotlib.figure.Figure
        Figure to export.
    fname : str
        Output file name without extension; '.tiff' is appended.
    fig_dir : str, optional
        Directory the TIFF is written to (default: current directory).
    extra_artists : tuple, optional
        Artists forwarded to ``savefig`` as ``bbox_extra_artists``.
        NOTE(review): matplotlib only honours this together with
        ``bbox_inches='tight'`` — confirm whether that was intended.

    Returns
    -------
    bool
        True on success.
    """
    # (1) render the figure to PNG in memory
    png_buffer = BytesIO()
    fig.savefig(png_buffer, format='png', bbox_extra_artists=extra_artists)
    # (2) re-open with PIL and (3) re-encode as TIFF; the context manager
    # closes the PIL image (the original leaked it), then free the buffer.
    with Image.open(png_buffer) as img:
        img.save(os.path.join(fig_dir, '{}.tiff'.format(fname)))
    png_buffer.close()
    return True
# Resolve the BayesCMD repository root so relative data paths work.
BASEDIR = os.path.abspath(findBaseDir('BayesCMD'))
# Directory holding the accepted posterior parameter samples for this run.
parent_dir = "/home/buck06191/Dropbox/phd/PLOS_paper/data/parameters/hypoxia/healthy/wide_params/inflection_SA/"
conf = "../examples/configuration_files/healthy_hypoxia_config.json"
pfile = os.path.abspath(os.path.join(parent_dir, 'posterior_parameters.csv'))# 'reduced_sorted_parameters.csv'))
# NOTE: `conf` is rebound here from the config *path* to the parsed JSON dict.
with open(conf, 'r') as conf_f:
    conf = json.load(conf_f)
# params = priors_creator(conf['priors']['defaults'],
#                         conf['priors']['variation'])
params = conf['priors']
input_path = os.path.join(BASEDIR,
                          'PLOS_paper',
                          'data',
                          'hypoxia_output.csv')
# Observed (measured) data that the model outputs are compared against.
d0 = import_actual_data(input_path)
targets = conf['targets']
model_name = conf['model_name']
inputs = conf['inputs']
# Bundle everything the plotting helpers below need as keyword arguments.
config = {
    "model_name": model_name,
    "targets": targets,
    "inputs": inputs,
    "parameters": params,
    "input_path": input_path,
    "zero_flag": conf['zero_flag']
}
results = data_import(pfile)
# Reference parameter medians (presumably for comparison plots — confirm).
true_medians= {'P_v': 4.0,
               'R_auto': 1.5,
               'Xtot': 9.1,
               'mu_max': 1.0,
               'n_h': 2.5,
               'n_m': 1.83,
               'phi': 0.036,
               'r_m': 0.027,
               'r_t': 0.018,
               'sigma_coll': 62.79}
# print(results.columns)
# Set accepted limit, lim
lim = 1000
# tols = [0.11]
# Distance metric used to rank/select accepted runs.
d = "NRMSE"
figs = []
# TODO: Fix issue with plot formatting, cutting off axes etc
# TODO: Fix issue with time series cutting short.
# +
# Plot repeated posterior-predictive outputs for the accepted runs.
fig, ax = plot_repeated_outputs(results, n_repeats=25, limit=lim,
                                distance=d, **config)
# Prepend units to each subplot's existing y-axis label (raw strings avoid
# the invalid-escape warnings for \D and \m; the text is unchanged).
for i, label in enumerate(["{} (%)", r"$\Delta${} ($\mu M$)",
                           r"$\Delta${} ($\mu M$)", r"$\Delta${} ($\mu M$)"]):
    ax[i].set_ylabel(label.format(ax[i].get_ylabel()))
for i, y_lim in enumerate([(35, 80), (-1.1, 0.1), (-2, 30), (-25, 2)]):
    ax[i].set_ylim(y_lim)
fig.set_size_inches(18.5, 12.5)
figs.append(fig)
# BUG FIX: figPath was referenced here before assignment (it was only defined
# in a later cell), raising NameError on a clean top-to-bottom run.
figPath = "/home/buck06191/Dropbox/phd/Bayesian_fitting/{}/{}/{}/{}/{}/{}/{}/"\
          "Figures/{}".format(model_name, 'PLOS_paper', 'hypoxia',
                              'healthy', 'wide_range', 'limit', 'tolerance', d)
TIFF_exporter(fig, 'PLOS_healthy_{}_{}_TS'.format(str(lim).replace('.', '_'), d),
              fig_dir=figPath)
# -
# Re-apply the y-limits and redisplay the figure inline.
for i, y_lim in enumerate([(35,80), (-1.1, 0.1), (-2, 30), (-25, 2)]):
    ax[i].set_ylim(y_lim)
fig
# +
# Output directory for the exported figures.
figPath = "/home/buck06191/Dropbox/phd/Bayesian_fitting/{}/{}/{}/{}/{}/{}/{}/"\
          "Figures/{}".format(model_name, 'PLOS_paper', 'hypoxia',
                              'healthy', 'wide_range', 'limit', 'tolerance', d)
TIFF_exporter(fig, 'PLOS_healthy_{}_{}_TS'.format(str(lim).replace('.', '_'), d), fig_dir=figPath)
# -
# Zoom every axis to a narrow time window (t ~ 1046-1048) and export again.
for a in ax:
    a.set_xlim(1046,1048)
    a.set_xticks([1046.0, 1047.0, 1048.0])
ax[0].set_ylim(47.5, 49)
ax[1].set_ylim(-1.02, -0.9)
ax[2].set_ylim(25.4, 26.8)
ax[3].set_ylim(-22.2, -20.7)
figPath = "/home/buck06191/Dropbox/phd/Bayesian_fitting/{}/{}/{}/{}/{}/{}/{}/"\
          "Figures/{}".format(model_name, 'PLOS_paper', 'hypoxia',
                              'healthy', 'wide_range', 'limit', 'tolerance', d)
TIFF_exporter(fig, 'PLOS_healthy_{}_{}_TS_zoom'.format(str(lim).replace('.', '_'), d), fig_dir=figPath)
| scripts/results_Processing/notebooks/healthy_hypoxia_zoom.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (daModels)
# language: python
# name: damodels
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Identification and localization of Sea Lion anatomical features using transfer learning
#
# <NAME>
#
# - In my occasionally free moments.
# + slideshow={"slide_type": "skip"}
from IPython.display import HTML
# + slideshow={"slide_type": "slide"}
HTML('<iframe src="https://player.vimeo.com/video/266222134" width="640" height="360" frameborder="0" allowfullscreen></iframe>')
# + [markdown] slideshow={"slide_type": "slide"}
# # Goal
#
# #### Turn this
# 
# + [markdown] slideshow={"slide_type": "subslide"}
# #### To this
# 
# + [markdown] slideshow={"slide_type": "subslide"}
# ## And eventually
#
# Are these two sea-lions the same?
#
# | Lion 1| Lion 2|
# |:------:|:-------:|
# || |
# | | |
#
# + [markdown] slideshow={"slide_type": "slide"}
# ## Previous Sea Lion identification research
# + [markdown] slideshow={"slide_type": "subslide"}
# 
# + [markdown] slideshow={"slide_type": "subslide"}
# 
# + [markdown] slideshow={"slide_type": "subslide"}
# But how do you get those 180 images in the wild?
#
# 
# + [markdown] slideshow={"slide_type": "slide"}
# ## Biological Analogies
#
# + [markdown] slideshow={"slide_type": "subslide"}
# 
#
# [Source](https://goo.gl/images/LFMbNr)
# + [markdown] slideshow={"slide_type": "notes"}
# Rods and cones in a human eye.
# + [markdown] slideshow={"slide_type": "subslide"}
# 
#
# [Source](http://neuroclusterbrain.com/neuron_model.html)
# + [markdown] slideshow={"slide_type": "notes"}
# Multiple rods and cones are grouped together and activate a single neuron. This is one level of "aggregation" that happens.
# + [markdown] slideshow={"slide_type": "subslide"}
# 
# + [markdown] slideshow={"slide_type": "notes"}
# Our detail vision is only about the size as your thumbnail held at arm length. Your eye rapidly moves and scans the whole image.
# + [markdown] slideshow={"slide_type": "subslide"}
# 
# + [markdown] slideshow={"slide_type": "subslide"}
# 
#
# [Source](https://creativecommons.org/licenses/by-sa/4.0)
# + [markdown] slideshow={"slide_type": "notes"}
# The information is then passed through the brain for processing.
# + [markdown] slideshow={"slide_type": "subslide"}
# 
#
#
# [Source](https://neuwritesd.org/2015/10/22/deep-neural-networks-help-us-read-your-mind/)
# + [markdown] slideshow={"slide_type": "slide"}
# ### Computational Strategy
# + [markdown] slideshow={"slide_type": "subslide"}
# 
#
# [Source](http://cs231n.github.io/classification/)
# + [markdown] slideshow={"slide_type": "notes"}
# The computer sees images as a matrix of numbers.
# + [markdown] slideshow={"slide_type": "subslide"}
# 
#
# [Source](http://www.big-data.tips/tensor-machine-learning)
# + [markdown] slideshow={"slide_type": "notes"}
# These are represented as 4-D matrices, representing: item, width, height, and color
# + [markdown] slideshow={"slide_type": "subslide"}
# 
#
# [Source](https://medium.freecodecamp.org/an-intuitive-guide-to-convolutional-neural-networks-260c2de0a050)
# + [markdown] slideshow={"slide_type": "notes"}
# Convolutions allow us to summarize nearby items.
# + [markdown] slideshow={"slide_type": "subslide"}
# 
#
# [Source](http://cs231n.github.io/convolutional-networks/)
# + [markdown] slideshow={"slide_type": "notes"}
# Convolutions are simply a matrix multiplication operation that slides across an image
# + [markdown] slideshow={"slide_type": "subslide"}
# ResNet is the current state of the art model. It has 25.6M parameters to tune.
# 
# + [markdown] slideshow={"slide_type": "notes"}
# These are subsequently stacked together.
# + [markdown] slideshow={"slide_type": "subslide"}
# 
#
#
# [Source](https://neuwritesd.org/2015/10/22/deep-neural-networks-help-us-read-your-mind/)
# + [markdown] slideshow={"slide_type": "slide"}
# ### ImageNet & Transfer Learning
# + [markdown] slideshow={"slide_type": "subslide"}
# 
#
#
# 15 millions labeled high-resolution images with around 22,000 categories. This collection was used to train ResNet to ~5% top-5 error rate.
#
# 
# + [markdown] slideshow={"slide_type": "subslide"}
# 
#
# [Source](https://torres.ai/artificial-intelligence-content/deeplearning/)
# + [markdown] slideshow={"slide_type": "subslide"}
# Cool and all, but this is what we'd get. The ability to tell if the __whole image__ contained a sea lion.
#
# | Lion 97% | Lion 91% | Not Lion 0.12% |
# |:------:|:-------:|:-------:|
# || | 
#
# + [markdown] slideshow={"slide_type": "slide"}
# ### Retina Net
#
# If you want to know __where__ an object is. You have to change your question a bit.
# + [markdown] slideshow={"slide_type": "subslide"}
# 
#
# [Source](https://heartbeat.fritz.ai/gentle-guide-on-how-yolo-object-localization-works-with-keras-part-2-65fe59ac12d)
# + [markdown] slideshow={"slide_type": "subslide"}
# 
#
# [Source](https://heartbeat.fritz.ai/gentle-guide-on-how-yolo-object-localization-works-with-keras-part-2-65fe59ac12d)
# + [markdown] slideshow={"slide_type": "slide"}
# ### Annotating Images
#
# 
# + [markdown] slideshow={"slide_type": "subslide"}
# 
# + [markdown] slideshow={"slide_type": "slide"}
# ### Current Results
# + [markdown] slideshow={"slide_type": "fragment"}
# Training was done on only 40 labeled images done by one annotator (me). I used the `mobilenet` architecture and the smallest model parameters possible.
# + [markdown] slideshow={"slide_type": "subslide"}
# 
# + [markdown] slideshow={"slide_type": "subslide"}
# 
# + [markdown] slideshow={"slide_type": "subslide"}
# What's the image with the most "recognizable" face?
# + [markdown] slideshow={"slide_type": "fragment"}
# 
# + [markdown] slideshow={"slide_type": "slide"}
# ## Future Directions
#
# (Condensed from [Machine Learning is Fun! Part 4: Modern Face Recognition with Deep Learning by <NAME>](https://medium.com/@ageitgey/machine-learning-is-fun-part-4-modern-face-recognition-with-deep-learning-c3cffc121d78))
# + [markdown] slideshow={"slide_type": "subslide"}
# 
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Landmark detection
#
# 
# + [markdown] slideshow={"slide_type": "subslide"}
# ### That's so 1990
#
# - Does not generalize well with faces that are not "front facing".
# - False positive rate increases as the database size increases.
# - Are these really the "best" points to use?
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Deep Learning Way
#
# 
# + [markdown] slideshow={"slide_type": "notes"}
# Create a model that can turn a picture of a face into a set of numbers.
# + [markdown] slideshow={"slide_type": "subslide"}
# 
# + [markdown] slideshow={"slide_type": "subslide"}
# 
# + [markdown] slideshow={"slide_type": "subslide"}
# This strategy has many advantages:
# - Increasing the database __decreases__ false-positive rate.
# - Images can be taken from any angle.
# - Allows for "outlier" detection of novel faces that aren't in the database.
# - Face databases can be constructed in linear time and searched in `log(n)` time.
# - Generalizes to things beyond faces ... like whiskers and flippers.
# + [markdown] slideshow={"slide_type": "subslide"}
# Challenges yet to overcome:
# - We don't actually have the standard triplets. We have to make some assumptions about which sea lions are definitely not the same.
# - Sea Lions recorded on disparate beaches on the same day are likely different lions.
# - How do we deal with integrating flipper matching, face recognition, whisker recognition, etc into one score.
# - Orders of magnitude fewer images. FaceNet is trained with at least 10 images from >50 million faces; we have ~3000 images of ? unique sea lions.
# -
# ### Other uses
#
# Towards Deep Cellular Phenotyping in Placental Histology
#
# https://arxiv.org/pdf/1804.03270 (May 2018)
# | Detect cells in images | Quantify Cell-level State | Slide-level visualization of cell-types |
# |:------:|:-------:|:-------:|
# |  || 
| slidedecks/2019-03-01-sealion-localization/Dampier-2019.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] colab_type="text" id="2Pmxv2ioyCRw"
# ##### Copyright 2019 The TensorFlow Authors.
# + cellView="form" colab={} colab_type="code" id="b-2ShX25yNWf"
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] colab_type="text" id="pa49bUnKyRgF"
# # Time series forecasting
# + [markdown] colab_type="text" id="11Ilg92myRcw"
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://www.tensorflow.org/tutorials/structured_data/time_series"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
# </td>
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/structured_data/time_series.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/tutorials/structured_data/time_series.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
# </td>
# <td>
# <a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/tutorials/structured_data/time_series.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
# </td>
# </table>
# + [markdown] colab_type="text" id="GU8C5qm_4vZb"
# This tutorial is an introduction to time series forecasting using Recurrent Neural Networks (RNNs). This is covered in two parts: first, you will forecast a univariate time series, then you will forecast a multivariate time series.
# + colab={} colab_type="code" id="7rZnJaGTWQw0"
import tensorflow as tf
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
mpl.rcParams['figure.figsize'] = (8, 6)
mpl.rcParams['axes.grid'] = False
# + [markdown] colab_type="text" id="TokBlnUhWFw9"
# ## The weather dataset
# This tutorial uses a <a href="https://www.bgc-jena.mpg.de/wetter/" class="external">weather time series dataset</a> recorded by the <a href="https://www.bgc-jena.mpg.de" class="external">Max Planck Institute for Biogeochemistry</a>.
#
# This dataset contains 14 different features such as air temperature, atmospheric pressure, and humidity. These were collected every 10 minutes, beginning in 2003. For efficiency, you will use only the data collected between 2009 and 2016. This section of the dataset was prepared by <NAME> for his book [Deep Learning with Python](https://www.manning.com/books/deep-learning-with-python).
# + colab={} colab_type="code" id="xyv_i85IWInT"
# Download and extract the Jena climate CSV (cached by Keras under ~/.keras).
zip_path = tf.keras.utils.get_file(
    origin='https://storage.googleapis.com/tensorflow/tf-keras-datasets/jena_climate_2009_2016.csv.zip',
    fname='jena_climate_2009_2016.csv.zip',
    extract=True)
# get_file returns the zip path; dropping the extension yields the CSV path.
csv_path, _ = os.path.splitext(zip_path)
# + colab={} colab_type="code" id="TX6uGeeeWIkG"
df = pd.read_csv(csv_path)
# + [markdown] colab_type="text" id="VdbOWXiTWM2T"
# Let's take a glance at the data.
# + colab={} colab_type="code" id="ojHE-iCCWIhz"
df.head()
# + [markdown] colab_type="text" id="qfbpcV0MWQzl"
# As you can see above, an observation is recorded every 10 minutes. This means that, for a single hour, you will have 6 observations. Similarly, a single day will contain 144 (6x24) observations.
#
# Given a specific time, let's say you want to predict the temperature 6 hours in the future. In order to make this prediction, you choose to use 5 days of observations. Thus, you would create a window containing the last 720(5x144) observations to train the model. Many such configurations are possible, making this dataset a good one to experiment with.
#
# The function below returns the above described windows of time for the model to train on. The parameter `history_size` is the size of the past window of information. The `target_size` is how far in the future does the model need to learn to predict. The `target_size` is the label that needs to be predicted.
# + colab={} colab_type="code" id="7AoxQuTrWIbi"
def univariate_data(dataset, start_index, end_index, history_size, target_size):
    """Build (window, label) training pairs from a 1-D series.

    Each sample is the `history_size` observations preceding index i,
    reshaped to (history_size, 1); its label is the value `target_size`
    steps after i. `end_index=None` means "up to the end of the series".
    Returns (data, labels) as numpy arrays.
    """
    if end_index is None:
        end_index = len(dataset) - target_size
    windows, targets = [], []
    for i in range(start_index + history_size, end_index):
        # Window of the last `history_size` points, as a column vector.
        windows.append(np.reshape(dataset[i - history_size:i],
                                  (history_size, 1)))
        targets.append(dataset[i + target_size])
    return np.array(windows), np.array(targets)
# + [markdown] colab_type="text" id="qoFJZmXBaxCc"
# In both the following tutorials, the first 300,000 rows of the data will be the training dataset, and the remaining rows will be the validation dataset. This amounts to ~2100 days worth of training data.
# + colab={} colab_type="code" id="ia-MPAHxbInX"
TRAIN_SPLIT = 300000
# + [markdown] colab_type="text" id="EowWDtaNnH1y"
# Setting seed to ensure reproducibility.
# + colab={} colab_type="code" id="-x-GgENynHdx"
tf.random.set_seed(13)
# + [markdown] colab_type="text" id="8YEwr-NoWUpV"
# ## Part 1: Forecast a univariate time series
# First, you will train a model using only a single feature (temperature), and use it to make predictions for that value in the future.
#
# Let's first extract only the temperature from the dataset.
# + colab={} colab_type="code" id="nbdcnm1_WIY9"
uni_data = df['T (degC)']
uni_data.index = df['Date Time']
uni_data.head()
# + [markdown] colab_type="text" id="aQB-46MyWZMm"
# Let's observe how this data looks across time.
# + colab={} colab_type="code" id="ftOExwAqWXSU"
uni_data.plot(subplots=True)
# + colab={} colab_type="code" id="ejSEiDqBWXQa"
uni_data = uni_data.values
# + [markdown] colab_type="text" id="-eFckdUUHWmT"
# It is important to scale features before training a neural network. Standardization is a common way of doing this scaling by subtracting the mean and dividing by the standard deviation of each feature. You could also use a `tf.keras.utils.normalize` method that rescales the values into a range of [0,1].
# + [markdown] colab_type="text" id="mxbIic5TMlxx"
# Note: The mean and standard deviation should only be computed using the training data.
# + colab={} colab_type="code" id="Eji6njXvHusN"
uni_train_mean = uni_data[:TRAIN_SPLIT].mean()
uni_train_std = uni_data[:TRAIN_SPLIT].std()
# + [markdown] colab_type="text" id="8Gob1YJYH0cH"
# Let's standardize the data.
# + colab={} colab_type="code" id="BO55yRD6H0Dx"
uni_data = (uni_data-uni_train_mean)/uni_train_std
# + [markdown] colab_type="text" id="gn8A_nrccKtn"
# Let's now create the data for the univariate model. For part 1, the model will be given the last 20 recorded temperature observations, and needs to learn to predict the temperature at the next time step.
# + colab={} colab_type="code" id="aJJ-T49vWXOZ"
# Use the previous 20 temperature readings to predict the very next value
# (a future offset of 0 means "the value at the current step").
univariate_past_history = 20
univariate_future_target = 0
x_train_uni, y_train_uni = univariate_data(uni_data, 0, TRAIN_SPLIT,
                                           univariate_past_history,
                                           univariate_future_target)
x_val_uni, y_val_uni = univariate_data(uni_data, TRAIN_SPLIT, None,
                                       univariate_past_history,
                                       univariate_future_target)
# + [markdown] colab_type="text" id="aWpVMENsdp0N"
# This is what the `univariate_data` function returns.
# + colab={} colab_type="code" id="feDd95XFdz5H"
print ('Single window of past history')
print (x_train_uni[0])
print ('\n Target temperature to predict')
print (y_train_uni[0])
# + [markdown] colab_type="text" id="hni3Jt9OMR1_"
# Now that the data has been created, let's take a look at a single example. The information given to the network is given in blue, and it must predict the value at the red cross.
# + colab={} colab_type="code" id="qVukM9dRipop"
def create_time_steps(length):
    """Return x-axis offsets [-length, ..., -1] for plotting history points."""
    return [step - length for step in range(length)]
# + colab={} colab_type="code" id="QQeGvh7cWXMR"
def show_plot(plot_data, delta, title):
    """Plot a history window, the true future and an optional prediction.

    plot_data[0] is the history series; any later entries are single future
    points drawn at x = delta (or 0 when delta is falsy). Returns the plt
    module so the caller can chain .show().
    """
    series_labels = ['History', 'True Future', 'Model Prediction']
    series_markers = ['.-', 'rx', 'go']
    history_steps = create_time_steps(plot_data[0].shape[0])
    future = delta if delta else 0
    plt.title(title)
    # First series is the full history curve ...
    plt.plot(history_steps, plot_data[0].flatten(), series_markers[0],
             label=series_labels[0])
    # ... remaining entries are single points at the forecast horizon.
    for idx in range(1, len(plot_data)):
        plt.plot(future, plot_data[idx], series_markers[idx], markersize=10,
                 label=series_labels[idx])
    plt.legend()
    plt.xlim([history_steps[0], (future + 5) * 2])
    plt.xlabel('Time-Step')
    return plt
# + colab={} colab_type="code" id="Pd05iV-UWXKL"
show_plot([x_train_uni[0], y_train_uni[0]], 0, 'Sample Example')
# + [markdown] colab_type="text" id="b5rUJ_2YMWzG"
# ### Baseline
# Before proceeding to train a model, let's first set a simple baseline. Given an input point, the baseline method looks at all the history and predicts the next point to be the average of the last 20 observations.
# + colab={} colab_type="code" id="P9nYWcxMMWnr"
def baseline(history):
    """Naive baseline forecast: predict the mean of the history window."""
    return np.asarray(history).mean()
# + colab={} colab_type="code" id="KMcdFYKQMWlm"
show_plot([x_train_uni[0], y_train_uni[0], baseline(x_train_uni[0])], 0,
'Baseline Prediction Example')
# + [markdown] colab_type="text" id="067m6t8cMakb"
# Let's see if you can beat this baseline using a recurrent neural network.
# + [markdown] colab_type="text" id="H4crpOcoMlSe"
# ### Recurrent neural network
#
# A Recurrent Neural Network (RNN) is a type of neural network well-suited to time series data. RNNs process a time series step-by-step, maintaining an internal state summarizing the information they've seen so far. For more details, read the [RNN tutorial](https://www.tensorflow.org/tutorials/sequences/recurrent). In this tutorial, you will use a specialized RNN layer called Long Short Term Memory ([LSTM](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/layers/LSTM))
#
# Let's now use `tf.data` to shuffle, batch, and cache the dataset.
# + colab={} colab_type="code" id="kk-evkrmMWh9"
# Build shuffled, batched, endlessly-repeating tf.data input pipelines.
BATCH_SIZE = 256
BUFFER_SIZE = 10000
train_univariate = tf.data.Dataset.from_tensor_slices((x_train_uni, y_train_uni))
train_univariate = train_univariate.cache().shuffle(BUFFER_SIZE).batch(BATCH_SIZE).repeat()
# Validation data is batched but intentionally not shuffled.
val_univariate = tf.data.Dataset.from_tensor_slices((x_val_uni, y_val_uni))
val_univariate = val_univariate.batch(BATCH_SIZE).repeat()
# + [markdown] colab_type="text" id="n2AmKkyVS5Ht"
# The following visualisation should help you understand how the data is represented after batching.
#
# 
# + [markdown] colab_type="text" id="4nagdTRNfPuZ"
# You will see the LSTM requires the input shape of the data it is being given.
# + colab={} colab_type="code" id="IDbpHosCMWZO"
# Minimal recurrent model: one 8-unit LSTM over the (history, features)
# window followed by a single linear unit producing the forecast value.
simple_lstm_model = tf.keras.models.Sequential([
    tf.keras.layers.LSTM(8, input_shape=x_train_uni.shape[-2:]),
    tf.keras.layers.Dense(1)
])
# Mean-absolute-error regression loss with the Adam optimizer.
simple_lstm_model.compile(optimizer='adam', loss='mae')
# + [markdown] colab_type="text" id="NOGZtDAqMtSi"
# Let's make a sample prediction, to check the output of the model.
# + colab={} colab_type="code" id="2mPZbIKCMtLR"
for x, y in val_univariate.take(1):
print(simple_lstm_model.predict(x).shape)
# + [markdown] colab_type="text" id="QYz6RN_mMyau"
# Let's train the model now. Due to the large size of the dataset, in the interest of saving time, each epoch will only run for 200 steps, instead of the complete training data as normally done.
# + colab={} colab_type="code" id="0opH9xi5MtIk"
EVALUATION_INTERVAL = 200
EPOCHS = 10
simple_lstm_model.fit(train_univariate, epochs=EPOCHS,
steps_per_epoch=EVALUATION_INTERVAL,
validation_data=val_univariate, validation_steps=50)
# + [markdown] colab_type="text" id="euyPo_lyNryZ"
# #### Predict using the simple LSTM model
# Now that you have trained your simple LSTM, let's try and make a few predictions.
# + colab={} colab_type="code" id="S2rRLrs8MtGU"
for x, y in val_univariate.take(3):
plot = show_plot([x[0].numpy(), y[0].numpy(),
simple_lstm_model.predict(x)[0]], 0, 'Simple LSTM model')
plot.show()
# + [markdown] colab_type="text" id="Q-AVEJyRNvt0"
# This looks better than the baseline. Now that you have seen the basics, let's move on to part two, where you will work with a multivariate time series.
# + [markdown] colab_type="text" id="VlJYi3_HXcw8"
# ## Part 2: Forecast a multivariate time series
# + [markdown] colab_type="text" id="hoxNZ2GM7DPm"
# The original dataset contains fourteen features. For simplicity, this section considers only three of the original fourteen. The features used are air temperature, atmospheric pressure, and air density.
#
# To use more features, add their names to this list.
# + colab={} colab_type="code" id="DphrB7bxSNDd"
features_considered = ['p (mbar)', 'T (degC)', 'rho (g/m**3)']
# + colab={} colab_type="code" id="IfQUSiJfUpXJ"
features = df[features_considered]
features.index = df['Date Time']
features.head()
# + [markdown] colab_type="text" id="qSfhTZi5r15R"
# Let's have a look at how each of these features vary across time.
# + colab={} colab_type="code" id="QdgC8zvGr21X"
features.plot(subplots=True)
# + [markdown] colab_type="text" id="cqStgZ-O1b3_"
# As mentioned, the first step will be to standardize the dataset using the mean and standard deviation of the training data.
# + colab={} colab_type="code" id="W7VuNIwfHRHx"
dataset = features.values
data_mean = dataset[:TRAIN_SPLIT].mean(axis=0)
data_std = dataset[:TRAIN_SPLIT].std(axis=0)
# + colab={} colab_type="code" id="eJUeWDqploCt"
dataset = (dataset-data_mean)/data_std
# + [markdown] colab_type="text" id="LyuGuJUgjUK3"
# ### Single step model
# In a single step setup, the model learns to predict a single point in the future based on some history provided.
#
# The below function performs the same windowing task as above, however, here it samples the past observation based on the step size given.
# + colab={} colab_type="code" id="d-rVX4d3OF86"
def multivariate_data(dataset, target, start_index, end_index, history_size,
                      target_size, step, single_step=False):
    """Slice ``dataset`` into overlapping history windows with matching labels.

    Each window covers ``history_size`` rows ending just before index ``i`` and
    is sub-sampled every ``step`` rows. The label for the window is either the
    single value ``target[i + target_size]`` (``single_step=True``) or the slice
    ``target[i:i + target_size]``.

    Returns a pair of numpy arrays: (windows, labels).
    """
    if end_index is None:
        # Leave room at the end of the series for the furthest label.
        end_index = len(dataset) - target_size
    first_label_index = start_index + history_size
    windows = []
    labels = []
    for i in range(first_label_index, end_index):
        window_rows = range(i - history_size, i, step)
        windows.append(dataset[window_rows])
        if single_step:
            labels.append(target[i + target_size])
        else:
            labels.append(target[i:i + target_size])
    return np.array(windows), np.array(labels)
# + [markdown] colab_type="text" id="HWVGYwbN2ITI"
# In this tutorial, the network is shown data from the last five (5) days, i.e. 720 observations that are sampled every hour. The sampling is done every one hour since a drastic change is not expected within 60 minutes. Thus, 120 observations represent the history of the last five days. For the single step prediction model, the label for a datapoint is the temperature 12 hours into the future. In order to create a label for this, the temperature after 72(12*6) observations is used.
# + colab={} colab_type="code" id="HlhVGzPhmMYI"
past_history = 720  # look back 5 days of 10-minute observations (720 rows)
future_target = 72  # predict the temperature 12 hours ahead (72 * 10 min)
STEP = 6            # sub-sample each window hourly (every 6th observation)
# Column 1 of `dataset` is the (standardised) temperature, used as the target.
x_train_single, y_train_single = multivariate_data(dataset, dataset[:, 1], 0,
                                                   TRAIN_SPLIT, past_history,
                                                   future_target, STEP,
                                                   single_step=True)
x_val_single, y_val_single = multivariate_data(dataset, dataset[:, 1],
                                               TRAIN_SPLIT, None, past_history,
                                               future_target, STEP,
                                               single_step=True)
# + [markdown] colab_type="text" id="CamMObrwPhnp"
# Let's look at a single data-point.
#
# + colab={} colab_type="code" id="_tVKm-ZIPls0"
# One training window has shape (past_history/STEP, n_features) = (120, 3).
print ('Single window of past history : {}'.format(x_train_single[0].shape))
# + colab={} colab_type="code" id="eCWG4xgQ3O6E"
# Build cached/shuffled/batched, infinitely repeating tf.data pipelines.
train_data_single = tf.data.Dataset.from_tensor_slices((x_train_single, y_train_single))
train_data_single = train_data_single.cache().shuffle(BUFFER_SIZE).batch(BATCH_SIZE).repeat()
val_data_single = tf.data.Dataset.from_tensor_slices((x_val_single, y_val_single))
val_data_single = val_data_single.batch(BATCH_SIZE).repeat()
# + colab={} colab_type="code" id="0aWec9_nlxBl"
# Single-step model: one LSTM layer plus a dense head emitting one value.
single_step_model = tf.keras.models.Sequential()
single_step_model.add(tf.keras.layers.LSTM(32,
                                           input_shape=x_train_single.shape[-2:]))
single_step_model.add(tf.keras.layers.Dense(1))
single_step_model.compile(optimizer=tf.keras.optimizers.RMSprop(), loss='mae')
# + [markdown] colab_type="text" id="oYhUfWjwOPFN"
# Let's check out a sample prediction.
# + colab={} colab_type="code" id="yY7FodHVOPsH"
# Sanity-check the untrained model's output shape on one validation batch.
for x, y in val_data_single.take(1):
    print(single_step_model.predict(x).shape)
# + colab={} colab_type="code" id="U0jnt2l2mwkl"
single_step_history = single_step_model.fit(train_data_single, epochs=EPOCHS,
                                            steps_per_epoch=EVALUATION_INTERVAL,
                                            validation_data=val_data_single,
                                            validation_steps=50)
# + colab={} colab_type="code" id="-ZAdeAnP5c72"
def plot_train_history(history, title):
    """Plot training vs. validation loss curves from a Keras History object.

    `history.history` must contain the 'loss' and 'val_loss' series.
    """
    train_loss = history.history['loss']
    valid_loss = history.history['val_loss']
    x_axis = range(len(train_loss))

    plt.figure()
    plt.plot(x_axis, train_loss, 'b', label='Training loss')
    plt.plot(x_axis, valid_loss, 'r', label='Validation loss')
    plt.title(title)
    plt.legend()
    plt.show()
# + colab={} colab_type="code" id="l8lBKA-z5yYV"
plot_train_history(single_step_history,
                   'Single Step Training and validation loss')
# + [markdown] colab_type="text" id="DfjrGAlEUp7i"
# #### Predict a single step future
# Now that the model is trained, let's make a few sample predictions. The model is given the history of three features over the past five days sampled every hour (120 data-points), since the goal is to predict the temperature, the plot only displays the past temperature. The prediction is made one day into the future (hence the gap between the history and prediction).
# + colab={} colab_type="code" id="h1qmPLLVUpuN"
# Plot three validation examples: past temperature (feature column 1),
# the true future value, and the model's single-step prediction.
for x, y in val_data_single.take(3):
    plot = show_plot([x[0][:, 1].numpy(), y[0].numpy(),
                      single_step_model.predict(x)[0]], 12,
                     'Single Step Prediction')
    plot.show()
# + [markdown] colab_type="text" id="2GnE087bJYSu"
# ### Multi-Step model
# In a multi-step prediction model, given a past history, the model needs to learn to predict a range of future values. Thus, unlike a single step model, where only a single future point is predicted, a multi-step model predicts a sequence of the future.
#
# For the multi-step model, the training data again consists of recordings over the past five days sampled every hour. However, here, the model needs to learn to predict the temperature for the next 12 hours. Since an observation is taken every 10 minutes, the output is 72 predictions. For this task, the dataset needs to be prepared accordingly, thus the first step is just to create it again, but with a different target window.
# + colab={} colab_type="code" id="kZCk9fqyJZqX"
future_target = 72  # now a 12-hour *sequence* of 72 future temperature values
# Same windowing as before, but single_step defaults to False, so each label is
# a slice of length `future_target` instead of a single point.
x_train_multi, y_train_multi = multivariate_data(dataset, dataset[:, 1], 0,
                                                 TRAIN_SPLIT, past_history,
                                                 future_target, STEP)
x_val_multi, y_val_multi = multivariate_data(dataset, dataset[:, 1],
                                             TRAIN_SPLIT, None, past_history,
                                             future_target, STEP)
# + [markdown] colab_type="text" id="LImXPwAGRtWy"
# Let's check out a sample data-point.
# + colab={} colab_type="code" id="SpWDcBkQRwS-"
print ('Single window of past history : {}'.format(x_train_multi[0].shape))
print ('\n Target temperature to predict : {}'.format(y_train_multi[0].shape))
# + colab={} colab_type="code" id="cjR4PJArMOpA"
# tf.data pipelines mirroring the single-step setup.
train_data_multi = tf.data.Dataset.from_tensor_slices((x_train_multi, y_train_multi))
train_data_multi = train_data_multi.cache().shuffle(BUFFER_SIZE).batch(BATCH_SIZE).repeat()
val_data_multi = tf.data.Dataset.from_tensor_slices((x_val_multi, y_val_multi))
val_data_multi = val_data_multi.batch(BATCH_SIZE).repeat()
# + [markdown] colab_type="text" id="IZcg8FWpSG8K"
# Plotting a sample data-point.
# + colab={} colab_type="code" id="ksXKVbwBV7D3"
def multi_step_plot(history, true_future, prediction):
    """Visualise a past window (column 1), the true future sequence, and —
    when `prediction` contains any non-zero entry — the predicted sequence.

    Future points are plotted at hourly positions (index / STEP) so they line
    up with the hourly-sampled history.
    """
    plt.figure(figsize=(12, 6))
    past_steps = create_time_steps(len(history))
    future_steps = np.arange(len(true_future)) / STEP

    plt.plot(past_steps, np.array(history[:, 1]), label='History')
    plt.plot(future_steps, np.array(true_future), 'bo',
             label='True Future')
    if prediction.any():
        plt.plot(future_steps, np.array(prediction), 'ro',
                 label='Predicted Future')
    plt.legend(loc='upper left')
    plt.show()
# + [markdown] colab_type="text" id="LCQKetflZRMF"
# In this plot and subsequent similar plots, the history and the future data are sampled every hour.
# + colab={} colab_type="code" id="R6G8bacQR4w2"
# Plot one training example with an (all-zero) placeholder prediction.
for x, y in train_data_multi.take(1):
    multi_step_plot(x[0], y[0], np.array([0]))
# + [markdown] colab_type="text" id="XOjz8DzZ4HFS"
# Since the task here is a bit more complicated than the previous task, the model now consists of two LSTM layers. Finally, since 72 predictions are made, the dense layer outputs 72 predictions.
# + colab={} colab_type="code" id="byAl0NKSNBP6"
# Stacked-LSTM model: the first LSTM returns the full sequence so the second
# can consume it; the dense head emits all 72 future steps at once.
multi_step_model = tf.keras.models.Sequential()
multi_step_model.add(tf.keras.layers.LSTM(32,
                                          return_sequences=True,
                                          input_shape=x_train_multi.shape[-2:]))
multi_step_model.add(tf.keras.layers.LSTM(16, activation='relu'))
multi_step_model.add(tf.keras.layers.Dense(72))
# clipvalue bounds each gradient element, guarding against exploding gradients.
multi_step_model.compile(optimizer=tf.keras.optimizers.RMSprop(clipvalue=1.0), loss='mae')
# + [markdown] colab_type="text" id="UvB7zBqVSMyl"
# Let's see how the model predicts before it trains.
# + colab={} colab_type="code" id="13_ZWvB9SRlZ"
for x, y in val_data_multi.take(1):
    print (multi_step_model.predict(x).shape)
# + colab={} colab_type="code" id="7uwOhXo3Oems"
multi_step_history = multi_step_model.fit(train_data_multi, epochs=EPOCHS,
                                          steps_per_epoch=EVALUATION_INTERVAL,
                                          validation_data=val_data_multi,
                                          validation_steps=50)
# + colab={} colab_type="code" id="UKfQoBjQ5l7U"
plot_train_history(multi_step_history, 'Multi-Step Training and validation loss')
# + [markdown] colab_type="text" id="oDg94-yq4pas"
# #### Predict a multi-step future
# Let's now have a look at how well your network has learnt to predict the future.
# + colab={} colab_type="code" id="dt22wq6fyIBU"
# Visualise three validation windows with their 72-step predictions.
for x, y in val_data_multi.take(3):
    multi_step_plot(x[0], y[0], multi_step_model.predict(x)[0])
# + [markdown] colab_type="text" id="pOzaIRYBhqwg"
# ## Next steps
# This tutorial was a quick introduction to time series forecasting using an RNN. You may now try to predict the stock market and become a billionaire.
#
# In addition, you may also write a generator to yield data (instead of the uni/multivariate_data function), which would be more memory efficient. You may also check out this [time series windowing](https://www.tensorflow.org/guide/data#time_series_windowing) guide and use it in this tutorial.
#
# For further understanding, you may read Chapter 15 of [Hands-on Machine Learning with Scikit-Learn, Keras, and TensorFlow](https://www.oreilly.com/library/view/hands-on-machine-learning/9781492032632/), 2nd Edition and Chapter 6 of [Deep Learning with Python](https://www.manning.com/books/deep-learning-with-python).
| site/en-snapshot/tutorials/structured_data/time_series.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Loading 3D pose output files
# This short script demonstrates how to load and visualise the generated datasets.
#
# Each dataset consists of 3 file types:
# * **n** images
# * **1** 3D pose dataframe
# * **1** label lookup table, containing keypoint names
#
# The **.hdf5** formatted file dataframe contains the following entries for each generated image:
#
# * file_name 1 string (relative)
# * rot_mat 3 x 3 float
# * trans_mat 3 x 1 float
# * intrinsics_mat 3 x 3 float
# * bounding_box 4 x 1 float
# * key_points_3D 3 x k float (provide name sheet)
# * key_points_2D 2 x k float
# * visibility 1 x k int (0 occluded or 1 visible)
# +
import os
import numpy as np
import pandas as pd

# Location of the generated example dataset (rendered images + pose dataframe).
target_dir = "../example_data/3D/"

# Load the HDF5 pose dataframe; each row describes one rendered image.
out_df = pd.read_hdf(os.path.join(target_dir, "Data_3D_Pose.hdf5"))
out_df
# +
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def set_axes_equal(ax):
    """Force equal scaling on all three axes of a matplotlib 3D plot.

    Workaround: matplotlib's 3D axes offer no built-in 'equal' aspect
    (as of 10/2021), so every axis is expanded symmetrically around its
    midpoint to the largest half-range of the three axes.
    """
    limits = [ax.get_xlim3d(), ax.get_ylim3d(), ax.get_zlim3d()]
    midpoints = [np.mean(lim) for lim in limits]
    # Half of the largest span across x, y and z.
    radius = 0.5 * max(abs(lim[1] - lim[0]) for lim in limits)

    ax.set_xlim3d([midpoints[0] - radius, midpoints[0] + radius])
    ax.set_ylim3d([midpoints[1] - radius, midpoints[1] + radius])
    ax.set_zlim3d([midpoints[2] - radius, midpoints[2] + radius])
# -
# ### Load and display 3D coordinates
# Change **show_entry** to the id of the sample, you want to examine
#
# Check the dataframe above to see which id corresponds to which image.
# +
# Id of the dataframe row (i.e. rendered image) to visualise.
show_entry = 0
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
display_points_3D = out_df.loc[show_entry]["key_points_3D"]
# Scatter only the keypoints flagged as visible (visibility == 1).
for i,xyz in enumerate(display_points_3D):
    if out_df.loc[show_entry]["visibility"][i] == 1:
        ax.scatter(xyz[0], xyz[1], xyz[2], marker='o',s=10)
"""
# additionally, plot the camera location
ax.scatter(out_df.loc[show_entry]["cam_trans"][0],
           out_df.loc[show_entry]["cam_trans"][1],
           out_df.loc[show_entry]["cam_trans"][2], marker='x')
"""
ax.set_xlabel('X axis')
ax.set_ylabel('Y axis')
ax.set_zlabel('Z axis')
# use custom function to ensure equal axis proportions
set_axes_equal(ax)
# opens external plot
plt.title(out_df.loc[show_entry]["file_name"])
plt.show()
# -
# ### Load and display 2D coordinates
# as well as **3D coordinates** projected onto the **camera view**.
# +
# file_name cam_rot cam_trans cam_intrinsics bounding_box key_points_3D key_points_2D visibility
# Camera extrinsics (R, T) and intrinsics (C) of the selected image.
R = np.array(out_df.loc[show_entry]["cam_rot"])
T = np.reshape(np.array(out_df.loc[show_entry]["cam_trans"]),(3,1))
C = np.array(out_df.loc[show_entry]["cam_intrinsics"])

fig = plt.figure()
ax = fig.add_subplot()
# display the generated image
display_img = plt.imread(os.path.join(target_dir, out_df.loc[show_entry]["file_name"]))
ax.imshow(display_img)

# The 3x4 projection matrix is constant for the whole image, so build it once
# (the original recomputed it inside the keypoint loop on every iteration).
P = C @ np.hstack([R, T])

for i, x in enumerate(display_points_3D):
    X = np.reshape(np.array(out_df.loc[show_entry]["key_points_3D"][i]),(3,-1))
    # project the 3D point: homogeneous coordinates -> pixel space
    X_hom = np.vstack([X, np.ones(X.shape[1])])
    X_hom = P @ X_hom
    X_2d = X_hom[:2, :] / X_hom[2, :]
    # ground-truth 2D annotation, for visual comparison with the projection
    gt_x_2d = out_df.loc[show_entry]["key_points_2D"][i][0]
    gt_y_2d = out_df.loc[show_entry]["key_points_2D"][i][1]
    ax.scatter(gt_x_2d, gt_y_2d, marker='o', s=10)
    # NOTE(review): the vertical flip uses shape[1] (width); for non-square
    # images the height is shape[0] -- confirm the renderer's convention.
    ax.scatter(X_2d[0], display_img.shape[1]-X_2d[1], marker='^', s=2)

ax.set_xlabel('X axis')
ax.set_ylabel('Y axis')
# NOTE(review): image arrays are (rows=height, cols=width); xlim should
# probably use shape[1] and ylim shape[0]. Kept as-is since the example
# images appear to be square -- verify with a non-square render.
ax.set_xlim([0,display_img.shape[0]])
ax.set_ylim([0,display_img.shape[1]])
ax.set_aspect('equal')
ax.invert_yaxis()
plt.title(out_df.loc[show_entry]["file_name"] + "_projected")
plt.show()
# -
| evaluation/Example_3D_data_loader.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# GAN-MNIST
# https://github.com/taki0112/GAN-Tensorflow/blob/master/Vanilla_GAN.py
# +
# https://arxiv.org/abs/1406.2661
# Generative Adversarial Network(GAN)
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np

from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("./MNIST_data/", one_hot=True)

# Hyper parameter
total_epoch = 100      # number of passes over the training set
batch_size = 100
learning_rate = 0.0001
n_hidden = 256         # hidden units of both generator and discriminator
n_input = 28 * 28      # flattened MNIST image size
# The amount of noise to use as input to the generator
n_noise = 128

# Since GAN is also an unsupervised learning, it does not use Y like Autoencoder.
X = tf.placeholder(tf.float32, [None, n_input])
# Use noise Z as input value.
Z = tf.placeholder(tf.float32, [None, n_noise])
def generator(noise_z):
    """Map a batch of noise vectors to flattened fake MNIST images in [0, 1]."""
    with tf.variable_scope('generator'):
        hidden = tf.layers.dense(inputs=noise_z, units=n_hidden,
                                 activation=tf.nn.relu)
        fake_images = tf.layers.dense(inputs=hidden, units=n_input,
                                      activation=tf.nn.sigmoid)
    return fake_images
def discriminator(inputs, reuse=None) :
    """Score a batch of flattened images with a probability of being real.

    Set reuse=True on the second call so the real-image and fake-image
    discriminators share one set of weights.
    """
    with tf.variable_scope('discriminator') as scope:
        # In order to make the variables of the models that discriminate the actual image from the images generated by the noise the same,
        # Reuse the previously used variables.
        if reuse :
            scope.reuse_variables()
        hidden = tf.layers.dense(inputs=inputs, units=n_hidden, activation=tf.nn.relu)
        output = tf.layers.dense(inputs=hidden, units=1, activation=tf.nn.sigmoid)
        return output
def get_noise(batch_size, n_noise):
    """Draw a (batch_size, n_noise) matrix of standard-normal generator inputs."""
    shape = (batch_size, n_noise)
    return np.random.normal(size=shape)
# Generate random images using noise
G = generator(Z)
# Returns the value determined using the real image.
D_real = discriminator(X)
# Returns a value that determines whether the image created using noise is a real image.
D_gene = discriminator(G, reuse=True)

"""
According to the paper, optimization of the GAN model maximizes loss_G and loss_D.
We minimize the value of D_gene to maximize loss_D.
This is because...
When you insert the real image in the discriminator, it tries to have the maximum value as: tf.log (D_real)
And the maximum value as: tf.log (1 - D_gene) even when you insert a fake image.
This makes the discriminator learn the discriminator neural network so that the image produced by the generator is judged to be fake.
"""
loss_D = tf.reduce_mean(tf.log(D_real) + tf.log(1 - D_gene))
# TensorBoard scalars log the negated losses (the quantities being minimized).
tf.summary.scalar('loss_D', -loss_D)

"""
On the other hand, to maximize loss_G, we maximize the value of D_gene,
It learns the generator neural network so that when the false image is inserted, the discriminator judges that the image is as real as possible.
In the paper, we find a generator that minimizes to a formula such as loss_D,
This is the same as maximizing the D_gene value, so you can use: loss_G = tf.reduce_mean(tf.log(D_gene))
"""
loss_G = tf.reduce_mean(tf.log(D_gene))
tf.summary.scalar('loss_G', -loss_G)

# If you want to see another loss function, see the following link.
# http://bamos.github.io/2016/08/09/deep-completion/

# When loss_D is obtained, only variables used in the generator neural network are used,
# (the separate variable lists let each optimizer update only its own network)
vars_D = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='discriminator')
vars_G = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='generator')

# According to the GAN thesis formula, the loss should be maximized, but since the optimization function is used to minimize it, a negative sign is added to loss_D and loss_G to be optimized.
train_D = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(-loss_D, var_list=vars_D)
train_G = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(-loss_G, var_list=vars_G)
# +
# Start training !
sess = tf.Session()
sess.run(tf.global_variables_initializer())

total_batch = int(mnist.train.num_examples/batch_size)
loss_val_D, loss_val_G = 0, 0

merged = tf.summary.merge_all()
writer = tf.summary.FileWriter('./logs', sess.graph)

for epoch in range(total_epoch) :
    for i in range(total_batch) :
        batch_x, batch_y = mnist.train.next_batch(batch_size)
        noise = get_noise(batch_size, n_noise)
        # It learns discriminator and generator neural network separately.
        _, loss_val_D = sess.run([train_D, loss_D],
                                 feed_dict={X : batch_x, Z : noise})
        _, loss_val_G = sess.run([train_G, loss_G],
                                 feed_dict={Z : noise})
    # Log TensorBoard summaries once per epoch (evaluated on the last batch).
    summary = sess.run(merged, feed_dict={X: batch_x, Z: noise})
    writer.add_summary(summary, global_step=epoch)
    if epoch % 10 == 0:
        print('Epoch:', '%04d' % epoch,
              'D loss: {:.4}'.format(-loss_val_D),
              'G loss: {:.4}'.format(-loss_val_G))
    # Create and save images periodically to see how learning is going
    if epoch == 0 or epoch % 10 == 0 or epoch == total_epoch-1:
        sample_size = 10
        noise = get_noise(sample_size, n_noise)
        samples = sess.run(G, feed_dict={Z : noise})
        fig, ax = plt.subplots(nrows=1, ncols=sample_size, figsize=(sample_size, 1))
        for i in range(sample_size) :
            ax[i].set_axis_off()
            ax[i].imshow(np.reshape(samples[i], (28,28)))
        # note: requires an existing 'samples/' directory
        plt.savefig('samples/{}.png'.format(str(epoch).zfill(3)), bbox_inches='tight')
        plt.close(fig)
print('Optimized!')
| tf/GAN-MNIST.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <table>
# <tr><td><img style="height: 150px;" src="images/geo_hydro1.jpg"></td>
# <td bgcolor="#FFFFFF">
# <p style="font-size: xx-large; font-weight: 900; line-height: 100%">AG Dynamics of the Earth</p>
# <p style="font-size: large; color: rgba(0,0,0,0.5);">Juypter notebooks</p>
# <p style="font-size: large; color: rgba(0,0,0,0.5);"><NAME></p>
# </td>
# </tr>
# </table>
# # Angewandte Geophysik II: Quadratische Gleichungen
# ----
# *<NAME>,
# Geophysics Section,
# Institute of Geological Sciences,
# Freie Universität Berlin,
# Germany*
# Eine *quadratische Gleichung* hat die Form:
# $$
# a x^2 + b x + c = 0
# $$
# mit $a$, $b$ und $c$ den Koeffizienten.
# Dividieren wir die Gleichung durch $a$, folgt die *Normalenform*:
# $$
# x^2 + {b \over a} x + {c \over a} = 0
# $$
# ## Zerlegung in Linearfaktoren
#
# Die *quadratische Gleichung*
# $$
# ax^2+bx+c=0
# $$
# kann in *Linearfaktoren* zerlegt werden:
# $$
# a (x-x_1) (x-x_2) = 0
# $$
# ## Nullstellen
#
# Die Anzahl der Nullstellen kann durch die *Diskriminante* bestimmt werden.
# $$
# D = b^2 - 4ac
# $$
#
# Es gilt
# - $D>0$: Zwei reelle Nullstellen $x_1$ und $x_2$
# - $D=0$: Eine reelle Nullstelle $x_1$
# - $D<0$: Keine reelle Nullstelle (aber ...)
# %matplotlib inline
import numpy as np
import cmath
import matplotlib.pyplot as plt
from ipywidgets import interactive, fixed
import ipywidgets as widgets
# Wir definieren das Polynom als Funktion:
def f(x, a, b, c):
    """Evaluate the quadratic polynomial a*x**2 + b*x + c at x.

    Works element-wise when x is a numpy array.
    """
    return a * x ** 2 + b * x + c
# Eine weitere Funktion zum Rechnen und Plotten:
def plot_quadratic(minmax,a,b,c):
    """Plot f(x) = a x^2 + b x + c over [minmax[0], minmax[1]], marking the
    roots (Nullstellen) and the vertex (Scheitelpunkt)."""
    xmin=minmax[0];xmax=minmax[1]
    x = np.linspace(xmin,xmax,51)
    y = f(x,a,b,c)
    # square root of the discriminant (complex when b^2 - 4ac < 0)
    det=cmath.sqrt(b**2-4*a*c)
    # get roots
    x0 = np.roots([a,b,c])
    #print('roots: ',x0)
    # vertex; only defined for a genuine quadratic (a != 0)
    xs,ys = 0,0
    if (a != 0):
        xs = -b/(2*a)
        ys = c - b**2/(4*a)
    plt.figure(figsize=(12,8))
    plt.xlim([xmin,xmax])
    plt.ylim([-4,14])
    plt.plot([xmin,xmax],[0,0],color='grey')
    plt.plot(x,y)
    plt.plot(x0,f(x0,a,b,c),linewidth='0',marker='.',markersize=40,label='Nullstellen')
    plt.plot(xs,ys,linewidth='0',marker='.',markersize=40,label='Scheitelpunkt')
    function='f(x)='+str(a)+'x$^2$+'+str(b)+'x+'+str(c)
    plt.title(function+' -- det:'+str(np.around(det,2)))
    plt.legend()

# Example: x^2 - 1 with roots at +/- 1.
plot_quadratic([-4,4],a=1.,b=0.,c=-1.)
# Und das ganze interaktiv:
# +
# Interactive sliders for the x-range and the three coefficients a, b, c.
w = dict(
    minmax=widgets.FloatRangeSlider(min=-10,max=10,step=1,value=[-4,4],continuous_update=False,description='xmin'),
    a=widgets.FloatSlider(min=-2,max=2,step=0.1,value=1.0,description='a'),
    b=widgets.FloatSlider(min=-2,max=2,step=0.1,value=0.0,description='b'),
    c=widgets.FloatSlider(min=-2,max=2,step=0.1,value=-1.0,description='c')
)
# Re-render the plot whenever a slider changes; sliders on the left, plot right.
output = widgets.interactive_output(plot_quadratic, w)
box = widgets.HBox([widgets.VBox([*w.values()]), output])
display(box)
# -
# ## Lösungen
#
# Die Lösungen der quadratischen Gleichung lassen sich mit folgender Formel berechnen:
# $$
# x_{1,2} = {{-b \pm \sqrt{b^2 - 4ac}} \over {2a}}
# $$
import numpy as np
import cmath

# Example coefficients: x^2 - 1 = 0, roots +1 and -1.
a = 1
b = 0
c = -1
print ('Coefficients a,b,c: ',a,b,c)
# Quadratic formula: x = (-b +/- sqrt(b^2 - 4ac)) / (2a).
# BUGFIX: the original closed the parenthesis in the wrong place,
# (-b + np.sqrt(...) / (2*a)), dividing only the square root by 2a. That
# happened to give the right answer here only because b == 0 and 2a == 2.
x1 = (-b + np.sqrt(b**2 - 4*a*c)) / (2*a)
x2 = (-b - np.sqrt(b**2 - 4*a*c)) / (2*a)
print ('Solutions x1/2: ',x1,x2)
# Wie kommen wir auf die Lösungsformel?
#
# Starte mit der quadratischen Gleichung und ergänze, um eine binomische Formel zu bekommen:
# $$
# \begin{array}{rcll}
# ax^2+bx+c &=& 0 & | -c\\
# ax^2+bx &=& -c & |\times 4a\\
# 4a^2x^2+4abx &=& -4ac & | +b^2 \\
# (2ax)^2 + 2 \times 2abx + b^2 &=& b^2-4ac & | \mbox{umformen auf bin. Formel}\\
# (2ax+b)^2 &=& b^2-4ac & | \sqrt{}\\
# 2ax+b &=& \pm \sqrt{b^2-4ac} & | -b\\
# 2ax &=& -b \pm \sqrt{b^2-4ac} & |/(2a) \\
# x &=& {{-b \pm \sqrt{b^2-4ac}} \over {2a}}
# \end{array}
# $$
# ## Beispiele
import numpy as np
import cmath
import matplotlib.pyplot as plt
# define functions
x = np.linspace(-10,10,41)
y1 = x**2 + 2*x - 35   # (x+7)(x-5): two real roots, discriminant > 0
y2 = x**2 -4*x + 4     # (x-2)^2: one (double) root, discriminant = 0
y3 = x**2+12*x+37      # no real roots, discriminant < 0
# plot functions
plt.figure(figsize=(12.0, 6.0))
plt.plot([-10,10],[0,0],linestyle='dashed',color='grey',linewidth=1)
plt.plot(x,y1,linestyle='solid',color='red',linewidth=3,label='x$^2$+2x-35')
plt.plot(x,y2,linestyle='solid',color='green',linewidth=3,label='x$^2$-4x+4')
plt.plot(x,y3,linestyle='solid',color='blue',linewidth=3,label='x$^2$+12x+37')
plt.legend()
plt.show()
# ... done
import math
# Compare how the different sqrt implementations handle the same input:
# np.sqrt/math.sqrt return floats, cmath.sqrt always returns a complex number.
x=0.
print(np.sqrt(x))
print(math.sqrt(x))
print(cmath.sqrt(x))
print(cmath.sqrt(x).real,cmath.sqrt(x).imag)
# Pure imaginary literal 1j and its negation
a=1j
print(a)
a=1j*-1
print(a)
# IEEE-754 signed zeros compare equal
-0==+0
| .ipynb_checkpoints/AGII_lab10_QuadratischeGleichungen-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Topic Modeling using LDA
#
#
# ### -<NAME>, MSDS' 20, UVA
# + [markdown] slideshow={"slide_type": "subslide"}
# Topic modeling can be described as the process of extracting abstract topics that occur in a collection of documents. It is a way to find out the latent topics based on the collection of words in the corpus.
#
# One way to model this is using Latent Dirichlet Allocation (LDA). This method follows these simple steps:
# 1. Each document can be considered as a mixture of latent topics
# 2. Each topic can be considered as a mixture of words
#
# Essentially, LDA helps in estimating each of the above, i.e estimating a topic based on the words and further estimating the mixture of topics that describe a document. In order to set up the corpus for running an LDA model, there is some preprocessing required.
#
# Will see more of all this subsequently.
#
# **Index**
#
# [1. Dataset](#dataset)
#
# [2. Pre-processing](#preprocessing)
#
# * [Stemming-Lemmatization](#stem/lemma)
#
# * [Removing STOPWORDS](#stopwords)
#
# [3. LDA Model](#model)
#
# * [Bag of words](#bow)
#
# * [TF-IDF](#tfidf)
#
# [4. Topics within wine reviews](#wine)
#
#
# + slideshow={"slide_type": "skip"}
from notebook.services.config import ConfigManager
# Configure the RISE (livereveal) slideshow dimensions for this notebook.
cm = ConfigManager()
cm.update('livereveal', {
    'width': 1024,
    'height': 768,
    'scroll': True,
})
# + [markdown] slideshow={"slide_type": "slide"}
# ## <a name="dataset"></a> Loading the dataset - News Articles
# + slideshow={"slide_type": "slide"}
# required libraries
import pandas as pd # to work with dataframes
from wordcloud import WordCloud # to analyze frequency of different words in the corpus
import re # for using regular expressions
import matplotlib.pyplot as plt

# used for pre-processing the text data and unsupervised topic modeling
import gensim
from gensim import models
from gensim.utils import simple_preprocess
from gensim.parsing.preprocessing import STOPWORDS
from gensim.corpora import Dictionary

# used for natural language processing {NLTK: Natural Language Tool-Kit}
from nltk.stem import WordNetLemmatizer, SnowballStemmer, LancasterStemmer
from nltk.stem.porter import *
import nltk
nltk.download('wordnet') # the WordNet corpus is required by the lemmatizer

# Set a seed to reproduce the results later. Seed used here is '2019'
import numpy as np
np.random.seed(2019)
# + slideshow={"slide_type": "subslide"}
# Show the full, untruncated text of each headline in dataframe output.
# FIX: the legacy value -1 is deprecated and rejected by modern pandas;
# None is the supported "no limit" setting.
pd.set_option('display.max_colwidth', None)

# read in the csv file containing the news articles
news = pd.read_csv("abcnews-date-text.csv")
news
# + [markdown] slideshow={"slide_type": "fragment"}
# The dataset has news article headlines from Australia's ABC News (a public news service). This dataset has 1,103,663 different news headlines along with the date of publication. For the purpose of our analysis, the date of publication isn't required. All we need is the text.
#
# However, the other thing to note here is that each headline in itself isn't too long, i.e frequency of a word within a single document is going to be low. However, frequency of a word/term in the entire corpus would affect our model more than the frequency of a word/term within a document.
#
# We will see how this affects our models.
# + slideshow={"slide_type": "subslide"}
# Keep only the headline text; add an explicit id column for later reference.
news = news.drop(columns = "publish_date") # since this data isn't really required for the topic modeling
news['index'] = news.index # we would want to give each article an index to reference back to it later
news[500:520]
# + [markdown] slideshow={"slide_type": "fragment"}
# Just from eyeballing this section of the data (rows 500 to 519), it can be observed that there are articles from politics, law, sports just to name a few. Let's see what we get from visualizing the words in these articles.
# + slideshow={"slide_type": "subslide"}
# Join every headline into one string and visualise overall word frequencies.
text = " ".join(headline for headline in news["headline_text"])
wordcloud = WordCloud(background_color="white").generate(text)
plt.figure(figsize = (12,12))
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis("off")
plt.show()
# + [markdown] slideshow={"slide_type": "fragment"}
# We see that the most important words turn out to be "say", "win", "new", "us" etc. which are all forms of stopwords. This shows us why it is important to filter these out before running our model.
#
# We do not wish to train our topic model based on words that do not give us any idea of the latent topics.
# + [markdown] slideshow={"slide_type": "slide"}
# ## <a name="preprocessing"></a> Data pre-processing
# + [markdown] slideshow={"slide_type": "subslide"}
# ##### Pre-processing includes the following steps:
#
# 1. <font color="blue"> Stemming-lemmatization </font>: This step is required to extract the root words from a document. For instance, the root word for "happiness" is "happy"
# 2. <font color="blue"> Removing stopwords </font>: Stopwords are the most commonly used words in natural language. Examples: "The", "a", "is", "at", "which" etc.
# + [markdown] slideshow={"slide_type": "subslide"}
# #### <a name = "stem/lemma"></a> 1. Stemming-Lemmatization
# + [markdown] slideshow={"slide_type": "fragment"}
# There are many stemmers in the NLTK library.
#
# Some of the frequently used stemmers are:
# 1. SnowballStemmer
# 2. LancasterStemmer
# 3. PorterStemmer
#
# The lemmatizer in the NLTK library is: WordNetLemmatizer
#
# Let's look at how these would work on different words.
# + slideshow={"slide_type": "subslide"}
# initializing the stemmer and lemmatizer
stemmer = LancasterStemmer() # using the LancasterStemmer
lemmatizer = WordNetLemmatizer()
# + slideshow={"slide_type": "subslide"}
# sample words to stem/lemmatize
sample_words = ['happiness', 'flies', 'workers', 'dogs', 'agreed', 'owned', 'humbled', 'meeting', 'helper',
                'drinks', 'watching', 'traditional', 'politics', 'player', 'curator', 'better', 'best',
                'cooker', 'cooking']
stems = [stemmer.stem(plural) for plural in sample_words] # stemming process
lemmas = [lemmatizer.lemmatize(plural) for plural in sample_words] # lemmatization process
# Side-by-side comparison of each word with its stem and its (noun) lemma.
pd.DataFrame(data = {'original word': sample_words, 'stemmed': stems, 'lemma': lemmas})
# + [markdown] slideshow={"slide_type": "subslide"}
# From the dataframe above the following observations were made:
# 1. For some words, the lemmatization and stemming provide the exact same result (eg: "dogs", "drinks")
# 2. Lemmatization does not change certain words (eg: "agreed", "player", "watcher")
# 3. Stemming converts certain words to something that does not seem to be a word anymore (eg: "better", "curator", "politics", "traditional")
# + [markdown] slideshow={"slide_type": "subslide"}
# ##### <font color = "black"> Q. There is an argument in the WordNetLemmatizer.lemmatize() called "pos" which stands for Part-of-speech. Do you think that could make a difference for the lemmatization process for the words given above? </font>
# + [markdown] slideshow={"slide_type": "fragment"}
# Note: by default, the "pos" argument is equal to "n"
#
# where, n stands for NOUN
# + slideshow={"slide_type": "subslide"}
# lemmatization process (pos = "verb")
lemma_v = [lemmatizer.lemmatize(plural, pos = "v") for plural in sample_words]
# lemmatization process (pos = "adjective")
lemma_a = [lemmatizer.lemmatize(plural, pos = "a") for plural in sample_words]
# saves the results in a dictionary and creates a dataframe from it
pd.DataFrame(data = {'original word': sample_words, 'stemmed': stems, 'lemma-noun': lemmas, 'lemma-verb': lemma_v,
                     'lemma-adjective': lemma_a})
# + [markdown] slideshow={"slide_type": "subslide"}
# So, which part-of-speech argument should be used for this problem?
#
# For this case, since we are modeling topics for a corpus of news articles, using verbs for the part-of-speech argument does seem to be intuitive. This is because we want to associate the articles to different topics based on the words we observe in a cluster of documents.
#
# For instance, if we observe the words "cook", "chef", "vegetables", "restaurant", "soup", "wine" as the most commonly occuring words, we would want to choose a topic such as say "CULINARY NEWS" in the context of news articles.
#
# So, in order to get the right root, we might want to extract the root based on the part of speech being set to "verb".
# + [markdown] slideshow={"slide_type": "subslide"}
# Another way could be to stem the resultant lemmatized word. You will see a lot of people use a combination of these methods to extract the roots, but for this analysis, I shall be using the lemmas only. You can see the result below when lemmatization was used with pos = "v"
# + slideshow={"slide_type": "fragment"}
# combined lemmatization and stemming process (in that order)
stem_lemma = [stemmer.stem(WordNetLemmatizer().lemmatize(plural, pos = "v")) for plural in sample_words]
# Compare the combined stem-of-lemma output against stems and verb lemmas alone.
pd.DataFrame(data = {'original word': sample_words, 'stemmed': stems, 'lemma-verb': lemma_v,
                     'stem-lemma': stem_lemma})
# + [markdown] slideshow={"slide_type": "subslide"}
# Based on the dataframe above, I would prefer the lemma-verb column more than the others.
# + [markdown] slideshow={"slide_type": "subslide"}
# Let's now define a function that lemmatizes given word:
# + slideshow={"slide_type": "fragment"}
# function to lemmatize a given word
def lemmatize(text):
    """Return the WordNet lemma of ``text``, treated as a verb (pos="v")."""
    verb_root = lemmatizer.lemmatize(text, pos="v")
    return verb_root
# + [markdown] slideshow={"slide_type": "subslide"}
# #### <a name = "stopwords"></a> 2. Removing stopwords
# + [markdown] slideshow={"slide_type": "fragment"}
# Stopwords can be removed from text by comparing individual words in a sentence with a defined list of STOPWORDS. The STOPWORDS from gensim.parsing.preprocessing library has a list of such stopwords in the english language.
# + slideshow={"slide_type": "subslide"}
# stopwords in the list
gensim.parsing.preprocessing.STOPWORDS
# + [markdown] slideshow={"slide_type": "subslide"}
# Let's look at how it works for a few sentences:
# + slideshow={"slide_type": "fragment"}
sentence1 = "We are trying to learn how to implement topic modeling using LDA"
sentence2 = "This is a sample sentence to check how to remove stopwords from a sentence"
# + [markdown] slideshow={"slide_type": "fragment"}
# First, we shall tokenize the above sentences and then compare it with the stopwords. In order to do that, we will use the gensim.utils.simple_preprocess() function. Let's try that:
# + slideshow={"slide_type": "subslide"}
print(gensim.utils.simple_preprocess(sentence1))
print(gensim.utils.simple_preprocess(sentence2))
# + [markdown] slideshow={"slide_type": "fragment"}
# The tokenization works, so next let's try to remove all the stopwords from each of the sentences.
# + slideshow={"slide_type": "subslide"}
# initializing two empty lists for each of the sentences
res1 = []
res2 = []
for word in gensim.utils.simple_preprocess(sentence1):
if word not in gensim.parsing.preprocessing.STOPWORDS and len(word) > 3:
res1.append(word)
for word in gensim.utils.simple_preprocess(sentence2):
if word not in gensim.parsing.preprocessing.STOPWORDS and len(word) > 3:
res2.append(word)
print(res1)
print(res2)
# Note that we also checked for the length of the words along with removing stopwords
# + [markdown] slideshow={"slide_type": "subslide"}
# Now that we have a way to remove the stopwords, we can combine the two processes of stemming/lemmatization and removing stopwords and define a function to help us do this
# + slideshow={"slide_type": "fragment"}
# function to preprocess the text
def preprocess(text):
    """Tokenize ``text``, drop stopwords/short tokens, and lemmatize the rest.

    A token survives only if it is not a gensim stopword, is longer than
    3 characters, and its verb lemma is also longer than 3 characters.
    Returns the list of surviving lemmas.
    """
    kept = []
    for token in gensim.utils.simple_preprocess(text):
        # guard clause: skip stopwords and very short tokens up front
        if token in gensim.parsing.preprocessing.STOPWORDS or len(token) <= 3:
            continue
        root = lemmatize(token)
        if len(root) > 3:
            kept.append(root)
    return kept
# + [markdown] slideshow={"slide_type": "subslide"}
# Let's see how this works for the news article at index = 500 from the data frame above that had the headline "<font color = "red">committal continues into goulburn jail riot</font>"
# + slideshow={"slide_type": "fragment"}
preprocess(news.iloc[500,0])
# + [markdown] slideshow={"slide_type": "fragment"}
# It works!!! Now we can implement this preprocessing function over the entire data we have.
# + slideshow={"slide_type": "subslide"}
processed_news = news['headline_text'].map(preprocess)
# + slideshow={"slide_type": "fragment"}
processed_news[500:520]
# + [markdown] slideshow={"slide_type": "subslide"}
# Let's now see if the visualization of words from the pre-processed text shows us something different
# + slideshow={"slide_type": "fragment"}
text = " ".join(word for i in range(0, len(processed_news)) for word in processed_news[i])
wordcloud = WordCloud(background_color="white").generate(text)
plt.figure(figsize = (12,12))
plt.imshow(wordcloud, interpolation = 'bilinear')
plt.axis("off")
plt.show()
# + [markdown] slideshow={"slide_type": "slide"}
# ## <a name = "model"></a> Modeling
# + [markdown] slideshow={"slide_type": "subslide"}
# In order to model, we will try to examine these approaches:
# 1. LDA using bag of words
# 2. LDA using TF-IDF
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Bag of words
# + [markdown] slideshow={"slide_type": "fragment"}
# This method uses the frequency of occurrence of words in the corpus as a feature. The bag of words approach uses the following:
# 1. Vocabulary of known words (dictionary)
# 2. A measure of presence/occurrence of words from the vocabulary
#
# Since topic modeling algorithms cannot deal with raw text directly, it needs to be converted to vector of numbers. This is what the bag of words approach helps us achieve.
# + slideshow={"slide_type": "subslide"}
# creates a dictionary of unique words from the corpus
# (gensim Dictionary: maps each unique token to an integer id)
dictionary = Dictionary(processed_news)
# + slideshow={"slide_type": "fragment"}
# peek at the first few (id, token) pairs stored in the dictionary
count = 0
for k, v in dictionary.iteritems():
    print(k, v)
    count += 1
    if count > 10:
        break
# + slideshow={"slide_type": "fragment"}
# these are the number of unique words identified from the pre-processed news articles
len(dictionary)
# + [markdown] slideshow={"slide_type": "fragment"}
# Given that there are about 1.1 million articles in the dataset, the number of unique words that would be used for modeling still seem to be high. In order for us to filter this out further, we will try to remove the following type of words:
# 1. All of the words that appear in less than 'x' number of the articles
# 2. All of the words that appear in more than 'y%' of the articles
#
# In order to select the values of x and y, we will inspect our dictionary.
# + slideshow={"slide_type": "subslide"}
# document frequencies
# dictionary.dfs maps token_id -> number of documents containing that token
words_in_articles = pd.DataFrame.from_dict(dictionary.dfs, orient = 'index', columns = ["number of documents"])
words_in_articles.sort_values("number of documents", ascending = False, inplace = True)
list1 = words_in_articles.index
words_freq = {}
for i in list1:
    words_freq[dictionary[i]] = words_in_articles['number of documents'][i]
# NOTE(review): this relies on dict insertion order (Python 3.7+) matching the
# sorted index above so that the 'word' column lines up row-by-row — fragile
# but correct here since words_freq is filled by iterating the sorted index.
words_in_articles['word'] = words_freq.keys()
words_in_articles
# + slideshow={"slide_type": "subslide"}
# filter the extremes
# drop tokens present in fewer than 200 articles or in more than 50% of them
dictionary.filter_extremes(no_below = 200, no_above = 0.5)
len(dictionary)
# + [markdown] slideshow={"slide_type": "fragment"}
# Now that we've reduced our dictionary to 3,588 unique words, the model should be able to provide us highly specific words to interpret the topic.
# + slideshow={"slide_type": "subslide"}
# doc2bow converts each article into a sparse list of (token_id, count) pairs;
# the comprehension variable `news` has its own scope in Python 3 and does not
# clobber the module-level `news` DataFrame.
bag_of_words = [dictionary.doc2bow(news) for news in processed_news]
# + [markdown] slideshow={"slide_type": "fragment"}
# This method of converting the documents into vectors is what we shall use as input to the LDA model algorithm.
# + [markdown] slideshow={"slide_type": "subslide"}
# The LDA model can be run with the function models.LdaMulticore() which is available in the **gensim** library. The parameters it uses are:
# 1. The bag-of-words vector
# 2. The number of latent topics we wish to extract from this model
# 3. A dictionary of the known vocabulary from the corpus
# 4. Number of passes you wish to make over the entire corpus to fit the model (Note: by default passes = 1, anything more may give you a better result but may be computationally more expensive)
# 5. Number of cores you want to run it on for parallelization (optimal performance will be on {number of cores - 1})
# + slideshow={"slide_type": "fragment"}
# LDA model using the bag-of-words
lda_model_bow = models.LdaMulticore(bag_of_words, num_topics=10, id2word=dictionary, passes=2, workers=1)
# + [markdown] slideshow={"slide_type": "subslide"}
# Let's see the topics we got based on our LDA model.
# + slideshow={"slide_type": "fragment"}
# topics we got from our LDA model
for idx, topic in lda_model_bow.print_topics(-1):
print('Topic: {} \nWords: {}\n'.format(idx, topic))
# + [markdown] slideshow={"slide_type": "fragment"}
# **Could you identify a topic to allocate to some of these topics?**
# + slideshow={"slide_type": "subslide"}
# I am defining a function to give us the actual headline and the associated processed list
def show_headline(n):
    """Print the raw headline at index ``n`` followed by its preprocessed tokens."""
    raw_headline = news.headline_text[n]
    tokens = processed_news[n]
    print(raw_headline)
    print(tokens)
# + slideshow={"slide_type": "fragment"}
# we will use these to check which topics get allotted: 507, 508, 509, 510
t = bag_of_words[507]
show_headline(507)
# + slideshow={"slide_type": "fragment"}
for index, score in sorted(lda_model_bow[t], key = lambda x: -1*x[1]):
print("\nScore: {}\t \nTopic {}: {}\n".format(score, index, lda_model_bow.print_topic(index, 10)))
# + [markdown] slideshow={"slide_type": "subslide"}
# We can see that the LDA model using bag of words, definitely allocates topics in the way we expect it to for most of the examples with a high probability.
#
# However, let's see if this could work for predictions on external news articles.
# + slideshow={"slide_type": "subslide"}
unseen_news = "Australia bushfires: New South Wales battles catastrophic conditions"
bow = dictionary.doc2bow(preprocess(unseen_news))
for index, score in sorted(lda_model_bow[bow], key = lambda x: -1*x[1]):
print("\nScore: {}\t \nTopic {}: {}\n".format(score, index, lda_model_bow.print_topic(index, 10)))
# + slideshow={"slide_type": "subslide"}
unseen_news = "Scientists develop a new method for identifying potentially habitable planets that could host \
ALIEN LIFE outside of our solar system"
bow = dictionary.doc2bow(preprocess(unseen_news))
for index, score in sorted(lda_model_bow[bow], key = lambda x: -1*x[1]):
print("\nScore: {}\t \nTopic {}: {}\n".format(score, index, lda_model_bow.print_topic(index, 10)))
# + [markdown] slideshow={"slide_type": "subslide"}
# ### TF-IDF
# + [markdown] slideshow={"slide_type": "fragment"}
# This method helps with the problem that highly frequent words start to dominate in the document but may potentially not contain as much "informational content" on the latent topic.
#
# It has two components:
# 1. Term Frequency (TF): scores the frequency of a word in the document
# 2. Inverse Document Frequency(IDF): scores how rare a word is across documents.
#
# So essentially, it is scoring the frequency of the word in a given document but offsetting it by how rare the word is in the corpus. Let's try to build an LDA model using tf-idf corpus.
# + slideshow={"slide_type": "subslide"}
# building a tf-idf corpus from the bag of words created earlier
tfidf = models.TfidfModel(bag_of_words)
corpus_tfidf = tfidf[bag_of_words]
# + slideshow={"slide_type": "fragment"}
# LDA model using the tf-idf corpus
lda_tfidf = gensim.models.LdaMulticore(corpus_tfidf, num_topics=10, id2word=dictionary, passes=2, workers=1)
# + [markdown] slideshow={"slide_type": "subslide"}
# Let's see the topics we get with the LDA model using tf-idf
# + slideshow={"slide_type": "fragment"}
for idx, topic in lda_tfidf.print_topics(-1):
print('Topic: {} \nWords: {}\n'.format(idx, topic))
# + slideshow={"slide_type": "subslide"}
# we will use these to check which topics get allotted: 507, 508, 509, 510
t = bag_of_words[507]
show_headline(507)
# + slideshow={"slide_type": "fragment"}
for index, score in sorted(lda_tfidf[t], key = lambda x: -1*x[1]):
print("\nScore: {}\t \nTopic {}: {}\n".format(score, index, lda_tfidf.print_topic(index, 10)))
# + [markdown] slideshow={"slide_type": "fragment"}
# Now, let's see how well this works for predictions on external news articles.
# + slideshow={"slide_type": "subslide"}
unseen_news = "Australia bushfires: New South Wales battles catastrophic conditions"
bow = dictionary.doc2bow(preprocess(unseen_news))
for index, score in sorted(lda_tfidf[bow], key = lambda x: -1*x[1]):
print("\nScore: {}\t \nTopic {}: {}\n".format(score, index, lda_tfidf.print_topic(index, 10)))
# + slideshow={"slide_type": "subslide"}
unseen_news = "Scientists develop a new method for identifying potentially habitable planets that could host \
ALIEN LIFE outside of our solar system"
bow = dictionary.doc2bow(preprocess(unseen_news))
for index, score in sorted(lda_tfidf[bow], key = lambda x: -1*x[1]):
print("\nScore: {}\t \nTopic {}: {}\n".format(score, index, lda_tfidf.print_topic(index, 10)))
# + [markdown] slideshow={"slide_type": "subslide"}
# How do these models work when the document length is large? Let's find out using a dataset for wine reviews.
# + [markdown] slideshow={"slide_type": "slide"}
# ## <a name = "wine"></a> Wine reviews - Topic modeling
# + [markdown] slideshow={"slide_type": "subslide"}
# This dataset has 118,840 wine reviews of different kinds of wine. Since wine reviews are usually quite verbose and the fact that the words used for description are quite frequent, it might be interesting to see what we find as the latent topics for these reviews. Let's dive in!
# + slideshow={"slide_type": "subslide"}
# loading the data set
wines = pd.read_csv("wines.csv")
wines
# + slideshow={"slide_type": "subslide"}
# all columns except description could be dropped
wines = pd.DataFrame(wines['description'])
wines['index'] = wines.index
wines
# + slideshow={"slide_type": "subslide"}
# pre-processing the data
processed_wines = wines['description'].map(preprocess)
processed_wines[1000:1020]
# + slideshow={"slide_type": "fragment"}
# creating a dictionary
wine_dict = Dictionary(processed_wines)
len(wine_dict)
# + slideshow={"slide_type": "subslide"}
words_in_reviews = pd.DataFrame.from_dict(wine_dict.dfs, orient = 'index', columns = ["number of documents"])
words_in_reviews.sort_values("number of documents", ascending = False, inplace = True)
list2 = words_in_reviews.index
freq = {}
for i in list2:
freq[wine_dict[i]] = words_in_reviews['number of documents'][i]
words_in_reviews['word'] = freq.keys()
words_in_reviews[0:15]
# + slideshow={"slide_type": "subslide"}
# filtering out extremes
wine_dict.filter_extremes(50, 0.5)
len(wine_dict)
# + slideshow={"slide_type": "subslide"}
# bag-of-words for wine reviews
bow_wines = [wine_dict.doc2bow(review) for review in processed_wines]
# tfidf for wine reviews
tfidf_wines = models.TfidfModel(bow_wines)
corpus_wines = tfidf_wines[bow_wines]
# + slideshow={"slide_type": "subslide"}
# lda model with bag of words approach
lda_wine_bow = models.LdaMulticore(bow_wines, num_topics=10, id2word=wine_dict, passes=1, workers=1)
# + slideshow={"slide_type": "fragment"}
# lda model with tfidf approach
lda_wine_tfidf = models.LdaMulticore(corpus_wines, num_topics=10, id2word=wine_dict, passes=1, workers=1)
# + [markdown] slideshow={"slide_type": "subslide"}
# Let's examine the topics we get from each of the models:
# + slideshow={"slide_type": "fragment"}
# LDA with bow
print("LDA with bow\n")
for idx, topic in lda_wine_bow.print_topics(-1):
print('Topic: {} \nWords: {}\n'.format(idx, topic))
print("\n")
# LDA with tfidf
print("LDA with tfidf\n")
for idx, topic in lda_wine_tfidf.print_topics(-1):
print('Topic: {} \nWords: {}\n'.format(idx, topic))
# + slideshow={"slide_type": "subslide"}
# defining a function to provide the review along with its processed state
def show_review(n):
    """Print the original wine review at index ``n``, a separator, then its tokens."""
    original_review = wines.description[n]
    print(original_review)
    print("\n")
    print(processed_wines[n])
# + slideshow={"slide_type": "fragment"}
# sample reviews to check: 3696, 29736, 99336
t2 = bow_wines[99336]
show_review(99336)
# + slideshow={"slide_type": "subslide"}
print("LDA with bow\n")
for index, score in sorted(lda_wine_bow[t2], key = lambda x: -1*x[1]):
print("\nScore: {}\t \nTopic {}: {}\n".format(score, index, lda_wine_bow.print_topic(index, 10)))
print("\n")
print("LDA with tfidf\n")
for index, score in sorted(lda_wine_tfidf[t2], key = lambda x: -1*x[1]):
print("\nScore: {}\t \nTopic {}: {}\n".format(score, index, lda_wine_tfidf.print_topic(index, 10)))
# + [markdown] slideshow={"slide_type": "fragment"}
# As you can see, the larger each document in the corpus, the better it is to allocate a latent topic. In addition, the tfidf method helps solve the issue of a word with high frequency dominating the allocation.
#
# The tfidf method provides a lower number of topics than suggested by the bow since it can better accommodate and tackle the uncertainty amongst topics.
# + [markdown] slideshow={"slide_type": "slide"}
# # Thank you
#
# ### Any questions?
| Topic-Modeling.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Basic stats using `Scipy`
# In this example we will go over how to draw samples from various built in probability distributions and define your own custom distributions.
#
# ## Packages being used
# + `scipy`: has all the stats stuff
# + `numpy`: has all the array stuff
#
# ## Relevant documentation
# + `scipy.stats`: http://docs.scipy.org/doc/scipy/reference/tutorial/stats.html, http://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_continuous.html#scipy.stats.rv_continuous, http://docs.scipy.org/doc/scipy/reference/stats.html#module-scipy.stats
import numpy as np
import scipy.stats as st
# some special functions we will make use of later on
from scipy.special import erfc
from matplotlib import pyplot as plt
from astropy.visualization import hist
import mpl_style
# %matplotlib notebook
plt.style.use(mpl_style.style1)
# There are many probability distributions that are already available in `scipy`: http://docs.scipy.org/doc/scipy/reference/stats.html#module-scipy.stats. These classes allow for the evaluations of PDFs, CDFs, PPFs, moments, random draws, and fitting. As an example lets take a look at the normal distribution.
norm = st.norm(loc=0, scale=1)
x = np.linspace(-5, 5, 1000)
plt.figure(1, figsize=(8, 10))
plt.subplot2grid((2, 2), (0, 0))
plt.plot(x, norm.pdf(x))
plt.xlabel('x')
plt.ylabel('PDF(x)')
plt.xlim(-5, 5)
plt.subplot2grid((2, 2), (0, 1))
plt.plot(x, norm.cdf(x))
plt.xlabel('x')
plt.ylabel('CDF(x)')
plt.xlim(-5, 5)
plt.subplot2grid((2, 2), (1, 0))
sample_norm = norm.rvs(size=100000)
hist(sample_norm, bins='knuth', histtype='step', lw=1.5, density=True)
plt.xlabel('x')
plt.ylabel('Random Sample')
plt.tight_layout()
# You can calculate moments and fit data:
# +
for i in range(4):
print('moment {0}: {1}'.format(i+1, norm.moment(i+1)))
print('best fit: {0}'.format(st.norm.fit(sample_norm)))
# -
# # Custom probability distributions
# Sometimes you need to use obscure PDFs that are not already in `scipy` or `astropy`. When this is the case you can make your own subclass of `st.rv_continuous` and overwrite the `_pdf` or `_cdf` methods. This new sub class will act exactly like the built in distributions.
#
# The methods you can override in the subclass are:
#
# + \_rvs: create a random sample drawn from the distribution
# + \_pdf: calculate the PDF at any point
# + \_cdf: calculate the CDF at any point
# + \_sf: survival function, a.k.a. 1-CDF(x)
# + \_ppf: percent point function, a.k.a. inverse CDF
# + \_isf: inverse survival function
# + \_stats: function that calculates the first 4 moments
# + \_munp: function that calculates the nth moment
# + \_entropy: differential entropy
# + \_argcheck: function to check the input arguments are valid (e.g. var>0)
#
# You should override any method you have analytic functions for, otherwise (typically slow) numerical integration, differentiation, and function inversion are used to transform the ones that are specified.
#
# ## The exponentially modified Gaussian distribution
# As an example, let's create a class for the EMG distribution (https://en.wikipedia.org/wiki/Exponentially_modified_Gaussian_distribution). This is the distribution resulting from the sum of a Gaussian random variable and an exponential random variable. The PDF and CDF are:
#
# \begin{align}
# f(x;\mu,\sigma, \lambda) & = \frac{\lambda}{2} \exp{\left( \frac{\lambda}{2} \left[ 2\mu+\lambda\sigma^{2}-2x \right] \right)} \operatorname{erfc}{\left( \frac{\mu + \lambda\sigma^{2}-x}{\sigma\sqrt{2}} \right)} \\
# F(x; \mu, \sigma, \lambda) & = \Phi(u, 0, v) - \Phi(u, v^2, v) \exp{\left( -u + \frac{v^2}{2} \right)} \\
# \Phi(x, a, b) & = \frac{1}{2} \left[ 1 + \operatorname{erf}{\left( \frac{x - a}{b\sqrt{2}} \right)} \right] \\
# u & = \lambda(x - \mu) \\
# v & = \lambda\sigma
# \end{align}
# +
# create a generating class
# create a generating class
class EMG_gen1(st.rv_continuous):
    """Exponentially modified Gaussian (EMG) distribution.

    Shape parameters: mu (Gaussian mean, finite), sig (Gaussian std, > 0)
    and lam (exponential rate, > 0).  Analytic PDF, CDF and first four
    moments are provided; everything else falls back to scipy's (slower)
    numerical machinery.
    """

    def _pdf(self, x, mu, sig, lam):
        # f(x) = (lam/2) exp((lam/2)(2*mu + lam*sig^2 - 2x)) erfc((mu + lam*sig^2 - x)/(sig*sqrt(2)))
        u = 0.5 * lam * (2 * mu + lam * sig**2 - 2 * x)
        v = (mu + lam * sig**2 - x) / (sig * np.sqrt(2))
        return 0.5 * lam * np.exp(u) * erfc(v)

    def _cdf(self, x, mu, sig, lam):
        # F(x) = Phi(u; 0, v) - Phi(u; v^2, v) exp(-u + v^2/2), with
        # u = lam (x - mu) and v = lam sig
        u = lam * (x - mu)
        v = lam * sig
        phi1 = st.norm.cdf(u, loc=0, scale=v)
        phi2 = st.norm.cdf(u, loc=v**2, scale=v)
        return phi1 - phi2 * np.exp(-u + 0.5 * v**2)

    def _stats(self, mu, sig, lam):
        # return the mean, variance, skewness, and (excess) kurtosis
        mean = mu + 1 / lam
        var = sig**2 + 1 / lam**2
        sl = sig * lam
        u = 1 + 1 / sl**2
        skew = (2 / sl**3) * u**(-3 / 2)
        v = 3 * (1 + 2 / sl**2 + 3 / sl**4) / u**2
        kurt = v - 3
        return mean, var, skew, kurt

    def _argcheck(self, mu, sig, lam):
        # BUGFIX: use the element-wise & operator instead of python's `and`;
        # scipy may pass arrays of shape parameters, and `and` on arrays
        # raises (or silently checks only truthiness of the first operand).
        return np.isfinite(mu) & (sig > 0) & (lam > 0)


class EMG_gen2(EMG_gen1):
    def _ppf(self, q, mu, sig, lam):
        # use linear interpolation to solve this faster (not exact, but much
        # faster than the built in method)
        # pick a range large enough to cover essentially the full cdf
        var = sig**2 + 1 / lam**2
        x = np.arange(mu - 50 * np.sqrt(var), mu + 50 * np.sqrt(var), 0.01)
        y = self.cdf(x, mu, sig, lam)
        return np.interp(q, y, x)


class EMG_gen3(EMG_gen1):
    def _rvs(self, mu, sig, lam, size=None, random_state=None):
        # redefine the random sampler to draw normal + exponential variates
        # (the defining decomposition of the EMG).
        # BUGFIX: scipy >= 1.5 passes `size` and `random_state` to _rvs;
        # the old `self._size` attribute no longer exists.
        return (st.norm.rvs(loc=mu, scale=sig, size=size, random_state=random_state)
                + st.expon.rvs(loc=0, scale=1 / lam, size=size, random_state=random_state))


# use the generator classes to make the new distribution objects
EMG1 = EMG_gen1(name='EMG1')
EMG2 = EMG_gen2(name='EMG2')
EMG3 = EMG_gen3(name='EMG3')
# -
# Let's look at how long it takes to create random samples for each of these versions of the EMG:
# %time EMG1.rvs(0, 1, 0.5, size=1000)
print('=========')
# %time EMG2.rvs(0, 1, 0.5, size=1000)
print('=========')
# %time EMG3.rvs(0, 1, 0.5, size=1000)
print('=========')
# As you can see, the numerical inversion of the CDF is very slow, the approximation to the inversion is much faster, and defining `_rvs` in terms of the `normal` and `exp` distributions is the fastest.
#
# Lets take a look at the results for `EMG3`:
dist = EMG3(0, 1, 0.5)
x = np.linspace(-5, 20, 1000)
plt.figure(2, figsize=(8, 10))
plt.subplot2grid((2, 2), (0, 0))
plt.plot(x, dist.pdf(x))
plt.xlabel('x')
plt.ylabel('PDF(x)')
plt.subplot2grid((2, 2), (0, 1))
plt.plot(x, dist.cdf(x))
plt.xlabel('x')
plt.ylabel('CDF(x)')
plt.subplot2grid((2, 2), (1, 0))
sample_emg = dist.rvs(size=10000)
hist(sample_emg, bins='knuth', histtype='step', lw=1.5, density=True)
plt.xlabel('x')
plt.ylabel('Random Sample')
plt.tight_layout()
# As with the built in functions we can calculate moments and do fits to data. **Note** Since we are not using the built in `loc` and `scale` params they are fixed to 0 and 1 in the fit below.
# +
for i in range(4):
print('moment {0}: {1}'.format(i+1, dist.moment(i+1)))
print('best fit: {0}'.format(EMG3.fit(sample_emg, floc=0, fscale=1)))
# -
# For reference, here is how `scipy` defines this distribution (found under the name `exponnorm`):
import scipy.stats._continuous_distns as cd
np.source(cd.exponnorm_gen)
# %time st.exponnorm.rvs(0.5, size=1000)
| Stats_with_Scipy.ipynb |
# +
"""
7. How to get the items not common to both series A and series B?
"""
"""
Difficulty Level: L2
"""
"""
Get all items of ser1 and ser2 not common to both.
"""
"""
Input
"""
"""
ser1 = pd.Series([1, 2, 3, 4, 5])
ser2 = pd.Series([4, 5, 6, 7, 8])
"""
# Input
ser1 = pd.Series([1, 2, 3, 4, 5])
ser2 = pd.Series([4, 5, 6, 7, 8])
# Solution: the symmetric difference is the union minus the intersection
ser_u = pd.Series(np.union1d(ser1, ser2))      # every distinct value from either series
ser_i = pd.Series(np.intersect1d(ser1, ser2))  # values present in both series
ser_u[~ser_u.isin(ser_i)]
| pset_pandas_ext/101problems/solutions/nb/p7.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Generate Adversarial Samples for Deep Learning Models with the Adversarial Robustness Toolbox (ART)
#
# This notebook shows how to use adversarial attack techniques from the [Adversarial Robustness Toolbox (ART)](https://developer.ibm.com/code/open/projects/adversarial-robustness-toolbox/) on Deep Learning models trained with *FfDL*. The *ART* library supports crafting and analyzing various attack and defense methods for deep learning models.
#
# In this notebook, you will learn how to incorporate one of the attack methods supported by *ART*, the *Fast Gradient Method* (*FGM*), into your training pipeline to generate adversarial samples for the purposes of evaluating the robustness of the trained model. The model is a Convolutional Neural Network (CNN) trained on the *[MNIST handwritten digit data](http://yann.lecun.com/exdb/mnist/)* using [Keras](https://keras.io/) with a [TensorFlow](https://www.tensorflow.org/) backend.
#
# The *ART* Github repository can be found here - https://github.com/IBM/adversarial-robustness-toolbox
#
# This notebook uses Python 3.
#
#
# ## Contents
#
# 1. [Set up the environment](#setup)
# 2. [Create a Keras model](#model)
# 3. [Train the model](#train)
# 4. [Generate adversarial samples for a robustness check](#art)
# 5. [Summary and next steps](#summary)
# <a id="setup"></a>
# ## 1. Setup
#
# It is recommended that you run this notebook inside a Python 3 virtual environment. Make sure you have all required libraries installed.
#
# To store model and training data, this notebook requires access to a Cloud Object Storage (COS) instance. [BlueMix Cloud Object Storage](https://console.bluemix.net/catalog/services/cloud-object-storage) offers a free *lite plan*. Follow [these instructions](https://dataplatform.ibm.com/docs/content/analyze-data/ml_dlaas_object_store.html) to create your COS instance and generate [service credentials](https://console.bluemix.net/docs/services/cloud-object-storage/iam/service-credentials.html#service-credentials) with [HMAC keys](https://console.bluemix.net/docs/services/cloud-object-storage/hmac/credentials.html#using-hmac-credentials).
#
# **Enter your cluster and object storage information:**
# +
import os
user_data = {
"ffdl_dir" : os.environ.get("FFDL_DIR"),
"ffdl_cluster_name" : os.environ.get("CLUSTER_NAME"),
"vm_type" : os.environ.get("VM_TYPE"),
"cos_hmac_access_key_id" : os.environ.get("AWS_ACCESS_KEY_ID"),
"cos_hmac_secret_access_key" : os.environ.get("AWS_SECRET_ACCESS_KEY"),
"cos_region_name" : os.environ.get("AWS_DEFAULT_REGION"),
"cos_service_endpoint" : os.environ.get("AWS_ENDPOINT_URL")
}
# +
unset_vars = [key for (key, value) in user_data.items() if not value]
for var in unset_vars:
print("Dictionary 'user_data' is missing '%s'" % var)
assert not unset_vars, "Enter 'user_data' to run this notebook!"
# -
# ### 1.1. Verify or Install Required Python Libraries
# +
import sys

def is_venv():
    """Return True when running inside a virtualenv/venv interpreter."""
    return (hasattr(sys, 'real_prefix') or (hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix))

try:
    import keras, tensorflow, requests, wget, boto3, art
    print("All required libraries are installed.")
    # !cat requirements.txt
except ModuleNotFoundError:
    # BUGFIX: the original wrote `if is_venv:` which tests the truthiness of
    # the function object itself (always True) — the function must be called.
    if is_venv():
        print("Installing required libraries into virtual environment.")
        # !python -m pip install -r requirements.txt
    else:
        print("Please install the required libraries.")
        # !cat requirements.txt
# -
# ### 1.2. Connect to Cloud Object Storage (COS)
# Create a `boto3.resource` to interact with the COS instance. The `boto3` library allows Python developers to manage Cloud Object Storage (COS).
cos = boto3.resource("s3",
aws_access_key_id = user_data["cos_hmac_access_key_id"],
aws_secret_access_key = user_data["cos_hmac_secret_access_key"],
endpoint_url = user_data["cos_service_endpoint"],
region_name = user_data["cos_region_name"]
)
# +
# for bucket in cos.buckets.all():
# print(bucket.name)
# -
# Create two buckets, which you will use to store training data and training results.
#
# **Note:** The bucket names must be unique.
# +
from uuid import uuid4
bucket_uid = str(uuid4())
training_data_bucket = 'training-data-' + bucket_uid
training_result_bucket = 'training-results-' + bucket_uid
def create_buckets(bucket_names):
    """Create each named bucket on the COS instance, reporting failures.

    A failed creation (e.g. name already taken) is printed and the loop
    continues with the remaining names.
    """
    for name in bucket_names:
        print('Creating bucket "{}" ...'.format(name))
        try:
            cos.create_bucket(Bucket=name)
        except boto3.exceptions.botocore.client.ClientError as err:
            # surface the COS error message without aborting the remaining creations
            print('Error: {}.'.format(err.response['Error']['Message']))
buckets = [training_data_bucket, training_result_bucket]
create_buckets(buckets)
# -
# Now you should have 2 buckets.
# ### 1.3. Download MNIST Training Data and Upload it to the COS Buckets
# Select a data set (https://keras.io/datasets/):
# - `mnist.npz`
# - `fashion_mnist.npz`
# +
datasets = ["mnist.npz", "fashion_mnist.npz"]
dataset_filename = datasets[1] # 'fashion_mnist.npz'
# -
# Download the training data and upload it to the `training-data` bucket.
# +
from keras.datasets import mnist, fashion_mnist
import numpy as np
if "fashion" in dataset_filename:
(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
else:
(x_train, y_train), (x_test, y_test) = mnist.load_data()
np.savez_compressed(dataset_filename, x_train=x_train , y_train=y_train, x_test=x_test, y_test=y_test)
bucket_obj = cos.Bucket(training_data_bucket)
print("Uploading files to {}:".format(training_data_bucket))
bucket_obj.upload_file(dataset_filename, dataset_filename)
print('- {} was uploaded'.format(dataset_filename))
# -
# Have a look at the list of the created buckets and their contents.
# +
def print_bucket_contents(buckets):
    """List every object (key and size in kB) stored in each given bucket."""
    for name in buckets:
        print(name)
        handle = cos.Bucket(name)
        for obj in handle.objects.all():
            print("    File: {}, {:4.2f}kB".format(obj.key, obj.size/1024))
print_bucket_contents(buckets)
# -
# You are done with COS, and you are ready to train your model!
# <a id="model"></a>
# ## 2. Create the Keras model
#
# In this section we:
#
# - [2.1 Package the model definition](#zip)
# - [2.2 Prepare the training definition metadata](#manifest)
#
# ### 2.1. Create the Model Zip File <a id="zip"></a>
#
# Let's create the model [`convolutional_keras.py`](../edit/convolutional_keras.py) and add it to a zip file.
script_filename = "convolutional_keras.py"
archive_filename = 'model.zip'
# +
# %%writefile $script_filename
from __future__ import print_function
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
import keras
import numpy as np
import sys
import os
batch_size = 128
num_classes = 10
epochs = 1
img_rows, img_cols = 28, 28
def main(argv):
    """Parse CLI arguments of the form ``--data <filename>``.

    Sets the module-global ``image_path`` to ``$DATA_DIR/<filename>`` and
    returns it (the return value is new but backward compatible: the
    original returned None and its caller ignores the result).
    """
    if len(argv) < 2:
        sys.exit("Not enough arguments provided.")
    global image_path
    i = 1
    while i < len(argv):
        arg = str(argv[i])
        if arg == "--data" and i + 1 < len(argv):
            image_path = os.path.join(os.environ["DATA_DIR"], str(argv[i + 1]))
            i += 2
        else:
            # BUGFIX: the original only advanced `i` when it saw "--data",
            # so any other flag in argv[1] caused an infinite loop.
            i += 1
    return globals().get("image_path")
if __name__ == "__main__":
main(sys.argv)
# load training and test data from npz file
# (image_path is set by main() above from the --data CLI argument)
f = np.load(image_path)
x_train = f['x_train']
y_train = f['y_train']
x_test = f['x_test']
y_test = f['y_test']
f.close()
# reshape to the 4-D layout the Keras backend expects: the channel axis goes
# first or last depending on the backend configuration
if K.image_data_format() == 'channels_first':
    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)
# scale raw pixel values from [0, 255] to [0, 1]
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
# convert class vectors to binary class matrices (one-hot labels)
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
# model: two conv layers -> max-pool -> dropout -> dense head with softmax
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
                 activation='relu',
                 input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adadelta(),
              metrics=['accuracy'])
# train (epochs/batch_size are the module-level constants defined above);
# 10% of the training data is held out for validation
model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_split=0.1)
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# persist the trained weights and the architecture separately under RESULT_DIR
# so the FfDL job uploads them to the training-results bucket
model_wt_path = os.environ["RESULT_DIR"] + "/keras_original_model.hdf5"
model.save(model_wt_path)
print("Model saved to file: %s" % model_wt_path)
model_def_path = os.environ["RESULT_DIR"] + "/keras_original_model.json"
model_json = model.to_json()
with open(model_def_path, "w") as json_file:
    json_file.write(model_json)
print("Model definition saved to file: %s" % model_def_path)
# +
import zipfile
# Package the training script into the archive that gets uploaded to COS.
# Fix: use the ZipFile as a context manager so the archive is flushed and
# closed deterministically instead of leaking an open file handle.
with zipfile.ZipFile(archive_filename, mode='w') as archive:
    archive.write(script_filename)
# -
# ### 2.2. Prepare the Training Definition Metadata <a id="manifest"></a>
# - *FfDL* does not have a *Keras* community image so we need to `pip`-install *Keras* prior to running the `training_command`
# - Your COS credentials are referenced in the `data_stores` > `connection` data.
# +
import yaml
# Command executed inside the training container: install Keras (the chosen
# tensorflow image does not bundle it), then run the training script against
# the dataset staged in DATA_DIR.
training_command = "pip3 install keras; python3 %s --data ${DATA_DIR}/%s" % (script_filename, dataset_filename)
# FfDL training-job manifest: resource requests, COS data stores (input and
# result buckets share one connection), framework image and metrics sink.
manifest = {
    "name": "keras_digit_recognition",
    "description": "Hand-written Digit Recognition Training",
    "version": "1.0",
    "gpus": 0,
    "cpus": 2,
    "memory": "2Gb",
    "data_stores": [
        {
            "id": "sl-internal-os",
            "type": "s3_datastore",
            "training_data": {
                "container": training_data_bucket
            },
            "training_results": {
                "container": training_result_bucket
            },
            "connection": {
                "type": "s3_datastore",
                "auth_url": user_data["cos_service_endpoint"],
                "user_name": user_data["cos_hmac_access_key_id"],
                "password": user_data["cos_hmac_secret_access_key"]
            }
        }
    ],
    "framework": {
        "name": "tensorflow",
        "version": "1.5.0-py3",
        "command": training_command
    },
    "evaluation_metrics": {
        "type": "tensorboard",
        "in": "$JOB_STATE_DIR/logs/tb"
    }
}
# Fix: open the output file via a context manager so the handle is closed
# (the original passed a bare open(...) to yaml.dump and never closed it).
with open("manifest.yml", "w") as manifest_file:
    yaml.dump(manifest, manifest_file, default_flow_style=False)
# -
# ## 3. Train the Model<a id="train"></a>
#
# In this section, learn how to:
# - [3.1 Setup the command line environment](#cmd_setup)
# - [3.2 Train the model in the background](#backg)
# - [3.3 Monitor the training log](#log)
# - [3.4 Cancel the training](#cancel)
# ### 3.1. Setup the Command Line Environment <a id="cmd_setup"></a>
# Load the Kubernetes cluster configuration using the [BlueMix CLI](https://console.bluemix.net/docs/cli/index.html#overview). Make sure your machine is logged in with `bx login`.
try:
# %env VM_TYPE {user_data["vm_type"]}
# %env CLUSTER_NAME {user_data["ffdl_cluster_name"]}
# cluster_config = !bx cs cluster-config {user_data["ffdl_cluster_name"]} | grep "export KUBECONFIG="
# %env KUBECONFIG {cluster_config[-1].split("=")[-1]}
except IndexError:
print("The cluster %s could not be found." % {user_data["ffdl_cluster_name"]})
print("Run 'bx cs clusters' to list all clusters you have access to.")
# #!bx cs clusters
raise
# Setup the DLaaS URL, username and password
# +
node_ip = !(cd {user_data["ffdl_dir"]} && make --no-print-directory kubernetes-ip)
# restapi_port = !kubectl get service ffdl-restapi -o jsonpath='{.spec.ports[0].nodePort}'
dlaas_url = "http://%s:%s" % (node_ip[0], restapi_port[0])
# %env DLAAS_URL $dlaas_url
# %env DLAAS_USERNAME = test-user
# %env DLAAS_PASSWORD = <PASSWORD>
# -
# Obtain the correct FfDL CLI for your machine
# +
import platform
ffdl = "%s/cli/bin/ffdl-%s" % (user_data["ffdl_dir"], "osx" if platform.system() == "Darwin" else "linux")
# -
# ### 3.2. Start the Training Job<a id="backg"></a>
#
out = !{ffdl} train "manifest.yml" "model.zip"
out
# ### 3.3. Monitor the Training Logs<a id="log"></a>
if "Model ID" in out[1]:
model_id = out.fields()[1][-1]
# !{ffdl} logs --follow {model_id}
# ## 4. Generate Adversarial Samples <a id="art"></a>
#
# In this section, we learn how to:
# - [4.1 Generate adversarial samples with ART (synchronously in notebook)](#artLocal)
# - [4.2 Generate adversarial samples with ART (asynchronously using FfDL)](#artWithFfDL)
# ### 4.1. Generate Adversarial Samples Locally <a id="artLocal"></a>
#
# This section shows how to use the ART Fast Gradient Method (FGM) to generate adversarial samples for the model previously trained synchronously in this notebook.
#
# A trained model should have been created in the `training_result_bucket`. Now ART can be used to check the robustness of the trained model.
#
# The original dataset used to train the model as well as the trained model serve as inputs to the `robustness_check.py` script. We can download both from the `training_data_bucket` and the `training_result_bucket` respectively.
# First, download the original data set and the trained model from Cloud Object Store.
weights_filename = "keras_original_model.hdf5"
network_definition_filename = "keras_original_model.json"
# Print contents of COS buckets used in the previous training run
print_bucket_contents([training_data_bucket, training_result_bucket])
# +
# download network definition and weights to current working directory
weights_file_in_cos_bucket = os.path.join(model_id, weights_filename)
network_definition_file_in_cos_bucket = os.path.join(model_id, network_definition_filename)
bucket_obj = cos.Bucket(training_result_bucket)
bucket_obj.download_file(weights_file_in_cos_bucket, weights_filename)
print('Downloaded', weights_filename)
bucket_obj.download_file(network_definition_file_in_cos_bucket, network_definition_filename)
print('Downloaded', network_definition_filename)
# -
# Load & compile the model that we created using `convolutional_keras.py`
# +
import numpy as np
import tensorflow as tf
from keras import backend as K
from keras.models import model_from_json
print('Network Definition:', network_definition_filename)
print('Weights: ', weights_filename)
# Load the serialized model topology. Fix: use a context manager so the file
# handle is closed even if reading or JSON parsing fails (the original used
# bare open/close with no protection).
with open(network_definition_filename, 'r') as json_file:
    model_json = json_file.read()
model = model_from_json(model_json)
model.load_weights(weights_filename)
# Compile with the same loss/metric family used during training so that
# model.evaluate() reports comparable numbers.
comp_params = {'loss': 'categorical_crossentropy',
               'optimizer': 'adam',
               'metrics': ['accuracy']}
model.compile(**comp_params)
# -
# After loading & compiling the model, the next step is to create a KerasClassifier
# +
# create ART classifier object
from art.estimators.classification import KerasClassifier
classifier = KerasClassifier(clip_values=(0, 1), model=model)
# -
# Load the test data and labels from `.npz` file
# +
from keras.utils import np_utils
f = np.load(dataset_filename)
x_original = f['x_test']
y = f['y_test']
f.close()
# -
# Visualize the original (non-adversarial) sample
# +
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
plt.figure(figsize=(2, 2))
plt.imshow(x_original[1], cmap='gray')
print(y[1])
# -
# Standardize the Numpy array
# preprocess
x_original = np.expand_dims(x_original, axis=3)
x_original = x_original.astype('float32') / 255
y = np_utils.to_categorical(y, 10)
# Evaluate the model and calculated test accuracy
# evaluate
scores = model.evaluate(x_original, y, verbose=0)
print('model test loss: ', scores[0]*100)
print('model test accuracy:', scores[1]*100)
model_accuracy = scores[1]*100
# ART exposes many attacks like FGM, NewtonFool, DeepFool, Carlini etc. The code below shows how to use one of ART's attack methods (Fast Gradient Method or FGM) to craft adversarial samples based on x_test
# +
from art.attacks import FastGradientMethod
# configuration
epsilon = 0.2
# create crafter object
crafter = FastGradientMethod(classifier, eps=epsilon)
# craft samples on x_test (stored in variable x_original)
x_adv_samples = crafter.generate(x_original)
adv_samples_filename = "adv_samples.npz"
np.savez(adv_samples_filename, x_original=x_original, x_adversarial=x_adv_samples, y=y)
print("Number of adversarial samples crafted:", len(x_adv_samples))
print("adversarial samples saved to:", adv_samples_filename)
# -
# The following functions can be used for gathering metrics like model robustness, confidence metric, perturbation metric
# +
import numpy.linalg as la
import json
def get_metrics(model, x_original, x_adv_samples, y):
    """Summarize a model's robustness on clean vs. adversarial inputs.

    Returns a dict with accuracy on the clean and adversarial sets plus the
    average confidence reduction and perturbation size (all in percent).
    """
    # Accuracy on the untouched test set.
    clean_scores = model.evaluate(x_original, y, verbose=0)
    accuracy_clean = clean_scores[1] * 100
    # Class-probability predictions feed the two derived metrics below.
    y_pred = model.predict(x_original, verbose=0)
    y_pred_adv = model.predict(x_adv_samples, verbose=0)
    # Accuracy on the adversarially perturbed set.
    adv_scores = model.evaluate(x_adv_samples, y, verbose=0)
    accuracy_adv = adv_scores[1] * 100
    pert_metric = get_perturbation_metric(x_original, x_adv_samples, y_pred, y_pred_adv, ord=2)
    conf_metric = get_confidence_metric(y_pred, y_pred_adv)
    return {
        "model accuracy on test data:": accuracy_clean,
        "model accuracy on adversarial samples": accuracy_adv,
        "reduction in confidence": conf_metric * 100,
        "average perturbation": pert_metric * 100
    }
def get_perturbation_metric(x_original, x_adv, y_pred, y_pred_adv, ord=2):
    """Mean relative perturbation norm over samples whose prediction flipped.

    Returns 0 when no sample changed its predicted class. `ord` is the vector
    norm order passed to numpy.linalg.norm (kept as-is: callers pass ord=2).
    """
    flipped = (np.argmax(y_pred_adv, axis=1) != np.argmax(y_pred, axis=1))
    if np.sum(flipped) == 0.0:
        return 0
    # Flatten each sample to a vector before measuring distances.
    flat_orig = x_original.reshape(x_original.shape[0], -1)
    flat_adv = x_adv.reshape(x_original.shape[0], -1)
    pert_norms = la.norm(flat_adv - flat_orig, ord, axis=1)[flipped]
    base_norms = la.norm(flat_orig[flipped], ord, axis=1)
    return np.mean(pert_norms / base_norms)
# This computes the change in confidence for all images in the test set
def get_confidence_metric(y_pred, y_pred_adv):
    """Mean relative confidence drop for samples whose class did NOT flip.

    Returns 0 when every sample changed its predicted class.
    """
    orig_cls = np.argmax(y_pred, axis=1)
    orig_conf = y_pred[np.arange(y_pred.shape[0]), orig_cls]
    adv_cls = np.argmax(y_pred_adv, axis=1)
    adv_conf = y_pred_adv[np.arange(y_pred_adv.shape[0]), adv_cls]
    same_cls = (orig_cls == adv_cls)
    if np.sum(same_cls) == 0.0:
        return 0
    # Guard against division by zero for samples with zero confidence.
    keep = same_cls & (orig_conf != 0)
    return np.mean((orig_conf[keep] - adv_conf[keep]) / orig_conf[keep])
# -
# Display the robustness check metrics
#
# 1. Model accuracy on test data
# 2. Model robustness on adversarial samples
# 3. Reduction in confidence
# 4. Perturbation metric
# +
result = get_metrics(model, x_original, x_adv_samples, y)
print(json.dumps(result, indent=4, sort_keys=False))
# -
# Compare original images with adversarial samples and test model predictions
# +
# https://keras.io/datasets/#fashion-mnist-database-of-fashion-articles
fashion_labels = {
0: "T-shirt/top",
1: "Trouser",
2: "Pullover",
3: "Dress",
4: "Coat",
5: "Sandal",
6: "Shirt",
7: "Sneaker",
8: "Bag",
9: "Ankle boot"
}
def get_label(y):
if "fashion" in dataset_filename:
return fashion_labels[y]
else:
return "Predict: %i" % y
# +
# x_adv_samples = np.load("adv_samples_from_cos.npz")
# x_original = x_adv_samples["x_original"]
# x_adversarial = x_adv_samples["x_adversarial"]
# y = x_adv_samples["y"]
x_adversarial = x_adv_samples
# Rescale the [0, 1] floats back to 0-255 ints and drop the channel axis
# so matplotlib can render the images as grayscale.
x_orig = ((x_original ) * 255).astype('int')[:, :, :, 0]
x_adv = ((x_adversarial) * 255).astype('int')[:, :, :, 0]
# Model predictions for both the clean and the adversarial images.
y_pred_orig = model.predict(x_original, verbose=0)
y_pred_adv = model.predict(x_adversarial, verbose=0)
# Plot a 2x10 grid: top row clean images, bottom row their adversarial twins.
fig = plt.figure(figsize=(15, 3))
cols = 10
rows = 2
images = list(x_orig[:cols]) + list(x_adv[:cols])
preds = list(y_pred_orig[:cols]) + list(y_pred_adv[:cols])
labels = list(y[:cols]) + list(y[:cols])
for i in range(0, len(images)):
    ax = fig.add_subplot(rows, cols, i+1)
    y_pred = np.argmax(preds[i])
    y_orig = np.argmax(labels[i])
    # Green caption = prediction matches ground truth, red = mismatch.
    ax.set_xlabel(get_label(y_pred),
                  color = "green" if y_pred == y_orig else "red")
    ax.tick_params(axis='both', which='both',
                   bottom=False, top=False,
                   right=False, left=False,
                   labelbottom=False, labelleft=False)
    plt.imshow(images[i], cmap='gray')
plt.show()
# -
# ## 5. Summary and Next Steps <a id="summary"></a>
#
# This notebook only looked at one adversarial robustness technique (FGM). The *ART* library contains many more attacks, metrics and defenses to help you understand and improve your model's robustness. You can use this notebook as a template to experiment with all aspects of *ART*. Find more state-of-the-art methods for attacking and defending classifiers here:
#
# https://github.com/IBM/adversarial-robustness-toolbox
# ## Acknowledgements
#
# Special thanks to [Anupama-Murthi](https://github.ibm.com/Anupama-Murthi) and [<NAME>](https://github.ibm.com/vijay-arya) who created the original notebook which we modified here to showcase how to use *ART* with *FfDL*. If you would like to try *[Watson Machine Learning (WML) Service](https://console.bluemix.net/catalog/services/machine-learning)* with *ART* check out Anupama and Vijay's notebook here:
#
# [https://github.ibm.com/robust-dlaas/ART-in-WML/Use ART to check robustness of deep learning models.ipynb](https://github.ibm.com/robust-dlaas/ART-in-WML/blob/master/Use%20ART%20to%20check%20robustness%20of%20deep%20learning%20models.ipynb)
# Copyright © 2017, 2018 IBM. This notebook and its source code are released under the terms of the MIT License.
| notebooks/fabric_for_deep_learning_adversarial_samples_fashion_mnist.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + id="LwFExL0AaU7G" colab_type="code" outputId="59a28131-8687-4cfe-cd28-da5dd09b9e89" colab={"base_uri": "https://localhost:8080/", "height": 35}
from google.colab import drive
drive.mount('/content/gdrive')
# + id="NwMUupd3c__M" colab_type="code" colab={}
import os
# + id="Sv57YN9idD3z" colab_type="code" colab={}
os.chdir('gdrive/My Drive/data')
# + id="xhMaC1H0g8wc" colab_type="code" outputId="2ac4e844-47c4-44e1-b5c3-f1a3db681785" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# !pip install ktrain
# + id="kvELXJAf-A3H" colab_type="code" colab={}
import ktrain
from ktrain import text
import pandas as pd
import tensorflow as tf
import timeit
import argparse
from utils import load_data, train_dev_test, make_map_from_nested, word_vectors, trigram_vectors, all_vectors
# + id="S1ZPhQ_M-A3R" colab_type="code" colab={}
def available_device(device_name):
    """Log whether computation will run on GPU or CPU; echo the name back."""
    if device_name == '/device:GPU:0':
        print('Calculations will be made on GPU.')
    else:
        print('Calculations will be made on CPU.')
    return device_name
# + id="57nkfPUU-A3Y" colab_type="code" colab={}
seed = 42
database, _ = load_data('./')
# + id="kOeQjl1oFJ0t" colab_type="code" colab={}
# NOTE(review): this cell re-defines available_device identically to the
# earlier cell; one of the two definitions could be removed.
def available_device(device_name):
    """Log whether computation will run on GPU or CPU; echo the name back."""
    if device_name == '/device:GPU:0':
        print('Calculations will be made on GPU.')
    else:
        print('Calculations will be made on CPU.')
    return device_name
# + id="MozOOVl5eXO9" colab_type="code" colab={}
database = database.drop('username', axis=1).rename(columns={"text": "TEXT", "class": "LABEL"})
# + id="pELYlZFCqv_z" colab_type="code" colab={}
max_words = max([len(t.split()) for t in database.TEXT])
# + id="kSxLKhGbq6DS" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="44ec885f-41fc-4157-86d0-b070f3bd17c6"
max_words
# + id="AvzdQfmtiiXc" colab_type="code" colab={}
def train(device_name, lr, epochs, batch, weight_save_path, df, text_column_df_name, possible_targets_list,
          predictor_save_path, maxlen=450):
    """End-to-end BERT fine-tuning via ktrain on the chosen device.

    Preprocesses `df` for BERT, trains with ktrain's autofit schedule, then
    saves checkpoints to `weight_save_path` and the final predictor to
    `predictor_save_path`.
    """
    with tf.device(device_name):
        # Tokenize/encode the dataframe and split into train/validation sets.
        train_data, val_data, preproc = text.texts_from_df(df, text_column_df_name,
                                                           possible_targets_list,
                                                           preprocess_mode='bert', maxlen=maxlen)
        classifier = text.text_classifier('bert', train_data, preproc=preproc)
        learner = ktrain.get_learner(classifier, train_data=train_data, val_data=val_data, batch_size=batch)
        # learner.fit_onecycle(lr, epochs, checkpoint_folder=weight_save_path)
        learner.autofit(lr, epochs, checkpoint_folder=weight_save_path)
        predictor = ktrain.get_predictor(learner.model, preproc)
        predictor.save(predictor_save_path)
# + id="XYQOUOooic3u" colab_type="code" outputId="d48b842c-3926-4ecf-a210-7c1cde63160c" colab={"base_uri": "https://localhost:8080/", "height": 35}
lr = 2e-5
batch = 12
epochs = 1
text_column_name = 'TEXT'
possible_targets_list = list(set((database.LABEL)))
weight_save_path = './bert_weights'
predictor_save_path = './my_predictor'
device_name = tf.test.gpu_device_name()
possible_device = available_device(device_name)
# + id="6VCJxw6IkdyQ" colab_type="code" outputId="884f7388-b47f-428c-e845-ff1f4480f490" colab={"base_uri": "https://localhost:8080/", "height": 125}
(x_train, y_train), (x_test, y_test), preproc = text.texts_from_df(database, text_column_name,
label_columns=['LABEL'], lang='ru',
preprocess_mode='bert', maxlen=max_words)
# + id="7XRQXJaijooW" colab_type="code" outputId="ed53b365-3440-42f1-ee43-4ae0f13dd54c" colab={"base_uri": "https://localhost:8080/", "height": 72}
model = text.text_classifier('bert', (x_train, y_train), preproc=preproc)
# + id="urVN4bOSj0AI" colab_type="code" colab={}
learner = ktrain.get_learner(model, train_data=(x_train, y_train), val_data=(x_test, y_test), batch_size=batch)
# + id="aA3YqL-Dm4eM" colab_type="code" outputId="f6806e50-bd55-4f08-a14c-1aa87f613c9f" colab={"base_uri": "https://localhost:8080/", "height": 127}
learner.autofit(lr, epochs, checkpoint_folder=weight_save_path)
# + id="LQFaaaq5m7nE" colab_type="code" colab={}
predictor = ktrain.get_predictor(learner.model, preproc)
predictor.save(predictor_save_path)
# + id="e0EbKipVX6e-" colab_type="code" colab={}
| Bert.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -
# <table align="left">
# <td>
#     <a href="https://colab.research.google.com/github/GoogleCloudPlatform/ai-platform-samples/blob/master/notebooks/samples/pytorch/text_classification_using_pytorch_and_ai_platform.ipynb">
# <img src="https://cloud.google.com/ml-engine/images/colab-logo-32px.png" alt="Colab logo"> Run in Colab
# </a>
# </td>
# <td>
# <a href="https://github.com/GoogleCloudPlatform/ai-platform-samples/blob/master/notebooks/samples/pytorch/text_classification_using_pytorch_and_ai_platform.ipynb">
# <img src="https://cloud.google.com/ml-engine/images/github-logo-32px.png" alt="GitHub logo">
# View on GitHub
# </a>
# </td>
# </table>
# + [markdown] colab_type="text" id="UWvrShlZjZwr"
# ## Overview
#
# This notebook illustrates the new feature of serving custom model prediction code on AI Platform. It allows us to execute arbitrary python pre-processing code prior to invoking a model, as well as post-processing on the produced predictions. In addition, you can use a model built by your **favourite Python-based ML framework**!
#
# This is all done server-side so that the client can pass data directly to AI Platform Serving in the unprocessed state.
#
# We will take advantage of this for text classification because it involves pre-processing that is not easily accomplished using native TensorFlow. Instead, we will execute the non-TensorFlow pre-processing via Python code on the server side.
#
# We will build a text classification model using [PyTorch](https://pytorch.org), while performing text preprocessing using Keras. PyTorch is an open source deep learning platform that provides a seamless path from research prototyping to production deployment.
#
#
# ## Dataset
# [Hacker News](https://bigquery.cloud.google.com/table/fh-bigquery:hackernews.stories) is one of many public datasets available in [BigQuery](https://cloud.google.com/bigquery). This dataset includes titles of articles from several data sources. For the following tutorial, we extracted the titles that belong to either GitHub, The New York Times, or TechCrunch, and saved them as CSV files in a publicly shared Cloud Storage bucket at the following location: **gs://cloud-training-demos/blogs/CMLE_custom_prediction**
#
# ## Objective
# The goal of this tutorial is to:
# 1. Process the data for text classification.
# 2. Train a [PyTorch](https://pytorch.org) Text Classifier (locally).
# 3. Deploy the [PyTorch](https://pytorch.org) Text Classifier, along with the preprocessing artifacts, to AI Platform Serving, using the Custom Online Prediction code.
#
# This tutorial focuses more on using this model with AI Platform Serving than on the design of the text classification model itself. For more details about text classification, please refer to [Google developer's Guide to Text Classification](https://developers.google.com/machine-learning/guides/text-classification/).
#
# ### Costs
#
# This tutorial uses billable components of Google Cloud Platform (GCP):
#
# * AI Platform
# * Cloud Storage
#
# Learn about [AI Platform
# pricing](https://cloud.google.com/ml-engine/docs/pricing) and [Cloud Storage
# pricing](https://cloud.google.com/storage/pricing), and use the [Pricing
# Calculator](https://cloud.google.com/products/calculator/)
# to generate a cost estimate based on your projected usage.
# -
# ### Authenticate your GCP account
#
# **If you are using AI Platform Notebooks**, your environment is already
# authenticated. Skip this step.
# **If you are using Colab**, run the cell below and follow the instructions
# when prompted to authenticate your account via oAuth.
#
# **Otherwise**, follow these steps:
#
# 1. In the GCP Console, go to the [**Create service account key**
# page](https://console.cloud.google.com/apis/credentials/serviceaccountkey).
#
# 2. From the **Service account** drop-down list, select **New service account**.
#
# 3. In the **Service account name** field, enter a name.
#
# 4. From the **Role** drop-down list, select
# **Machine Learning Engine > AI Platform Admin** and
# **Storage > Storage Object Admin**.
#
# 5. Click *Create*. A JSON file that contains your key downloads to your
# local environment.
#
# 6. Enter the path to your service account key as the
# `GOOGLE_APPLICATION_CREDENTIALS` variable in the cell below and run the cell.
# + colab={} colab_type="code" id="66JlKmfzvPhN" tags=["no_execute"]
import sys
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
if 'google.colab' in sys.modules:
from google.colab import auth as google_auth
google_auth.authenticate_user()
# If you are running this notebook locally, replace the string below with the
# path to your service account key and run this cell to authenticate your GCP
# account.
else:
# %env GOOGLE_APPLICATION_CREDENTIALS ''
# + [markdown] colab_type="text" id="eQWHJ3X7vLeQ"
# Run the following cell to install Python dependencies needed to train the model locally. When you run the training job in AI Platform,
# dependencies are preinstalled based on the [runtime
# version](https://cloud.google.com/ml-engine/docs/tensorflow/runtime-version-list)
# you choose.
# + colab={} colab_type="code" id="PEuI7Hw5PquP" tags=["no_execute"]
# %load_ext autoreload
# %autoreload 2
# + colab={"base_uri": "https://localhost:8080/", "height": 649} colab_type="code" id="r733GnVjSwgp" outputId="1c6b2d77-2534-4d02-f56f-d4a6620a4a09"
# !pip install tensorflow==1.15.2 --user
# !pip install torch --user
# + colab={"base_uri": "https://localhost:8080/", "height": 85} colab_type="code" id="bupGswGOcMW2" outputId="aba75458-3190-4d34-f9f5-b8a2f9ac3bb2"
import tensorflow as tf
import torch
import os
print(tf.__version__)
print(torch.__version__)
# -
# ### Set up your GCP project
#
# **The following steps are required, regardless of your notebook environment.**
#
# 1. [Select or create a GCP project.](https://console.cloud.google.com/cloud-resource-manager)
#
# 2. [Make sure that billing is enabled for your project.](https://cloud.google.com/billing/docs/how-to/modify-project)
#
# 3. [Enable the AI Platform ("Cloud Machine Learning Engine") and Compute Engine APIs.](https://console.cloud.google.com/flows/enableapi?apiid=ml.googleapis.com,compute_component)
#
# 4. Enter your project ID in the cell below. Then run the cell to make sure the
# Cloud SDK uses the right project for all the commands in this notebook.
#
# **Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$` into these commands.
# + colab={} colab_type="code" id="ewdEsJuVvNqm"
PROJECT_ID = '[your-project-id]' # TODO (Set up your GCP Project name)
# !gcloud config set project {PROJECT_ID}
# -
# ### Create a Cloud Storage bucket
#
# **The following steps are required, regardless of your notebook environment.**
#
# When you submit a training job using the Cloud SDK, you upload a Python package
# containing your training code to a Cloud Storage bucket. AI Platform runs
# the code from this package. In this tutorial, AI Platform also saves the
# trained model that results from your job in the same bucket. You can then
# create an AI Platform model version based on this output in order to serve
# online predictions.
#
# Set the name of your Cloud Storage bucket below. It must be unique across all
# Cloud Storage buckets.
#
# You may also change the `REGION` variable, which is used for operations
# throughout the rest of this notebook. Make sure to [choose a region where Cloud
# AI Platform services are
# available](https://cloud.google.com/ml-engine/docs/tensorflow/regions).
BUCKET_NAME = '[your-bucket-name]' #@param {type:"string"}
REGION = 'us-central1' #@param {type:"string"}
ROOT='torch_text_classification'
MODEL_DIR=os.path.join(ROOT,'models')
PACKAGES_DIR=os.path.join(ROOT,'packages')
# + colab={"base_uri": "https://localhost:8080/", "height": 102} colab_type="code" id="Qe49zwLLSXzu" outputId="58d8081d-b58e-494b-e504-065a768a9a36"
# Delete any previous artifacts from Google Cloud Storage
# !gsutil rm -r gs://{BUCKET_NAME}/{ROOT}
# + [markdown] colab_type="text" id="V12s5BFCkYhC"
# ## Download and Explore Data
# + colab={"base_uri": "https://localhost:8080/", "height": 119} colab_type="code" id="ww6dAQ7EkHzZ" outputId="af58faac-c405-4e14-a644-a7f54dfa08c9" language="bash"
# gsutil cp gs://cloud-training-demos/blogs/CMLE_custom_prediction/keras_text_pre_processing/train.tsv .
# gsutil cp gs://cloud-training-demos/blogs/CMLE_custom_prediction/keras_text_pre_processing/eval.tsv .
# + colab={} colab_type="code" id="g1XEyk4toH76"
# !head eval.tsv
# + [markdown] colab_type="text" id="bHqYoDlFsw02"
# ## Preprocessing
# + [markdown] colab_type="text" id="yIarI3u3r6vd"
# ### Pre-processing class to be used in both training and serving
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="GdkNipQtr8-P" outputId="8dd22601-e4f6-4a2d-9be6-22f7fe94c493"
# %%writefile preprocess.py
from tensorflow.python.keras.preprocessing import sequence
from tensorflow.keras.preprocessing import text
class TextPreprocessor(object):
    """Fits a Keras tokenizer on a corpus and converts raw text into
    fixed-length integer sequences.

    The same (pickled) instance is used at training and serving time so
    tokenization stays consistent. Fix: the misspelled `_vocabb_size`
    attribute is renamed to `_vocab_size` (both uses updated together).
    """
    def __init__(self, vocab_size, max_sequence_length):
        # Maximum number of words kept by the tokenizer.
        self._vocab_size = vocab_size
        # Every output sequence is padded/truncated to this length.
        self._max_sequence_length = max_sequence_length
        self._tokenizer = None
    def fit(self, text_list):
        """Build the vocabulary from the input corpus."""
        tokenizer = text.Tokenizer(num_words=self._vocab_size)
        tokenizer.fit_on_texts(text_list)
        self._tokenizer = tokenizer
    def transform(self, text_list):
        """Map texts to padded integer sequences of the configured length."""
        text_sequence = self._tokenizer.texts_to_sequences(text_list)
        # Sequences shorter than the max length are padded at the beginning
        # and longer ones truncated at the beginning (Keras defaults).
        padded_text_sequence = sequence.pad_sequences(
            text_sequence, maxlen=self._max_sequence_length)
        return padded_text_sequence
# + [markdown] colab_type="text" id="pEk_bHo08a1n"
# ### Test Prepocessing Locally
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="sKtHuVUw8kFO" outputId="0d881cc9-b898-42a6-b5c9-6faaf79d4da4"
from preprocess import TextPreprocessor
processor = TextPreprocessor(5, 5)
processor.fit(['hello machine learning'])
processor.transform(['hello machine learning'])
# + [markdown] colab_type="text" id="lsTGmBn4s2fC"
# ## Model Creation
# + [markdown] colab_type="text" id="eFKEHCk_WYQU"
# ### Metadata
# + colab={} colab_type="code" id="P-1_lAwsWNFf"
CLASSES = {'github': 0, 'nytimes': 1, 'techcrunch': 2}  # label-to-int mapping
NUM_CLASSES = 3  # number of target classes; must match len(CLASSES)
VOCAB_SIZE = 20000  # Limit on the vocabulary size used for tokenization
MAX_SEQUENCE_LENGTH = 50  # Sentences will be truncated/padded to this length
# + [markdown] colab_type="text" id="jkzmfu3jV-78"
# ### Prepare data for training and evaluation
# + colab={} colab_type="code" id="HDX_3jquWCBY"
import pandas as pd
import numpy as np
from preprocess import TextPreprocessor
def load_data(train_data_path, eval_data_path):
    """Read train/eval TSVs and map string labels to integer class ids.

    Returns ((train_texts, train_labels), (eval_texts, eval_labels)).
    """
    column_names = ('label', 'text')
    # Shuffle the training rows; evaluation order does not matter.
    train_df = pd.read_csv(train_data_path, names=column_names, sep='\t').sample(frac=1)
    eval_df = pd.read_csv(eval_data_path, names=column_names, sep='\t')
    train_split = (list(train_df['text']), np.array(train_df['label'].map(CLASSES)))
    eval_split = (list(eval_df['text']), np.array(eval_df['label'].map(CLASSES)))
    return (train_split, eval_split)
((train_texts, train_labels), (eval_texts, eval_labels)) = load_data(
'train.tsv', 'eval.tsv')
# Create vocabulary from training corpus.
processor = TextPreprocessor(VOCAB_SIZE, MAX_SEQUENCE_LENGTH)
processor.fit(train_texts)
# Preprocess the data
train_texts_vectorized = processor.transform(train_texts)
eval_texts_vectorized = processor.transform(eval_texts)
# + [markdown] colab_type="text" id="r9sDfoXesC1d"
# ### Build the model
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="E7hifM2esEe9" outputId="ca199fd7-bb34-4cee-c1fa-c69bbc663380"
# %%writefile torch_model.py
import torch
import torch.nn as nn
import torch.nn.functional as F
class TorchTextClassifier(nn.Module):
    """Two-stage 1-D CNN text classifier over embedded token sequences.

    Note: Conv1d receives (batch, seq_length, embedding_dim), so sequence
    positions act as input channels and the convolution slides along the
    embedding dimension.
    """
    def __init__(self, vocab_size, embedding_dim, seq_length, num_classes,
                 num_filters, kernel_size, pool_size, dropout_rate):
        super(TorchTextClassifier, self).__init__()
        self.embeddings = nn.Embedding(num_embeddings=vocab_size, embedding_dim=embedding_dim)
        self.conv1 = nn.Conv1d(seq_length, num_filters, kernel_size)
        self.max_pool1 = nn.MaxPool1d(pool_size)
        self.conv2 = nn.Conv1d(num_filters, num_filters * 2, kernel_size)
        self.dropout = nn.Dropout(dropout_rate)
        self.dense = nn.Linear(num_filters * 2, num_classes)

    def forward(self, x):
        # Embed tokens and regularize, then two conv+ReLU stages.
        hidden = self.dropout(self.embeddings(x))
        hidden = self.max_pool1(F.relu(self.conv1(hidden)))
        hidden = F.relu(self.conv2(hidden))
        # Global max-pool over remaining positions, then classify.
        hidden = F.max_pool1d(hidden, hidden.size()[2]).squeeze(2)
        hidden = self.dropout(hidden)
        return F.softmax(self.dense(hidden), 1)
# + [markdown] colab_type="text" id="53Gef0A4sjdV"
# ### Train and save the model
# + colab={"base_uri": "https://localhost:8080/", "height": 391} colab_type="code" id="PCkmyHfASX0g" outputId="354cb0fc-bbcc-452a-cf50-17bf0fd5b0b4"
import torch
from torch.autograd import Variable
import torch.nn.functional as F
LEARNING_RATE=.001
FILTERS=64
DROPOUT_RATE=0.2
EMBEDDING_DIM=200
KERNEL_SIZE=3
POOL_SIZE=3
NUM_EPOCH=1
BATCH_SIZE=128
train_size = len(train_texts)
steps_per_epoch = int(len(train_labels)/BATCH_SIZE)
print("Train size: {}".format(train_size))
print("Batch size: {}".format(BATCH_SIZE))
print("Number of epochs: {}".format(NUM_EPOCH))
print("Steps per epoch: {}".format(steps_per_epoch))
print("Vocab Size: {}".format(VOCAB_SIZE))
print("Embed Dimensions: {}".format(EMBEDDING_DIM))
print("Sequence Length: {}".format(MAX_SEQUENCE_LENGTH))
print("")
def get_batch(step):
    """Return the `step`-th mini-batch of (texts, labels) as long tensors."""
    lo = step * BATCH_SIZE
    hi = lo + BATCH_SIZE
    texts = Variable(torch.Tensor(train_texts_vectorized[lo:hi]).long())
    targets = Variable(torch.Tensor(train_labels[lo:hi]).long())
    return texts, targets
from torch_model import TorchTextClassifier
model = TorchTextClassifier(VOCAB_SIZE,
EMBEDDING_DIM,
MAX_SEQUENCE_LENGTH,
NUM_CLASSES,
FILTERS,
KERNEL_SIZE,
POOL_SIZE,
DROPOUT_RATE)
model.train()
loss_metric = F.cross_entropy
optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
# Mini-batch SGD loop: one optimizer step per batch, progress logged every
# 50 batches and at the end of each epoch.
for epoch in range(NUM_EPOCH):
    for step in range(steps_per_epoch):
        x, y = get_batch(step)
        optimizer.zero_grad()
        y_pred = model(x)
        loss = loss_metric(y_pred, y)
        loss.backward()
        optimizer.step()
        if step % 50 == 0:
            print('Batch [{}/{}] Loss: {}'.format(step+1, steps_per_epoch, round(loss.item(),5)))
    print('Epoch [{}/{}] Loss: {}'.format(epoch+1, NUM_EPOCH, round(loss.item(),5)))
# Fix: the original passed (epoch+1, NUM_EPOCH, loss) to a single-placeholder
# format string, so it printed the epoch number instead of the final loss.
print('Final Loss: {}'.format(round(loss.item(), 5)))
# Persist the whole module (architecture + weights) for serving.
torch.save(model, 'torch_saved_model.pt')
# + [markdown] colab_type="text" id="MakbLwTbuMsZ"
# ### Save pre-processing object
#
# We need to save this so the same tokenizer used at training can be used to pre-process during serving
# + colab={} colab_type="code" id="sziwQgs0uZzx"
import pickle
# Persist the fitted text pre-processor so serving can tokenize exactly as
# training did (loaded back in CustomModelPrediction.from_path).
with open('./processor_state.pkl', 'wb') as f:
    pickle.dump(processor, f)
# + [markdown] colab_type="text" id="4AWJZP3stCta"
# ## Custom Model Prediction Preparation
# + [markdown] colab_type="text" id="KeR_jDYjuymX"
# ### Copy model and pre-processing object to GCS
# + colab={"base_uri": "https://localhost:8080/", "height": 119} colab_type="code" id="OJwsuGK3ub4S" outputId="ec1facd0-b502-4fc6-d41a-52fffaf3bb56"
# !gsutil cp torch_saved_model.pt gs://{BUCKET_NAME}/{MODEL_DIR}/
# !gsutil cp processor_state.pkl gs://{BUCKET_NAME}/{MODEL_DIR}/
# + [markdown] colab_type="text" id="DZ0H1GKAueAp"
# ### Define Model Class
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="xLvpSsMiufVr" outputId="4f9aee42-7c50-45f4-f667-72ee55f051e2"
# %%writefile model.py
import os
import pickle
import numpy as np
import torch
from torch.autograd import Variable
class CustomModelPrediction(object):
    """AI Platform custom-prediction wrapper around the trained PyTorch model."""

    def __init__(self, model, processor):
        self._model = model          # trained TorchTextClassifier
        self._processor = processor  # fitted text pre-processor

    def _postprocess(self, predictions):
        """Map a batch of class-score rows to their source-name labels."""
        labels = ['github', 'nytimes', 'techcrunch']
        return [labels[np.argmax(row)] for row in predictions.detach().numpy()]

    def predict(self, instances, **kwargs):
        """Vectorize raw headlines, run the model, and return string labels."""
        vectorized = self._processor.transform(instances)
        scores = self._model(Variable(torch.Tensor(vectorized).long()))
        return self._postprocess(scores)

    @classmethod
    def from_path(cls, model_dir):
        """Load the saved model and pre-processor from `model_dir`."""
        import torch
        import torch_model
        model = torch.load(os.path.join(model_dir, 'torch_saved_model.pt'))
        model.eval()  # inference mode: disables dropout
        with open(os.path.join(model_dir, 'processor_state.pkl'), 'rb') as f:
            processor = pickle.load(f)
        return cls(model, processor)
# + [markdown] colab_type="text" id="kL-jv3GDg_zD"
# ### Test Model Class Locally
# + colab={} colab_type="code" id="ynL_ovR32Od0"
# Headlines for Predictions — three per class, used to smoke-test the model.
techcrunch=[
    'Uber shuts down self-driving trucks unit',
    'Grover raises €37M Series A to offer latest tech products as a subscription',
    'Tech companies can now bid on the Pentagon’s $10B cloud contract'
]
nytimes=[
    '‘Lopping,’ ‘Tips’ and the ‘Z-List’: Bias Lawsuit Explores Harvard’s Admissions',
    'A $3B Plan to Turn Hoover Dam into a Giant Battery',
    'A MeToo Reckoning in China’s Workplace Amid Wave of Accusations'
]
github=[
    'Show HN: Moon – 3kb JavaScript UI compiler',
    'Show HN: Hello, a CLI tool for managing social media',
    'Firefox Nightly added support for time-travel debugging'
]
requests = (techcrunch+nytimes+github)
# + colab={"base_uri": "https://localhost:8080/", "height": 170} colab_type="code" id="zx53zlPK6YIK" outputId="ec8bcefd-786e-4fc4-d485-f5c0f25438b5"
# Exercise the serving wrapper locally before deploying: loads the saved model
# and processor from the current directory.
from model import CustomModelPrediction
local_prediction = CustomModelPrediction.from_path('.')
local_prediction.predict(requests)
# + [markdown] colab_type="text" id="Q4BEQqoGyZP1"
# ### Package up files and copy to GCS
#
# Create a setup.py script to bundle **model.py**,**preprocess.py** and **torch_model.py** in a tarball package. Notice that setup.py does not include the dependencies of `model.py` in the package. These dependencies are provided to your model version in other ways:
#
# `numpy` and `google-cloud-storage` are both included as part of AI Platform Prediction runtime version 1.15.
#
# `torch` is provided in a separate package, as described in a following section.
#
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="DgQ9UJG_u6Jk" outputId="f7fed653-37f5-4f0f-ce3b-23d92e0e6705"
# %%writefile setup.py
from setuptools import setup

# The model's runtime dependencies are intentionally NOT bundled here:
# numpy and google-cloud-storage ship with the AI Platform runtime, and
# torch is supplied as a separate --package-uris wheel at version-creation
# time (see the deployment cell below). The original listed 'keras', which
# model.py never imports — a stale carry-over from the Keras tutorial.
REQUIRED_PACKAGES = []
setup(
    name="text_classification",
    version="0.1",
    scripts=["preprocess.py", "model.py", "torch_model.py"],
    include_package_data=True,
    install_requires=REQUIRED_PACKAGES
)
# + colab={"base_uri": "https://localhost:8080/", "height": 649} colab_type="code" id="IjwWftmpybCI" outputId="5ba3cbba-6110-464a-9844-7cd7578debc4"
# !python setup.py sdist
# !gsutil cp ./dist/text_classification-0.1.tar.gz gs://{BUCKET_NAME}/{PACKAGES_DIR}/text_classification-0.1.tar.gz
# + [markdown] colab_type="text" id="pU4D8prVtLNI"
# ## Model Deployment to AI Platform
# + colab={} colab_type="code" id="WUEA9FKcy8fM"
# Deployment settings for AI Platform Prediction.
MODEL_NAME='torch_text_classification'
MODEL_VERSION='v1'
RUNTIME_VERSION='1.15'   # runtime that bundles numpy and google-cloud-storage
REGION='us-central1'
# + colab={"base_uri": "https://localhost:8080/", "height": 102} colab_type="code" id="YpZkN4o71PUY" outputId="4937b721-664c-42e9-e769-215ac7405432"
# Delete model version if any
# ! gcloud ai-platform versions delete {MODEL_VERSION} --model {MODEL_NAME} --quiet # run if version already created
# Delete model resource
# ! gcloud ai-platform models delete {MODEL_NAME} --quiet
# + colab={"base_uri": "https://localhost:8080/", "height": 122} colab_type="code" id="JBao9v7e1OU0" outputId="af4b8219-f9c5-4f8f-d9a6-b0acf561f7de"
# !gcloud beta ai-platform models create {MODEL_NAME} --regions {REGION} --enable-logging --enable-console-logging
# -
# # Pytorch compatible packages
#
# You need to specify two Python packages when you create your version resource. One of these is the package containing `model.py` that you uploaded to Cloud Storage in a previous step. The other is a package containing the version of PyTorch that you need.
#
# Google Cloud provides a collection of PyTorch packages in the `gs://cloud-ai-pytorch` Cloud Storage bucket. These packages are mirrored from the official builds.
#
# For this tutorial, use `gs://cloud-ai-pytorch/torch-1.3.1+cpu-cp37-cp37m-linux_x86_64.whl` as your PyTorch package. This provides your version resource with PyTorch 1.3.1 for Python 3.7, built to run on a CPU in Linux.
#
# Use the following command to create your version resource:
# + colab={} colab_type="code" id="WbE2cKVE1PaX"
# !gcloud beta ai-platform versions create {MODEL_VERSION} --model {MODEL_NAME} \
# --origin=gs://{BUCKET_NAME}/{MODEL_DIR}/ \
# --python-version=3.7 \
# --runtime-version={RUNTIME_VERSION} \
# --package-uris=gs://{BUCKET_NAME}/{PACKAGES_DIR}/text_classification-0.1.tar.gz,gs://cloud-ai-pytorch/torch-1.3.1+cpu-cp37-cp37m-linux_x86_64.whl \
# --machine-type=mls1-c4-m4 \
# --prediction-class=model.CustomModelPrediction
# + [markdown] colab_type="text" id="Jn6EFbPUzUTm"
# ## Online Predictions from AI Platform Prediction
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="mNFvql_-zC4N" outputId="38101952-d6e7-4714-9366-705535670a7c"
from googleapiclient import discovery
from oauth2client.client import GoogleCredentials
import json
# JSON format the requests
request_data = {'instances': requests}
# Authenticate and call CMLE prediction API
credentials = GoogleCredentials.get_application_default()
api = discovery.build('ml', 'v1', credentials=credentials)
# Fully-qualified version resource: projects/<p>/models/<m>/versions/<v>.
# NOTE(review): PROJECT_ID is defined in an earlier cell of this notebook.
parent = 'projects/{}/models/{}/versions/{}'.format(PROJECT_ID, MODEL_NAME, MODEL_VERSION)
print("Model full name: {}".format(parent))
response = api.projects().predict(body=request_data, name=parent).execute()
print(response['predictions'])
# -
# ## Cleaning up
#
# To clean up all GCP resources used in this project, you can [delete the GCP
# project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial.
#
# Alternatively, you can clean up individual resources by running the following
# commands:
# +
# Delete model version resource
# !gcloud ai-platform versions delete {MODEL_VERSION} --model {MODEL_NAME} --quiet
# Delete model resource
# ! gcloud ai-platform models delete {MODEL_NAME} --quiet
# + [markdown] colab_type="text" id="Pm3d3buc0Bi1"
# ## Authors
# + [markdown] colab_type="text" id="CA7T5kIZ0Bsq"
# <NAME> & <NAME>
#
# **Disclaimer**: This is not an official Google product. The sample code is provided for educational purposes only.
#
| notebooks/samples/pytorch/text_classification/text_classification_using_pytorch_and_ai_platform.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/alirezash97/Time-frequency-analysis-course/blob/main/TayPaper/Tay2005.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="ap6z5iG2nPKd"
import math
import numpy as np
import matplotlib.pyplot as plt
import sympy as sym
from sympy import *
# + id="Mx-6dOFOl3d-"
def K(x):
    """Truncated Bernstein-style sum: sum_i C(N, i) x^i (1-x)^(N-i)
    for i < (N-1)/2, where N = filter_length (module global set by
    main_function). Works on floats and sympy symbols alike.
    """
    terms = []
    for idx in range(int((filter_length - 1) / 2)):
        binom = math.factorial(filter_length) / (math.factorial(idx) * math.factorial(filter_length - idx))
        bernstein = np.multiply(np.power(x, idx), np.power((1 - x), filter_length - idx))
        terms.append(np.multiply(binom, bernstein))
    return np.sum(terms)
######################################
# + id="zCF3VFlSqASj"
def kl(x, l):
    """Antisymmetric Bernstein term:
    C(N, l) * (x^l (1-x)^(N-l) - x^(N-l) (1-x)^l), N = filter_length.
    """
    binom = math.factorial(filter_length) / (math.factorial(l) * math.factorial(filter_length - l))
    forward = np.multiply(np.power(x, l), np.power((1 - x), (filter_length - l)))
    mirrored = np.multiply(np.power(x, (filter_length - l)), np.power((1 - x), l))
    return np.multiply(binom, (forward - mirrored))
#####################################
# + id="u4yEhsr_wcK1"
def B(x, alpha_list):
    """Pinned polynomial B(x) = K(x) - sum_l alpha_l * kl(x, l), where l runs
    from Vanishing_moments up to (filter_length-1)/2 - 1 (module globals).
    """
    weighted = []
    for l in range(Vanishing_moments, int((filter_length - 1) / 2)):
        weighted.append(np.multiply(kl(x, l), alpha_list[l]))
    return K(x) - np.sum(weighted)
# + id="JJjsu_9bh38m"
def main_function():
    """Interactively design the zero-pinned polynomial (Tay 2005).

    Prompts for the filter length, number of vanishing moments and the pin
    locations, symbolically solves for the free alpha coefficients with
    sympy, and plots B(x) on [0, 1].

    Returns:
        (alpha_values, alpha_results): dense numpy vector of all alphas,
        and the sympy solution dict for the free ones.
    """
    # inputs
    global filter_length
    global Vanishing_moments
    filter_length = int(input("Please enter filter length: "))
    Vanishing_moments = int(input("Please enter the number of vanishing moments: "))
    # (filter_length-1)/2 - Vanishing_moments must be even so the free
    # coefficients pair up as (value, derivative) constraints per pin.
    while int(((filter_length-1)/2-Vanishing_moments)) %2 != 0:
        Vanishing_moments = int(input("Please enter another number for vanishing moments: "))
    else:
        pass
    global number_of_pin
    # each pin consumes two equations: B(x) = 0 and B'(x) = 0
    number_of_pin = int(1/2*((filter_length - 1) /2-Vanishing_moments))
    print("You have to choose %d"%number_of_pin, "pins")
    global zero_pinning
    zero_pinning = []
    for i in range(number_of_pin):
        temp = float(input("Enter %dth pin: " %(i+1)))
        zero_pinning.append(temp)
    #############
    # create symbols
    global alpha_list
    alpha_list = []
    for i in range(1, filter_length+1):
        alpha_list.append(sym.symbols('alpha%d'%i))
    global x_list
    x_list = []
    for i in range(len(zero_pinning)):
        x_list.append(sym.symbols('x%d'%i))
    #############
    # create equations: B(x_i) = 0 and dB/dx(x_i) = 0 for every pin
    global my_equations
    my_equations = []
    for i in range(len(x_list)):
        Eq1 = sym.Eq(B(x_list[i], alpha_list), 0)
        my_equations.append(Eq1)
        # NOTE(review): single-argument sym.Eq(expr) means expr == 0 but is
        # rejected by newer sympy releases — confirm the pinned sympy version.
        Eq2 = sym.Eq(diff(B(x_list[i], alpha_list), x_list[i]))
        my_equations.append(Eq2)
    ##############
    # replace x with zero pinning values (equations come in pairs, so the
    # pin index for equation i is floor(i/2))
    global replaced_equations
    replaced_equations = []
    for i, equation in enumerate(my_equations):
        replaced = equation.subs(x_list[math.floor(i/2)], zero_pinning[math.floor(i/2)])
        replaced_equations.append(replaced)
    ###############
    # find alphas using equations
    global alpha_results
    alpha_results = solve([i for i in replaced_equations], [j for j in alpha_list[Vanishing_moments : int((filter_length-1)/2)]])
    ###############
    # plot
    my_array = []
    for key in alpha_results:
        my_array.append(alpha_results[key])
    # dense alpha vector: zeros outside the solved (free) coefficient range
    alpha_values = np.zeros((len(alpha_list)))
    alpha_values[Vanishing_moments : int((filter_length-1)/2)] = my_array
    x = np.linspace(0, 1, num=100)
    fx = []
    for i in range(len(x)):
        fx.append(B(x[i], alpha_values))
    plt.plot(x, fx)
    return alpha_values, alpha_results
# + colab={"base_uri": "https://localhost:8080/", "height": 349} id="1FwQhrogjE_n" outputId="064e7c74-dc12-45c5-8210-e06fea67a1d1"
# Run the interactive design (each call prompts for parameters on stdin).
alphas_list, alpha_results = main_function()
print(alpha_results)
# + colab={"base_uri": "https://localhost:8080/", "height": 366} id="fLhPpafZn6Cf" outputId="e9cd886f-7d91-44fe-ea3e-84b730f4442e"
alphas_list, alpha_results = main_function()
print(alpha_results)
# + colab={"base_uri": "https://localhost:8080/", "height": 366} id="HEmd_MYLoHkO" outputId="05bebcfd-5a9f-47f0-e9bb-bb11975d2ccc"
alphas_list, alpha_results = main_function()
print(alpha_results)
# + colab={"base_uri": "https://localhost:8080/"} id="EaAWQ_cmqgN2" outputId="9b5826ad-e60b-47c6-95fa-0665be09ddb4"
# spectral factorization: substitute x -> -(z/4) * (1 - z^-1)^2 so that B is
# expressed as a Laurent polynomial in z
z = sym.symbols('z')
spectoral_factorization = np.multiply(-1/4*z, np.power((1-np.power(z, -1)), 2))
based_on_z = B(spectoral_factorization, alphas_list)
print(based_on_z)
# + id="DCGcrsKKvccd"
| TayPaper/Tay2005.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/andrewwgordon/WMAPPowerSpectrum/blob/master/WMAP_power_spectrum_analysis_with_HealPy.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="-qGGfzAEt_WS" colab_type="code" cellView="both" colab={}
#@title
# !pip install healpy
# !pip install astroML
# + id="gqHxxzLFulR2" colab_type="code" colab={}
# %matplotlib inline
import numpy as np
from matplotlib import pyplot as plt
# warning: due to a bug in healpy, importing it before pylab can cause
# a segmentation fault in some circumstances.
import healpy as hp
from astroML.datasets import fetch_wmap_temperatures
#------------------------------------------------------------
# Fetch the data
wmap_unmasked = fetch_wmap_temperatures(masked=False)
wmap_masked = fetch_wmap_temperatures(masked=True)
# White-noise reference map with the same shape/mask (sigma = 0.062 mK)
white_noise = np.ma.asarray(np.random.normal(0, 0.062, wmap_masked.shape))
#------------------------------------------------------------
# plot the unmasked map
fig = plt.figure(1)
hp.mollview(wmap_unmasked, min=-1, max=1, title='Unmasked map',
            fig=1, unit=r'$\Delta$T (mK)')
#------------------------------------------------------------
# plot the masked map
# filled() fills the masked regions with a null value.
fig = plt.figure(2)
hp.mollview(wmap_masked.filled(), title='Masked map',
            fig=2, unit=r'$\Delta$T (mK)')
#------------------------------------------------------------
# compute and plot the power spectrum
# anafast computes the angular power spectrum C_l up to lmax
cl = hp.anafast(wmap_masked.filled(), lmax=1024)
ell = np.arange(len(cl))
cl_white = hp.anafast(white_noise, lmax=1024)
fig = plt.figure(3)
ax = fig.add_subplot(111)
# conventionally plotted as l(l+1)C_l
ax.scatter(ell, ell * (ell + 1) * cl,
           s=4, c='black', lw=0,
           label='data')
ax.scatter(ell, ell * (ell + 1) * cl_white,
           s=4, c='gray', lw=0,
           label='white noise')
ax.set_xlabel(r'$\ell$')
ax.set_ylabel(r'$\ell(\ell+1)C_\ell$')
ax.set_title('Angular Power (not mask corrected)')
ax.legend(loc='upper right')
ax.grid()
ax.set_xlim(0, 1100)
plt.show()
# + id="asdgGl2JyCO-" colab_type="code" colab={}
wmap_unmasked
| WMAP_power_spectrum_analysis_with_HealPy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import random
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import train_test_split
from keras.layers import Embedding, Dense, Input, Flatten, Concatenate, Dropout
from keras.models import Model, load_model, Sequential
from keras.utils import to_categorical, plot_model
import pydot
import keras.optimizers as kop
from keras import backend as K
import json
from nltk.corpus import movie_reviews
from NNmodels import OntoEmbeding2, Relation
MODEL_NAME = "IMDB_ONTO_EMBEDING2"
# Load the entity/vocabulary/relation metadata produced when the ontology
# embedding was trained.
ttt = json.load(open(MODEL_NAME+'_data.json'))
entities = ttt['entities']
vocab = ttt['vocabulary']
relations = ttt['relations']
del ttt
cv = CountVectorizer(vocabulary=vocab)
# Relation is a custom Keras layer, so it must be registered via custom_objects.
modelonto = load_model(MODEL_NAME+'.model',custom_objects={'Relation':Relation})
# network parameters
sentence_size = len(vocab)
neurons_per_ent = 10
neurons_per_rel = 2*neurons_per_ent
# build the ontology network
# input
sentence_input = Input(shape=(sentence_size,), name='input')
# network
onto = OntoEmbeding2(entities,relations)(sentence_input,neurons_per_ent,neurons_per_rel)
# final model
prmodel = Model(inputs=sentence_input, outputs=onto,name='OntologyEmbeding')
# copy the weights of the already-trained layers into the new network
for i in prmodel.layers:
    if i.weights and i.name:
        try:
            tt = modelonto.get_layer(i.name)
        except ValueError:
            # no counterpart layer in the trained model; keep its fresh weights
            continue
        i.set_weights(tt.get_weights())
prmodel.trainable = False  # freeze the embedding while training the classifier head
model = Sequential()
model.add(prmodel)
ll = len(entities)*neurons_per_ent + len(relations)*neurons_per_rel
model.add(Dense(ll,activation = 'relu'))
model.add(Dense(8*ll//10,activation = 'relu'))
model.add(Dense(8*ll//10,activation = 'relu'))
model.add(Dense(1, activation = 'sigmoid', name='out'))
#opt = kop.SGD(lr=0.5,momentum=0.9,decay=0.9,nesterov=True)
model.compile(optimizer='RMSprop', loss='binary_crossentropy', metrics=['acc'])
del modelonto
del modelonto
# +
MODEL_NAME +='_problem'
# NOTE(review): SVG appears unused in this cell — confirm before removing.
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
# Render the architecture diagram to PDF bytes and save it to disk.
mm=model_to_dot(model, rankdir='LR').create(prog='dot', format='pdf')
with open(MODEL_NAME+'.pdf','wb') as f:
    f.write(mm)
# -
model.summary()
# +
#xtrain,xtest,ytrain,ytest = train_test_split(data, res, train_size=0.9)
#del data,res
# -
import pickle
# Pre-computed train/test splits saved by an earlier run (see the commented
# train_test_split cell above).
xtrain=pickle.load(open('xtrain.pickle','rb'))
ytrain=pickle.load(open('ytrain.pickle','rb'))
xtest=pickle.load(open('xtest.pickle','rb'))
ytest=pickle.load(open('ytest.pickle','rb'))
ltrain=len(xtrain)
ltest = len(xtest)
def generate(batch_size=32, train = False):
    """Infinite generator of (features, labels) batches for fit_generator.

    Samples `batch_size` indices uniformly at random (with replacement) from
    the train or test split, vectorizes the texts with the module-level
    CountVectorizer `cv`, and yields dense arrays.
    """
    while True:
        if train:
            picks = np.random.randint(ltrain, size=batch_size)
            texts = [xtrain[i] for i in picks]
            labels = [ytrain[i] for i in picks]
        else:
            picks = np.random.randint(ltest, size=batch_size)
            texts = [xtest[i] for i in picks]
            labels = [ytest[i] for i in picks]
        yield cv.transform(texts).toarray(), np.array(labels)
# Phase 1: train the classifier head with the ontology embedding frozen.
model.fit_generator(generate(100,True), validation_data=generate(100), validation_steps=10, epochs=10, steps_per_epoch=100)
model.save(MODEL_NAME+'.model')
# Phase 2: unfreeze the embedding and fine-tune end-to-end (recompile is
# required for the trainable flag to take effect).
model.layers[0].trainable=True
model.compile(optimizer='RMSprop', loss='binary_crossentropy', metrics=['acc'])
model.summary()
model.fit_generator(generate(100,True), validation_data=generate(100), validation_steps=10, epochs=50, steps_per_epoch=100)
model.save(MODEL_NAME+'.model')
loss, accuracy = model.evaluate_generator(generate(100),100)
print(f"Test loss: {loss:.3}")
print(f"Test accuracy: {accuracy:.3%}")
| notebooks/IMDB_train_problem.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python3
# language: python
# name: Python3
# ---
# ### Qiskit Metal Overview
#
# #### You'll use Qiskit Metal in 4 stages
#
# 1. Choose a design class to instantiate.
#
# 2. Add and modify pre-built components (qubits, coplanar wave guides, etc.) from the QComponent library to your design. (Or, [create your own components](../3%20QComponent%20Designer/3.1%20Creating%20a%20QComponent%20-%20Basic.ipynb))
#
#
#
# 3. Render to Simulate & Analyze
# * Current Rendering Options:
# * Ansys
# * HFSS Renderer - for high frequency simulations (eigenmode, modal, terminal)
# * EPR Analysis - Uses eigenmode simulation to perform energy participation ratio analysis
# * Q3D Renderer - for extracting equivalent circuit values of a layout, such as capacitance
# * LOM Analysis - Uses the capacitance matrix from Q3D to determine the parameters of a transmon qubit
#
#
#
# 4. Render for Fabrication
# * Current Rendering Options:
# * GDS
#
#
#
#
# These steps are shown visually below in the following diagram
# 
# ### *_This tutorial is for steps 1 and 2._*
# ### Using this Tutorial
#
# Metal can be used three different ways:
# * Jupyter Notebooks
# * For interactive code
# * To Use:
# 1. Just press run :D
# * Python scripts
# * For setting internal breakpoints
# * To Use:
# 1. Copy snippets of code from these Notebooks and save as a Python file.
# 2. Run in your favorite editor. (We like VS Code!)
# * Metal GUI
# * _In the future, we anticipate Metal GUI to have full functionality._
# * To Use:
# 1. You _must_ first use either Jupyter Notebooks or Python Scripts to add components to your QDesign.
# 2. Use the GUI to visualize and manually edit your components.
#
#
# Let's dive in!
#
# ### QDesign (need-to-know)
# Each time you create a new quantum circuit design, you start by instantiating a QDesign class.
#
# There are different design classes in the design library `qiskit_metal.designs` for different design layouts. For example the design class `DesignPlanar` is best for 2D circuit designs.
#
# Every design class (except `QDesign`) inherits from the base `QDesign` class. `QDesign` defines basic functionality for all other design classes and should not be directly instantiated.
# ### QDesign (in-depth)
#
# QDesign keeps track of each of the components (qubits, coplanar wave guides, etc.) that you add to your circuit and the relationships between them.
#
# As you can see below, QDesign keeps track of many things:
# 
#
# * QComponents - do *not* directly instantiate
# * Components of your design
# * Example:
# * Transmon Qubits
# * CPWs
# * etc.
# * Upon creation, the QComponent's `make` function runs and adds the QComponent's geometries (rectangles, line segments, etc.) to the QGeometryTables
#
#
# * QGeometryTables - instantiate during init of QDesign
# * Stores backend information about components
# * Populated when QComponents are added to QDesign
#
#
# * QNet.net_info - instantiate during init of QDesign
# * Stores backend information on existing connections between components
# * Instantiated in the backend by QDesign
# * Populated during connections of QComponents
#
# * QRenderer - instantiate during init of QDesign
# * This is what allows you to export your designs into Ansys, GDS, etc.
# * `qiskit_metal/config.py` contains list of all instantiated renderers
#
#
# # Coding Time!
#
# Today we'll be creating a 2D design and adding a single qcomponent
#
# So, let us dive right in. For convenience, let's begin by enabling [automatic reloading of modules](https://ipython.readthedocs.io/en/stable/config/extensions/autoreload.html?highlight=autoreload) when they change.
# %load_ext autoreload
# %autoreload 2
# ## Import Qiskit Metal
# +
import qiskit_metal as metal
from qiskit_metal import designs, draw
from qiskit_metal import MetalGUI, Dict, open_docs
# %metal_heading Welcome to Qiskit Metal!
# -
# Here, we import the folders designs, draw, MetalGUI, Dict, and open_docs from the qiskit_metal code.
# ## My First Quantum Design (QDesign)
# Choose a design layout.
# We will start with the simple planar QDesign.
design = designs.DesignPlanar()
# +
# Since we are likely to be making many changes while tuning and modifying our design, we will enable overwriting.
# If you disable the next line, then you will need to delete a component [<component>.delete()] before recreating it.
design.overwrite_enabled = True
# -
# %metal_heading Hello Quantum World!
# We can also check all of the chip properties to see if we want to change the size or any other parameter.
# By default the name of chip is "main".
design.chips.main
# Resize the chip (dimensions are strings with units, parsed by Metal).
design.chips.main.size.size_x = '11mm'
design.chips.main.size.size_y = '9mm'
# Launch Qiskit Metal GUI to interactively view, edit, and simulate QDesign: Metal GUI
gui = MetalGUI(design)
# ## My First Quantum Component (QComponent)
# ### A transmon qubit
# You can create a ready-made transmon qubit from the QComponent Library, `qiskit_metal.qlibrary.qubits`.
# `transmon_pocket.py` is the file containing our qubit so `transmon_pocket` is the module we import.
# The `TransmonPocket` class is our transmon qubit. Like all quantum components, `TransmonPocket` inherits from `QComponent`
#
# * Let's create a new qubit by creating an object of this class.
# +
# Select a QComponent to create (The QComponent is a python class named `TransmonPocket`)
from qiskit_metal.qlibrary.qubits.transmon_pocket import TransmonPocket
# One connection pad ("a") is requested via the options dict.
q1 = TransmonPocket(design, 'Q1', options=dict(connection_pads=dict(a=dict()))) # Create a new Transmon Pocket object with name 'Q1'
gui.rebuild() # rebuild the design and plot
gui.edit_component('Q1') # set Q1 as the editable component
gui.autoscale() #resize GUI to see QComponent
# -
# Let's see what the Q1 object looks like
q1 #print Q1 information
# #### What are the default options?
# The QComponent comes with some default options like the length of the pads for our transmon pocket.
# * Options are parsed internally by Qiskit Metal via the component's `make` function.
# * You can change option parameters from the gui or the script api.
# %metal_print How do I edit options? API or GUI
# You can now use the Metal GUI to edit, plot, and modify quantum components.
# Equivalently, you can also do everything from the Jupyter Notebooks/Python scripts (which call the Python API directly).
# The GUI is just calling the Python API for you.
#
# *You must use a string when setting options!
# +
# Change options (all option values are strings with units)
q1.options.pos_x = '0.5 mm'
q1.options.pos_y = '0.25 mm'
q1.options.pad_height = '225 um'
q1.options.pad_width = '250 um'
q1.options.pad_gap = '50 um'
# +
gui.rebuild() # Update the component geometry, since we changed the options
# Get a list of all the qcomponents in QDesign and then zoom on them.
all_component_names = design.components.keys()
gui.zoom_on_components(all_component_names)
# An alternate way to view within GUI. If want to try it, remove the "#" from the beginning of line.
#gui.autoscale() #resize GUI
# -
# ## Closing the Qiskit Metal GUI
gui.main_window.close()
| tutorials/1 High Level Demo/1.1 High Level Demo of Qiskit Metal.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="j8TIGsmtji-y"
# **Anomaly Infused Classification**
#
# This notebook tests an idea of mine: can you use anomaly detection to improve a classification? If this works (and it currently seems to, at ~2 sigma), you can improve any classification network — at least one that exploits some symmetry in the data (convolutions, GNNs), though I expect this to work without symmetry as well — without any other input.
#
# To do this, I use an anomaly detection algorithm I invented (and call oneoff networks), to define anomality for each point on the image. Afterwards I just add this anomality to the input of a convolutional network and compare the result to the same network trained the same way, but without a second input.
# + [markdown] id="ZzPU36WEDsqa"
# The first part is the comparison network, so just a copy of https://keras.io/examples/vision/mnist_convnet/
#
# I don't change much, as I guess you can assume that the usual Keras tutorial is well optimized. This suggests that the final result (the anomaly-infused network) is not as good as it could be.
# + id="HRhV-0piDPyl"
import numpy as np
from tensorflow import keras
from tensorflow.keras import layers
# + colab={"base_uri": "https://localhost:8080/"} id="OpkCj5gZDZxP" outputId="8ccab70d-4da5-4e42-9ea6-5b1560fff604"
# Model / data parameters
num_classes = 10
input_shape = (28, 28, 1)
# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
# Scale images to the [0, 1] range
x_train = x_train.astype("float32") / 255
x_test = x_test.astype("float32") / 255
# Make sure images have shape (28, 28, 1)
x_train = np.expand_dims(x_train, -1)
x_test = np.expand_dims(x_test, -1)
print("x_train shape:", x_train.shape)
print(x_train.shape[0], "train samples")
print(x_test.shape[0], "test samples")
# convert class vectors to binary class matrices (one-hot labels)
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
# + colab={"base_uri": "https://localhost:8080/"} id="BCqAHeY1DbwQ" outputId="84c9446b-abbc-4a46-a2a4-f0e081ba5537"
# Baseline CNN (a copy of the keras mnist_convnet example) used as the
# comparison network.
model = keras.Sequential(
    [
        keras.Input(shape=input_shape),
        layers.Conv2D(32, kernel_size=(3, 3), activation="relu"),
        layers.MaxPooling2D(pool_size=(2, 2)),
        layers.Conv2D(64, kernel_size=(3, 3), activation="relu"),
        layers.MaxPooling2D(pool_size=(2, 2)),
        layers.Flatten(),
        layers.Dropout(0.5),
        layers.Dense(num_classes, activation="softmax"),
    ]
)
model.summary()
# + colab={"base_uri": "https://localhost:8080/"} id="csFK8F1yDnFR" outputId="b7cb24e2-3b9d-41b0-bdf0-4b91b9afe068"
batch_size = 128
epochs = 15
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
# hn keeps the training history for the later accuracy-comparison plots
hn=model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, validation_split=0.1)
# + colab={"base_uri": "https://localhost:8080/"} id="7v2bZwLMDoZ9" outputId="5af717de-f5fd-42b0-ad79-2f3ad3405315"
score = model.evaluate(x_test, y_test, verbose=0)
print("Test loss:", score[0])
print("Test accuracy:", score[1])
# + [markdown] id="85VmYiomnEyZ"
# **now for the oneoff network**
# here I train an image to image network resulting in 1 while removing biases
# + colab={"base_uri": "https://localhost:8080/"} id="cguVrdVrEtLl" outputId="9628395d-8efa-4eef-a741-935125c56b56"
# "Oneoff" anomaly network: an image-to-image fully-convolutional model with
# all biases removed, trained to output 1 everywhere. Deviations from 1 on
# unseen pixels are later used as a per-pixel anomaly score.
modeloo = keras.Sequential(
    [
        keras.Input(shape=input_shape),
        layers.Conv2D(32, kernel_size=(7, 7), activation="relu",padding="same",use_bias=False),
        #layers.MaxPooling2D(pool_size=(2, 2)),
        layers.Conv2D(64, kernel_size=(4, 4), activation="relu",padding="same",use_bias=False),
        layers.Conv2D(16, kernel_size=(4, 4), activation="relu",padding="same",use_bias=False),
        layers.Conv2D(4, kernel_size=(2, 2), activation="relu",padding="same",use_bias=False),
        #layers.MaxPooling2D(pool_size=(2, 2)),
        layers.Conv2D(1,kernel_size=(1,1),activation="linear",padding="same",use_bias=False),
    ]
)
modeloo.summary()
# + colab={"base_uri": "https://localhost:8080/"} id="9qbpq3qAEfy9" outputId="06f370af-c7ec-4c08-af4d-ccc757d50ce1"
batch_size = 128
epochs = 5
# Regress every output pixel toward the constant target 1.
modeloo.compile(loss="mse", optimizer="adam", metrics=[])
modeloo.fit(x_train, np.ones_like(x_train), batch_size=batch_size, epochs=epochs, validation_split=0.1)
# + colab={"base_uri": "https://localhost:8080/"} id="I_w-qbSGGL-e" outputId="adf8075f-f224-4944-e54a-e2c007ab838c"
# Raw oneoff outputs for train (oo) and test (oot) images.
oo=modeloo.predict(x_train)
oot=modeloo.predict(x_test)
print(oo.shape)
of=oo.flatten()
print(of.shape)
# + colab={"base_uri": "https://localhost:8080/", "height": 276} id="S4yiye27P2te" outputId="df86a59c-717d-4b8b-f7a9-84ba0e72d5c0"
import matplotlib.pyplot as plt
# Distribution of the oneoff outputs (the training target was 1 everywhere).
plt.hist(of,bins=50)
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="oIi8y6fMP5LC" outputId="970d5bca-c851-4ba5-eb49-1414b0f7b58f"
# Absolute deviation from the target; log y-scale to expose the tail.
delta=np.abs(of-1)
plt.hist(delta,bins=50)
# NOTE(review): the `nonposy` keyword was removed in matplotlib 3.3; newer
# versions require plt.yscale("log", nonpositive="clip") — confirm the
# pinned matplotlib version.
plt.yscale("log",nonposy="clip")
plt.show()
# + colab={"base_uri": "https://localhost:8080/"} id="YYJxudsaU5Tu" outputId="77548fca-417c-4de1-baad-6b00a4f13d81"
#some statistics processing
print(np.mean(delta))
print(np.std(delta))
print(np.max(delta))
# + id="1Oyv0dMLVNzf"
# Train-set deviation statistics used to standardize the anomaly score.
stat_mean=np.mean(delta)
stat_sigma=np.std(delta)
# + id="X70ied0ZVXHj"
def corr(q):
    # Map a raw oneoff output q to an exponential anomaly score, using the
    # train-set deviation statistics computed above.
    return np.exp(0.5*(np.abs(q-1)-stat_mean)/(stat_sigma))
# + colab={"base_uri": "https://localhost:8080/"} id="QsAExHvZVY3Q" outputId="9b241e94-82cd-4763-cab1-badda766b435"
# Sanity check: score of the largest observed deviation vs. a perfect output.
print(corr(np.max(delta+1)))
print(corr(1))
# + id="fn63Zy7CV0oG"
oc=corr(oo)    # per-pixel anomaly scores, train images
oct=corr(oot)  # per-pixel anomaly scores, test images (name shadows builtin oct())
# The test scores are centered with the TRAIN mean (np.mean(oc) is read
# before oc itself is centered below)...
oct-=np.mean(oc)
# ...but scaled by their OWN std, while the train scores use their own std.
# NOTE(review): scaling both with the train-set std would be the usual
# train-statistics convention — confirm whether this asymmetry is intended.
oct/=np.std(oct)
oc-=np.mean(oc)
oc/=np.std(oc)
# + colab={"base_uri": "https://localhost:8080/"} id="g6elf9A8V-ML" outputId="c29122e5-504c-414a-9a44-00d1cc968233"
print(oc.shape)
print(x_train.shape)
# + colab={"base_uri": "https://localhost:8080/"} id="O3TTcv4iiVWQ" outputId="4d90a0ce-583f-4b90-8771-47f1951eb2ec"
print(np.mean(oct))
print(np.std(oct))
# + [markdown] id="9xykqKsXngPG"
# **Now let's use this for anomaly infused training**
# + colab={"base_uri": "https://localhost:8080/", "height": 274} id="dqqzki9PWRPm" outputId="04bbc924-377d-4f1c-9d66-60faadcb8bf8"
# at first concat this as a new input channel (appended on the last axis)
cc=np.concatenate((x_train,oc),axis=-1)
print(cc.shape)
cct=np.concatenate((x_test,oct),axis=-1)
print(cct.shape)
# + colab={"base_uri": "https://localhost:8080/"} id="VbF-NpuJWrOp" outputId="97291914-412b-4bcd-db16-26735e170b64"
# CNN classifier fed the augmented inputs — presumably mirrors the baseline
# model trained earlier (its history appears below as ``hn``); confirm
# against the cells outside this excerpt.
modelc = keras.Sequential(
    [
        keras.Input(shape=cc.shape[1:]),
        layers.Conv2D(32, kernel_size=(3, 3), activation="relu"),
        layers.MaxPooling2D(pool_size=(2, 2)),
        layers.Conv2D(64, kernel_size=(3, 3), activation="relu"),
        layers.MaxPooling2D(pool_size=(2, 2)),
        layers.Flatten(),
        layers.Dropout(0.5),
        layers.Dense(num_classes, activation="softmax"),
    ]
)
modelc.summary()
# + colab={"base_uri": "https://localhost:8080/"} id="uaOldK9NWtZM" outputId="14450636-8a05-4ec0-dc74-01e6ce3d9ecc"
batch_size = 128
epochs = 15
modelc.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
hc=modelc.fit(cc, y_train, batch_size=batch_size, epochs=epochs, validation_split=0.1)
# + colab={"base_uri": "https://localhost:8080/"} id="LuBanpTYi7EZ" outputId="e306edb0-c144-43dc-bfad-7d122511f488"
scorec = modelc.evaluate(cct, y_test, verbose=0)
print("Test loss:", scorec[0])
print("Test accuracy:", scorec[1])
# + colab={"base_uri": "https://localhost:8080/"} id="TldueAMWW_u8" outputId="24f98653-0d1c-4e10-d4b1-958e632015ea"
acn=hn.history["accuracy"]
acc=hc.history["accuracy"]
print(acn,acc)
# + [markdown] id="qqb-cr0WoCgo"
# sadly the images are not very decisive
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="jdtYHyAzZbLF" outputId="b9a6062a-94ca-4682-d23c-c1024d168ece"
plt.plot(acn,label="normal training")
plt.plot(acc,label="anomaly infused")
plt.legend()
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="-tw2kRLehPU8" outputId="df631bc5-4eca-45b7-f780-4c893f08c9c5"
plt.plot([a-b for a,b in zip(acn,acc)])
plt.show()
# + colab={"base_uri": "https://localhost:8080/"} id="2VhVx8uMhfh2" outputId="5bd353e4-f1e1-4437-dd20-4fbddad5d99c"
lon=hn.history["loss"]
loc=hc.history["loss"]
print(lon,loc)
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="w1l5-xpBjRAT" outputId="a4fdb009-4a4c-49ce-b108-02eb32420315"
plt.plot(lon,label="normal training",alpha=0.5)
plt.plot(loc,label="anomaly infused",alpha=0.5)
plt.legend()
plt.yscale("log")
plt.show()
# + [markdown] id="OsUAr3tJjlPF"
# Quick compare:
# + colab={"base_uri": "https://localhost:8080/"} id="1qPlvXi2jZKd" outputId="9e554998-c5bc-4e62-9ece-9d86a25b2d85"
print("without",score)
print("with ",scorec)
# + colab={"base_uri": "https://localhost:8080/"} id="6cTJQ33Ojs-L" outputId="364cb873-9ca0-4496-cdd8-9dc81ff6278f"
print("both should be positive")
print("loss",score[0]-scorec[0])
print("acc",scorec[1]-score[1])
# + [markdown] id="0VY2FOf19IM_"
# Rerunning results in
#
# + [markdown] id="pHf-9pmp9LtX"
# both should be positive
# loss -0.0016080308705568314
# acc 0.00010001659393310547
# + [markdown] id="RhtvLoLl9L4c"
# both should be positive
# loss 0.00017721019685268402
# acc -0.000299990177154541
# + [markdown] id="sYrmvr669L8h"
# both should be positive
# loss 0.005704481154680252
# acc 0.00279998779296875
# + [markdown] id="f885N--P9MAI"
# both should be positive
# loss 0.0008180048316717148
# acc 0.000599980354309082
# + [markdown] id="zJ91-dbH9MD1"
# both should be positive
# loss 0.0008532833307981491
# acc -0.00010001659393310547
# + [markdown] id="A2whCbd91wNQ"
# both should be positive
# loss 0.0019453037530183792
# acc 0.0006000399589538574
# + [markdown] id="EsRaxCPi1wa5"
# both should be positive
# loss 0.005755022168159485
# acc 0.002200007438659668
# + [markdown] id="D9E4VA3j1wxW"
# both should be positive
# loss 3.5393983125686646e-05
# acc 0.00019997358322143555
# + [markdown] id="yFBwkbs81w9Q"
# both should be positive
# loss -0.0015629995614290237
# acc -0.000800013542175293
# + [markdown] id="ES0_a85x6l5H"
# 14/20 >0, sigma~2, so about 2 sigma significance
# + [markdown] id="dTIMsxVDhKsF"
# Having done this quick and dirty, let's do this more professionally:
# + id="A2gxseLm17Ga"
# Final test losses from repeated runs:
# column 0 = without the anomaly channel, column 1 = with it.
ll=[[0.02173512801527977,0.0233431588858366],
    [0.023866165429353714,0.02368895523250103],
    [0.0278194360435009,0.022114954888820648],
    [0.023399805650115013,0.0225818008184433],
    [0.025487083941698074,0.024633800610899925],
    [0.02620108425617218,0.0242557805031538],
    [0.027982046827673912,0.022227024659514427],
    [0.026208026334643364,0.026172632351517677],
    [0.023979149758815765,0.02554214932024479],
    [0.026425588876008987,0.022080114111304283]]
# + [markdown] id="8-HSfcPKidJr"
# I look only at the losses, because they are less random. First column: without, second column: with
# + colab={"base_uri": "https://localhost:8080/"} id="fvZ3047miZqX" outputId="9d8071b6-3c95-43ed-d2a0-0c0893d78722"
fr=len([1 for zw in ll if zw[0]>zw[1]])/len(ll)
print("fraction of improvements",fr)
# + id="JMIeivrbiwLA"
ln=[zw[0] for zw in ll]
lc=[zw[1] for zw in ll]
import numpy as np
mn=np.mean(ln)
mc=np.mean(lc)
sn=np.std(ln)/np.sqrt(len(ln))
sc=np.std(lc)/np.sqrt(len(lc))
# + colab={"base_uri": "https://localhost:8080/"} id="t53Tp-qWjBGZ" outputId="47373d75-d4fe-435b-b611-0abcd2c7f5db"
diff=mn-mc
sigma=np.sqrt(sn**2+sc**2)
print("Difference",diff)
print("With error",sigma)
print("This means a significance of")
print(diff/sigma)
# + [markdown] id="Yp1aYikWjQe8"
# So still about a 2 sigma improvement
# + id="BKUm2tqBjNhk"
| main.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6
# language: python
# name: python3
# ---
# <img src="https://github.com/pmservice/ai-openscale-tutorials/raw/master/notebooks/images/banner.png" align="left" alt="banner">
# # Working with Watson OpenScale - Initial Setup
# ## The notebook will configure OpenScale to monitor a machine learning deployment.
# ### Contents
#
# - [1.0 Install Python Packages](#setup)
# - [2.0 Configure Credentials](#credentials)
# - [3.0 OpenScale configuration](#openscale)
# - [4.0 Create Datamart](#datamart)
# - [5.0 Bind Machine Learning engines](#bind)
# - [6.0 Check and setup subscriptions](#subscriptions)
# - [7.0 Score the model](#score)
# - [8.0 Store the variables](#store)
# # 1.0 Install Python Packages <a name=setup></a>
# +
# !rm -rf /home/spark/shared/user-libs/python3.6*
# !pip install --upgrade ibm-ai-openscale==2.2.1 --no-cache | tail -n 1
# !pip install --upgrade watson-machine-learning-client-V4==1.0.55 | tail -n 1
# -
# ### Action: restart the kernel!
# # 2.0 Configure Credentials <a name="credentials"></a>
import warnings
warnings.filterwarnings('ignore')
# ### The url for `WOS_CREDENTIALS` is the url of the Cloud Pak for Data cluster, i.e. `https://zen-cpd-zen.apps.com`. `username` and `password` are the credentials used to log in to the Cloud Pak for Data cluster.
# The "******" placeholders must be replaced before running the notebook.
WOS_CREDENTIALS = {
    "url": "https://******",
    "username": "******",
    "password": "******"
}
# WML on Cloud Pak for Data reuses the cluster credentials.
WML_CREDENTIALS = WOS_CREDENTIALS.copy()
WML_CREDENTIALS['instance_id']='openshift'
WML_CREDENTIALS['version']='2.5.0'
# The `DATABASE_CREDENTIALS` will be provided for you.
# +
DATABASE_CREDENTIALS = {
}
# -
SCHEMA_NAME = "NEWMANUALCONFIG"
# # 3.0 Configure OpenScale <a name="openscale"></a>
# The notebook will now import the necessary libraries and configure OpenScale
# +
# WML client: used below to list models/deployments and to score the model.
from watson_machine_learning_client import WatsonMachineLearningAPIClient
import json
wml_client = WatsonMachineLearningAPIClient(WML_CREDENTIALS)
# -
# OpenScale client: used for the datamart, bindings and subscriptions below.
from ibm_ai_openscale import APIClient4ICP
from ibm_ai_openscale.engines import *
from ibm_ai_openscale.utils import *
from ibm_ai_openscale.supporting_classes import PayloadRecord, Feature
from ibm_ai_openscale.supporting_classes.enums import *
ai_client = APIClient4ICP(WOS_CREDENTIALS)
ai_client.version
# # 4.0 Create datamart <a name="datamart"></a>
# ## 4.1 Set up datamart
# Watson OpenScale uses a database to store payload logs and calculated metrics. If an OpenScale datamart exists in Db2, the existing datamart will be used and no data will be overwritten.
#
# Prior instances of the Credit model will be removed from OpenScale monitoring.
# Reuse an existing datamart if one is configured; otherwise set one up in
# the external database.  Catch Exception rather than a bare ``except:``
# (which would also swallow KeyboardInterrupt/SystemExit), and fetch the
# details after setup so ``data_mart_details`` is defined on both paths
# (previously it was unbound — a NameError — on the setup path).
try:
    data_mart_details = ai_client.data_mart.get_details()
    print('Using existing external datamart')
except Exception:
    print('Setting up external datamart')
    ai_client.data_mart.setup(db_credentials=DATABASE_CREDENTIALS, schema=SCHEMA_NAME)
    data_mart_details = ai_client.data_mart.get_details()
data_mart_details
# ## 5.0 Bind machine learning engines <a name="bind"></a>
# Watson OpenScale needs to be bound to the Watson Machine Learning instance to capture payload data into and out of the model.
# ### Create the binding if it doesn't already exist.
# +
# Reuse the first existing WML binding if there is one; otherwise create it.
# The original indexed ['service_bindings'][0] unconditionally, which raises
# IndexError when no binding exists yet and made the None-check unreachable.
binding_uid = None
service_bindings = ai_client.data_mart.bindings.get_details()['service_bindings']
if service_bindings:
    binding_uid = service_bindings[0]['metadata']['guid']
if binding_uid is None:
    binding_uid = ai_client.data_mart.bindings.add('WML instance', WatsonMachineLearningInstance4ICP(wml_credentials=WML_CREDENTIALS))
bindings_details = ai_client.data_mart.bindings.get_details()
binding_uid
# -
# List the bindings and the deployable assets OpenScale can see through them.
ai_client.data_mart.bindings.list()
# ### 5.1 get list of assets
ai_client.data_mart.bindings.list_assets()
# ### 5.2 Action: Set the MODEL_NAME to your deployed mllib model below:
MODEL_NAME = "******"
ai_client.data_mart.bindings.get_details(binding_uid)
# ## 6.0 Subscriptions <a name="subscriptions"></a>
# ### Only if needed, remove existing credit risk subscriptions
# This code removes previous subscriptions to the Credit model to refresh the monitors with the new model and new data.
# This should not be needed and is only removed to cleanup a problem situation.
# +
# subscriptions_uids = ai_client.data_mart.subscriptions.get_uids()
# for subscription in subscriptions_uids:
# sub_name = ai_client.data_mart.subscriptions.get_details(subscription)['entity']['asset']['name']
# if sub_name == MODEL_NAME:
# ai_client.data_mart.subscriptions.delete(subscription)
# print('Deleted existing subscription for', MODEL_NAME)
# -
# ### 6.1 Get the list of deployment spaces and use the GUID to set the default_space
wml_client.spaces.list()
# ### 6.2 Action: We'll use the `GUID` for your Deployment space as listed above to replace `******` for the `default_space` below:
# Replace the asterisks with the space GUID only.  (The original placeholder
# contained a stray ')' which would corrupt the GUID after substitution.)
default_space = "******"
wml_client.set.default_space(default_space)
# +
# Find the stored model whose name matches MODEL_NAME and keep its GUID.
wml_models = wml_client.repository.get_model_details()
model_uid = None
for model_in in wml_models['resources']:
    if MODEL_NAME == model_in['entity']['name']:
        model_uid = model_in['metadata']['guid']
        break
print(model_uid)
# -
# ### 6.3 Action: Set the DEPLOYMENT_NAME
# Use the name of the Deployment that is associated with your machine learning model
wml_client.deployments.list()
DEPLOYMENT_NAME = "******"
# +
# Find the deployment whose name matches DEPLOYMENT_NAME and keep its GUID.
wml_deployments = wml_client.deployments.get_details()
deployment_uid = None
for deployment in wml_deployments['resources']:
    print(deployment['entity']['name'])
    if DEPLOYMENT_NAME == deployment['entity']['name']:
        deployment_uid = deployment['metadata']['guid']
        break
print(deployment_uid)
# -
# ## 6.3 This code creates the model subscription in OpenScale using the Python client API.
# > Note that we need to provide the model unique identifier,and some information about the model itself.
# ### Check to see if subscription already exists, and use it if it does
# Reuse an existing OpenScale subscription for MODEL_NAME if there is one.
subscription = None
if subscription is None:
    subscriptions_uids = ai_client.data_mart.subscriptions.get_uids()
    for sub in subscriptions_uids:
        if ai_client.data_mart.subscriptions.get_details(sub)['entity']['asset']['name'] == MODEL_NAME:
            print("Found existing subscription")
            subscription = ai_client.data_mart.subscriptions.get(sub)
if subscription is None:
    print("No subscription found. Run the cell below to add the subscription")
# ### If the subscription is not found, add it now
# Create the subscription if none was found, declaring the model's schema
# (label/prediction/probability columns and the feature/categorical columns).
if subscription is None:
    subscription = ai_client.data_mart.subscriptions.add(WatsonMachineLearningAsset(
        model_uid,
        problem_type=ProblemType.BINARY_CLASSIFICATION,
        input_data_type=InputDataType.STRUCTURED,
        label_column='Risk',
        prediction_column='predictedLabel',
        probability_column='probability',
        feature_columns = ["CheckingStatus","LoanDuration","CreditHistory","LoanPurpose","LoanAmount","ExistingSavings","EmploymentDuration",
                           "InstallmentPercent","Sex","OthersOnLoan","CurrentResidenceDuration","OwnsProperty","Age","InstallmentPlans","Housing",
                           "ExistingCreditsCount","Job","Dependents","Telephone","ForeignWorker"],
        categorical_columns = ["CheckingStatus","CreditHistory","LoanPurpose","ExistingSavings","EmploymentDuration","Sex","OthersOnLoan",
                               "OwnsProperty","InstallmentPlans","Housing","Job","Telephone","ForeignWorker"]
    ))
# Get subscription list
subscriptions_uids = ai_client.data_mart.subscriptions.get_uids()
ai_client.data_mart.subscriptions.list()
subscription_details = subscription.get_details()
# ### 7.0 Score the model so we can configure monitors <a name="score"></a>
# Now that the WML service has been bound and the subscription has been created, we need to send a request to the model before we configure OpenScale. This allows OpenScale to create a payload log in the datamart with the correct schema, so it can capture data coming into and out of the model. First, the code gets the model deployment's endpoint URL, and then sends a few records for predictions.
# +
# Look up the scoring endpoint for the deployment found above.
# Use an exact GUID comparison: the original substring test (`uid in guid`)
# could match the wrong deployment and raises TypeError if deployment_uid
# is still None.
credit_risk_scoring_endpoint = None
print(deployment_uid)
for deployment in wml_client.deployments.get_details()['resources']:
    if deployment['metadata']['guid'] == deployment_uid:
        credit_risk_scoring_endpoint = deployment['entity']['status']['online_url']['url']
print(credit_risk_scoring_endpoint)
# +
# Score a handful of sample credit-risk records so OpenScale can infer the
# payload schema from real traffic.
fields = ['CHECKINGSTATUS', 'LOANDURATION', 'CREDITHISTORY', 'LOANPURPOSE', 'LOANAMOUNT', 'EXISTINGSAVINGS', 'EMPLOYMENTDURATION',
          'INSTALLMENTPERCENT', 'SEX', 'OTHERSONLOAN', 'CURRENTRESIDENCEDURATION', 'OWNSPROPERTY', 'AGE', 'INSTALLMENTPLANS', 'HOUSING',
          'EXISTINGCREDITSCOUNT', 'JOB', 'DEPENDENTS', 'TELEPHONE', 'FOREIGNWORKER']
values = [
    ["no_checking",13,"credits_paid_to_date","car_new",1343,"100_to_500","1_to_4",2,"female","none",3,"savings_insurance",46,"none","own",2,"skilled",1,"none","yes"],
    ["no_checking",24,"prior_payments_delayed","furniture",4567,"500_to_1000","1_to_4",4,"male","none",4,"savings_insurance",36,"none","free",2,"management_self-employed",1,"none","yes"],
    ["0_to_200",26,"all_credits_paid_back","car_new",863,"less_100","less_1",2,"female","co-applicant",2,"real_estate",38,"none","own",1,"skilled",1,"none","yes"],
    ["0_to_200",14,"no_credits","car_new",2368,"less_100","1_to_4",3,"female","none",3,"real_estate",29,"none","own",1,"skilled",1,"none","yes"],
    ["0_to_200",4,"no_credits","car_new",250,"less_100","unemployed",2,"female","none",3,"real_estate",23,"none","rent",1,"management_self-employed",1,"none","yes"],
    ["no_checking",17,"credits_paid_to_date","car_new",832,"100_to_500","1_to_4",2,"male","none",2,"real_estate",42,"none","own",1,"skilled",1,"none","yes"],
    ["no_checking",33,"outstanding_credit","appliances",5696,"unknown","greater_7",4,"male","co-applicant",4,"unknown",54,"none","free",2,"skilled",1,"yes","yes"],
    ["0_to_200",13,"prior_payments_delayed","retraining",1375,"100_to_500","4_to_7",3,"male","none",3,"real_estate",37,"none","own",2,"management_self-employed",1,"none","yes"]
]
payload_scoring = {"fields": fields,"values": values}
payload = {
    wml_client.deployments.ScoringMetaNames.INPUT_DATA: [payload_scoring]
}
scoring_response = wml_client.deployments.score(deployment_uid, payload)
print('Single record scoring result:', '\n fields:', scoring_response['predictions'][0]['fields'], '\n values: ', scoring_response['predictions'][0]['values'][0])
# -
# ## 8.0 Store the variables <a name="store"></a>
# ### This will store the important variables for use in future notebooks
# +
# Persist the identifiers for the follow-on notebooks via IPython's
# %store magic (storemagic extension).
DEFAULT_SPACE = default_space
# %store MODEL_NAME
# %store DEPLOYMENT_NAME
# %store DEFAULT_SPACE
# %store model_uid
# %store binding_uid
| notebooks/openscale-initial-setup.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Data Sets used in this tutorial courtesy of UCI Machine Learning Repository
# Citation Request:
# We suggest the following pseudo-APA reference format for referring to this repository:
# <NAME>. and <NAME>. (2019). UCI Machine Learning Repository [http://archive.ics.uci.edu/ml]. Irvine, CA: University of California, School of Information and Computer Science.
#
# Dataset Found here: https://archive.ics.uci.edu/ml/datasets/Breast+Cancer+Wisconsin+(Diagnostic)
import pandas as pd
datapath = '../data_sets/'
sep = ','
### Download the Breast Cancer data set from: https://archive.ics.uci.edu/ml/datasets/Breast+Cancer+Wisconsin+(Diagnostic)
df = pd.read_csv(datapath+'breast_cancer.csv',sep=sep, index_col=None)
#df = pd.read_csv(datapath+'boston.csv',sep=sep, index_col=None)
# Shuffle rows deterministically before the positional split below.
df = df.sample(frac=1.0, random_state=0)
target = 'diagnosis'
print(df.shape)
df.head()
# 90/10 train/test split on the shuffled frame.
num = int(0.9*df.shape[0])
train = df[:num]
test = df[num:]
# Empty strings presumably tell Auto_ViML "no submission file / default
# metric" — confirm against the Auto_ViML documentation.
sample_submission=''
scoring_parameter = ''
from autoviml.Auto_ViML import Auto_ViML
#### If Boosting_Flag = True => XGBoost, False => ExtraTrees, None => Linear Model
# Returns: m (fitted model), feats (selected features), trainm/testm
# (transformed frames incl. prediction columns — used in the cells below).
m, feats, trainm, testm = Auto_ViML(train, target, test, sample_submission,
                                    scoring_parameter=scoring_parameter,
                                    hyper_param='GS',feature_reduction=True,
                                    Boosting_Flag=None,Binning_Flag=False,
                                    Add_Poly=0, Stacking_Flag=False,
                                    Imbalanced_Flag=False,
                                    verbose=1)
# + active=""
# #### Use this to Test Regression Problems Only #####
# import numpy as np
# def rmse(results, y_cv):
# return np.sqrt(np.mean((results - y_cv)**2, axis=0))
# from autoviml.Auto_ViML import print_regression_model_stats
# ####################
# modelname='Linear'
# print(rmse(test[target].values,testm[target+'_'+modelname+'_predictions'].values))
# print_regression_model_stats(test[target].values,testm[target+'_'+modelname+'_predictions'].values)
# -
######## Use this to Test Classification Problems Only ####
modelname='Linear'
def accu(results, y_cv):
    """Accuracy: fraction of entries in ``results`` equal to ``y_cv``."""
    hits = (results == y_cv).astype(int).sum(axis=0)
    return hits / y_cv.shape[0]
# Evaluate the 'Bagging' model's predictions when the target column is
# present in the test data.  Catch Exception (not a bare ``except:``, which
# would also trap KeyboardInterrupt) for the expected failure: the target or
# prediction column being absent.
from sklearn.metrics import classification_report, confusion_matrix
try:
    print('Test results since target variable is present in test data:')
    modelname = 'Bagging'
    y_true = test[target].values
    y_pred = testm[target+'_'+modelname+'_predictions'].values
    print(confusion_matrix(y_true, y_pred))
    print('\nTest Accuracy = %0.2f%%\n' %(100*accu(y_true, y_pred)))
    print(classification_report(y_true, y_pred))
except Exception:
    print('No target variable present in test data. No results')
| Auto_ViML_Demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # How to Build Transformer
# ## Generate Transformer from Method
# +
from __future__ import annotations
import numpy as np
from dtoolkit.transformer.factory import methodtf_factory
# -
# Generate a plus / minus constant transformer:
# +
def plus_constant(X: np.ndarray, constant: int | float) -> np.ndarray:
    """Add ``constant`` to every element of ``X`` and return the result."""
    shifted = X + constant
    return shifted
def minus_constant(X: np.ndarray, constant: int | float) -> np.ndarray:
    """Subtract ``constant`` from every element of ``X`` and return the result."""
    shifted = X - constant
    return shifted
# Build a transformer class from the forward/inverse method pair.
PlusTF = methodtf_factory(plus_constant, minus_constant)
# -
# Use this transformer:
# update_invargs() presumably stores the kwargs to pass to the inverse
# method — confirm in the dtoolkit documentation.
a = np.array([1, 2, 3])
tf = PlusTF(constant=1).update_invargs(constant=1)
tf.transform(a)
tf.inverse_transform(a)
# ## Build DataFrame Transformer
# Port `numpy`'s method to transformer.
# +
from dtoolkit.transformer import NumpyTF
class MyTF(NumpyTF):
    """Doc here"""
    # transform_method names the numpy routine this transformer wraps; the
    # string here is a placeholder to be replaced with a real method name.
    transform_method = staticmethod("numpy's inner method")
| doc/source/guide/build_transformer.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Extract and save bottleneck feature vectors with an example model
# Step 2 in
# 1. Generate Nosaic MNIST (`python make_nmnist.py`).
# 2. Extract and save features in TFRecords format (save_featureTFR_nmnist.ipynb).
# 3. Plot SAT curve (plot_SAT_curve.ipynb)
# +
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os, time
import PIL
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from datasets.data_processing import decode_nosaic_mnist, reshape_for_featext,\
normalize_images_nosaic_mnist, binarize_labels_nosaic_mnist,\
read_tfrecords_nosaic_mnist, decode_feat
from models.backbones_fe import ResNetModel, get_ressize_dependent_params
from models.losses import get_loss_fe
# +
# Utility functions
def set_gpu_devices(gpu):
    """Restrict TensorFlow to GPU index ``gpu`` and enable memory growth on it."""
    physical_devices = tf.config.experimental.list_physical_devices('GPU')
    assert len(physical_devices) > 0, "Not enough GPU hardware devices available"
    tf.config.experimental.set_visible_devices(physical_devices[gpu], 'GPU')
    tf.config.experimental.set_memory_growth(physical_devices[gpu], True)
tf.config.experimental.set_memory_growth(physical_devices[gpu], True)
def np_to_tfr_fe(x, y, writer):
    """Save a np.array to a tfrecord file. DO NOT FORGET writer.close().
    Args:
        x: data: np.ndarray, dtype=float32
        y: label: int, dtype=int64
        writer: tf.io.TFRecordWriter object. Don't forget writer.close()
    """
    def _bytes_feature(value):
        return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
    def _int64_feature(value):
        return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
    # Make an Example object that has one record of data.
    # ndarray.tostring() was deprecated in NumPy 1.19 and removed in 2.0;
    # tobytes() produces exactly the same byte string.
    example = tf.train.Example(features=tf.train.Features(feature={
        'video': _bytes_feature(x.tobytes()),
        'label': _int64_feature(y)
    }))
    # Serialize the example object and append it to the TFRecord file
    writer.write(example.SerializeToString())
def _read_tfrecords_nmnist(record_file_train, record_file_test, batch_size):
    """Reads TFRecord files and returns batched, parsed train/test datasets."""
    feature_spec = {
        'video': tf.io.FixedLenFeature([], tf.string),
        'label': tf.io.FixedLenFeature([], tf.int64)
    }
    def _build(record_file):
        # Parse each serialized Example and batch without dropping the tail.
        dataset = tf.data.TFRecordDataset(record_file)
        dataset = dataset.map(lambda proto: tf.io.parse_single_example(proto, feature_spec))
        return dataset.batch(batch_size, drop_remainder=False)
    return _build(record_file_train), _build(record_file_test)
def _checkpoint_logger(model, flag_resume, root_ckptlogs,
        subproject_name, exp_phase, comment, time_stamp, path_resume=None,
        max_to_keep=3, config_path=None):
    """Make ckpt and manager objects, and restore the latest checkpoint if necessary.
    Args:
        model: A tf.keras.Model object.
        flag_resume: A boolean. Whether to resume training from the latest ckpt.
        root_ckptlogs: A string. Used for path to ckpts.
        subproject_name: A string. Used for path to ckpts.
        comment: A string. Used for path to ckpts.
        time_stamp: A string. Used for path to ckpts.
        path_resume: A string or None. The path to ckpt logs to be resumed.
            path_resume is ignored if flag_resume=False.
        max_to_keep: An int. Set max_to_keep=0 or None to keep all the ckpts.
        config_path: A string.
    Returns:
        ckpt: tf.train.Checkpoint object.
        ckpt_manager: tf.train.CheckpointManager object.
    Remark:
        Path to checkpoint files is
        'root_ckptlogs'/'subproject_name'_'exp_phase'/'comment'_'time_stamp'/ckptXXX
    """
    # Naming rule
    dir_ckptlogs = "{}/{}_{}/{}_{}".format(
        root_ckptlogs, subproject_name, exp_phase, comment, time_stamp)
    # Create the directory new checkpoints are written to.  (The original
    # created ``path_resume`` here instead, which crashed when path_resume
    # was None and made the existence assert below pass vacuously.)
    if not os.path.exists(dir_ckptlogs):
        os.makedirs(dir_ckptlogs)
    # Create ckpt
    ckpt = tf.train.Checkpoint(net=model)
    # If resume
    if flag_resume:
        assert path_resume is not None and os.path.exists(path_resume), \
            "Not exist: path_ckpt = {}".format(path_resume)
        # Create ckpt and manager for restore
        ckpt_manager_restore = tf.train.CheckpointManager(
            ckpt, path_resume, max_to_keep=max_to_keep)
        # Restore the latest ckpt log.
        ckpt.restore(ckpt_manager_restore.latest_checkpoint)
        print("Restored from {}".format(ckpt_manager_restore.latest_checkpoint))
    # Create manager
    ckpt_manager = tf.train.CheckpointManager(
        ckpt, dir_ckptlogs, max_to_keep=max_to_keep)
    return ckpt, ckpt_manager
def ext_and_save(parsed_image_datasets, record_files, list_numdata):
    """Run the feature extractor over each dataset and write the bottleneck
    features (one TFRecord per input record) to the paired output file.

    Args:
        parsed_image_datasets: list of batched, parsed tf.data datasets.
        record_files: list of output TFRecord paths, parallel to the datasets.
        list_numdata: list of dataset sizes, used only for progress printing.

    Relies on module-level globals: ``model``, ``duration``, ``final_size``
    and ``batch_size`` (all defined in the parameter cells of this notebook).
    """
    # Extraction and save TFR
    global_iter = 0
    for parsed_image_dataset, record_file, num_data in zip(parsed_image_datasets, record_files, list_numdata):
        with tf.io.TFRecordWriter(record_file) as writer:
            # Start loop
            for iter_b, feats in enumerate(parsed_image_dataset):
                # 1. Decode features, normalize images, and binarize classification labels
                x_batch, y_batch = decode_nosaic_mnist(feats)
                y_batch = binarize_labels_nosaic_mnist(y_batch)
                iter_bs = y_batch.shape[0]
                labels_batch = np.int64(y_batch.numpy())
                x_batch, y_batch = reshape_for_featext(x_batch, y_batch, (28, 28, 1))
                # (bs*duration, 28,28,1), (bs*duration,)
                x_batch = normalize_images_nosaic_mnist(x_batch)
                # 2. Extract features (inference only: no weight decay, no grads)
                _, losses, _, feats_batch = get_loss_fe(
                    model,
                    x_batch,
                    y_batch,
                    training=False,
                    param_wd=None,
                    flag_wd=False,
                    calc_grad=False
                )
                # Reshape (batch, duration, final size)
                feats_batch = tf.reshape(feats_batch, (iter_bs, duration, final_size))
                feats_batch = np.float32(feats_batch.numpy())
                # 3. Save one TFRecord per (feature sequence, binary label) pair
                assert len(feats_batch) == len(labels_batch), "{}, {}".format(feats_batch.shape, labels_batch.shape)
                for feat, label in zip(feats_batch, labels_batch):
                    assert (label == 1) or (label == 0)
                    np_to_tfr_fe(x=feat, y=label, writer=writer)
                global_iter += 1
                # 4. Verbose
                if (iter_b+1) % 10 == 0:
                    print("")
                    print("Global Iter={:7d} Iter={:5d}/{:5d} xent loss={:7.5f}: writing {}"
                        .format(
                        global_iter,
                        iter_b + 1,
                        (num_data // batch_size) + 1 if num_data % batch_size != 0 else num_data // batch_size,
                        losses[1],
                        record_file))
            # Echo the last batch's shapes/labels as a sanity check
            print(feat.shape)
            print(labels_batch)
    print("Done")
# -
# # User Defined Parameters
# User defined
tfr_train = './data-directory/nosaic_mnist_train.tfrecords' # NMNIST data
tfr_test = './data-directory/nosaic_mnist_test.tfrecords' # NMNIST data
tfr_feat_train = './data-directory/nosaic_mnist_feat_train.tfrecords' # extracted features to be saved here
tfr_feat_test = './data-directory/nosaic_mnist_feat_test.tfrecords' # extracted features to be saved here
batch_size = 50 # 64
gpu = 0 # GPU number
# Refuse to overwrite existing outputs.  (Messages previously lacked the
# leading space and rendered as ".tfrecordsexists.")
assert not os.path.exists(tfr_feat_train), tfr_feat_train + " exists. Remove or rename."
assert not os.path.exists(tfr_feat_test), tfr_feat_test + " exists. Remove or rename."
# # Start Extraction
# Fixed parameters
duration = 20  # time steps per sample (used in the (batch, duration, feat) reshape)
path_resume = "./example_ckpts/feature_extractor/ResNetv1"  # pretrained extractor ckpts
resnet_size = 110
resnet_version = 1
nb_cls = 2  # binary labels produced by binarize_labels_nosaic_mnist
final_size = 128  # bottleneck feature dimension
root_ckptlogs = "./tmp"
subproject_name = "_"
exp_phase = "_"
comment = "_"
set_gpu_devices(gpu) # GPU number
# +
# Read Nosaic MNIST
# Make sure to run ./make_nmnist.py in advance
# Builds batched train/test tf.data pipelines from the raw NMNIST TFRecords.
parsed_image_dataset_train, parsed_image_dataset_test = _read_tfrecords_nmnist(
    record_file_train=tfr_train,
    record_file_test=tfr_test,
    batch_size=batch_size)
print(parsed_image_dataset_train)
print(parsed_image_dataset_test)
# +
# Load model
# Instantiate the ResNet feature extractor with size-dependent hyperparameters.
dict_resparams = get_ressize_dependent_params(resnet_version, resnet_size)
model = ResNetModel(
    resnet_size=resnet_size,
    bottleneck=dict_resparams["bottleneck"],
    num_classes=nb_cls,
    kernel_size=dict_resparams["kernel_size"],
    conv_stride=dict_resparams["conv_stride"],
    first_pool_size=dict_resparams["first_pool_size"],
    first_pool_stride=dict_resparams["first_pool_stride"],
    block_sizes=dict_resparams["block_sizes"],
    block_strides=dict_resparams["block_strides"],
    final_size=final_size,
    resnet_version=resnet_version,
    data_format='channels_last',
    dtype=tf.float32
)
# Checkpoint
# Restore pretrained weights from path_resume (flag_resume=True).
now = "0"
_, ckpt_manager = _checkpoint_logger(
    model,
    True,
    root_ckptlogs,
    subproject_name,
    exp_phase,
    comment,
    now,
    path_resume)
# -
# test data
# +
# Feature Extraction and Save TFR
parsed_image_datasets = [parsed_image_dataset_test]
record_files = [tfr_feat_test]
list_numdata = [10000]  # used only for progress printing
ext_and_save(parsed_image_datasets, record_files, list_numdata)
# -
# training data (takes a little long time)
# +
# Feature Extraction and Save TFR
parsed_image_datasets = [parsed_image_dataset_train]
record_files = [tfr_feat_train]
list_numdata = [60000] # = train 50000 + valid 10000
ext_and_save(parsed_image_datasets, record_files, list_numdata)
# -
# ### <font color=red>Extraction done. See ./data-directory</font>
# # Appendix: Read Feature TFR
# Runs without error only after saving `tfr_feat_train` and `tfr_feat_test`.
import time
# +
# Read Feature TFR
feat_dim = final_size
bs_tmp = 1
dtype_feat = tf.float32
dtype_label = tf.int32
pid_tr, pid_val, pid_test =\
    read_tfrecords_nosaic_mnist(tfr_feat_train, tfr_feat_test, bs_tmp)
def _scan_split(pid, split_name):
    """Decode every record of one parsed dataset and report how many there are.

    Replaces three copy-pasted scan loops; also avoids the original's
    NameError on an empty dataset (final ``i+1`` after a loop that never ran).
    """
    count = 0
    for i, features in enumerate(pid):
        vb, lb = decode_feat(features, duration, feat_dim, dtype_feat=tf.float32, dtype_label=tf.int32)
        if (i+1) % 100 == 0:
            print("iter {}".format(i+1))
        count = i+1
    print("=================================\ntotal num of {} datapoints: ".format(split_name), count)
# Show
_scan_split(pid_val, "validation")
_scan_split(pid_test, "test")
_scan_split(pid_tr, "training")
| save_featureTFR_nmnist.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Chapter 3 - Review of Algebraic Operations and the Properties of Real Numbers
# This chapter summarizes properties of real numbers like equivalence relations, sum and product operators, and ordered domains. These are treated as axioms in the scope of this book and not repeated here.
| Geometry/Euclid/Chapter03_AlgebraReview.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # RNN Character Model + Lots More
#
# This example trains an RNN to create plausible words from a corpus, and it includes lots of interesting "bells and whistles".
#
# The data used for training is one of :
# * a vocabulary/dictionary collected from the 1-Billion-Word Corpus
# * a list of Indian names (voters rolls, by year) : TODO
#
#
# Adversarial networks : http://carpedm20.github.io/faces/
#
# Doing this with RNNs may be pretty novel : https://www.quora.com/Can-generative-adversarial-networks-be-used-in-sequential-data-in-recurrent-neural-networks-How-effective-would-they-be
# +
import numpy as np
import theano
import lasagne
#from lasagne.utils import floatX
import pickle
import gzip
import random
import time
# Words longer than this are dropped so the RNN roll-out stays bounded.
WORD_LENGTH_MAX = 16
# +
# Load an interesting corpus (vocabulary words with frequencies from 1-billion-word-corpus).
# Open in text mode ('rt'): the default 'rb' yields bytes entries, and the
# str regex applied to them below would raise TypeError on Python 3.
with gzip.open('../data/RNN/ALL_1-vocab.txt.gz', 'rt') as f:
    lines = [ l.strip().lower().split() for l in f.readlines() ]
lines[0:10]
# -
# Here are our characters : '[a-z\- ]'
import re
invalid_chars = r'[^a-z\- ]'
# Keep only vocabulary entries made purely of lowercase letters, '-' and ' '.
lines_valid = [ l for l in lines if not re.search(invalid_chars, l[0]) ]
#lines_valid = lines_valid[0:50000]
lines_valid[0:10], len(lines_valid)
# /usr/share/dict/linux.words
with open('/usr/share/dict/linux.words','rt') as f:
    linux_words = [ l.strip() for l in f.readlines() ]
linux_wordset = set(linux_words)
#'united' in wordset
lines_filtered = [l for l in lines_valid
                  if len(l[0])>=3 # Require each word to have 3 or more characters
                  and l[0] in linux_wordset # Require each word to be found in regular dictionary
                  and len(l[0])<WORD_LENGTH_MAX # And limit length (to avoid crazy roll-out of RNN)
                 ]
lines_filtered[0:10], len(lines_filtered)
# +
# Split apart the words and their frequencies (Assume these are in sorted order, at least initial few)
words = [ l[0] for l in lines_filtered ]
wordset = set(words)
wordsnp = np.array(words)
freqs_raw = np.array( [ int(l[1]) for l in lines_filtered ] )
freq_tot = float(freqs_raw.sum())
# Frequency weighting adjustments
freqs = freqs_raw / freq_tot
cutoff_index = 30 # All words with highter frequencies will be 'limited' at this level
freqs[0:cutoff_index] = freqs[cutoff_index]
freqs = freqs / freqs.sum()
freqs[0:50]
# -
test_cum = np.array( [.1, .5, .9, 1.0] )
test_cum.searchsorted([ .05, 0.45, .9, .95])
# Cumulative frequency, so that we can efficiently pick weighted random words...
# using http://docs.scipy.org/doc/numpy/reference/generated/numpy.searchsorted.html
freqs_cum = freqs.cumsum()
freqs_cum[:10], freqs_cum[-10:],
# ### Network Parameters from Corpus
# Find the set of characters used in the corpus and construct mappings between characters, integer indices, and one hot encodings
# +
CHARS_VALID = "abcdefghijklmnopqrstuvwxyz- "
CHARS_SIZE = len(CHARS_VALID)
CHAR_TO_IX = {c: i for i, c in enumerate(CHARS_VALID)}
IX_TO_CHAR = {i: c for i, c in enumerate(CHARS_VALID)}
CHAR_TO_ONEHOT = {c: np.eye(CHARS_SIZE)[i] for i, c in enumerate(CHARS_VALID)}
#CHAR_TO_IX
# -
# ### Unigram frequency distribution
# Single letter frequencies
unigram_freq = np.zeros( (CHARS_SIZE,))
idx_end = CHAR_TO_IX[' ']
for i,w in enumerate(words):
word_freq = freqs[i]
for c in w:
unigram_freq[ CHAR_TO_IX[c] ] += word_freq
unigram_freq[ idx_end ] += word_freq
unigram_freq /= unigram_freq.sum()
unigram_freq_cum = unigram_freq.cumsum()
[ (CHARS_VALID[i], "%6.3f" % f) for i,f in enumerate(unigram_freq.tolist()) ]
#CHARS_VALID[ unigram_freq_cum.searchsorted(0.20) ]
def unigram_word():
    """Draw a pseudo-word character-by-character from the unigram distribution.

    Each character is chosen by inverse-CDF lookup into `unigram_freq_cum`.
    A sampled space terminates the word, but leading spaces are ignored,
    so the returned word is always non-empty.
    """
    chars = []
    finished = False
    while not finished:
        draw = np.random.uniform()
        picked = IX_TO_CHAR[np.searchsorted(unigram_freq_cum, draw)]
        if picked == ' ':
            # A space only ends the word once at least one character exists
            finished = len(chars) > 0
        else:
            chars.append(picked)
    return ''.join(chars)
' '.join([ unigram_word() for i in range(0,20) ])
# ### Bigram frequency distribution
# two-letter frequencies
bigram_freq = np.zeros( (CHARS_SIZE,CHARS_SIZE) )
for i,w in enumerate(words):
w2 = ' '+w+' '
word_freq = freqs[i]
for j in range(0, len(w2)-1):
bigram_freq[ CHAR_TO_IX[ w2[j] ], CHAR_TO_IX[ w2[j+1] ] ] += word_freq
#[ (CHARS_VALID[i], "%6.3f" % f) for i,f in enumerate(bigram_freq[ CHAR_TO_IX['q'] ].tolist()) ]
#bigram_freq.sum(axis=1)[CHAR_TO_IX['q']]
bigram_freq /= bigram_freq.sum(axis=1)[:, np.newaxis] # Trick to enable unflattening of sum()
bigram_freq_cum = bigram_freq.cumsum(axis=1)
#[ (CHARS_VALID[i], "%6.3f" % f) for i,f in enumerate(bigram_freq_cum[ CHAR_TO_IX['q'] ].tolist()) ]
#bigram_freq.sum(axis=1)[CHAR_TO_IX['q']]
#(bigram_freq/ bigram_freq.sum(axis=1)).sum(axis=0)
#bigram_freq.sum(axis=1)[CHAR_TO_IX['q']]
#bigram_freq[CHAR_TO_IX['q'], :].sum()
#(bigram_freq / bigram_freq.sum(axis=1)[:, np.newaxis]).cumsum(axis=1)
#Letter relative frequency for letters following 'q'
[ (CHARS_VALID[i], "%6.3f" % f) for i,f in enumerate(bigram_freq[ CHAR_TO_IX['q'] ].tolist()) if f>0.001]
#bigram_freq_cum[4]
def bigram_word():
    """Draw a pseudo-word using first-order (bigram) character transitions.

    Each character is sampled conditional on the previous one via
    `bigram_freq_cum`.  The chain starts in the 'space' state; a sampled
    space ends a non-empty word (leading spaces are skipped, leaving the
    history in the 'space' state).
    """
    chars = []
    prev_idx = CHAR_TO_IX[' ']
    while True:
        nxt = np.searchsorted(bigram_freq_cum[prev_idx], np.random.uniform())
        if IX_TO_CHAR[nxt] == ' ':
            if chars:
                break
            continue  # ignore a leading space; history stays at 'space'
        chars.append(IX_TO_CHAR[nxt])
        prev_idx = nxt
    return ''.join(chars)
' '.join([ bigram_word() for i in range(0,20) ])
# ### Trigram frequency distribution
# Three-letter frequencies
trigram_freq = np.zeros( (CHARS_SIZE,CHARS_SIZE,CHARS_SIZE) )
for i,w in enumerate(words):
w3 = ' '+w+' '
word_freq = freqs[i]
for j in range(0, len(w3)-2):
trigram_freq[ CHAR_TO_IX[ w3[j] ], CHAR_TO_IX[ w3[j+1] ], CHAR_TO_IX[ w3[j+2] ] ] += word_freq
trigram_freq /= trigram_freq.sum(axis=2)[:, :, np.newaxis] # Trick to enable unflattening of sum()
trigram_freq_cum = trigram_freq.cumsum(axis=2)
[ "ex-%s %6.3f" % (CHARS_VALID[i], f)
for i,f in enumerate(trigram_freq[ CHAR_TO_IX['e'], CHAR_TO_IX['x'] ].tolist()) if f>0.001 ]
def trigram_word():
    """Draw a pseudo-word using second-order (trigram) character transitions.

    Each character is sampled conditional on the previous two via
    `trigram_freq_cum`.  Both history slots start as 'space'; a sampled
    space ends a non-empty word (leading spaces are skipped without
    advancing the history).
    """
    chars = []
    hist_a = hist_b = CHAR_TO_IX[' ']
    while True:
        nxt = np.searchsorted(trigram_freq_cum[hist_a, hist_b], np.random.uniform())
        if IX_TO_CHAR[nxt] == ' ':
            if chars:
                break
            continue  # ignore a leading space; history is unchanged
        chars.append(IX_TO_CHAR[nxt])
        hist_a, hist_b = hist_b, nxt
    return ''.join(chars)
' '.join([ trigram_word() for i in range(0,20) ])
# ### Generate base-line scores
sample_size=10000
ngram_hits = [0,0,0]
for w in [ unigram_word() for i in range(0, sample_size) ]:
if w in wordset: ngram_hits[0] += 1
#print("%s %s" % (("YES" if w in wordset else " - "), w, ))
for w in [ bigram_word() for i in range(0, sample_size) ]:
if w in wordset: ngram_hits[1] += 1
#print("%s %s" % (("YES" if w in wordset else " - "), w, ))
for w in [ trigram_word() for i in range(0, sample_size) ]:
if w in wordset: ngram_hits[2] += 1
#print("%s %s" % (("YES" if w in wordset else " - "), w, ))
for i,hits in enumerate(ngram_hits):
print("%d-gram : %4.2f%%" % (i+1, hits*100./sample_size ))
#[ (i,w) for i,w in enumerate(words) if 'mq' in w]
# Find the distribution of unigrams by sampling (sanity check)
if False:
sample_size=1000
arr=[]
for w in [ unigram_word() for i in range(0, sample_size) ]:
arr.append(w)
s = ' '.join(arr)
s_len = len(s)
for c in CHARS_VALID:
f = len(s.split(c))-1
print("%s -> %6.3f%%" % (c, f*100./s_len))
# ### RNN Main Parameters
BATCH_SIZE = 64
RNN_HIDDEN_SIZE = CHARS_SIZE
GRAD_CLIP_BOUND = 5.0
# ## An RNN 'discriminator'
#
# Instead of having a binary 'YES/NO' decision about whether a word is valid (via a lookup in the vocabulary), it may make it simpler to train a word-generator if we can assign a probability that a given word is valid.
#
# To do this, let's create a recurrent neural network (RNN) that accepts a (one-hot-encoded) word as input, and (at the end of the sequence) gives us an estimate of the probability that the word is valid.
#
# Actually, rather than discriminate according to whether the word is *actually* valid, let's 'just' try to decide whether it was produced directly from the dictionary or from the ```bigram_word()``` source.
#
# This can be tested by giving it lists of actual words, and lists of words generated by ```bigram_word()``` and seeing whether they can be correctly classified.
#
# The decision about what to do in the 12% of cases when the bigram function results in a valid word can be left until later... (since the distribution is so heavily skewed towards producing non-words).
#
# ### Create Training / Testing dataset
# And a 'batch generator' function that delivers data in the right format for RNN training
# +
def batch_dictionary(size=BATCH_SIZE//2):
    """Sample `size` real words from the corpus, weighted by (clipped) frequency.

    NOTE: the default uses integer division — under Python 3, BATCH_SIZE/2 is a
    float, which is rejected by both np.random.uniform(size=...) and range().
    """
    # Inverse-CDF sampling: uniform draws mapped through the cumulative frequencies
    uniform_vars = np.random.uniform( size=(size,) )
    idx = freqs_cum.searchsorted(uniform_vars)
    return wordsnp[ idx ].tolist()

def batch_bigram(size=BATCH_SIZE//2):
    """Generate `size` pseudo-words from the bigram model, truncated to WORD_LENGTH_MAX."""
    return [ bigram_word()[0:WORD_LENGTH_MAX] for i in range(size) ]
# -
# Test it out
#batch_test = lambda : batch_dictionary(size=4)
batch_test = lambda : batch_bigram(size=4)
print(batch_test())
print(batch_test())
print(batch_test())
# #### Lasagne RNN tutorial (including conventions & rationale)
#
# * http://colinraffel.com/talks/hammer2015recurrent.pdf
#
# #### Lasagne Examples
#
# * https://github.com/Lasagne/Lasagne/blob/master/lasagne/layers/recurrent.py
# * https://github.com/Lasagne/Recipes/blob/master/examples/lstm_text_generation.py
#
# #### Good blog post series
#
# * http://www.wildml.com/2015/10/recurrent-neural-network-tutorial-part-4-implementing-a-grulstm-rnn-with-python-and-theano/
# After sampling a data batch, we transform it into a one hot feature representation with a mask
def prep_batch_for_network(batch_of_words):
    """Convert a list of words into (one-hot input, mask) arrays for the RNN.

    Returns:
      input_values : float32 array (batch, max_len, CHARS_SIZE), one-hot characters
      mask_values  : int32   array (batch, max_len), 1 where a real character exists
    """
    longest = max(len(w) for w in batch_of_words)
    n_words = len(batch_of_words)
    input_values = np.zeros((n_words, longest, CHARS_SIZE), dtype='float32')
    mask_values = np.zeros((n_words, longest), dtype='int32')
    for row, word in enumerate(batch_of_words):
        for col, ch in enumerate(word):
            input_values[row, col] = CHAR_TO_ONEHOT[ch]
        mask_values[row, :len(word)] = 1
    return input_values, mask_values
# ### Define the Descriminating Network Symbolically
# +
# Symbolic variables for input. In addition to the usual features and target,
# we need initial values for the RNN layer's hidden states
disc_input_sym = theano.tensor.tensor3()
disc_mask_sym = theano.tensor.imatrix()
disc_target_sym = theano.tensor.matrix() # probabilities of being from the dictionary (i.e. a single column matrix)
# +
# Our network: a single GRU layer over the input sequence, then a dense sigmoid decoder.
disc_input = lasagne.layers.InputLayer( (None, None, CHARS_SIZE) ) # batch_size, sequence_len, chars_size
# NOTE: the mask marks valid *timesteps*, not characters, so it is 2-D — the matching
# symbolic variable disc_mask_sym is an imatrix.  The original declared shape here was
# (None, None, CHARS_SIZE), which mis-stated the mask's rank.
disc_mask = lasagne.layers.InputLayer( (None, None) ) # batch_size, sequence_len
disc_rnn1 = lasagne.layers.GRULayer(disc_input,
                    num_units=RNN_HIDDEN_SIZE,
                    gradient_steps=-1,                # full backprop-through-time
                    grad_clipping=GRAD_CLIP_BOUND,
                    hid_init=lasagne.init.Normal(),
                    learn_init=True,                  # the initial hidden state is trained too
                    mask_input=disc_mask,
                    only_return_final=True, # Only the state at the last timestep is needed
                )
# One sigmoid unit: probability that the word came from the dictionary
disc_decoder = lasagne.layers.DenseLayer(disc_rnn1,
                    num_units=1,
                    nonlinearity=lasagne.nonlinearities.sigmoid
                )
disc_final = disc_decoder
# -
# Finally, the output stage
disc_output = lasagne.layers.get_output(disc_final, {
disc_input: disc_input_sym,
disc_mask: disc_mask_sym,
}
)
# ### Loss Function for Training
disc_loss = theano.tensor.nnet.binary_crossentropy(disc_output, disc_target_sym).mean()
# ### ... and the Training and Prediction functions
# +
# For stability during training, gradients are clipped and a total gradient norm constraint is also used
#MAX_GRAD_NORM = 15
disc_params = lasagne.layers.get_all_params(disc_final, trainable=True)
disc_grads = theano.tensor.grad(disc_loss, disc_params)
#disc_grads = [theano.tensor.clip(g, -GRAD_CLIP_BOUND, GRAD_CLIP_BOUND) for g in disc_grads]
#disc_grads, disc_norm = lasagne.updates.total_norm_constraint( disc_grads, MAX_GRAD_NORM, return_norm=True)
disc_updates = lasagne.updates.adam(disc_grads, disc_params)
disc_train = theano.function([disc_input_sym, disc_target_sym, disc_mask_sym], # , disc_rnn1_t0_sym
[disc_loss], # , disc_output, norm, hid_out_last, hid2_out_last
updates=disc_updates,
)
disc_predict = theano.function([disc_input_sym, disc_mask_sym], [disc_output])
print("Discriminator network functions defined")
# -
# ### Finally, the Discriminator Training Loop
#
# * Training takes a while :: 1000 iteration takes about 20 seconds on a CPU
# * ... you may want to skip this and the next cell, and load the pretrained weights instead
t0, iterations_complete = time.time(), 0
# +
epochs = 10*1000
t1, iterations_recent = time.time(), iterations_complete
for epoch_i in range(epochs):
    # create a batch of words : half are dictionary, half are from bigram
    batch_of_words = batch_dictionary() + batch_bigram()
    # get the one-hot input values and corresponding mask matrix
    disc_input_values, disc_mask_values = prep_batch_for_network(batch_of_words)
    # and here are the associated target values
    disc_target_values= np.zeros((len(batch_of_words),1), dtype='float32')
    # NOTE: integer division — BATCH_SIZE/2 is a float under Python 3 and is not a valid slice index
    disc_target_values[ 0:(BATCH_SIZE//2), 0 ] = 1.0 # First half are dictionary values
    for i, word in enumerate(batch_of_words):
        # Second half comes from the bigram generator; credit it when it happens to produce
        # a real word.  NOTE: '>=' — the first bigram word sits at index BATCH_SIZE//2
        # exactly, which the previous 'i > BATCH_SIZE/2' test skipped.
        if i >= BATCH_SIZE//2 and word in wordset:
            disc_target_values[ i , 0 ] = 1.0 # bigram has hit a dictionary word by luck...
    # Now train the discriminator RNN
    disc_loss_, = disc_train(disc_input_values, disc_target_values, disc_mask_values)
    #disc_output_, = disc_predict(disc_input_values, disc_mask_values)
    iterations_complete += 1
    if iterations_complete % 250 == 0:
        secs_per_batch = float(time.time() - t1)/ (iterations_complete - iterations_recent)
        eta_in_secs = secs_per_batch*(epochs-epoch_i)
        print("Iteration {:5d}, loss_train: {:.4f} ({:.1f}s per 1000 batches) eta: {:.0f}m{:02.0f}s".format(
            iterations_complete, float(disc_loss_),
            secs_per_batch*1000., np.floor(eta_in_secs/60), np.floor(eta_in_secs % 60)
            ))
        #print('Iteration {}, output: {}'.format(iteration, disc_output_, )) # , output: {}
        t1, iterations_recent = time.time(), iterations_complete
print('Iteration {}, ran in {:.1f}sec'.format(iterations_complete, float(time.time() - t0)))
# -
# ### Save the learned parameters
#
# Uncomment the ```pickle.dump()``` to actually save to disk
disc_param_values = lasagne.layers.get_all_param_values(disc_final)
disc_param_dictionary = dict(
params = disc_param_values,
CHARS_VALID = CHARS_VALID,
CHAR_TO_IX = CHAR_TO_IX,
IX_TO_CHAR = IX_TO_CHAR,
)
#pickle.dump(disc_param_dictionary, open('../data/RNN/disc_trained.pkl','w'), protocol=pickle.HIGHEST_PROTOCOL)
# ### Load pretrained weights into network
# Load the pretrained discriminator parameters.
# NOTE: pickle files must be opened in *binary* mode ('rb') — text mode ('r') raises
# UnicodeDecodeError under Python 3.  Using 'with' also closes the file handle,
# which the original open(...) call leaked.
with open('../data/RNN/disc_trained_64x310k.pkl', 'rb') as f:
    disc_param_dictionary = pickle.load(f)
lasagne.layers.set_all_param_values(disc_final, disc_param_dictionary['params'])
# ### Check that the Discriminator Network 'works'
test_text_list = ["shape", "shast", "shaes", "shafg", "shaqw"]
test_text_list = ["opposite", "aposite", "apposite", "xposite", "rrwqsite", "deposit", "idilic", "idyllic"]
# +
disc_input_values, disc_mask_values = prep_batch_for_network(test_text_list)
disc_output_, = disc_predict(disc_input_values, disc_mask_values)
for i,v in enumerate(disc_output_.tolist()):
print("%s : %5.2f%%" % ((test_text_list[i]+' '*20)[:20], v[0]*100.))
# -
# ## Create a Generative network
#
# Next, let's build an RNN that produces text, and train it using (a) a pure dictionary look-up, and (b) the correctness signal from the Discriminator above.
#
# Plan of attack :
#
# * Create a GRU that outputs a character probability distribution for every time step
# * Run the RNN several times :
# * each time is an additional character input longer
# * with the next character chosen according to the probability distribution given
# * and then re-run with the current input words (up to that point)
# * Stop adding characters when they've all reached 'space'
#
# This seems very inefficient (since the first RNN steps are being run multiple times on the same starting letters), but is the same as in https://github.com/Lasagne/Recipes/blob/master/examples/lstm_text_generation.py
# Let's pre-calculate the logs of the bigram frequencies, since they may be mixed in below
bigram_min_freq = 1e-10 # To prevent underflow in log...
bigram_freq_log = np.log( bigram_freq + bigram_min_freq ).astype('float32')
# +
# Symbolic variables for input. In addition to the usual features and target,
gen_input_sym = theano.tensor.ftensor3()
gen_mask_sym = theano.tensor.imatrix()
gen_words_target_sym = theano.tensor.imatrix() # characters generated (as character indicies)
# probabilities of being from the dictionary (i.e. a single column matrix)
gen_valid_target_sym = theano.tensor.fmatrix( )
# This is a single mixing parameter (0.0 = pure RNN, 1.0=pure Bigram)
gen_bigram_overlay = theano.tensor.fscalar()
# This is 'current' since it reflects the bigram field as far as it is known during the call
gen_bigram_freq_log_field = theano.tensor.ftensor3()
# +
gen_input = lasagne.layers.InputLayer( (None, None, CHARS_SIZE) ) # batch_size, sequence_len, chars_size
gen_mask = lasagne.layers.InputLayer( (None, None, CHARS_SIZE) ) # batch_size, sequence_len, chars_size
#gen_rnn1_t0 = lasagne.layers.InputLayer( (None, RNN_HIDDEN_SIZE) ) # batch_size, RNN_hidden_size=chars_size
#n_batch, n_time_steps, n_features = gen_input.input_var.shape
n_batch, n_time_steps, n_features = gen_input_sym.shape
gen_rnn1 = lasagne.layers.GRULayer(gen_input,
num_units=RNN_HIDDEN_SIZE,
gradient_steps=-1,
grad_clipping=GRAD_CLIP_BOUND,
#hid_init=disc_rnn1_t0,
hid_init=lasagne.init.Normal(),
learn_init=True,
mask_input=gen_mask,
only_return_final=False, # Need all of the output states
)
# Before the decoder layer, we need to reshape the sequence into the batch dimension,
# so that timesteps are decoded independently.
gen_reshape = lasagne.layers.ReshapeLayer(gen_rnn1, (-1, RNN_HIDDEN_SIZE) )
gen_prob_raw = lasagne.layers.DenseLayer(gen_reshape,
num_units=CHARS_SIZE,
nonlinearity=lasagne.nonlinearities.linear # No squashing (yet)
)
gen_prob = lasagne.layers.ReshapeLayer(gen_prob_raw, (-1, n_time_steps, CHARS_SIZE))
gen_prob_theano = lasagne.layers.get_output(gen_prob, {
gen_input: gen_input_sym,
gen_mask: gen_mask_sym,
})
gen_prob_mix = gen_bigram_overlay*gen_bigram_freq_log_field + (1.0-gen_bigram_overlay)*gen_prob_theano
gen_prob_mix_flattened = theano.tensor.reshape(gen_prob_mix, (-1, CHARS_SIZE))
gen_prob_softmax_flattened = theano.tensor.nnet.nnet.softmax(gen_prob_mix_flattened)
#gen_prob_final = lasagne.layers.SliceLayer(gen_prob_raw, indices=(-1), axis=1)
# +
# Finally, the output stage - this is for the training (over all the letters in the words)
#gen_output = gen_prob_softmax_flattened
# And for prediction (which is done incrementally, adding one letter at a time)
gen_output_last = gen_prob_softmax_flattened.reshape( (-1, n_time_steps, CHARS_SIZE) )[:, -1]
# +
# The generative network is trained by encouraging the outputs across time to match the given sequence of letters
# We flatten the sequence into the batch dimension before calculating the loss
#def gen_word_cross_ent(net_output, targets):
# preds_raw = theano.tensor.reshape(net_output, (-1, CHARS_SIZE))
# preds_softmax = theano.tensor.nnet.nnet.softmax(preds_raw)
# targets_flat = theano.tensor.flatten(targets)
# cost = theano.tensor.nnet.categorical_crossentropy(preds_softmax, targets_flat)
# return cost
targets_flat = theano.tensor.flatten(gen_words_target_sym)
gen_cross_entropy_flat = theano.tensor.nnet.categorical_crossentropy(gen_prob_softmax_flattened, targets_flat)
gen_cross_entropy = theano.tensor.reshape(gen_cross_entropy_flat, (-1, n_time_steps) )
gen_loss_weighted = theano.tensor.dot( gen_valid_target_sym.T, gen_cross_entropy )
gen_loss = gen_loss_weighted.mean()
# +
# For stability during training, gradients are clipped and a total gradient norm constraint is also used
#MAX_GRAD_NORM = 15
gen_predict = theano.function([gen_input_sym,
gen_bigram_overlay, gen_bigram_freq_log_field,
gen_mask_sym], [gen_output_last])
gen_params = lasagne.layers.get_all_params(gen_prob, trainable=True)
gen_grads = theano.tensor.grad(gen_loss, gen_params)
#gen_grads = [theano.tensor.clip(g, -GRAD_CLIP_BOUND, GRAD_CLIP_BOUND) for g in gen_grads]
#gen_grads, gen_norm = lasagne.updates.total_norm_constraint( gen_grads, MAX_GRAD_NORM, return_norm=True)
gen_updates = lasagne.updates.adam(gen_grads, gen_params)
gen_train = theano.function([gen_input_sym,
gen_bigram_overlay, gen_bigram_freq_log_field,
gen_words_target_sym, gen_valid_target_sym,
gen_mask_sym],
[gen_loss],
updates=gen_updates,
)
gen_debug = theano.function([gen_input_sym,
gen_bigram_overlay, gen_bigram_freq_log_field,
gen_words_target_sym, gen_valid_target_sym,
gen_mask_sym],
[gen_cross_entropy],
on_unused_input='ignore'
)
print("Generator network functions defined")
# -
# ### Use the Generative Network to create sample words
#
# The network above can be used to generate text...
#
# The following set-up allows for the output of the RNN at each timestep to be mixed with the letter frequency that the bigram model would suggest - in a proportion ```bigram_overlay``` which can vary from ```0``` (being solely RNN derived) to ```1.0``` (being solely bigram frequencies, with the RNN output being disregarded).
#
# The input is a 'random field' matrix that is used to chose each letter in each slot from the generated probability distribution.
#
# Once a space is output for a specific word, then it stops being extended (equivalently, the mask is set to zero going forwards).
#
# Once spaces have been observed for all words (or the maximum length reached), the process ends, and a list of the created words is returned.
# +
def generate_rnn_words(random_field, bigram_overlay=0.0):
    """Roll out the generator RNN to produce one batch of pseudo-words.

    random_field   : (batch_size, max_word_length) uniform randoms; entry [i, col]
                     selects word i's character at position col by inverse-CDF
                     sampling from the (mixed) output distribution.
    bigram_overlay : mixing weight, 0.0 = pure RNN output, 1.0 = pure bigram model.

    Returns (list of generated words, the bigram log-frequency field actually used,
    trimmed to the number of columns generated).  Each iteration re-runs the RNN on
    the words-so-far (inefficient but simple — see the notebook text above).
    """
    batch_size, max_word_length = random_field.shape
    idx_spc = CHAR_TO_IX[' ']
    def append_indices_as_chars(words_current, idx_list):
        # Extend each word in-place by its sampled character; a space index leaves it unchanged
        for i, idx in enumerate(idx_list):
            if idx == idx_spc:
                pass # Words end at space
                #words_current[i] += 'x'
            else:
                words_current[i] += IX_TO_CHAR[idx]
        return words_current
    # Create a 'first character' by using the bigram transitions from 'space' (this is fair)
    idx_initial = [ np.searchsorted(bigram_freq_cum[idx_spc], random_field[i, 0]) for i in range(batch_size) ]
    bigram_freq_log_field = np.zeros( (batch_size, max_word_length, CHARS_SIZE), dtype='float32')
    bigram_freq_log_field[:,0] = bigram_freq_log[ np.array(idx_initial) , :]
    words_current = [ '' for _ in range(batch_size) ]
    words_current = append_indices_as_chars(words_current, idx_initial)
    col = 1
    while True:
        gen_input_values, gen_mask_values = prep_batch_for_network(words_current)
        #print(gen_mask_values[:,-1])
        #gen_out_, = gen_predict(gen_input_values, gen_mask_values)
        if gen_input_values.shape[1]<col: # Early termination
            # No word grew last round, so the batch is finished before max length
            print("Early termination")
            col -= 1
            break
        #print(gen_input_values.shape, gen_mask_values.shape, bigram_freq_log_field.shape, col)
        probs, = gen_predict(gen_input_values, bigram_overlay, bigram_freq_log_field[:,0:col], gen_mask_values)
        #print(probs[0])
        # This output is the final probability[CHARS_SIZE], so let's cumsum it, etc.
        probs_cum = probs.cumsum(axis=1)
        idx_next = [ # Only add extra letters if we haven't already passed a space (i.e. mask[-1]==0)
            idx_spc if gen_mask_values[i,-1]==0 else np.searchsorted(probs_cum[i], random_field[i, col])
            for i in range(batch_size)
        ]
        words_current = append_indices_as_chars(words_current, idx_next)
        words_current_max_length = np.array( [ len(w) for w in words_current ]).max()
        # If the words have reached the maximum length, or we didn't extend any of them...
        if words_current_max_length>=max_word_length: # Finished
            col += 1
            break
        # Guarded against overflow on length...
        bigram_freq_log_field[:, col] = bigram_freq_log[ np.array(idx_next) , :]
        col += 1
    return words_current, bigram_freq_log_field[:,0:col]
def view_rnn_generator_sample_output(bigram_overlay=0.9):
    """Generate one batch of words from the current generator and print them,
    one per line, for visual inspection of quality."""
    # Create a probability distribution across all potential positions in the output 'field'
    random_field = np.random.uniform( size=(BATCH_SIZE, WORD_LENGTH_MAX) )
    gen_words_output, _underlying_bigram_field = generate_rnn_words(random_field, bigram_overlay=bigram_overlay)
    print( '\n'.join(gen_words_output))
    #print(_underlying_bigram_field)
# -
view_rnn_generator_sample_output(bigram_overlay=0.0)
view_rnn_generator_sample_output(bigram_overlay=0.9)
# #### Remember the initial (random) Network State
#
# This will come in handy when we need to reset the network back to 'untrained' later.
gen_param_values_initial = lasagne.layers.get_all_param_values(gen_prob)
# ### Now, train the Generator RNN based on the Dictionary itself
#
# Once we have an output word, let's reward the RNN based on a specific training signal. We'll encapsulate the training in a function that takes the input signal as a parameter, so that we can try other training schemes (later).
# +
def is_good_output_dictionary(output_words):
    """Score each generated word: 1.0 if it is a real dictionary word, else 0.0."""
    scores = [float(word in wordset) for word in output_words]
    return np.array(scores, dtype='float32')
# Global training counters: wall-clock start time and number of batches run so far
t0, iterations_complete = time.time(), 0
def reset_generative_network():
    """Reset the timing counters and restore the generator to its initial random weights."""
    global t0, iterations_complete
    t0, iterations_complete = time.time(), 0
    lasagne.layers.set_all_param_values(gen_prob, gen_param_values_initial)
def prep_batch_for_network_output(mask_values, batch_of_words):
    """Build next-character target indices for training.

    Each word is shifted left by one position and padded with a trailing space,
    so position j's target is the character at j+1.  The result has the same
    shape as `mask_values` and dtype int32.
    """
    output_indices = np.zeros(mask_values.shape, dtype='int32')
    for row, word in enumerate(batch_of_words):
        shifted = word[1:] + ' '
        for col, ch in enumerate(shifted):
            output_indices[row, col] = CHAR_TO_IX[ch]
    return output_indices
# -
def train_generative_network(is_good_output_function=is_good_output_dictionary, epochs=10*1000, bigram_overlay=0.0):
    """Train the generator RNN using a per-word 'goodness' signal.

    is_good_output_function : maps a list of generated words to per-word scores
                              in [0,1] (dictionary lookup or discriminator net).
    epochs                  : number of training batches to run.
    bigram_overlay          : mixing weight forwarded to generate_rnn_words();
                              must be < 1.0, or the RNN receives no gradient.
    """
    if bigram_overlay>=1.0:
        print("Cannot train with pure bigrams...")
        return
    global t0, iterations_complete
    t1, iterations_recent = time.time(), iterations_complete
    for epoch_i in range(epochs):
        # Sample a batch of words from the current generator state
        random_field = np.random.uniform( size=(BATCH_SIZE, WORD_LENGTH_MAX) )
        gen_words_output, underlying_bigram_field = generate_rnn_words(random_field, bigram_overlay=bigram_overlay)
        #print(gen_words_output[0])
        #print(underlying_bigram_field[0])
        # Now, create a training set of input -> output, coupled with an intensity signal
        # first the step-by-step network inputs
        gen_input_values, gen_mask_values = prep_batch_for_network(gen_words_output)
        # now create step-by-step network outputs (strip off first character, add spaces) as *indicies*
        gen_output_values_int = prep_batch_for_network_output(gen_mask_values, gen_words_output)
        #print(gen_output_values_int.shape, underlying_bigram_field.shape)
        #print(gen_output_values_int[0]) # makes sense
        # And, since we have a set of words, we can also determine their 'goodness'
        is_good_output = is_good_output_function(gen_words_output)
        #print(is_good_output[0]) Starts at all zero. i.e. the word[0] is bad
        # This looks like it is the wrong way 'round...
        # NOTE(review): the sign of the weighting is under experimentation here —
        # the alternatives below are deliberately kept for reference.
        target_valid_row = -(np.array(is_good_output) - 0.5)
        ## i.e. higher values for more-correct symbols : This goes -ve, and wrong, quickly
        #target_valid_row = (np.array(is_good_output) - 0.5)
        #target_valid_row = np.ones( (gen_mask_values.shape[0],), dtype='float32' )
        target_valid = target_valid_row[:, np.newaxis]
        #print(target_valid.shape)
        if False:
            # Now debug the generator RNN
            gen_debug_, = gen_debug(gen_input_values,
                            bigram_overlay, underlying_bigram_field,
                            gen_output_values_int, target_valid,
                            gen_mask_values)
            print(gen_debug_.shape)
            print(gen_debug_[0])
            #return
        # Now train the generator RNN
        gen_loss_, = gen_train( gen_input_values,
                        bigram_overlay, underlying_bigram_field,
                        gen_output_values_int, target_valid,
                        gen_mask_values)
        #print(gen_loss_)
        # Hmm - this loss is ~ a character-level loss, and isn't comparable to a word-level score,
        # which is a pity, since the 'words' seem to get worse, not better...
        iterations_complete += 1
        if iterations_complete % 10 == 0:
            secs_per_batch = float(time.time() - t1)/ (iterations_complete - iterations_recent)
            eta_in_secs = secs_per_batch*(epochs-epoch_i)
            print("Iteration {:5d}, loss_train: {:.2f} word-score: {:.2f}% ({:.1f}s per 1000 batches) eta: {:.0f}m{:02.0f}s".format(
                iterations_complete, float(gen_loss_),
                float(is_good_output.mean())*100.,
                secs_per_batch*1000., np.floor(eta_in_secs/60), np.floor(eta_in_secs % 60), )
            )
            print( ' '.join(gen_words_output[:10]) )
            #print('Iteration {}, output: {}'.format(iteration, disc_output_, )) # , output: {}
            t1, iterations_recent = time.time(), iterations_complete
    print('Iteration {}, ran in {:.1f}sec'.format(iterations_complete, float(time.time() - t0)))
#theano.config.exception_verbosity='high' # ... a little pointless with RNNs
# See: http://deeplearning.net/software/theano/tutorial/debug_faq.html
reset_generative_network()
train_generative_network(is_good_output_function=is_good_output_dictionary, epochs=1*1000, bigram_overlay=0.9)
# ### How are we doing?
view_rnn_generator_sample_output(bigram_overlay=0.9)
# ## Use training signal from Discriminator
# +
#def is_good_output_dictionary(output_words):
# return np.array(
# [ (1.0 if w in wordset else 0.0) for w in output_words ],
# dtype='float32'
# )
def is_good_output_discriminator(output_words):
    """Score generated words with the trained discriminator network.

    Returns a flat float array: the network's probability that each word is
    'real' (dictionary-sourced rather than bigram-generated).
    """
    inputs, masks = prep_batch_for_network(output_words)
    predictions, = disc_predict(inputs, masks)
    return predictions.reshape( (-1,) )
# -
reset_generative_network()
train_generative_network(is_good_output_function=is_good_output_discriminator, epochs=1*1000, bigram_overlay=0.9)
#train_generative_network(is_good_output_function=is_good_output_dictionary, epochs=1*1000, bigram_overlay=0.9)
# ### How are we doing?
view_rnn_generator_sample_output(bigram_overlay=0.9)
# #### Hmmmm
# Exercises
# =====
#
# 1. Make the above work...
# 2. Try the Indian Names Corpus
| notebooks/work-in-progress/WIP_9-RNN-Fun.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## 第13讲 认识和绘制数轴
# ### Problem 问题描述
# 在长宽分别为600和400像素的绘图区域绘制如下图所示的一条标有刻度、水平方向的带箭头指示方向的数轴。其中数轴的左右两端距离绘图区域左右边界均为20像素,相邻刻度的距离为50像素,刻度线的长度为20像素,表示刻度线数值的数字在刻度线的正下方且底端距离数轴20个像素。
#
# <img src="figures/L013_axis.png" width="600px"/>
# ### Math Background 数学背景
#
# 1. 数轴的构成
# 2. 数轴原点,正负数在数轴轴上的位置比较
# 3. 每一个数在数轴上都有一个点相对应,两个数的差在数轴上表示的是这两个数对应的两个点之间的距离。
# + [markdown] heading_collapsed=true
# ### Prerequisites 预备知识
# -
# #### 1. `write`方法可以在绘图区书写字符串
from turtle import setup, reset, pu, pd, bye, left, right, fd, bk, screensize
from turtle import goto, seth, write, ht, st, home
width, height = 600, 400 # 窗口的宽度和高度(单位为:像素)
setup(width, height, 0, 0)
# 比较提起画笔和放下画笔时下面的代码执行的效果有什么不同
reset()
pu()
write("Sophie", move=True, align="center")
reset()
pd()
write("Tony", move=True, align="center", font=("Arial", 30, "normal"))
reset()
pd()
write("Sophie", move=False, align="center", font=("Arial", 30, "normal"))
# #### 2. `tuple`元组数据类型
#
# pos_x 是一个tuple类型的变量
pos_x = (30, 20) # pos_x 是一个tuple类型的变量
# 可以使用索引来获取tuple类型变量的元素
print(pos_x[0], pos_x[1]) # 可以使用索引来获取tuple类型变量的元素
# 可以使用len()方法来获取tuple类型数据的元素个数
len(pos_x) # 可以使用len()方法来获取tuple类型数据的元素个数
# 不可以更改tuple类型变量里某一个元素的值。例如执行下面的代码将发生错误
# ```python
# pos_x[0] = 40
# ```
#
# ```text
# ---------------------------------------------------------------------------
# TypeError Traceback (most recent call last)
# <ipython-input-24-d852e9299be9> in <module>
# ----> 1 pos_x[0] = 40
#
# TypeError: 'tuple' object does not support item assignment
# ```
# +
# pos_x[0] = 40 # 不可以更改tuple类型变量里某一个元素的值
# -
# 可以给整个tuple类型变量赋予一个新的tuple值
pos_x = (50, 30) # 可以给整个tuple类型变量赋予一个新的tuple值
# `tuple`型的变量里的元素的类型可以互不相同
sophie = ("Sophie", 11, "Female", "Grade4") # tuple型的变量里的元素的类型可以互不相同
print(sophie)
# #### 3. 理解同一个方法在接受不同的参数值时执行结果的比较
#
# 对比输出的文字和海龟位置,观察下面的两条`write`方法在接受不同的`align`值或时效果有什么不同。
reset()
pu()
write("Jason", align="left", font=("Arial", 30, "normal"))
reset()
pu()
write("Jason", align="center", font=("Arial", 30, "normal"))
# #### 4. 对比输出的文字,观察下面的几条`write`方法在接受不同的`font`值时效果有什么不同。
#
# 参数`font`是一个`tuple`类型的变量
reset()
pu()
write("Jason", font=("Arial", 30, "normal"))
reset()
pu()
write("Jason", font=("Arial", 50, "normal"))
reset()
pu()
write("Jason", font=("Times New Roman", 50, "normal"))
reset()
pu()
write("Jason", font=("Arial", 50, "italic"))
# no reset() here 这里没有reset()
pu()
bk(200) # 后退200
write("Jason", font=("Arial", 50, "underline"))
# #### 5. 区分`()`何时表示元组数据何时表示方法接受的参数
#
# 看`()`前面有没有紧跟一个方法名, 下面这行代码声明了一个元组型变量,变量名为`jason`。
#
# ```python
# jason = ("Jason", ("Arial", 50, "Italic")) #
# ```
#
# 下面这行代码是在执行一个名为`jason`的方法
# ```python
# jason("Jason", ("Arial", 50, "Italic"))
# ```
# #### 6. 练习
#
# 编写下面的代码,更改变量`name`的值为你的名字,观察代码执行的效果。
# 如果没有导入绘图库相关方法以及执行setup方法,请解除下面几行代码的注释
# from turtle import setup, reset, pu, pd, bye, left, right, fd, bk, screensize
# from turtle import goto, seth, write, ht, st, home
# width, height = 600, 400 # 窗口的宽度和高度(单位为:像素)
# setup(600, 400, 0, 0)
reset()
pu()
ht()
name = "Qiang"
text = "My name is {}.\nNice to meet you.".format(name)
write(text, align="center", font=("Arial", 30, "italic"))
# ### Solution 编程求解
from turtle import setup, reset, pu, pd, bye, left, right, fd, bk, screensize
from turtle import goto, seth, write, ht, st, home
width, height = 600, 400 # 窗口的宽度和高度(单位为:像素)
setup(width, height, 0, 0)
# +
origin = (0, 0) # 原点的位置
padding = 20 # 数轴端点距离绘图区边界距离
max_x = width/2 - padding # x轴最大值
show_arrow = True # 是否显示箭头
mark_interval = 50 # 刻度线间距
mark_line_length = 10 # 刻度线高度
text_offset = 20 # 坐标值距离坐标线的距离
mark_degree = 90 # 坐标刻度与坐标轴夹角
arrow_length = 100 # 箭头长度
arrow_degree = 30 # 箭头与坐标轴夹角
delta_x = 1 # 每次坐标值变化的幅度
# +
# Solution 1: relative movement only (fd/bk/left/right), no goto()
reset()  # clear the canvas; the turtle returns to the centre facing east
min_x = -1 * max_x  # the axis is symmetric: the left end mirrors the right end
pu()  # pen up: move without drawing
home()  # move the turtle to the origin (0, 0)
# and restore its default (east) heading
bk(max_x)  # back up to the left end of the axis
pd()  # pen down: start drawing
# draw the axis line and the marker lines in one left-to-right sweep
cur_x, last_x = min_x, min_x  # scan position, and where the last drawn segment ended
while cur_x <= max_x:  # sweep from the left end to the right end
    if cur_x % mark_interval == 0:  # a marker position every mark_interval pixels
        length_move = cur_x - last_x  # length of axis line still to draw
        pd()  # pen down
        fd(length_move)  # draw the axis segment up to this marker
        left(mark_degree)  # turn 90° left to face straight up
        fd(mark_line_length)  # draw the marker line
        pu()  # pen up
        bk(mark_line_length + text_offset)  # drop below the axis for the value text
        text = str(int(cur_x // mark_interval))
        # marker value as an integer string (pixel offset / interval)
        write(text, align="center")  # write the value centred under the marker
        fd(text_offset)  # climb back up onto the axis line
        right(mark_degree)  # turn 90° right to face east again
        last_x = cur_x  # remember where this segment ended, start of the next one
    cur_x += delta_x  # advance the scan position by delta_x pixels
pd()  # pen down
fd(max_x - last_x)  # draw the last segment, from the final marker to max_x
if show_arrow:  # optionally draw the arrow head at the right end
    right(arrow_degree)  # face right-and-down
    bk(arrow_length)  # draw the lower arrow stroke (moving backwards)
    fd(arrow_length)  # return to the tip at max_x
    left(arrow_degree * 2)  # face right-and-up
    bk(arrow_length)  # draw the upper arrow stroke
ht()  # hide the turtle cursor
# +
# Solution 2: absolute positioning with goto()
reset()  # clear the canvas and re-centre the turtle
min_x = -1 * max_x  # the left end mirrors the right end around the origin
# draw the axis line
pu()  # pen up: move without drawing
home()  # move the turtle to the origin (0, 0)
# and restore its default (east) heading
goto(min_x, 0)  # jump to the left end of the axis
pd()  # pen down: start drawing
goto(max_x, 0)  # draw the axis line to the right end
# draw the marker lines and their values
cur_x = min_x  # scan position, starts at the left end
while cur_x <= max_x:  # sweep from the left end to the right end
    if cur_x % mark_interval == 0:  # a marker position every mark_interval pixels
        pu()  # pen up
        goto(cur_x, 0)  # move onto the axis at the marker position
        pd()  # pen down
        goto(cur_x, mark_line_length)  # draw the marker line upwards
        pu()  # pen up
        goto(cur_x, -text_offset)  # drop below the axis for the value text
        pd()  # pen down
        text = str(int(cur_x//mark_interval))  # marker value as an integer string
        write(text, align="center")  # write the value centred under the marker
    cur_x += delta_x  # advance the scan position by delta_x pixels
if show_arrow:  # optionally draw the arrow head at the right end
    arrow_x, arrow_y = max_x - 10, -5  # common end point of the two arrow strokes
    pu()  # pen up
    goto(max_x, 0)  # tip of the arrow
    pd()  # pen down
    goto(arrow_x, arrow_y)  # draw the lower stroke
    pu()  # pen up
    goto(max_x, 0)  # back to the tip
    pd()  # pen down
    goto(arrow_x, -arrow_y)  # draw the upper stroke (mirrored)
ht()  # hide the turtle cursor
# -
reset()
if show_arrow: # if you need to draw arrows
arrow_x, arrow_y = max_x - 100, -50
pu() # pen up
goto(max_x, 0) # go to max_x for x and 0 for y
pd() # pen down
goto(arrow_x, arrow_y) # go to arrow_x for x and arrow_y for y
pu() # pen up
goto(max_x, 0) # go to max_x for x and 0 for y
pd() # pen down
goto(arrow_x, -arrow_y)# go to arrow_x for x and arrow_y for
goto(max_x, 0)
# +
if show_arrow: # 如果需要绘制箭头
right(arrow_degree) # 向右转,海龟朝向右侧偏下
bk(arrow_length) # 后退一定距离,绘制箭头一边
fd(arrow_length) # 回到max_x位置
left(arrow_degree * 2) # 向左转,海龟朝向右侧偏上
bk(arrow_length) # 后退一定距离,绘制箭头另一边
# longer
ht() # 隐藏海龟
# -
bye()
# ### Summary 知识点小结
# 1. turtle绘图库里的新方法`write`可以在绘图区海龟的当前位置书写文字;
# 2. 新的数据类型:`tuple`元组数据类型,它与`list`数据类型非常类似,但也有区别;
# 3. 在执行一个方法时,方法名后面的小括号`()`内可以接受一个或多个不同的数据,这些数据成为该方法可以接受的参数。方法接受的参数的值不一样,执行该方法最后得到的结果也通常不同;
# 4. 复习格式化字符串的`format`方法;
# 5. 复习`while`循环,并将`while`过程中循环应用到绘图过程中;
# 6. 复习操作符`//`和`%`。
# ### 计算机小知识
# 像素,字体`font`
# + [markdown] heading_collapsed=true
# ### Assignments 作业
# -
# 1. 仔细阅读本讲示例中给出的两种绘制坐标轴方法,回答下面的问题:
# Read carefully the two solutions demonstrated in the lecture, answer the following questions:
# 1. 给第二种方法中的每一行代码添加注释
# Add comments for every code line of the second solution to tell the meaning of each code line.
# 2. 比较并说出两种方法在绘制坐标轴的差别
# Compare the two solutions and tell the difference of them in drawing the axis.
# 3. 两种方法绘制出来的箭头一模一样吗?为什么?
#         Are the arrows drawn by the two solutions exactly the same? why?
# (B. the first draws a little part of the line and then it draws a mark line;
# the second draws the whole line first and then it goes back to draw the mark lines)
# (C. no, because the arrow drawn by the second solution is bigger.)
#
# + [markdown] hidden=true
# 2. 编程绘制如下图所示的水平坐标轴。所用的刻度间距、刻度线长度等排版指标均与本讲示例相同。其中,与本讲示例不同的是:
# By programming, draw horizontal axies as the following figure shows. Most of the parameters, including the marker interval, marker length, etc, have the same value as in the lecture. However, there are still some significant differences, which are:
# 1. 将表示0刻度坐标值的文字“0”的位置向右移动距离10,刻度线仍保持与相邻的刻度线等距离不变; Move the text "0", which indicating the value 0 on the axis, 10 pixels right to its original position. Keep the mark line where it is.
# 2. 在箭头的下方添加字母"x",字母"x"使用的字体是"Arial",字号大小为10,风格为“斜体”。Add a letter "x" under the arrow at the right end of the axis, use font "Arial", size 10, and "italic" to write the "x"
# 3. 当调整绘图区域的大小为宽为800像素时,你的代码应该仅需要更新绘图区的宽度而不改变其他地方就能直接调整数轴长度和刻度的显示。When the width of drawing area changed to 800 pixels from 600 pixels, your codes should only need to change the value of `width` while keep others unchanged to draw the axis with new length and markers.
#
# <img src="figures/L013_assignment1.png" />
#
# <img src="figures/L013_assignment1_2.png" />
# +
from turtle import setup, reset, pu, pd, bye, left, right, fd, bk, screensize
from turtle import goto, seth, write, ht, st, home, speed
width, height = 400, 500 # 窗口的宽度和高度(单位为:像素)
setup(width, height, 0, 0)
origin = (0, 0) # 原点的位置
padding = 20 # 数轴端点距离绘图区边界距离
max_x = width/2 - padding # x轴最大值
show_arrow = True # 是否显示箭头
mark_interval = 50 # 刻度线间距
mark_line_length = 10 # 刻度线高度
text_offset = 20 # 坐标值距离坐标线的距离
mark_degree = 90 # 坐标刻度与坐标轴夹角
arrow_length = 10 # 箭头长度
arrow_degree = 30 # 箭头与坐标轴夹角
delta_x = 1
origin = (0, 0) # 原点的位置
padding = 20 # 数轴端点距离绘图区边界距离
max_x = width/2 - padding # x轴最大值
show_arrow = True # 是否显示箭头
mark_interval = 50 # 刻度线间距
mark_line_length = 10 # 刻度线高度
text_offset = 20 # 坐标值距离坐标线的距离
mark_degree = 90 # 坐标刻度与坐标轴夹角
arrow_length = 10 # 箭头长度
arrow_degree = 30 # 箭头与坐标轴夹角
delta_x = 1 # 每次坐标值变化的幅度
# + hidden=true
# Assignment 2: horizontal axis where the "0" label is shifted 10 px to the
# right (its marker line stays put) and an italic "x" is written under the
# arrow head at the right end.
reset()  # clear the canvas once (a redundant second reset() was removed)
min_x = -1 * max_x  # the left end mirrors the right end around the origin
# draw the axis line
pu()  # pen up: move without drawing
home()  # move the turtle to the origin (0, 0) with its default (east) heading
goto(min_x, 0)  # jump to the left end of the axis
pd()  # pen down: start drawing
goto(max_x, 0)  # draw the axis line to the right end
# draw the marker lines and their values
cur_x = min_x  # scan position, starts at the left end
while cur_x <= max_x:
    if cur_x % mark_interval == 0:  # a marker position every mark_interval pixels
        pu()
        goto(cur_x, 0)  # move onto the axis at the marker position
        pd()
        goto(cur_x, mark_line_length)  # draw the marker line upwards
        pu()
        goto(cur_x, -text_offset)  # drop below the axis for the value text
        pd()
        if cur_x == 0:
            # requirement 1: shift only the "0" label 10 px right; the turtle
            # still faces east after goto(), so fd(10) moves right
            pu()
            fd(10)
            pd()
        text = str(int(cur_x // mark_interval))  # marker value as an integer string
        write(text, align="center")  # write the value centred under the marker
    cur_x += delta_x  # advance the scan position
if show_arrow:  # draw the arrow head at the right end
    arrow_x, arrow_y = max_x - 10, -5  # common end point of the two arrow strokes
    pu()
    goto(max_x, 0)  # tip of the arrow
    pd()
    goto(arrow_x, arrow_y)  # lower stroke
    pu()
    goto(max_x, 0)  # back to the tip
    pd()
    goto(arrow_x, -arrow_y)  # upper stroke (mirrored)
# requirement 2: write an italic "x" under the arrow
pu()
goto(max_x, 0)
right(90)  # face downward
fd(text_offset)  # drop below the axis
write("x", move=False, align="center", font=("Arial", 10, "italic"))
ht()  # hide the turtle cursor (a redundant second ht() was removed)
# -
st()
goto(arrow_x, -arrow_y) # go to arrow_x for x and arrow_y for y
# 3. 编程绘制一条如下图所示的垂直方向上的坐标轴。要求:By programming, draw an ertical axis as the following figure shows. Requirement:
#
# 1. 该图所是的坐标轴基本上是把水平方向的坐标轴围绕这坐标原点向左侧旋转90度得到;
# The axis can basically be considered as a 90 degree of anti-closewise rotation of the horizontal axis illustrated in the lecture with original zero point as the rotation center;
# 2. 大部分控制数轴风格的参数值与示例中的一样,下列除外:但是刻度线位于坐标轴的右侧,刻度值位于坐标轴的左侧。Most of the parameters controlling the style of the axis are same as introduced in the lecture, except: the marker lines are located on right side of the axis line, and the marker values are on the left side;
# 3. 隐藏表示0刻度坐标值的文字“0”以及对应的刻度线; Hide the marker line and the marker value for origin point;
# 4. 在箭头的左侧添加字母"y",字母"y"使用的字体是"Arial",字号大小为10,风格为“斜体”。Add the letter "y" on left side of the axis end, the font for "y" is "Arial", size is 10, and style is "italic";
# 5. 如果绘图区的高度发生改变不再是400像素,你的代码应仅需要修改一处就能重新绘制出填满大部分(保留上下个20像素高的间隙)绘图区高度的数轴。If the height of drawing area is changed to any other value other than 400 pixels, your codes should only need to change one place in order to draw the new vertical axis that fullfill the most height of the draw area (keep 20 pixels paddings for both ends).
#
# <img src="figures/L013_assignment3.png" style="align:center" height="400px"/>
# +
reset()
#TODO: Add your own codes here 在这里添加你自己的代码
# Solution2: using goto() 第二种方法:使用goto()
min_x = -1 * max_x
# draw line
pu() # 提起画笔,暂停绘图
home() # Move turtle to the origin – coordinates (0,0) 移动小海龟至初始位置
# and set its heading to its start-orientation 并设置朝向为初始朝向
goto(0, min_x) # go to the left end of the line 移动海龟到坐标轴直线的最左端
pd() # 落下画笔,准备绘图
goto(0, max_x) # go to the right end of the line 移动海龟到坐标轴直线的最右段
# draw mark 绘制刻度线
cur_x = min_x # cur_x is min_x
while cur_x <= max_x:
if cur_x % mark_interval == 0:
pu() # pen up
goto(cur_x, 0) # go to cur_x fof x and 0 for y
pd() # pen down
goto(cur_x, mark_line_length) # 绘制刻度线
pu() # pen up
goto(cur_x, -text_offset) # go to cur_x for x nd -text_offset for y.
pd() # pen down
if cur_x == 0:
pass
else:
text = str(int(cur_x//mark_interval)) # text is str(int(cur_x//mark_interval))
write(text, align="center") # 书写刻度值
cur_x += delta_x # cur_x is delta_x + delta_x
if show_arrow: # if you need to draw arrows
arrow_x, arrow_y = max_x - 10, -5
pu() # pen up
goto(max_x, 0) # go to max_x for x and 0 for y
pd() # pen down
goto(arrow_x, arrow_y) # go to arrow_x for x and arrow_y for y
pu() # pen up
goto(max_x, 0) # go to max_x for x and 0 for y
pd() # pen down
goto(arrow_x, -arrow_y) # go to arrow_x for x and arrow_y for y
pu()
goto(max_x, 0)
right(90)
fd(text_offset)
write("x", move=False, align="center", font=("Arial", 10, "italic"))
ht() # hide turtle
ht()
# +
reset()
pd()
st()
speed(2)
min_x = -1 * max_x # 根据坐标轴允许的最大值,获取该坐标轴允许的最小值
pu() # 提起画笔,暂停绘图
home() # Move turtle to the origin – coordinates (0,0) 移动小海龟至初始位置
right(90) # and set its heading to its start-orientation 并设置朝向为初始朝向
fd(max_x) # forward max_x
pd() # 落下画笔,准备绘图
# draw mark 绘制刻度线
cur_x, last_x = min_x, min_x # 海龟当前位置和最近一次绘图后停留的位置
while cur_x <= max_x: # 循环
if cur_x % mark_interval == 0: # 海龟的位置是相邻刻度间隔长度的整数倍
length_move = cur_x - last_x # 计算海龟应该前进的长度
pd() # 落下画笔,准备绘图
bk(length_move) # 海龟前进(绘制一小段)
left(mark_degree) # 向左转90度,海龟朝正上方,准备绘制刻度线
fd(mark_line_length) # 绘制刻度线
pu() # 把画笔提起暂停绘图
bk(mark_line_length + text_offset) # 后退(向下)一段长度
text = str(int(cur_x // mark_interval))# 准备刻度值字符串(由整型数据转换而来)
if cur_x == 0:
fd(text_offset)
right(90)
else:
write(text, align="center") # 在当前位置以居中的形式书写文字字符串
fd(text_offset) # 前进(向上)一小段长度
right(mark_degree) # 向右转90度,海龟次朝向右侧
last_x = cur_x # 记录海龟当前位置,为下次绘图的起点
cur_x += delta_x # 当前位置增加一小段长度(个单位距离:像素)
pd() # 落下画笔,准备绘制
fd(max_x - last_x) # 绘制最后一个刻度线到数轴最大x值这一小段
if show_arrow: # 如果需要绘制箭头
bk(60)
right(arrow_degree) # 向右转,海龟朝向右侧偏下
fd(arrow_length) # 后退一定距离,绘制箭头一边
bk(arrow_length) # 回到max_x位置
left(arrow_degree * 2) # 向左转,海龟朝向右侧偏上
fd(arrow_length) # 后退一定距离,绘制箭头另一边
pu()
right(120)
fd(20)
write("y", move=False, align="left", font=("Arial", 15, "italic")) # 在当前位置以居中的形式书写文字字符串
ht() # 隐藏海龟
# -
st()
home()
# 4. 编程绘制一条如下图所示的水平坐标轴。与本讲示例不同的是:By programming, draw a horizontal axis with major and minor marker lines as shown in the figure. Most of the parameters that control the style of the aixs remain same as introduced in the lecture, except:
# 1. 在刻度线的内部再绘制9条段的次要刻度线,这样原来相邻的两条刻度线被等间距的分为10个等分,每个等分对应的长度为5;Add 9 minor marker lines within two major marker lines so that every major marker interval is divided into 10 equal minor marker intervals, each 5 pixles length;
# 2. 与原来刻度线的宽度为10不同,次要刻度线的宽度为6; the length of the minor marker line is 6 pixels, keep the length of the major marker line 10 pixels unchanged;
# 3. (困难,可选做)在左右两侧整数刻度之外的区域**不要**绘制次要刻度线;(Difficult, Optional) Do **NOT** add minor maker lines on the parts where the position is smaller than the minimal major marker value or larger than the maximal major marker value;
# 4. 将表示0刻度坐标值的文字“0”的位置向右移动距离10,刻度线仍保持与相邻的刻度线等距离不变; Move the text "0", which indicating the value 0 on the axis, 10 pixels right to its original position. Keep the mark line where it is;
# 5. 在箭头的下方添加字母"x",字母"x"使用的字体是"Arial",字号大小为10,风格为“斜体”。Add a letter "x" under the arrow at the right end of the axis, use font "Arial", size 10, and "italic" to write the "x";
# 6. 当调整绘图区域的大小为宽为800像素时,你的代码应该仅需要更新绘图区的宽度而不改变其他地方就能直接调整数轴长度和刻度的显示。When the width of drawing area changed to 800 pixels from 600 pixels, your codes should only need to change the value of `width` while keep others unchanged to draw the axis with new length and markers.
#
# <img src="figures/L013_assignment4.png" style="align:center" height="400px"/>
# +
origin = (0, 0) # 原点的位置
padding = 20 # 数轴端点距离绘图区边界距离
max_x = width/2 - padding # x轴最大值
show_arrow = True # 是否显示箭头
mark_interval = 50 # 刻度线间距
mark_line_length = 10 # 刻度线高度
text_offset = 20 # 坐标值距离坐标线的距离
minor_mark_line_interval = 5
minor_mark_line_length = 6
minor_mark_degree = 90
mark_degree = 90 # 坐标刻度与坐标轴夹角
arrow_length = 100 # 箭头长度
arrow_degree = 30 # 箭头与坐标轴夹角
delta_x = 1
# +
# Assignment 4: horizontal axis with major marker lines plus 9 shorter minor
# marker lines between each pair of majors.
reset()
#TODO: Add your own codes here
reset()  # NOTE(review): duplicate reset() — the call above already cleared the canvas
min_x = -1 * max_x  # the left end mirrors the right end around the origin
minor_line_drawn_per_mark = 0  # minor marks drawn since the last major mark
# draw the axis line
pu()  # pen up: move without drawing
home()  # move the turtle to the origin (0, 0)
# and restore its default (east) heading
goto(min_x, 0)  # jump to the left end of the axis
pd()  # pen down: start drawing
goto(max_x, 0)  # draw the axis line to the right end
# draw the marker lines
cur_x = min_x  # scan position, starts at the left end
while cur_x <= max_x:  # sweep from the left end to the right end
    if minor_line_drawn_per_mark == 9:  # every 10th mark is treated as a major one
        # NOTE(review): majors are placed by counting 9 minors from min_x rather
        # than by cur_x % mark_interval == 0, so unless min_x is a multiple of
        # mark_interval the major marks (and their labels) land at shifted
        # positions — verify against the reference figure.
        minor_line_drawn_per_mark = 0  # restart the minor-mark counter
        pu()  # pen up
        goto(cur_x, 0)  # move onto the axis at the mark position
        pd()  # pen down
        goto(cur_x, mark_line_length)  # draw the longer (major) marker line
        pu()  # pen up
        goto(cur_x, -text_offset)  # drop below the axis for the value text
        text = str(int(cur_x//mark_interval))  # marker value as an integer string
        write(text, align="center")  # write the value centred under the marker
        pd()  # pen down
        cur_x += delta_x  # NOTE(review): majors advance by delta_x (1 px) while
        # minors advance by minor_mark_line_interval (5 px) — confirm intended
    else:
        pu()  # pen up
        goto(cur_x, 0)  # move onto the axis at the mark position
        pd()  # pen down
        goto(cur_x, minor_mark_line_length)  # draw the shorter (minor) marker line
        minor_line_drawn_per_mark += 1  # one more minor mark since the last major
        cur_x += minor_mark_line_interval  # advance by the minor-mark spacing (5 px)
if show_arrow:  # draw the arrow head at the right end
    arrow_x, arrow_y = max_x - 10, -5  # common end point of the two arrow strokes
    pu()  # pen up
    goto(max_x, 0)  # tip of the arrow
    pd()  # pen down
    goto(arrow_x, arrow_y)  # lower stroke
    pu()  # pen up
    goto(max_x, 0)  # back to the tip
    pd()  # pen down
    goto(arrow_x, -arrow_y)  # upper stroke (mirrored)
# write an italic "x" under the arrow
pu()
goto(max_x, 0)
right(90)  # face downward
fd(text_offset)  # drop below the axis
write("x", move=False, align="center", font=("Arial", 10, "italic"))
ht()  # hide the turtle cursor
# -
# <span style="color:#ff0000; font-size:300%"><u>Good</u></span>
| assignments/2021-07-10/Sophie_assingment_013_marked.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### 1. Import This Stuff
# +
import gym
import matplotlib.pyplot as plt
# %matplotlib inline
import numpy as np
def step(action, num_steps=1):
    """Apply the named `action` to the global gym `env` for `num_steps` frames.

    `action` is a lower-case ALE action-meaning string (e.g. "noop",
    "fire", "left"); it is translated into the env's integer action
    index via get_action_meanings().
    """
    meanings = env.unwrapped.get_action_meanings()
    name_to_index = {meaning.lower(): idx for idx, meaning in enumerate(meanings)}
    action_index = name_to_index[action]
    for _ in range(num_steps):
        env.step(action_index)
def printinds(inds, name=""):
    """Print `name` followed by the integers in `inds`, right-aligned to width 3."""
    body = ", ".join("%3i" % value for value in inds)
    print(name, ": [", body, "]")
# -
# ### 2. Pick Game
env = gym.make("BattleZoneNoFrameskip-v4")
env.reset();
# ### 3. Render The Game
# On Macs this will open a separate window with the game. Try to get this window split-screened with this notebook.
env.render("human")
# ### 4. Pick RAM Bytes to Ignore
# ##### Change this cell to ignore bytes that aren't affected by actions
bytes_to_ignore = [ 0, 63, 64, 66,100, 101,
80, 84, 86, 87, 88, 89, 90, 92, 93, 94, 95, 96,
36, 37, 38, 39, 40, 124, 126 , 31, 67, 68, 69, 70, 72]
# ##### Change this cell if you just want to look at a single RAM bytes value (like checking if the spreadsheet is correct
bytes_to_print_alone = [ ]
# ### 5. Repeat the Below Cell To Step Through The Game
# 1. First pick ```action = "noop", num_times = 1``` a few times to see what RAM Byte Inds Change
# 2. Set bytes_to_ignore to those values
# 3. Now doing a noop should show no RAM bytes changing
# 4. Now try controlling the agent with "right", "left", and "fire"
# 5. See what RAM values change (those are probably the agent's position)
# ##### Modify this cell
env.reset();
action = "down"
num_times = 3
# ##### Then run this cell (run it 3 or 4 times first to get the human rendering to reach half-screen)
inds = np.arange(128)
if len(bytes_to_print_alone) > 0:
inds = np.asarray(bytes_to_print_alone)
else:
inds = np.delete(inds, bytes_to_ignore)
r0 = env.env.ale.getRAM()[inds]
step(action, num_times)# change this action
env.render("human")
r1 = env.env.ale.getRAM()[inds]
diff = r1 - r0
if len(bytes_to_print_alone) > 0:
i,rb,ra = inds, r0, r1
else:
i,rb,ra = inds[diff!=0], r0[diff!=0], r1[diff!=0]
printinds(i,name="RAM Byte Inds ") # prints rams that change
printinds(rb, name="RAM Values Before")
printinds(ra, name="RAM Values After ")
[ 3, 4, 6, 7, 10, 12, 14, 16, 18, 20, 22, 23, 24, 27, 36, 37, 38, 39, 40, 41, 59, 60, 67, 68, 69, 70, 74, 75, 76, 78, 85, 91, 122, 123, 124, 125, 126 ]
[ 15, 145, 1, 63, 28, 7, 0, 62, 31, 112, 127, 0, 127, 129, 0, 247, 173, 255, 64, 221, 14, 235, 186, 22, 232, 226, 31, 19, 241, 205, 5, 5, 6, 222, 99, 249, 238 ]
[ 12, 148, 0, 127, 56, 3, 16, 124, 15, 240, 254, 16, 63, 192, 1, 220, 4, 32, 144, 220, 15, 234, 185, 138, 234, 135, 30, 20, 116, 90, 4, 4, 78, 246, 253, 248, 105 ]
253 % 8
| notebooks/atari_guess_n_check.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# +
df = pd.read_csv('salmon_seabass.csv')
# both species are endcoded in the data set as follows:
# 0 ... salmon
# 1 ... sea bass
df.loc[(df.species == 0),'species_desc'] = 'Salmon'
df.loc[(df.species == 1),'species_desc'] = 'Sea Bass'
df
# +
# Scatter plot of the two species in (lightness, width) feature space.
data_to_plot = [df[df.species==0], df[df.species==1]]  # [salmon rows, sea-bass rows]
plt.figure(figsize=(12, 8))
for i, data in enumerate(data_to_plot):
    #print(data)
    # colour and legend label are picked by species index; the marker size
    # scales mildly with the fish's width (width**1.1)
    plt.scatter(x=data.lightness, y=data.width, c=['blue','gold'][i], s=data.width**1.1,label=['Salmon','Sea Bass'][i], alpha=0.8)
plt.title("Salmon/Sea Bass dataset", fontsize=18)
plt.xlabel('Lightness', fontsize=12)
plt.ylabel('Width', fontsize=12)
plt.legend()
plt.grid(True)
plt.show()
# +
df_salmon=df[df.species==0]
df_boss=df[df.species==1]
df_salmon=df_salmon[['lightness','width']]
df_salmon.columns=['salmon_lightness','salmon_width']
df_boss=df_boss[['lightness','width']]
df_boss.columns=['boss_lightness','boss_width']
#df_boss.reset_index(drop=True)
ax=df_salmon.plot.hist()
df_boss.plot.hist(ax=ax)
# -
| salmon_seabass.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (Data Science)
# language: python
# name: python3__SAGEMAKER_INTERNAL__arn:aws:sagemaker:us-east-1:081325390199:image/datascience-1.0
# ---
# ## Introduction
#
# Word2Vec is a popular algorithm used for generating dense vector representations of words in large corpora using unsupervised learning. The resulting vectors have been shown to capture semantic relationships between the corresponding words and are used extensively for many downstream natural language processing (NLP) tasks like sentiment analysis, named entity recognition and machine translation.
# SageMaker BlazingText which provides efficient implementations of Word2Vec on
#
# - single CPU instance
# - single instance with multiple GPUs - P2 or P3 instances
# - multiple CPU instances (Distributed training)
# In this notebook, we demonstrate how BlazingText can be used for distributed training of word2vec using multiple CPU instances.
# ## Setup
#
# Let's start by specifying:
#
# - The S3 bucket and prefix that you want to use for training and model data. This should be within the same region as the Notebook Instance, training, and hosting. If you don't specify a bucket, SageMaker SDK will create a default bucket following a pre-defined naming convention in the same region.
# - The IAM role ARN used to give SageMaker access to your data. It can be fetched using the **get_execution_role** method from sagemaker python SDK.
# + isConfigCell=true
import sagemaker
from sagemaker import get_execution_role
import boto3
import json
sess = sagemaker.Session()
role = get_execution_role()
print(role) # This is the role that SageMaker would use to leverage AWS resources (S3, CloudWatch) on your behalf
bucket = sess.default_bucket() # Replace with your own bucket name if needed
print(bucket)
prefix = 'sagemaker/DEMO-blazingtext-text8' #Replace with the prefix under which you want to store the data if needed
# -
# ### Data Ingestion
#
# Next, we download a dataset from the web on which we want to train the word vectors. BlazingText expects a single preprocessed text file with space separated tokens and each line of the file should contain a single sentence.
#
# In this example, let us train the vectors on [text8](http://mattmahoney.net/dc/textdata.html) dataset (100 MB), which is a small (already preprocessed) version of Wikipedia dump.
# !wget http://mattmahoney.net/dc/text8.zip -O text8.gz
# Uncompressing
# !gzip -d text8.gz -f
# After the data downloading and uncompressing is complete, we need to upload it to S3 so that it can be consumed by SageMaker to execute training jobs. We'll use Python SDK to upload these two files to the bucket and prefix location that we have set above.
# +
train_channel = prefix + '/train'
sess.upload_data(path='text8', bucket=bucket, key_prefix=train_channel)
s3_train_data = 's3://{}/{}'.format(bucket, train_channel)
# -
s3_train_data
# Next we need to setup an output location at S3, where the model artifact will be dumped. These artifacts are also the output of the algorithm's training job.
s3_output_location = 's3://{}/{}/output'.format(bucket, prefix)
s3_output_location
# ## Training Setup
# Now that we are done with all the setup that is needed, we are ready to train our object detector. To begin, let us create a ``sageMaker.estimator.Estimator`` object. This estimator will launch the training job.
region_name = boto3.Session().region_name
region_name
container = sagemaker.amazon.amazon_estimator.get_image_uri(region_name, "blazingtext", "latest")
print('Using SageMaker BlazingText container: {} ({})'.format(container, region_name))
# ## Training the BlazingText model for generating word vectors
# Similar to the original implementation of [Word2Vec](https://arxiv.org/pdf/1301.3781.pdf), SageMaker BlazingText provides an efficient implementation of the continuous bag-of-words (CBOW) and skip-gram architectures using Negative Sampling, on CPUs and additionally on GPU[s]. The GPU implementation uses highly optimized CUDA kernels. To learn more, please refer to [*BlazingText: Scaling and Accelerating Word2Vec using Multiple GPUs*](https://dl.acm.org/citation.cfm?doid=3146347.3146354). BlazingText also supports learning of subword embeddings with CBOW and skip-gram modes. This enables BlazingText to generate vectors for out-of-vocabulary (OOV) words, as demonstrated in this [notebook](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/introduction_to_amazon_algorithms/blazingtext_word2vec_subwords_text8/blazingtext_word2vec_subwords_text8.ipynb).
#
#
#
# Besides skip-gram and CBOW, SageMaker BlazingText also supports the "Batch Skipgram" mode, which uses efficient mini-batching and matrix-matrix operations ([BLAS Level 3 routines](https://software.intel.com/en-us/mkl-developer-reference-fortran-blas-level-3-routines)). This mode enables distributed word2vec training across multiple CPU nodes, allowing almost linear scale up of word2vec computation to process hundreds of millions of words per second. Please refer to [*Parallelizing Word2Vec in Shared and Distributed Memory*](https://arxiv.org/pdf/1604.04661.pdf) to learn more.
# BlazingText also supports a *supervised* mode for text classification. It extends the FastText text classifier to leverage GPU acceleration using custom CUDA kernels. The model can be trained on more than a billion words in a couple of minutes using a multi-core CPU or a GPU, while achieving performance on par with the state-of-the-art deep learning text classification algorithms. For more information, please refer to [algorithm documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/blazingtext.html) or [the text classification notebook](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/introduction_to_amazon_algorithms/blazingtext_text_classification_dbpedia/blazingtext_text_classification_dbpedia.ipynb).
# To summarize, the following modes are supported by BlazingText on different types instances:
#
# | Modes | cbow (supports subwords training) | skipgram (supports subwords training) | batch_skipgram | supervised |
# |:----------------------: |:----: |:--------: |:--------------: | :--------------: |
# | Single CPU instance | ✔ | ✔ | ✔ | ✔ |
# | Single GPU instance | ✔ | ✔ | | ✔ (Instance with 1 GPU only) |
# | Multiple CPU instances | | | ✔ | | |
#
# Now, let's define the resource configuration and hyperparameters to train word vectors on *text8* dataset, using "batch_skipgram" mode on two c4.2xlarge instances.
#
bt_model = sagemaker.estimator.Estimator(container,
role,
train_instance_count=2,
train_instance_type='ml.c4.2xlarge',
train_volume_size = 5,
train_max_run = 360000,
input_mode= 'File',
output_path=s3_output_location,
sagemaker_session=sess)
# Please refer to [algorithm documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/blazingtext_hyperparameters.html) for the complete list of hyperparameters.
bt_model.set_hyperparameters(mode="skipgram",
epochs=5,
min_count=5,
sampling_threshold=0.0001,
learning_rate=0.05,
window_size=5,
vector_dim=10,
negative_samples=5,
subwords=True, # Enables learning of subword embeddings for OOV word vector generation
min_char=3, # min length of char ngrams
max_char=6, # max length of char ngrams
batch_size=11, # = (2*window_size + 1) (Preferred. Used only if mode is batch_skipgram)
evaluation=True)# Perform similarity evaluation on WS-353 dataset at the end of training
# Now that the hyper-parameters are setup, let us prepare the handshake between our data channels and the algorithm. To do this, we need to create the `sagemaker.session.s3_input` objects from our data channels. These objects are then put in a simple dictionary, which the algorithm consumes.
train_data = sagemaker.session.s3_input(s3_train_data, distribution='FullyReplicated',
content_type='text/plain', s3_data_type='S3Prefix')
data_channels = {'train': train_data}
# We have our `Estimator` object, we have set the hyper-parameters for this object and we have our data channels linked with the algorithm. The only remaining thing to do is to train the algorithm. The following command will train the algorithm. Training the algorithm involves a few steps. Firstly, the instance that we requested while creating the `Estimator` classes is provisioned and is setup with the appropriate libraries. Then, the data from our channels are downloaded into the instance. Once this is done, the training job begins. The provisioning and data downloading will take some time, depending on the size of the data. Therefore it might be a few minutes before we start getting training logs for our training jobs. The data logs will also print out `Spearman's Rho` on some pre-selected validation datasets after the training job has executed. This metric is a proxy for the quality of the algorithm.
#
# Once the job has finished a "Job complete" message will be printed. The trained model can be found in the S3 bucket that was setup as `output_path` in the estimator.
bt_model.fit(inputs=data_channels, logs=True)
# ## Hosting / Inference
# Once the training is done, we can deploy the trained model as an Amazon SageMaker real-time hosted endpoint. This will allow us to make predictions (or inference) from the model. Note that we don't have to host on the same type of instance that we used to train. Because instance endpoints will be up and running for long, it's advisable to choose a cheaper instance for inference.
bt_endpoint = bt_model.deploy(initial_instance_count = 1,instance_type = 'ml.m4.xlarge')
# ### Getting vector representations for words
# #### Use JSON format for inference
# The payload should contain a list of words with the key as "**instances**". BlazingText supports content-type `application/json`.
import pandas as pd
import numpy as np
df = pd.read_csv('<csv file name')
df.head()
# +
import nltk
nltk.download('punkt')
nltk.download('stopwords')
nltk.download('averaged_perceptron_tagger')
nltk.download('wordnet')
# +
import nltk, re
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.corpus import wordnet
from nltk.stem import WordNetLemmatizer
# Let's get a list of stop words from the NLTK library
stop = stopwords.words('english')
# These words are important for our problem. We don't want to remove them.
additional_stopwords = ["a", "an", "the", "this", "that", "is", "it", "to", "and"]
stop.extend(additional_stopwords)
# New stop word list
#stop_words = [word for word in stop if word not in excluding]
# Initialize the lemmatizer
wl = WordNetLemmatizer()
# This is a helper function to map NTLK position tags
# Full list is available here: https://www.ling.upenn.edu/courses/Fall_2003/ling001/penn_treebank_pos.html
def get_wordnet_pos(tag):
    """Map a Penn Treebank POS tag to the matching WordNet POS constant.

    Only the tag's first letter matters: J=adjective, V=verb, N=noun,
    R=adverb; any other tag falls back to noun.
    """
    by_initial = {
        'J': wordnet.ADJ,
        'V': wordnet.VERB,
        'N': wordnet.NOUN,
        'R': wordnet.ADV,
    }
    return by_initial.get(tag[:1], wordnet.NOUN)
def process_text_lemmitization(texts):
    """Clean, tokenize, filter and lemmatize every sentence in `texts`.

    Returns a list with one space-joined string of lemmas per input
    sentence.  Non-string entries (e.g. NaN cells from pandas arrive as
    float) skip the tokenization step and contribute an empty string.
    """
    final_text_list=[]
    for sent in texts:
        filtered_sentence=[]
        if not isinstance(sent,float):  # NaN values from pandas are floats
            sent = sent.lower() # Lowercase
            sent = sent.strip() # Remove leading/trailing whitespace
            sent = re.sub('\s+', ' ', sent) # Collapse runs of spaces and tabs
            sent = re.compile('<.*?>').sub('', sent) # Remove HTML tags/markups
            for w in word_tokenize(sent):
                # Custom filtering: keep tokens that are non-numeric,
                # longer than 3 characters, and not in the stop-word list
                if(not w.isnumeric()) and (len(w)>3) and (w not in stop):
                    # Keep the surviving token for lemmatization
                    filtered_sentence.append(w)
        lemmatized_sentence = []
        # Part-of-speech tag the surviving tokens
        word_pos_tags = nltk.pos_tag(filtered_sentence)
        # Map each POS tag to WordNet's scheme and lemmatize the token
        for idx, tag in enumerate(word_pos_tags):
            lemmatized_sentence.append(wl.lemmatize(tag[0], get_wordnet_pos(tag[1])))
        lemmatized_text = " ".join(lemmatized_sentence)
        final_text_list.append(lemmatized_text)
    return final_text_list
# -
df_processed = process_text_lemmitization(df['sentence'])
df_processed
def get_max_word_count(sent_list):
    """Return the token count of the longest sentence in `sent_list`.

    Uses NLTK's word_tokenize for tokenization. Returns 0 for an empty
    input instead of raising ValueError (``max()`` of an empty sequence),
    which the original did.
    """
    return max((len(word_tokenize(sent)) for sent in sent_list), default=0)
max_word_count = get_max_word_count(df_processed)
max_word_count
# Each word vector appears to be 10-dimensional here -- TODO confirm
# against the BlazingText training hyperparameters -- so the widest
# sentence needs max_word_count * 10 columns.
max_columns = max_word_count*10
max_columns
def sentence_to_vec2(response):
    """Concatenate per-word embedding vectors into one fixed-width array.

    `response` is expected to be a list of dicts each holding a 'vector'
    entry (the BlazingText endpoint response) -- TODO confirm schema.
    The concatenated vector is right-padded with zeros to `max_columns`
    (module-level constant); if it is longer it is now truncated so an
    unexpectedly long sentence raises no broadcast error (the original
    crashed in that case).
    """
    sentence_vec = []
    for vec in response:
        sentence_vec.extend(vec['vector'])
    padded = np.zeros(max_columns)
    # Defensive truncation to the fixed width before assignment.
    sent_array = np.array(sentence_vec)[:max_columns]
    padded[0:sent_array.shape[0]] = sent_array
    return padded
def process_sent_to_vec(sent_list):
    """Convert each sentence into a fixed-width embedding vector.

    Tokenizes every sentence, sends the tokens to the BlazingText
    endpoint (`bt_endpoint`, module level) to fetch word vectors, and
    concatenates them via sentence_to_vec2. Returns a list of arrays.
    """
    vectors_per_sentence = []
    for sentence in sent_list:
        tokens = word_tokenize(sentence)
        # One inference call per sentence; payload shape is the format
        # the BlazingText serving container expects.
        payload = {"instances" : tokens}
        raw_response = bt_endpoint.predict(json.dumps(payload))
        parsed = json.loads(raw_response)
        vectors_per_sentence.append(sentence_to_vec2(parsed))
    return vectors_per_sentence
# Vectorize every processed sentence via the BlazingText endpoint
test_vec = process_sent_to_vec(df_processed)
test_vec_array = np.array(test_vec)
test_vec_array.shape
test_vec_array
# SageMaker KMeans expects float32 input
train_data = test_vec_array.astype('float32')
# Persist a sample (first 100 rows only) for the batch-transform job below
np.savetxt("kmeans_train_data.csv", train_data[0:100], delimiter=",")
# +
from sagemaker import KMeans
num_clusters = 5
kmeans = KMeans(role=role,
                train_instance_count=1,
                train_instance_type='ml.c4.xlarge',
                output_path='s3://'+ bucket +'/sentence-similarity/',
                k=num_clusters)
# -
# %%time
kmeans.fit(kmeans.record_set(train_data))
# +
test_channel = prefix + '/batch'
sess.upload_data(path='kmeans_train_data.csv', bucket=bucket, key_prefix=test_channel)
# +
# %%time
kmeans_transformer = kmeans.transformer(1, 'ml.m4.xlarge')
# start a transform job
batch_file = 'kmeans_train_data.csv'
input_location = 's3://{}/{}/batch/{}'.format(bucket, prefix, batch_file) # use input data without ID column
kmeans_transformer.transform(input_location, split_type='Line')
kmeans_transformer.wait()
# +
import json
import io
from urllib.parse import urlparse
def get_csv_output_from_s3(s3uri, file_name):
    """Download `file_name` from under the S3 prefix in `s3uri` and return it as text."""
    parsed = urlparse(s3uri)
    # netloc is the bucket; the path (minus the leading '/') is the key prefix
    target_key = '{}/{}'.format(parsed.path[1:], file_name)
    s3_object = boto3.resource('s3').Object(parsed.netloc, target_key)
    return s3_object.get()["Body"].read().decode('utf-8')
# -
output = get_csv_output_from_s3(kmeans_transformer.output_path, '{}.out'.format(batch_file))
output_df = pd.read_csv(io.StringIO(output), sep=",", header=None)
output_df.head(8)
# %%time
# Deploy the trained KMeans model to a real-time endpoint
kmeans_predictor = kmeans.deploy(initial_instance_count=1,
                                 instance_type='ml.t2.medium')
# %%time
# Predict cluster assignments for the first 990 training vectors
result_kmeans=kmeans_predictor.predict(train_data[0:990])
result_kmeans
# Extract the numeric cluster id from each record-protobuf response
cluster_labels = [r.label['closest_cluster'].float32_tensor.values[0] for r in result_kmeans]
cluster_labels
# Pair each processed sentence with its assigned cluster and save to CSV
df_results = pd.DataFrame(columns=['student_response'])
df_results['student_response'] = df_processed[0:990]
df_results['cluster'] = cluster_labels
df_results.head()
df_results.to_csv('results_word2vec_sm.csv',index=False)
pd.DataFrame(cluster_labels)[0].value_counts()
import matplotlib.pyplot as plt
import matplotlib
import seaborn as sns
matplotlib.style.use('ggplot')
ax=plt.subplots(figsize=(6,3))
ax=sns.distplot(cluster_labels, kde=False)
title="Histogram of Cluster Counts"
ax.set_title(title, fontsize=12)
plt.show()
# ### Evaluation
# Let us now download the word vectors learned by our model and visualize them using a [t-SNE](https://en.wikipedia.org/wiki/T-distributed_stochastic_neighbor_embedding) plot.
# +
s3 = boto3.resource('s3')
key = bt_model.model_data[bt_model.model_data.find("/", 5)+1:]
s3.Bucket(bucket).download_file(key, 'model.tar.gz')
# -
# Uncompress `model.tar.gz` to get `vectors.txt`
# !tar -xvzf model.tar.gz
# If you set "evaluation" as "true" in the hyperparameters, then "eval.json" will be there in the model artifacts.
#
# The quality of trained model is evaluated on word similarity task. We use [WS-353](http://alfonseca.org/eng/research/wordsim353.html), which is one of the most popular test datasets used for this purpose. It contains word pairs together with human-assigned similarity judgments.
#
# The word representations are evaluated by ranking the pairs according to their cosine similarities, and measuring the Spearmans rank correlation coefficient with the human judgments.
#
# Let's look at the evaluation scores which are there in eval.json. For embeddings trained on the text8 dataset, scores above 0.65 are pretty good.
# !cat eval.json
# Now, let us do a 2D visualization of the word vectors
# +
import numpy as np
from sklearn.preprocessing import normalize
# Read the 400 most frequent word vectors. The vectors in the file are in descending order of frequency.
num_points = 400
first_line = True
index_to_word = []
with open("vectors.txt","r") as f:
    for line_num, line in enumerate(f):
        if first_line:
            # Header line is "<vocab_size> <dim>"; allocate the matrix once
            # the embedding dimension is known.
            dim = int(line.strip().split()[1])
            word_vecs = np.zeros((num_points, dim), dtype=float)
            first_line = False
            continue
        line = line.strip()
        word = line.split()[0]
        # line_num is offset by 1 for data rows because line 0 was the header
        vec = word_vecs[line_num-1]
        for index, vec_val in enumerate(line.split()[1:]):
            vec[index] = float(vec_val)
        index_to_word.append(word)
        if line_num >= num_points:
            break
# L2-normalize rows in place so cosine similarity reduces to a dot product
word_vecs = normalize(word_vecs, copy=False, return_norm=False)
# +
from sklearn.manifold import TSNE
tsne = TSNE(perplexity=40, n_components=2, init='pca', n_iter=10000)
two_d_embeddings = tsne.fit_transform(word_vecs[:num_points])
labels = index_to_word[:num_points]
# +
from matplotlib import pylab
# %matplotlib inline
def plot(embeddings, labels):
    """Scatter-plot 2-D embeddings with a text label next to each point."""
    pylab.figure(figsize=(20,20))
    for point, label in zip(embeddings, labels):
        x, y = point
        pylab.scatter(x, y)
        # Offset the annotation slightly so it does not sit on the marker.
        pylab.annotate(label, xy=(x, y), xytext=(5, 2),
                       textcoords='offset points', ha='right', va='bottom')
    pylab.show()
plot(two_d_embeddings, labels)
# -
# Running the code above might generate a plot like the one below. t-SNE and Word2Vec are stochastic, so although when you run the code the plot won’t look exactly like this, you can still see clusters of similar words such as below where 'british', 'american', 'french', 'english' are near the bottom-left, and 'military', 'army' and 'forces' are all together near the bottom.
# 
# ### Stop / Close the Endpoint (Optional)
# Finally, we should delete the endpoint before we close the notebook.
sess.delete_endpoint(bt_endpoint.endpoint)
| code/SageMaker-word2vec-kmeans.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Project info - unsupervised learning with KMeans
# ## Description
#
# The Scitkit-learn module 'datasets' includes the 20 News Groups dataset, which is a text dataset including roughly 18,000 articles on 20 different topics.
#
# The dataset includes labels for each article, but can also be analyzed in an unsupervised fashion.
# # Import modules and tools
# +
# standard libary and settings
import os
import sys
import importlib
import itertools
import csv
import ast
from timeit import default_timer as timer
global ITERATION
import time
from functools import reduce
rundate = time.strftime("%Y%m%d")
import warnings
warnings.simplefilter("ignore")
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:95% !important; }</style>"))
# data extensions and settings
import numpy as np
np.set_printoptions(threshold=np.inf, suppress=True)
import pandas as pd
pd.set_option("display.max_rows", 500)
pd.set_option("display.max_columns", 500)
pd.options.display.float_format = "{:,.6f}".format
# modeling extensions
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.cluster import KMeans, DBSCAN, AgglomerativeClustering
from sklearn.datasets import load_boston, load_wine, load_iris, load_breast_cancer, make_blobs, make_moons
from sklearn.decomposition import PCA, LatentDirichletAllocation
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, AdaBoostClassifier, ExtraTreesClassifier, IsolationForest
# Fix: the class is CountVectorizer -- "CounterVectorizer" raises ImportError.
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer, TfidfVectorizer, HashingVectorizer
from sklearn.feature_selection import f_classif, f_regression, VarianceThreshold, SelectFromModel, SelectKBest
import sklearn.gaussian_process as gaussian_process
from sklearn.linear_model import Lasso, Ridge, ElasticNet, LinearRegression, LogisticRegression, SGDRegressor
from sklearn.kernel_ridge import KernelRidge
from sklearn.metrics import precision_score, recall_score, f1_score, explained_variance_score, mean_squared_log_error, mean_absolute_error, median_absolute_error, mean_squared_error, r2_score, confusion_matrix, roc_curve, accuracy_score, roc_auc_score, homogeneity_score, completeness_score, classification_report, silhouette_samples
from sklearn.model_selection import KFold, train_test_split, GridSearchCV, StratifiedKFold, cross_val_score, RandomizedSearchCV
from sklearn.naive_bayes import MultinomialNB
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.pipeline import make_pipeline, Pipeline, FeatureUnion
from sklearn.preprocessing import StandardScaler, RobustScaler, PolynomialFeatures, OrdinalEncoder, LabelEncoder, OneHotEncoder, KBinsDiscretizer, QuantileTransformer, PowerTransformer, MinMaxScaler
from sklearn.svm import SVC, SVR
from sklearn.tree import DecisionTreeRegressor, DecisionTreeClassifier
import sklearn.utils as utils
import eif as iso
from scipy import stats, special
from xgboost import XGBClassifier, XGBRegressor
from lightgbm import LGBMClassifier, LGBMRegressor
import catboost
from hyperopt import hp, tpe, Trials, fmin, STATUS_OK
from hyperopt.pyll.stochastic import sample
# visualization extensions and settings
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline
# custom extensions and settings
sys.path.append("/home/mlmachine") if "/home/mlmachine" not in sys.path else None
sys.path.append("/home/prettierplot") if "/home/prettierplot" not in sys.path else None
import mlmachine as mlm
from prettierplot.plotter import PrettierPlot
import prettierplot.style as style
# -
# # Load, clean, inspect data
# +
# Load data from scikit-learn
from sklearn import datasets  # fix: `datasets` was referenced below but never imported

# Fetch five of the twenty newsgroup categories, stripping metadata that
# would let a model cheat (headers, footers, quoted replies).
X = datasets.fetch_20newsgroups(
    subset="all",
    remove=("headers", "footers", "quotes"),
    categories=[
        "talk.politics.guns",
        "rec.sport.hockey",
        "comp.graphics",
        "sci.space",
        "rec.motorcycles",
    ],
)

# Dataset dimensions
print("Dataset dimensions: {0}".format(X.filenames.shape))
# -
np.unique(X.target, return_counts=True)
# +
# Review article categories
Labels = X.target_names
Labels
# +
# Sample from train data
X.data[0]
# +
# Corresponding label
ix = X.target[0]
Labels[ix]
# -
# ## Convert text to vectors
# +
# Build a bag of words model and use term frequency - inverse document frequency
# to understand how common or uncommon each word that appears in each document
# is relative to the rest of documents in the corpus
tfidf = TfidfVectorizer(
max_df=0.5,
stop_words="english",
ngram_range=(1, 1),
lowercase=True,
strip_accents="unicode",
)
tfidfPipe = Pipeline([("vec", tfidf)])
vectorized = tfidfPipe.fit_transform(X.data)
vectorized.shape
# +
# Capture all unique words
vec = tfidfPipe.named_steps["vec"]
features = vec.get_feature_names()
# -
# __Document-specific word importances__
# +
# Functions to determine word importance
def top_tfidf_feats(row, features, top_n=25):
    """
    Return a DataFrame of the `top_n` largest tf-idf values in `row`,
    paired with their corresponding feature names, highest first.
    """
    # argsort is ascending; walk it backwards to get the largest values first
    ranked_ids = np.argsort(row)[::-1][:top_n]
    pairs = [(features[i], row[i]) for i in ranked_ids]
    frame = pd.DataFrame(pairs)
    frame.columns = ["feature", "tfidf"]
    return frame
def top_feats_in_doc(Xtr, features, row_id, top_n=25):
    """
    Top tf-idf features in one specific document (one row of the sparse matrix).
    """
    # Densify the single sparse row and drop the leading length-1 axis.
    dense_row = np.squeeze(Xtr[row_id].toarray())
    return top_tfidf_feats(dense_row, features, top_n)
# -
# ### Evaluate single article and word importances
# +
# Sample from training data
X.data[1]
# +
# Corresponding label for that training sample
ix = X.target[1]
Labels[ix]
# +
# Print words based on highest word importance values, within a single document
tfidfImp = top_feats_in_doc(vectorized, features, row_id=1, top_n=10)
tfidfImp
# -
# __Corpus-wide word importances__
# +
# The function is used for identifying word importances, across entire corpus
def top_mean_feats(Xtr, features, grp_ids=None, min_tfidf=0.1, top_n=25):
    """
    Return the top n features that on average are most important amongst
    documents in rows identified by indices in grp_ids.

    grp_ids=None means "use all documents". Values below min_tfidf are
    zeroed before averaging so rare noise terms do not dominate.
    """
    # Explicit None check: the original `if grp_ids:` relies on the
    # truthiness of the index object, which is ambiguous (and raises)
    # for numpy index arrays.
    if grp_ids is not None:
        D = Xtr[grp_ids].toarray()
    else:
        D = Xtr.toarray()
    D[D < min_tfidf] = 0
    tfidf_means = np.mean(D, axis=0)
    return top_tfidf_feats(tfidf_means, features, top_n)
# +
# Print words based on highest word importance values, within the entire corpus
top_mean_feats(vectorized, features, grp_ids=None, min_tfidf=0.3, top_n=10)
# -
# __Category-specific word importances__
# +
# The function is used for identifying word importances, within each document category
def top_feats_by_class(Xtr, y, features, min_tfidf=0.1, top_n=25):
    """
    Return a list of DataFrames, one per class label in `y`, each holding
    the top_n features by mean tf-idf across that class's documents.
    """
    frames = []
    for label in np.unique(y):
        row_ids = np.where(y == label)
        class_frame = top_mean_feats(Xtr, features, row_ids,
                                     min_tfidf=min_tfidf, top_n=top_n)
        # NOTE(review): attribute assignment on a DataFrame is not
        # preserved by pandas operations; kept as-is for compatibility
        # with existing callers.
        class_frame.label = label
        frames.append(class_frame)
    return frames
# +
# Print words based on highest word importance values, within each class of documents
# In this case we're looking at atheism
dfs = top_feats_by_class(vectorized, X.target, features, min_tfidf=0.3, top_n=10)
# +
# Review top words by importance for a specific class
topicIx = 1
print("Top features within category type {0} \n".format(Labels[topicIx]))
display(dfs[topicIx])
# -
# # Cluster analysis
#
# This section executes cluster analysis, an unsupervised learning technique, on the documents. It groups individual documents with other document that are determined by the algorithm to be similar. In this model, we will use KMeans to find K different clusters. In this case, we will use k = 20, because we know ther are 20 different categories. We can then compare the documents and their cluster labels to the actual labels to see how well KMeans performed its unsupervised learning task.
def grid_search(data, params):
    """
    Fit a TfidfVectorizer -> KMeans pipeline under GridSearchCV over
    `params`, print the best score and parameters, and return the
    fitted GridSearchCV object.
    """
    # Step names "vect"/"clf" are referenced by the param grid keys.
    pipeline = Pipeline([
        ("vect", TfidfVectorizer(stop_words="english", lowercase=True,
                                 strip_accents="unicode")),
        ("clf", KMeans(init="k-means++", n_clusters=5,
                       random_state=0, verbose=0)),
    ])
    searcher = GridSearchCV(pipeline, params, verbose=1, refit=True)
    searcher.fit(data)

    print()
    print("Best score: %0.3f" % searcher.best_score_)
    print("Best parameters set:")
    chosen = searcher.best_estimator_.get_params()
    for param_name in sorted(params.keys()):
        print("\t%s: %r" % (param_name, chosen[param_name]))
    return searcher
# +
param_grid = {
"vect__ngram_range": [(1, 1)],
"vect__use_idf": (True, False),
"vect__max_df": np.linspace(0.25, 0.75, 4),
"vect__max_features": np.arange(5000, 14001, 1000),
"vect__norm": ["l1", "l2", None],
}
gsTfIdf = grid_search(X.data, param_grid)
# +
# Replicate best model as chosen by GridSearchCV
tfidf = TfidfVectorizer(
max_df=0.25,
max_features=14000,
norm="l1",
stop_words="english",
ngram_range=(1, 1),
lowercase=True,
strip_accents="unicode",
use_idf=False,
)
X_trainVec = tfidf.fit_transform(X.data)
# -
# Elbow method: fit KMeans for k = 1..6 and record inertia ("distortion")
distortions = []
for i in range(1, 7):
    km = KMeans(
        n_clusters=i, init="k-means++", n_init=3, max_iter=100, random_state=0
    )
    km.fit(X_trainVec)
    distortions.append(km.inertia_)
distortions
# Plot distortion vs. cluster count; the "elbow" suggests a good k
plt.figure(figsize=(12, 12))
plt.plot(np.arange(1, len(distortions) + 1), distortions)
plt.xlabel("# of clusters")
plt.ylabel("Distortion")
# +
# Fit the best pipeline found by GridSearchCV and get cluster assignments.
# Fix: `grid_search` names the function defined above (functions have no
# best_estimator_); the fitted search object is `gsTfIdf`.
kmlabels = gsTfIdf.best_estimator_.fit_predict(X.data)
# -
# Compare cluster assignments against the ground-truth labels.
# NOTE(review): km.labels_ comes from the last elbow-loop model (k=6),
# not from kmlabels computed above -- confirm which labels were intended.
print("Homogeneity: %0.3f" % homogeneity_score(X.target, km.labels_))
print("Completeness: %0.3f" % completeness_score(X.target, km.labels_))
# Fix: `metrics` was never imported; import the score function directly.
from sklearn.metrics import v_measure_score
print("V-measure: %0.3f" % v_measure_score(X.target, km.labels_))
| projects/20NewsGroups/kernel.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: wPCC_pipeline
# language: python
# name: python3
# ---
# ## Inverse dynamics
# + tags=["remove_cell"]
# # %load imports.py
# %load_ext autoreload
# %autoreload 2
# %config Completer.use_jedi = False ## (To fix autocomplete)
import pandas as pd
from src.models.vmm import ModelSimulator
import matplotlib.pyplot as plt
import matplotlib
plt.style.use('presentation')
matplotlib.rcParams["xtick.labelsize"] = 16
from src.visualization.plot import track_plots, plot, captive_plot
import kedro
import numpy as np
import os.path
import anyconfig
from myst_nb import glue
from src.symbols import *
import src.symbols as symbols
from src.system_equations import *
from IPython.display import display, Math, Latex, Markdown
from sympy.physics.vector.printing import vpprint, vlatex
from src.models.regression import MotionRegression
from src.parameters import df_parameters
from src.substitute_dynamic_symbols import run
from src.models.diff_eq_to_matrix import DiffEqToMatrix
p = df_parameters["symbol"]
import statsmodels.api as sm
# Read configs:
conf_path = os.path.join("../../conf/base/")
runs_globals_path = os.path.join(
conf_path,
"runs_globals.yml",
)
runs_globals = anyconfig.load(runs_globals_path)
model_test_ids = runs_globals["model_test_ids"]
join_globals_path = os.path.join(
conf_path,
"join_globals.yml",
)
joins = runs_globals["joins"]
join_runs_dict = anyconfig.load(join_globals_path)
globals_path = os.path.join(
conf_path,
"globals.yml",
)
global_variables = anyconfig.load(globals_path)
vmm_names = global_variables["vmms"]
only_joined = global_variables[
"only_joined"
] # (regress/predict with only models from joined runs)S
vmms = {}
for vmm_name in vmm_names:
vmms[vmm_name] = catalog.load(vmm_name)
# + tags=["remove_cell"]
# %reload_kedro
vmm_name = 'vmm_martins_simple'
vmm = vmms[vmm_name]
# Example time series of ship states (index = time) -- presumably produced
# by an earlier pipeline stage; verify against the data catalog.
data = pd.read_csv('example.csv', index_col=0)
added_masses = catalog.load("added_masses")
# Load the motion-regression model fitted on the joined runs for this VMM
model = catalog.load(f"{ vmm_name}.motion_regression.joined.model")
initial_parameters = catalog.load("initial_parameters")
# Reset the model to its initial (pre-regression) parameter set
model.parameters=initial_parameters
regression = MotionRegression(
    vmm=vmm,
    data=data,
    added_masses=added_masses,
    prime_system=model.prime_system,
    ship_parameters=model.ship_parameters,
    #exclude_parameters={"Xthrust": 1.0, "Ydelta": 1},
)
# + tags=["remove_input"]
eq_system
# + tags=["remove_input"]
solution = sp.solve(eq_system.doit(),X_D,Y_D,N_D, dict=True)[0]
eq_XD = sp.Eq(X_D, solution[X_D])
eq_YD = sp.Eq(Y_D, solution[Y_D])
eq_ND = sp.Eq(N_D, solution[N_D])
display(eq_XD)
display(eq_YD)
display(eq_ND)
# + tags=["remove_input"]
display(vmm.X_qs_eq)
display(vmm.Y_qs_eq)
display(vmm.N_qs_eq)
# + tags=["remove_cell"]
subs = [(value, key ) for key,value in p.items()]
subs.append((u1d,'u1d'))
subs.append((v1d,'v1d'))
subs.append((r1d,'r1d'))
eq = eq_XD.subs(subs)
lambda_X_D = sp.lambdify(list(eq.rhs.free_symbols), eq.rhs)
eq = eq_YD.subs(subs)
lambda_Y_D = sp.lambdify(list(eq.rhs.free_symbols), eq.rhs)
eq = eq_ND.subs(subs)
lambda_N_D = sp.lambdify(list(eq.rhs.free_symbols), eq.rhs)
# + tags=["remove_cell"]
df_captive = data.copy()
df_captive_prime = model.prime_system.prime(df_captive, U=data['U'])
df_captive_prime['fx'] = run(lambda_X_D,
inputs=df_captive_prime,
**model.ship_parameters_prime,
**added_masses)
df_captive_prime['fy'] = run(lambda_Y_D,
inputs=df_captive_prime,
**model.ship_parameters_prime,
**added_masses)
df_captive_prime['mz'] = run(lambda_N_D,
inputs=df_captive_prime,
**model.ship_parameters_prime,
**added_masses)
# + tags=["remove_cell"]
# Regress the sway force Y_D on the VMM's quasi-static model features
Y_D_ = sp.symbols('Y_D')
eq = vmm.Y_qs_eq.subs(Y_D,Y_D_)
diff_eq_Y = DiffEqToMatrix(eq, label=Y_D_, base_features=[u,v,r,delta,thrust])
X_Y,y_Y = diff_eq_Y.calculate_features_and_label(data=df_captive_prime, y=df_captive_prime['fy'])
model_Y = sm.OLS(y_Y, X_Y)
result_Y = model_Y.fit()
# + tags=["remove_cell"]
# Same regression for the yaw moment N_D
N_D_ = sp.symbols('N_D')
eq = vmm.N_qs_eq.subs(N_D,N_D_)
diff_eq_N = DiffEqToMatrix(eq, label=N_D_, base_features=[u,v,r,delta,thrust])
X_N,y_N = diff_eq_N.calculate_features_and_label(data=df_captive_prime, y=df_captive_prime['mz'])
model_N = sm.OLS(y_N, X_N)
result_N = model_N.fit()
# + tags=["remove_cell"]
# Surge force X_D: the thrust coefficient is held fixed (excluded from the fit)
X_D_ = sp.symbols('X_D')
eq = vmm.X_qs_eq.subs(X_D,X_D_)
diff_eq_X = DiffEqToMatrix(eq, label=X_D_, base_features=[u,v,r,delta,thrust], exclude_parameters={'Xthrust':model.parameters['Xthrust']})
X_X,y_X = diff_eq_X.calculate_features_and_label(data=df_captive_prime, y=df_captive_prime['fx'])
model_X = sm.OLS(y_X, X_X)
result_X = model_X.fit()
# + tags=["remove_cell"]
df_parameters_X = pd.DataFrame(pd.Series({key:value for key,value in model.parameters.items() if key[0]=='X' and value !=0}, name='real'))
df_parameters_X['regression'] = result_X.params
df_parameters_X.dropna(inplace=True)
df_parameters_X.index = p[df_parameters_X.index].apply(lambda x: "$%s$" % str(x).replace('delta',r'\delta'))
df_parameters_X.index.name = ''
df_parameters_Y = pd.DataFrame(pd.Series({key:value for key,value in model.parameters.items() if key[0]=='Y' and value !=0}, name='real'))
df_parameters_Y['regression'] = result_Y.params
df_parameters_Y.dropna(inplace=True)
df_parameters_Y.index = p[df_parameters_Y.index].apply(lambda x: "$%s$" % str(x).replace('delta',r'\delta').replace('thrust','T'))
df_parameters_Y.index.name = ''
df_parameters_N = pd.DataFrame(pd.Series({key:value for key,value in model.parameters.items() if key[0]=='N' and value !=0}, name='real'))
df_parameters_N['regression'] = result_N.params
df_parameters_N.dropna(inplace=True)
df_parameters_N.index = p[df_parameters_N.index].apply(lambda x: "$%s$" % str(x).replace('delta',r'\delta').replace('thrust','T'))
df_parameters_N.index.name = ''
# + tags=["remove_input"]
fig,axes=plt.subplots(ncols=3)
ax=axes[0]
df_parameters_X.plot.bar(ax=ax)
ax=axes[1]
df_parameters_Y.plot.bar(ax=ax)
ax.get_legend().set_visible(False)
ax=axes[2]
df_parameters_N.plot.bar(ax=ax)
plt.tight_layout()
ax.get_legend().set_visible(False)
# -
| _build/jupyter_execute/03.01_inverse_dynamics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# imports libraries
import pickle # import/export lists
import re # regular expression
import math # mathematical functions
import pandas as pd # dataframes
# opens raw data
# NOTE(review): unpickling is only safe for trusted files -- confirm
# 'data_profile' is produced by our own scraper.
with open ('data_profile', 'rb') as fp:
    data_profile = pickle.load(fp)
# converts to dataframe
df = pd.DataFrame(data_profile)
df.columns = ['id', 'desc', 'country', 'join_date', 'tabs']
# splits tabs into individual columns (dicts become one column per key);
# missing counts are filled with '0' before numeric conversion
tabs = df['tabs'].apply(pd.Series).fillna('0')
tabs = tabs.apply(pd.to_numeric)
# drop the first character of each tab key -- presumably a prefix marker;
# verify against the scraper's key format
tabs.columns = [name[1:] for name in tabs.columns]
df = df.join(tabs)
del df['tabs']
# parses description column into a coarse status label; 'author' wins
# over 'reader' because it is assigned last
df['status'] = 'inactive'
df.loc[['reader' in row for row in df['desc']], 'status'] = 'reader'
df.loc[['author' in row for row in df['desc']], 'status'] = 'author'
del df['desc']
# +
# parses date column: split "m/d/y"-style strings on '-' or '/'
df['join'] = [re.split(r'[-/]+', row) for row in df['join_date']]
valid = [row[0] != 'NA' for row in df['join']]
df['join_month'] = 'NA'
df.loc[valid, 'join_month'] = [row[0] for row in df['join'][valid]]
df['join_year'] = 'NA'
df.loc[valid, 'join_year'] = [row[2] for row in df['join'][valid]]
# normalize 4-digit years to their last two digits
old_time = [len(row) == 4 for row in df.join_year]
df.loc[old_time, 'join_year'] = [row[2:] for row in df.loc[old_time, 'join_year']]
del df['join']
# -
# saves dataframe
df.to_pickle("df_profile")
| .ipynb_checkpoints/profile_clean-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/rodrigowe1988/Data-Science-na-Pratica/blob/main/Valores_Ausentes.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="vvplGyW2N3rX"
# # Valores Ausentes
#
# Se você está lidando com bases de dados do mundo real, pode ter certeza que estará lidando com dados incompletos ou valores ausentes.
#
# Muitas vezes esses dados são inseridos por pessoas, manualmente. Há casos em que você já tinha séries históricas de anos, e em determinado momento alguém resolveu adicionar uma nova coluna.
#
# Existem inúmeras técnicas para lidar com esse tipo de problema, e nesta aula vamos falar de duas das principais abordagens.
#
# Antes, vamos importar os dados do Projeto +BIKE.
#
# + id="pGmRLamzA6pv" colab={"base_uri": "https://localhost:8080/", "height": 360} outputId="946a7067-b650-4905-99bb-ada7237f6848"
# URL of the +BIKE rides dataset (CSV hosted on Dropbox)
DATA_PATH = "http://dl.dropboxusercontent.com/s/yyfeoxqw61o3iel/df_rides.csv"
# import the required packages
import pandas as pd
# import the dataset
df = pd.read_csv(DATA_PATH)
# show the first entries
df.head()
# + [markdown] id="vvrOjE6UBJMl"
# ## Identificando os valores ausentes
#
# Já vimos anteriormente como identificar valores do tipo `NaN`. É indispensável identificar a quantidade de valores ausentes e qual a representatividade dessa quantidade frente ao total de entradas.
# + id="VRcP5l-7Bgao" colab={"base_uri": "https://localhost:8080/", "height": 215} outputId="46dabba7-545d-4325-d4ba-8aa20a809247"
# ver a quantidade de valores ausentes
df.isnull().sum()
# + id="nQvK0IuSBjHR" colab={"base_uri": "https://localhost:8080/", "height": 215} outputId="ac387c2e-e2a4-4013-bdef-b760eba6d8c0"
# ver a porcentagem de valores ausentes
df.isnull().sum() / df.shape[0]
# + [markdown] id="iy6GSiD7BsOr"
# ## Excluir valores ausentes
#
# Está é uma primeira opção quando você está lidando com valores nulos do tipo `NaN` no seu *DataFrame*.
#
# Entretanto, ela tende a não ser ideal, pois por causa do valor de uma única célula, você elimina os dados existentes em outras colunas. Esta opção deve ser considerada no caso da quantidade de dados nulos serem pequenos a ponto de não ter representatividade no dataset
#
# Eu consigo excluir tanto linhas com valores ausentes quanto colunas inteiras. Para que o Pandas saiba se a sua inteção é de eliminar linhas (`axis=0`) ou colunas (`axis=1`), você deve informar dentro do método `dropna()`.
# + id="i6RlbEz2DNj3" colab={"base_uri": "https://localhost:8080/", "height": 53} outputId="b525ea21-a9df-4b3d-cf0e-8d8d7c452133"
# drop every row where `user_gender` is missing
df_clean = df.dropna(subset=['user_gender'], axis=0)
# compare shapes before and after the drop
print("Antes:\t{}".format(df.shape))
print("Depois:\t{}".format(df_clean.shape))
# + [markdown] id="dAh5qCOaENmL"
# ## Preencher valores
#
# Esta normalmente é a melhor opção, pois permite que você mantenha dados existentes em outras células.
#
# Uma pergunta que normalmente surge é: "mas eu substituo o valor ausente por qual valor?". A resposta para essa pergunta é: depende.
#
# Existem técnicas simples como usar valor mais frequente, media e mediana, assim como há técnicas mais avançadas que envolvem até mesmo o uso de modelos de *machine learning* cuja função é dizer qual valor usar nesses campos.
#
# Vou mostrar como você pode usar a mediana para preencher os campos ausentes da coluna `ride_duration`, com o uso da função `fillna()`
# + id="1LiewAIO7CJO" colab={"base_uri": "https://localhost:8080/", "height": 215} outputId="242c1cc3-e976-4506-e6a8-350b7be6229e"
# before: count of missing values per column
df_clean.isnull().sum()
# + id="_MuQkDcPFM9R" colab={"base_uri": "https://localhost:8080/", "height": 215} outputId="6134c7f0-46ee-41be-a728-c659aebf79a8"
# fill missing values in `ride_duration` with the column median
rd_median = df_clean.ride_duration.median()
df_clean = df_clean.fillna({"ride_duration": rd_median})
# check the remaining missing values
df_clean.isnull().sum()
# + [markdown] id="TPAgjNPiFt5W"
# No caso da coluna `user_gender`, temos uma variável categórica. O ideal aqui é usar o valor mais frequente para o preenchimento dos valores. Valor ver qual aquele que tem maior recorrência e substituir diretamente na coluna.
# + id="J_1tGxn07S1S" colab={"base_uri": "https://localhost:8080/", "height": 215} outputId="43ebe3cd-45de-400b-9b72-0c8a1f15fb12"
# start over from a fresh copy of the raw frame
df_clean = df.copy()
# count missing values per column
df_clean.isnull().sum()
# + id="amgKJvvlF9ea" colab={"base_uri": "https://localhost:8080/", "height": 71} outputId="85484db1-4c32-481f-9fee-b231cb37edb7"
# inspect the most frequent value of the categorical column
df_clean.user_gender.value_counts()
# + id="eFTYwbyrGPFA" colab={"base_uri": "https://localhost:8080/", "height": 215} outputId="9d1dac34-2a7e-474d-f6aa-711a605fa259"
# fill missing `user_gender` values with the mode, 'M'
df_clean = df_clean.fillna({"user_gender": 'M'})
# check the remaining missing values
df_clean.isnull().sum()
| Valores_Ausentes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Original CapsNet Model Train
#
# In this notebook we provide a simple interface to train the original CapsNet model described in "Dynamic routinig between capsules". The model is copycat of the original Sara's repository (https://github.com/Sarasra/models/tree/master/research/capsules). <br>
# However, if you really reach 99.75, you've got to buy me a drink :)
# %load_ext autoreload
# %autoreload 2
import tensorflow as tf
from utils import Dataset, plotImages, plotWrongImages
from models import CapsNet
# Restrict TensorFlow to the first GPU and enable on-demand memory growth
# (avoids grabbing all GPU memory up front). NOTE(review): gpus[0] raises
# IndexError on a machine with no visible GPU -- confirm intended.
gpus = tf.config.experimental.list_physical_devices('GPU')
tf.config.experimental.set_visible_devices(gpus[0], 'GPU')
tf.config.experimental.set_memory_growth(gpus[0], True)
# some parameters
model_name = 'MNIST' # only MNIST is available
n_routing = 3  # number of dynamic-routing iterations (3, as in the paper)
# # 1.0 Import the Dataset
dataset = Dataset(model_name, config_path='config.json') # only MNIST
# ## 1.1 Visualize imported dataset
n_images = 20 # number of images to be plotted
plotImages(dataset.X_test[:n_images,...,0], dataset.y_test[:n_images], n_images, dataset.class_names)
# # 2.0 Load the Model
model_train = CapsNet(model_name, mode='train', verbose=True, n_routing=n_routing)
# # 3.0 Train the Model
history = model_train.train(dataset, initial_epoch=0)
| original_capsnet_train.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#BrushFrie
import collections
import numpy as np
from ipynb.fs.full.hw2_script import *
def BrushFire(grid):
    """Compute a brushfire distance map over `grid`.

    Cells with negative values are obstacles and are left untouched.
    Every free cell receives its 8-connected (Chebyshev) distance to the
    nearest obstacle, with the map border treated as distance 1.
    Returns a new array; `grid` is not modified.
    """
    dist_map = np.copy(grid)
    rows, cols = len(dist_map), len(dist_map[0])
    frontier = collections.deque()  # entries: [row, col, distance]

    # Mark all free border cells with distance 1 (the border acts as a wall).
    for r in range(rows):
        for c in range(cols):
            if dist_map[r][c] < 0:
                continue
            if r in (0, rows - 1) or c in (0, cols - 1):
                dist_map[r][c] = 1

    # Seed the BFS frontier: obstacles at distance 0, border cells at 1.
    for r in range(rows):
        for c in range(cols):
            if dist_map[r][c] < 0:
                frontier.append([r, c, 0])
            if dist_map[r][c] == 1:
                frontier.append([r, c, 1])

    # 8-connected breadth-first wavefront expansion.
    neighbor_offsets = [(1, 0), (0, 1), (-1, 0), (0, -1),
                        (1, 1), (-1, 1), (-1, -1), (1, -1)]
    while frontier:
        r, c, dist = frontier.popleft()
        for dr, dc in neighbor_offsets:
            nr, nc = r + dr, c + dc
            if not (0 <= nr < rows and 0 <= nc < cols):
                continue
            if dist_map[nr][nc] < 0:
                continue  # never overwrite obstacles
            if dist_map[nr][nc] != 0 and dist_map[nr][nc] < dist + 1:
                continue  # already reached by a shorter wave
            if dist_map[nr][nc] == 0 or dist_map[nr][nc] > dist + 1:
                dist_map[nr][nc] = dist + 1
                frontier.append([nr, nc, dist + 1])
    return dist_map
# Build and visualize the brushfire map for each of the four test worlds.
Map,x , y = generate_world_1()
plot_GVD(Map)
Map1BF = BrushFire(Map)
plot_GVD(Map1BF)
Map2,x , y = generate_world_2()
plot_GVD(Map2)
Map2BF = BrushFire(Map2)
plot_GVD(Map2BF)
Map3, x , y = generate_world_3()
plot_GVD(Map3)
Map3BF = BrushFire(Map3)
plot_GVD(Map3BF)
Map4, x , y = generate_world_4()
plot_GVD(Map4)
Map4BF = BrushFire(Map4)
plot_GVD(Map4BF)
# +
### GVD
def GVDMAP(grid):
    """Extract Generalized-Voronoi-Diagram cells via a labelled brushfire.

    Runs a multi-source BFS where each wavefront carries the ID of the
    obstacle it started from; a free cell reached at (nearly) the same
    distance by two different fronts is equidistant from two obstacles
    and is recorded as a GVD cell.
    Returns (BrushFire distance map, set of (row, col) GVD cells).
    """
    BF = BrushFire(grid)
    temp = np.copy(grid)
    q = collections.deque()
    m, n = len(temp), len(temp[0])
    ### [i,j,c] = [x, y, distance]
    # Free border cells get distance 1, exactly as in BrushFire.
    for i in range(m):
        for j in range(n):
            if temp[i][j] < 0:
                continue
            if i == 0 or j == 0 or i == m-1 or j == n-1:
                temp[i][j] = 1
    # Seed the queue; the 4th entry is an obstacle ID.  Interior obstacles
    # use their own negative cell value; the four walls use -4..-7 so fronts
    # from different obstacles can be told apart.
    # NOTE(review): the two corner branches reuse wall labels (-4 for (0,0),
    # -5 for (m-1,n-1)) and are therefore redundant with the later elifs —
    # presumably intentional grouping of adjacent walls; confirm.
    for i in range(m):
        for j in range(n):
            if temp[i][j] < 0:
                q.append([i,j,0, temp[i][j]])
                continue
            if temp[i][j] == 1:
                if i == 0 and j == 0:
                    q.append([i,j,1, -4])
                elif i == m-1 and j == n-1:
                    q.append([i,j,1, -5])
                elif i == 0:
                    q.append([i,j,1, -4])
                elif j == 0:
                    q.append([i,j,1, -7])
                elif j == n-1:
                    q.append([i,j,1, -5])
                elif i == m-1:
                    q.append([i,j,1, -6])
    ### BFS
    GVD_LIST = set()
    direciton = [[1,0],[0,1],[-1,0],[0,-1],[1,1],[-1,1],[-1,-1],[1,-1]]
    # pointer[(i,j)] = set of obstacle IDs whose fronts have reached (i,j).
    pointer = collections.defaultdict(set)
    while q:
        x, y, dist, ID = q.popleft()
        for dx, dy in direciton:
            i = x + dx
            j = y + dy
            if i < 0 or i >= m or j < 0 or j >= n:
                continue
            if temp[i][j] < 0:
                continue
            # if temp[i][j] != 0 and temp[i][j] < dist + 1:
            #     continue
            if temp[i][j] == 0 or temp[i][j] > dist + 1:
                # A strictly closer front claims the cell, so it can no
                # longer be equidistant — drop any earlier GVD marking.
                if (i,j) in GVD_LIST:
                    GVD_LIST.remove((i,j))
                temp[i][j] = dist + 1
                q.append([i,j,dist + 1, ID])
                pointer[(i,j)].add(ID)
            # Reached at the same distance (within 1) by a different
            # obstacle's front -> mark as a GVD cell.
            if temp[i][j] != 0 and (temp[i][j] == dist + 1 or abs(temp[i][j] - dist - 1) == 1) and len(pointer[(i,j)]) > 0 and ID not in pointer[(i,j)]:
                GVD_LIST.add((i,j))
                pointer[(i,j)].add(ID)
    return BF, GVD_LIST
# +
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Patch
def plotGVD(grid):
    """Compute the GVD of `grid`, draw it over the brushfire map, and
    return the GVD cells as a list of (row, col) tuples."""
    brushfire_map, gvd_cells = GVDMAP(grid)
    plot_GVD(brushfire_map)
    cell_array = np.array(list(gvd_cells))
    xs, ys = zip(*cell_array)
    # Paint GVD cells with a sentinel value so they stand out in the image.
    overlay = np.copy(brushfire_map)
    overlay[xs, ys] = 20
    fig, ax = plt.subplots()
    ax.imshow(overlay, cmap="RdBu", alpha=0.6)
    # Mask everything that is not an obstacle, then draw obstacles on top.
    blocked = overlay.copy()
    blocked[blocked < 0] = -2.0
    masked = np.ma.masked_where(blocked > 0, blocked)
    handles = [
        Patch(facecolor='blue', label='Obstacle'),
        Patch(facecolor='#83b1d3', label='GVD'),
    ]
    ax.imshow(masked, cmap="bwr")
    ax.legend(handles=handles)
    plt.show()
    return list(gvd_cells)
# -
# Draw the GVD overlay for worlds 1-3; PATH is overwritten each time and only
# keeps the last world's GVD cell list.
Map,x , y = generate_world_1()
PATH = plotGVD(Map)
Map2,x , y = generate_world_2()
PATH = plotGVD(Map2)
Map3, x , y = generate_world_3()
PATH = plotGVD(Map3)
def plotPATH(grid, start, end):
    """Plot the GVD of `grid` plus a start-to-end route along it.

    The route is built from three pieces: start -> nearest GVD cell,
    travel along the GVD, then nearest GVD cell -> end (helpers
    path_to_GVD / GVD_path come from hw2_script).
    Returns the full route as a list of cells (previously discarded),
    matching plotGVD which also returns its cell list.
    """
    MapBF, GVD_SET = GVDMAP(grid)
    plot_GVD(MapBF)
    GVD_LIST = np.array(list(GVD_SET))
    GVD_x, GVD_y = zip(*GVD_LIST)
    GVD_grid = np.copy(MapBF)
    GVD_grid[GVD_x,GVD_y] = 20
    # (Removed unused locals A/B — tuple(start)/tuple(end) were never read.)
    # Climb from both endpoints onto the GVD, then connect along it.
    AtoGVD = path_to_GVD(MapBF, list(GVD_SET), start)
    BtoGVD = path_to_GVD(MapBF, list(GVD_SET), end)
    s = AtoGVD[-1]
    BtoGVD.reverse()
    e = BtoGVD[0]
    Route = GVD_path(MapBF, list(GVD_SET), s, e)
    Route = AtoGVD + Route + BtoGVD
    fig, ax = plt.subplots()
    img1 = ax.imshow(GVD_grid, cmap="RdBu", alpha=0.6)
    obstacles = GVD_grid.copy()
    obstacles[obstacles < 0] = -2.0
    masked_data = np.ma.masked_where(obstacles > 0, obstacles)
    legend_elements = [Patch(facecolor='blue', label='Obstacle')]
    legend_elements.append(Patch(facecolor='#83b1d3', label='GVD'))
    img2 = ax.imshow(masked_data, cmap="bwr")
    # Paint the route with its own sentinel value and draw it on top.
    path_x, path_y = zip(*Route)
    GVD_grid[path_x,path_y] = 40.0
    grid_path = GVD_grid.copy()
    grid_path = np.ma.masked_where(grid_path != 40.0, grid_path)
    img3 = ax.imshow(grid_path, cmap="cool_r", interpolation="nearest")
    legend_elements.append(Patch(facecolor='#e741f6', label='path'))
    ax.legend(handles=legend_elements)
    plt.show()
    return Route
# Plan and draw the same start->end route in each world.
# NOTE(review): the fixed cells assume every grid is at least 96x76 — confirm
# against the generate_world_* sizes.
start = [20,3]
end = [95,75]
Map4, x , y = generate_world_4()
plotPATH(Map4, start, end)
Map3, x , y = generate_world_3()
plotPATH(Map3, start, end)
Map2, x , y = generate_world_2()
plotPATH(Map2, start, end)
Map, x , y = generate_world_1()
plotPATH(Map, start, end)
| CAofR/HW2/Hw2_Q5_GVD.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Setup
# +
import sklearn
assert sklearn.__version__ >= "0.23.1"
import numpy as np
assert np.__version__ >= "1.19.1"
# To plot pretty figures
# %matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rc('axes', labelsize=14)
mpl.rc('xtick', labelsize=12)
mpl.rc('ytick', labelsize=12)
import seaborn as sns
assert sns.__version__ >= "0.10.1"
# -
# # Load data
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import FunctionTransformer, PowerTransformer, OneHotEncoder, OrdinalEncoder
from sklearn.model_selection import train_test_split
from sklearn import set_config
set_config(display='diagram')
# +
import pandas as pd
assert pd.__version__ >= "1.1.0"
# Model will be trained using this data.
def load_training_data():
    """Read the Titanic training split (includes the Survived label)."""
    return pd.read_csv('../data/titanic/train.csv')
# This data will only be used when submitting the final analysis.
# It also doesn't contain the Survived column.
def load_testing_data():
    """Read the Titanic submission split (has no Survived column)."""
    return pd.read_csv('../data/titanic/test.csv')
# -
# Hold out 20% of the labelled data for evaluation (fixed seed for reproducibility).
train_set, test_set = train_test_split(load_training_data(), test_size=0.2, random_state=1)
train_set.head()
# Separate features from the Survived label in both splits.
X_train = train_set.drop('Survived', axis=1)
y_train = train_set.Survived
X_test = test_set.drop('Survived', axis=1)
y_test = test_set.Survived
# The output of this doesn't have any missing values
# (Age and Fare are median-imputed; Pclass and Sex pass through untouched).
preprocessor1 = ColumnTransformer(
    transformers=[
        ('pclass', 'passthrough', ['Pclass']),
        ('sex', 'passthrough', ['Sex']),
        ('age', SimpleImputer(strategy='median'), ['Age']),
        ('fare', SimpleImputer(strategy='median'), ['Fare']),
    ])
preprocessor1
# NOTE(review): every transformer below is 'passthrough', so this stage is a
# no-op placeholder (the original comment claimed it "calculates new variables").
preprocessor2 = ColumnTransformer(
    transformers=[
        ('pclass', 'passthrough', [0]),
        ('sex', 'passthrough', [1]),
        ('age', 'passthrough', [2]),
        ('fare', 'passthrough', [3]),
    ])
preprocessor2
# Encode the Sex column as integers.  NOTE(review): despite the original
# "OneHotEncode" comment this uses OrdinalEncoder, not OneHotEncoder.
preprocessor3 = ColumnTransformer(
    transformers=[
        ('pclass', 'passthrough', [0]),
        ('sex', OrdinalEncoder(), [1]),
        ('age', 'passthrough', [2]),
        ('fare', 'passthrough', [3]),
    ])
preprocessor3
# Just testing
pd.DataFrame(Pipeline(steps=[
    ('pp1', preprocessor1),
]).fit_transform(X_train)).sample(10)
# Just testing
pd.DataFrame(Pipeline(steps=[
    ('pp1', preprocessor1),
    ('pp2', preprocessor2),
]).fit_transform(X_train)).sample(10)
# Just testing
pd.DataFrame(Pipeline(steps=[
    ('pp1', preprocessor1),
    ('pp2', preprocessor2),
    ('pp3', preprocessor3),
]).fit_transform(X_train))
# Full preprocessing pipeline used for modelling below.
pp = Pipeline(steps=[
    ('pp1', preprocessor1),
    ('pp2', preprocessor2),
    ('pp3', preprocessor3),
])
pp
# y_train contains the labels
X_train_pp = pp.fit_transform(X_train)
# y_test contains the labels
X_test_pp = pp.transform(X_test)
from sklearn.model_selection import cross_val_score, cross_val_predict
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix, precision_score, recall_score, f1_score, precision_recall_curve, roc_curve
# Baseline linear classifier scored with cross-validation.
sgd_clf = SGDClassifier()
cross_val_score(sgd_clf, X_train_pp, y_train, cv=2, scoring="accuracy")
# Out-of-fold predictions feed the confusion matrix and P/R/F1 metrics.
y_train_pred = cross_val_predict(sgd_clf, X_train_pp, y_train, cv=3)
confusion_matrix(y_train, y_train_pred)
precision_score(y_train, y_train_pred)
recall_score(y_train, y_train_pred)
f1_score(y_train, y_train_pred)
def plot_precision_recall_vs_threshold(precisions, recalls, thresholds):
    """Overlay precision and recall as functions of the decision threshold.

    The last precision/recall entry has no matching threshold, hence [:-1].
    """
    for series, fmt, name in ((precisions[:-1], "b--", "Precision"),
                              (recalls[:-1], "g-", "Recall")):
        plt.plot(thresholds, series, fmt, label=name, linewidth=2)
    plt.legend(loc="center right", fontsize=16)
    plt.xlabel("Threshold", fontsize=16)
    plt.grid(True)
    plt.axis([-50000, 50000, 0, 1])
# Out-of-fold decision scores give precision/recall over every threshold.
y_scores = cross_val_predict(sgd_clf, X_train_pp, y_train, cv=3, method="decision_function")
precisions, recalls, thresholds = precision_recall_curve(y_train, y_scores)
plot_precision_recall_vs_threshold(precisions, recalls, thresholds)
plt.show()
def plot_precision_vs_recall(precisions, recalls):
    """Trace the precision-recall curve on the unit square."""
    plt.plot(recalls, precisions, "b-", linewidth=2)
    plt.grid(True)
    plt.xlabel("Recall", fontsize=16)
    plt.ylabel("Precision", fontsize=16)
    plt.axis([0, 1, 0, 1])
# Precision-vs-recall view of the same cross-validated scores.
plot_precision_vs_recall(precisions, recalls)
plt.show()
def plot_roc_curve(fpr, tpr, label=None):
    """Plot a ROC curve together with the chance diagonal."""
    plt.plot(fpr, tpr, linewidth=2, label=label)
    plt.plot([0, 1], [0, 1], 'k--')  # chance-level diagonal
    plt.grid(True)
    plt.axis([0, 1, 0, 1])
    plt.xlabel('False Positive Rate (Fall-Out)', fontsize=16)
    plt.ylabel('True Positive Rate (Recall)', fontsize=16)
# ROC for the SGD baseline, then for a random forest (which has no
# decision_function, so the positive-class probability is used as score).
fpr, tpr, thresholds = roc_curve(y_train, y_scores)
plot_roc_curve(fpr, tpr)
forest_clf = RandomForestClassifier()
y_probas_forest = cross_val_predict(forest_clf, X_train_pp, y_train, cv=5, method="predict_proba")
y_scores_forest = y_probas_forest[:, 1]
fpr_forest, tpr_forest, thresholds_forest = roc_curve(y_train, y_scores_forest)
plot_roc_curve(fpr_forest, tpr_forest, "Random Forest")
# Final model: larger forest fit on the full preprocessed training split.
forest_clf2 = RandomForestClassifier(n_estimators=500)
forest_clf2.fit(X_train_pp, y_train)
forest_clf2.feature_importances_
# # Output final test set predictions for uploading to Kaggle
# Build the Kaggle submission file: PassengerId plus predicted Survived flag.
# Bug fix: the original called `clf.predict(final_test_set)`, but no `clf`
# is defined anywhere in this notebook (NameError), and the raw frame was
# never preprocessed.  Use the trained forest on pp-transformed features.
final_test_set = load_testing_data()
res = pd.concat([
    pd.DataFrame(final_test_set.PassengerId, columns=['PassengerId']),
    pd.DataFrame(forest_clf2.predict(pp.transform(final_test_set)), columns=['Survived'])
], axis=1).astype({'Survived':'int'})
res
res.to_csv('titanic_out.csv', index=False)
| p-titanic/new_titanic_4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# https://docs.python.org/3/library/webbrowser.html
import tweepy
import webbrowser
import time
import pandas as pd
# +
# https://docs.tweepy.org/en/stable/index.html
# OAuth 1.0a PIN-based ("out-of-band") flow: open the authorization URL,
# have the user paste back the PIN, then exchange it for an access token.
# NOTE(review): this code targets tweepy v3 — in tweepy v4 `TweepError`,
# `api.me()` and `api.search()` were renamed/removed; confirm the pinned
# tweepy version before upgrading.
consumer_key = ""
consumer_secret = ""
callback_uri = 'oob'
try:
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret, callback_uri)
    redirect_url = auth.get_authorization_url()
    print(redirect_url)
except tweepy.TweepError:
    print('Error! Failed to get request token.')
# A NEW PAGE OPENS REQUESTING AUTHORIZATION FROM TWITTER TO GENERATE A CODE
webbrowser.open(redirect_url)
user_pin_input = input("What's the pin value? \n")
print(user_pin_input)
try:
    auth.get_access_token(user_pin_input)
except tweepy.TweepError:
    print('Error! Failed to get access token.')
print(auth.access_token, auth.access_token_secret)
# -
# https://docs.tweepy.org/en/stable/api.html#tweepy-api-twitter-api-wrapper
# wait_on_rate_limit=True
api = tweepy.API(auth, wait_on_rate_limit=True)
me = api.me()
print(me.screen_name)
len(api.home_timeline())
# https://docs.tweepy.org/en/stable/cursor_tutorial.html
# GET MORE ITEMS / PAGINATION
for count, status in enumerate(tweepy.Cursor(api.home_timeline, count=50).items(50)):
    print(count, status.text)
# +
# LIMITS
# https://developer.twitter.com/en/docs/rate-limits
# https://developer.twitter.com/en/docs/twitter-api/v1/rate-limits
# -
# OTHER USER
other_user = "Pajaropolitico"
for count, status in enumerate(tweepy.Cursor(api.user_timeline, screen_name=other_user, count=20).items(20)):
    print(count, status.text)
# friends_ids Returns an array containing the IDs of users being followed by the specified user.
the_pajaropolitico_friends = []
for count, _id in enumerate(tweepy.Cursor(api.friends_ids, screen_name=other_user).items(30)):
    print(count, _id)
    the_pajaropolitico_friends.append(_id)
api.get_user(the_pajaropolitico_friends[0]).screen_name
# Search Methods
# https://docs.tweepy.org/en/stable/api.html#search-methods
query = "#django #Docker #postgresql -jQuery -@udemy_es -Bootstrap -Discount -Angular -flask"
for count, status in enumerate(tweepy.Cursor(api.search, q=query).items(50)):
    print(count, status.text, status.author.screen_name)
# +
# SEARCH BY USER NAME
query_username = "Django"
search_results = set()
for count, user in enumerate(tweepy.Cursor(api.search_users, q=query_username).items(50)):
    print(count, user.screen_name)
    search_results.add(user.screen_name)
print(list(search_results))
print(len(list(search_results)))
# +
# PAGINATION
# USER SEARCH
query_username_two = "adultswim"
def process_page(page_results):
    # Print the index and screen name of every user on one page of results.
    for idx, profile in enumerate(page_results):
        print(idx, profile.screen_name)
# Fetch three pages of ten user results each and print every page's users.
for count, page in enumerate(tweepy.Cursor(api.search_users, q=query_username_two, per_page=10).pages(3)):
    print(count, "page")
    process_page(page)
# -
| n_day_python/SandBoxPyTwitter/6 - items & Paginatio with Tweepy Cursor.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # <font color='blue'>Data Science Academy - Python Fundamentos - Capítulo 5</font>
#
# ## Download: http://github.com/dsacademybr
# Versão da Linguagem Python
# Report which interpreter version executed this notebook.
from platform import python_version
print('Versão da Linguagem Python Usada Neste Jupyter Notebook:', python_version())
# ## Exercícios
# +
# Exercício 1 - Crie um objeto a partir da classe abaixo, chamado roc1, passando 2 parâmetros e depois faça uma chamada
# aos atributos e métodos
from math import sqrt
class Rocket():
    """A point rocket on a 2-D plane (exercise 1)."""
    def __init__(self, x=0, y=0):
        # Starting position; defaults to the origin.
        self.x = x
        self.y = y
    def move_rocket(self, x_increment=0, y_increment=1):
        # Shift the rocket; with no arguments it climbs one unit on y.
        self.x = self.x + x_increment
        self.y = self.y + y_increment
    def print_rocket(self):
        # Show the current coordinates.
        print(self.x, self.y)
# -
# Exercise 1 driver: start at (12, 19), apply the default move (y += 1), print.
roc1 = Rocket(12, 19)
roc1.move_rocket()
roc1.print_rocket()
# +
# Exercício 2 - Crie uma classe chamada Pessoa() com os atributos: nome, cidade, telefone e e-mail. Use pelo menos 2
# métodos especiais na sua classe. Crie um objeto da sua classe e faça uma chamada a pelo menos um dos seus métodos
# especiais.
class Pessoa:
    """A person record: name, city, phone and e-mail (exercise 2).

    Implements two special methods as the exercise asks: __str__ for a
    readable summary and __len__ for the phone-number length.
    """
    def __init__(self, nome, cidade, telefone, email):
        self.nome = nome
        self.cidade = cidade
        self.telefone = telefone
        self.email = email
    def __str__(self):
        return "Nome: %s, Cidade: %s, Telefone: %s, e-mail; %s" \
        %(self.nome, self.cidade, self.telefone, self.email)
    def __len__(self):
        # Bug fix: __len__ must return an int, but `telefone` comes from
        # input() in this notebook (a str), so `return self.telefone`
        # raised TypeError at the len(pes1) call.  Return the number of
        # characters in the phone number instead.
        return len(self.telefone)
# +
# Prompt the user for each attribute and build a Pessoa from the answers.
nome = input('Nome: ')
cidade = input('Cidade: ')
tel = input('Telefone: ')
mail = input('e-mail: ')
pes1 = Pessoa(nome, cidade, tel, mail)
# -
print(pes1)
# NOTE(review): len() requires __len__ to return an int; with `tel` read
# from input() (a str) this call can raise TypeError — verify Pessoa.__len__.
len(pes1)
# Exercício 3 - Crie a classe Smartphone com 2 atributos, tamanho e interface e crie a classe MP3Player com os
# atributos capacidade. A classe MP3player deve herdar os atributos da classe Smartphone.
class Smartphone:
    """A phone with a screen size and an interface (exercise 3)."""
    def __init__(self, tamanho, interface):
        # Bug fix: the original constructor ignored its parameters and
        # re-prompted via input(); store the arguments instead.
        self.tamanho = tamanho
        self.interface = interface
    def Ident(self):
        # Print the carrier name.
        print("VIVO")
class Mp3player(Smartphone):
    """An MP3 player: inherits tamanho/interface and adds capacidade
    (the attribute the exercise statement asks for)."""
    def __init__(self, tamanho='', interface='', capacidade=None):
        # Bug fix: the original called Smartphone.__init__ without `self`
        # and read self.tamanho/self.interface before they existed
        # (AttributeError).  Defaults keep the original no-argument
        # Mp3player() call working.
        Smartphone.__init__(self, tamanho, interface)
        self.capacidade = capacidade
        print('Bem vindo à Vivo.')
# ### FIM
# ### Obrigado - Data Science Academy - <a href="http://facebook.com/dsacademybr">facebook.com/dsacademybr</a>
| Python/dsa/fad/pyfund/Cap05/Notebooks/DSA-Python-Cap05-Exercicios.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.10 64-bit (''ia'': conda)'
# language: python
# name: python371064bitiaconda87e87804aa764a52908ab4f521814c1a
# ---
import numpy as np
import cv2 as cv
# Sample a video every 200 ms, shrink each frame to 200x200 and save it as a
# numbered PNG for the training dataset.
cap = cv.VideoCapture('/home/jhonat/Documents/tcc/dataset/train/furtos_train.mp4')
# +
count = 0
while cap.isOpened():
    cap.set(cv.CAP_PROP_POS_MSEC,(count*200)) # seek to count*200 ms, i.e. one frame every 0.2 s
    ret, frame = cap.read()
    # if frame is read correctly ret is True
    if not ret:
        print("Can't receive frame (stream end?). Exiting ...")
        break
    # cv.imshow("Display window", frame)
    # height, width = frame.shape[:2]
    # print('height(antes), width(antes): ',height, width)
    # Downscale to a fixed 200x200 using bilinear interpolation.
    frame_reduze = cv.resize(frame,(200, 200), interpolation = cv.INTER_LINEAR)
    # height, width = frame_reduze.shape[:2]
    # print('height(depois), width(depois): ',height, width)
    # cv.imshow("Display reduce", frame_reduze)
    nomeDoFrame = '/home/jhonat/Documents/tcc/dataset/train/theft-%d.png' % count
    cv.imwrite(nomeDoFrame, frame_reduze)
    count += 1
cap.release()
print('finished')
# -
| machineLearning/deepLearning/dataset_theft.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
def deriv(x, y):
    """Right-hand side of the ODE y' = x**2 + y**2."""
    return x**2 + y**2
def rk4(x0, y0, target_x, h):
    """Integrate y' = deriv(x, y) from (x0, y0) up to target_x with the
    classic fourth-order Runge-Kutta method, printing each step.

    Returns the final (x, y) pair — the original computed it and threw
    it away.  NOTE: accumulating x0 += h in floats can run one extra
    step when h does not divide target_x exactly.
    """
    while x0 <= target_x:
        print(x0, y0)
        k1 = h*deriv(x0, y0)
        k2 = h*deriv(x0 + h/2, y0 + k1/2)
        k3 = h*deriv(x0 + h/2, y0 + k2/2)
        k4 = h*deriv(x0 + h, y0 + k3)
        # Weighted average of the four slope estimates (the RK4 update).
        y0 = y0 + (1/6)*(k1 + 2*k2 + 2*k3 + k4)
        x0 = x0 + h
    return x0, y0
rk4(0, 0, 1, 0.2)
# -
| Exercise18/Implementing_the_Runge_Kutta_Method.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <!--BOOK_INFORMATION-->
# <a href="https://www.packtpub.com/big-data-and-business-intelligence/machine-learning-opencv" target="_blank"><img align="left" src="data/cover.jpg" style="width: 76px; height: 100px; background: white; padding: 1px; border: 1px solid black; margin-right:10px;"></a>
# *This notebook contains an excerpt from the book [Machine Learning for OpenCV](https://www.packtpub.com/big-data-and-business-intelligence/machine-learning-opencv) by <NAME>.
# The code is released under the [MIT license](https://opensource.org/licenses/MIT),
# and is available on [GitHub](https://github.com/mbeyeler/opencv-machine-learning).*
#
# *Note that this excerpt contains only the raw code - the book is rich with additional explanations and illustrations.
# If you find this content useful, please consider supporting the work by
# [buying the book](https://www.packtpub.com/big-data-and-business-intelligence/machine-learning-opencv)!*
# <!--NAVIGATION-->
# < [Building Our First Decision Tree](05.01-Building-Our-First-Decision-Tree.ipynb) | [Contents](../README.md) | [Using Decision Trees for Regression](05.03-Using-Decision-Trees-for-Regression.ipynb) >
# # Using Decision Trees to Diagnose Breast Cancer
#
# Now that we have built our first decision trees, it's time to turn our attention to a real dataset: The Breast Cancer Wisconsin dataset <https://archive.ics.uci.edu/ml/datasets/Breast+Cancer+Wisconsin+(Diagnostic)>.
#
# In order to make the task feasible, the researchers performed feature extraction on the images, like we did in Chapter 4, Representing Data and Engineering Features. They went through a total of 569 images, and extracted 30 different features that describe the characteristics of the cell nuclei present in the images, including:
#
# - cell nucleus texture (represented by the standard deviation of the gray-scale values)
#
# - cell nucleus size (calculated as the mean of distances from center to points on the perimeter)
#
# - tissue smoothness (local variation in radius lengths)
#
# - tissue compactness
#
# The goal of the research was then to classify tissue samples into benign and malignant (a binary classification task).
# ## Loading the dataset
#
# The full dataset is part of Scikit-Learn's example datasets:
from sklearn import datasets
data = datasets.load_breast_cancer()
# As in previous examples, all data is contained in a 2-D feature matrix data.data, where the rows represent data samples, and the columns are the feature values:
data.data.shape
# With a look at the provided feature names, we recognize some that we mentioned above:
data.feature_names
# Since this is a binary classification task, we expect to find exactly two target names:
data.target_names
# Let's split the dataset into training and test sets using a healthy 80-20 split:
import sklearn.model_selection as ms
X_train, X_test, y_train, y_test = ms.train_test_split(data.data, data.target, test_size=0.2, random_state=42)
X_train.shape, X_test.shape
# ## Building the decision tree
from sklearn import tree
dtc = tree.DecisionTreeClassifier(random_state=42)
dtc.fit(X_train, y_train)
# Since we did not specify any pre-pruning parameters, we would expect this decision tree to grow quite large and result in a perfect score on the training set:
dtc.score(X_train, y_train)
# However, to our surprise, the test error is not too shabby, either:
dtc.score(X_test, y_test)
# Export the fitted tree in Graphviz .dot format (render with `dot -Tpng tree.dot`).
with open("tree.dot", 'w') as f:
    f = tree.export_graphviz(dtc, out_file=f,
                             feature_names=data.feature_names,
                             class_names=data.target_names)
# Now we want to do some model exploration. For example, we mentioned above that the depth of a tree influences its performance. If we wanted to study this dependency more systematically, we could repeat building the tree for different values of `max_depth`:
import numpy as np
# Candidate tree depths to sweep.
max_depths = np.array([1, 2, 3, 5, 7, 9, 11])
# For each of these values, we want to run the full model cascade from start to finish. We also want to record the train and test scores. We do this in a for loop:
train_score = []
test_score = []
for d in max_depths:
    # Refit from scratch at each depth with a fixed seed for comparability.
    dtc = tree.DecisionTreeClassifier(max_depth=d, random_state=42)
    dtc.fit(X_train, y_train)
    train_score.append(dtc.score(X_train, y_train))
    test_score.append(dtc.score(X_test, y_test))
# We can plot the scores as a function of the tree depth using Matplotlib:
import matplotlib.pyplot as plt
# %matplotlib inline
plt.style.use('ggplot')
plt.figure(figsize=(10, 6))
plt.plot(max_depths, train_score, 'o-', linewidth=3, label='train')
plt.plot(max_depths, test_score, 's-', linewidth=3, label='test')
plt.xlabel('max_depth')
plt.ylabel('score')
plt.ylim(0.85, 1.1)
plt.legend()
# Let's do one more. What about the minimum numbers of samples required to make a node a leaf node?
#
# We repeat the procedure from above:
# Same sweep, this time over the minimum samples allowed in a leaf node.
train_score = []
test_score = []
min_samples = np.array([2, 4, 8, 16, 32])
for s in min_samples:
    dtc = tree.DecisionTreeClassifier(min_samples_leaf=s, random_state=42)
    dtc.fit(X_train, y_train)
    train_score.append(dtc.score(X_train, y_train))
    test_score.append(dtc.score(X_test, y_test))
# This leads to a plot that looks quite different from the one before:
plt.figure(figsize=(10, 6))
plt.plot(min_samples, train_score, 'o-', linewidth=3, label='train')
plt.plot(min_samples, test_score, 's-', linewidth=3, label='test')
plt.xlabel('min_samples_leaf')
plt.ylabel('score')
plt.ylim(0.9, 1)
plt.legend()
# What does this all mean? Refer to Chapter 5 for the answers.
# <!--NAVIGATION-->
# < [Building Our First Decision Tree](05.01-Building-Our-First-Decision-Tree.ipynb) | [Contents](../README.md) | [Using Decision Trees for Regression](05.03-Using-Decision-Trees-for-Regression.ipynb) >
| notebooks/05.02-Using-Decision-Trees-to-Diagnose-Breast-Cancer.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_tensorflow_p36
# language: python
# name: conda_tensorflow_p36
# ---
import os
import io
import json, glob
import urllib3
from PIL import Image
import PIL.Image
import numpy as np
import pandas as pd
from tenacity import *
# !pip install tenacity
# +
def parse_dataset(dataset, outdir, _max=10000):
    """
    Build (absolute path, url) pairs for every image entry in the dataset.
    :param dataset: path of the JSON dataset file to parse
    :param outdir: output directory where images will be saved
    :param _max: maximum images to download (change to download all dataset)
    :return: list of (filename, url) tuples, at most _max long
    """
    pairs = []
    with open(dataset, 'r') as handle:
        meta = json.load(handle)
        for entry in meta["images"]:
            target = os.path.join(outdir, "{}.jpg".format(entry["imageId"]))
            pairs.append((target, entry["url"]))
    return pairs[:_max]
def download_image(fnames_and_urls):
    """
    Fetch one image and store it as JPEG with 90% quality.
    Skips the download when the target file already exists.
    :param fnames_and_urls: tuple of (absolute path, url) for one image
    """
    target, url = fnames_and_urls
    if os.path.exists(target):
        return
    http = urllib3.PoolManager(retries=retry(connect=3, read=2, redirect=3))
    payload = http.request("GET", url)
    picture = Image.open(io.BytesIO(payload.data))
    picture.convert("RGB").save(target, format='JPEG', quality=90)
# Build the (path, url) list from train.json and download every image
# sequentially (up to 100000).
# %cd '/home/ec2-user/SageMaker/imat/'
parse = parse_dataset('train.json', '/home/ec2-user/SageMaker/imat/train_images',100000)
for i in parse:
    download_image(i)
# -
train_json = json.load(open('/home/ec2-user/SageMaker/imat/train.json'))
num_images = len(train_json['images'])
print(num_images)
# Get image ids and labels
image_id = []
label_id = []
# NOTE(review): 46053 is hard-coded — presumably the number of downloaded
# images; confirm it matches num_images / the files actually on disk.
for i in range(46053):
    image_id.append(int(train_json['annotations'][i]['imageId']))
    label_list = train_json['annotations'][i]['labelId']
    # Join the per-image label IDs into one space-separated string.
    label_str = ''
    for j in range(len(label_list)):
        label_str += ' ' + label_list[j]
    label_str = label_str[1:]
    label_id.append(label_str)
len(label_id)
def get_index(path):
    # Numeric sort key: the "<id>.jpg" basename of an image path.
    return int(path.split('/')[-1].split('.')[0])
# Get validation image paths
train_path_format = '/home/ec2-user/SageMaker/imat/train_images/*.jpg'
image_path = sorted(glob.glob(train_path_format), key= get_index)
len(image_path)
# One row per image: id, labels and on-disk path, written out as train.csv.
train_csv = pd.DataFrame({'imageId': image_id, 'labelId': label_id, 'imagePath': image_path})
train_csv.head()
train_csv.shape
train_csv.to_csv('train.csv', index=False)
| notebooks/json2csv_train.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Data clean-up
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
efw = pd.read_excel("efw.xlsx")
#Turn third row into dataframe header
efw.columns = efw.iloc[2]
#Drop first rows
efw = efw.drop([0, 1, 2], axis = 0)
efw.head(5)
# +
# Cast Year/Rank/Quartile to object dtype (the original comment said
# "integers" but astype(object) is what runs).
efw[["Year", "Rank", "Quartile"]]=efw[["Year", "Rank", "Quartile"]].astype(object)
# NOTE(review): this astype result is discarded — the line has no effect.
efw[["ISO_Code", "Countries"]].astype(str)
efw.head(5)
# -
print(efw.columns)
#Drop dataframe columns that will not be used
# NOTE(review): a few labels appear twice in this list (e.g. ' Non-tariff
# trade barriers') — harmless for drop, but worth de-duplicating.
efw = efw.drop(['1A data', 'Top marginal income tax rate','Top marginal income and payroll tax rate', ' Non-tariff trade barriers',
       'Compliance costs of importing and exporting', '1B data', '1C data', '1Di data', '1Dii data', '3A data', '3B data', '3C data', ' Revenue from trade taxes (% of trade sector)', '4Ai data',
       'Mean tariff rate', '4Aii data', 'Standard deviation of tariff rates',
       '4Aiii data', ' Non-tariff trade barriers', 'Compliance costs of importing and exporting','Foreign ownership/investment restrictions', 'Capital controls',
       'Freedom of foreigners to visit','Ownership of banks',
       'Private sector credit',
       'Interest rate controls/negative real interest rates)','Hiring regulations and minimum wage',
       'Hiring and firing regulations', 'Centralized collective bargaining',
       'Hours Regulations', 'Mandated cost of worker dismissal',
       'Conscription','Administrative requirements', 'Bureaucracy costs',
       'Starting\na business', 'Extra payments/bribes/favoritism',
       'Licensing restrictions', 'Tax compliance',],axis=1)
print(efw.columns)
#Rename dataframe columns (positional — must match the column order left
# after the drop above)
efw.columns = ('year', 'ISO_code', 'countries', 'ECON', 'rank', 'quartile',
       '1a_government_consumption', '1b_transfers',
       '1c_gov_enterprises', '1d_top_marg_tax_rate',
       '1_size_government', '2a_judicial_independence', '2b_impartial_courts',
       '2c_protection_property_rights',
       '2d_military_interference',
       '2e_integrity_legal_system', '2f_legal_enforcement_contracts',
       '2g_restrictions_sale_real_property',
       '2h_reliability_police', '2i_business_costs_crime', '2j_gender_adjustment',
       '2_property_rights', '3a_money_growth',
       '3b_std_inflation', "3c_inflation",
       '3d_freedom_own_foreign_currency', '3_sound_money',
       '4a_tariffs', '4b_regulatory_trade_barriers', '4c_black_market',
       '4d_control_movement_capital_ppl',
       '4_trade', '5a_credit_market_reg',
       '5b_labor_market_reg', '5c_business_reg', '5_regulation')
efw.head(5)
#Load Gini dataset (World Bank layout: real header on spreadsheet row 4)
gini = pd.read_excel("gini.xls", header = 3)
# +
#Drop columns (indicator name/code, then the country-name column)
gini.drop(gini.columns[2:4], axis=1, inplace = True)
gini.drop(gini.columns[0], axis = 1, inplace = True)
gini.head(5)
# +
#Set index to merge dataframes
gini = gini.set_index("Country Code")
gini.head(5)
# +
# Reshape wide year-columns into long (Country Code, year, value) rows so the
# frame can be merged with the EFW panel.
gini = gini.reset_index().melt('Country Code')
gini.head(5)
# -
#Rename columns
gini.columns = ('ISO_code', 'year', 'gini')
# Inner-merge on the shared ISO_code/year columns.
df = pd.merge(gini, efw)
df.tail(15)
#Select Latin American countries
df_gini = df.loc[df['ISO_code'].isin(["ARG", "BOL", "BRA", "CHL", "COL", "CRI", "DOM", "ECU", "GTM", "HND", "MEX", "NIC", "PAN", "PER", "SLV", "URY", "VEN"])]
df_gini
df_gini["ISO_code"].unique()
# +
#Create a list of the df_gini columns to downcast to float
cols = df_gini.columns.tolist()
cols = cols[4:]
# Coerce every score column to numeric; unparseable cells become NaN.
df_gini[cols] = df_gini[cols].apply(pd.to_numeric, errors='coerce')
# -
df_gini.info()
df_gini.tail(5)
df_gini["countries"].unique()
#Set index to countries and year
df_gini = df_gini.set_index(["countries", "year"]).sort_index()
df_gini.head(5)
# ## Plots
# #### Economic Freedom and Gini Index Over Time
# Per-country Gini lines over time (countries become columns after unstack).
_ = df_gini.unstack('countries')['gini'].plot(kind = "line", figsize = (9, 6), title = "Gini Score Over Time", marker = ".")
plt.legend(bbox_to_anchor=(1.1, 1.00))
plt.savefig('GiniCountries.pdf', transparent = True, bbox_inches='tight')
plt.show()
# Same view for the overall EFW (economic freedom) score.
_ = df_gini.unstack('countries')['ECON'].plot(kind = "line", figsize = (9, 6), title = "EFW Over Time")
plt.legend(bbox_to_anchor=(1.1, 1.00))
plt.savefig('EFWCountries.pdf', transparent = True, bbox_inches='tight')
plt.show()
# #### Define a function that plots EFW and Gini Index over time for any country
#Create function that plots Gini and Economic Freedom figures for each country
def PlotCountry(countries):
    """Plot one country's Gini index and EFW score over time on twin axes.

    Parameters
    ----------
    countries : str
        Country name exactly as it appears in the df_gini index
        (e.g. "Brazil").
    Saves the figure as '<country>.png' and shows it.
    """
    simple = df_gini.loc[countries, :]
    simple = simple.reset_index()
    # Keep just the four columns the plot needs, in the same order the
    # original produced by dropping the ~30 unused EFW sub-indicators.
    simple = simple[['year', 'ISO_code', 'gini', 'ECON']]
    simple = simple.round(2)
    simple.columns = ["Year", "ISO Code", "Gini Index", "Economic Freedom"]
    #Plot a double-axis lineplot
    fig, ax1 = plt.subplots(figsize = (5,4))
    color = 'darkred'
    ax1.set_xlabel('Year')
    ax1.set_ylabel('Gini Index', color = color)
    ax1.plot("Year", "Gini Index", color = color, data = simple, linestyle='--', marker = ".")
    ax1.tick_params(axis='y', labelcolor = color)
    ax2 = ax1.twinx() #Instantiate second axis that shares the same x-axis
    color = 'darkblue'
    ax2.set_ylabel('Economic Freedom', color = color)
    ax2.plot("Year", "Economic Freedom", color = color, data = simple)
    ax2.tick_params(axis='y', labelcolor=color)
    plt.title(countries)
    fig.tight_layout() #Otherwise the right y-label is slightly clipped
    plt.savefig(f'{countries}.png', transparent = True)
    plt.show()
# Render the twin-axis chart for every Latin American country in the sample
# (Brazil is plotted twice in the original notebook — kept as-is).
PlotCountry("Argentina")
PlotCountry("Brazil")
PlotCountry("Peru")
PlotCountry("Colombia")
PlotCountry("Ecuador")
PlotCountry("Costa Rica")
PlotCountry("Panama")
PlotCountry("Mexico")
PlotCountry("Uruguay")
PlotCountry("Chile")
PlotCountry("Bolivia")
PlotCountry("Venezuela")
PlotCountry("Guatemala")
PlotCountry("El Salvador")
PlotCountry("Nicaragua")
PlotCountry("Dominican Rep.")
PlotCountry("Honduras")
PlotCountry("Brazil")
# #### Plot average of Gini Index and Economic Freedom over time
# +
# Average the Gini Index and EFW figures across all countries for each year
indexed = df_gini.groupby("year").mean()
indexed = indexed.reset_index()
# Plot the yearly averages on a shared x-axis: Gini on the left axis (red,
# dashed), Economic Freedom on the right axis (blue, solid)
fig, ax1 = plt.subplots()
color = 'darkred'
ax1.set_xlabel('Year')
ax1.set_ylabel('Gini Index', color = color)
ax1.plot("year", "gini", color = color, data = indexed, linestyle='--', marker = ".")
ax1.tick_params(axis='y', labelcolor = color)
ax2 = ax1.twinx() #Instantiate second axis that shares the same x-axis
color = 'darkblue'
ax2.set_ylabel('Economic Freedom', color = color)
ax2.plot("year","ECON", color = color, data = indexed)
ax2.tick_params(axis='y', labelcolor=color)
plt.title("Gini Index and Economic Freedom Over Time")
fig.tight_layout() #Otherwise the right y-label is slightly clipped
plt.savefig('Overall.pdf', transparent = True)
plt.show()
# -
# -
# #### Explore percent and absolute changes in Gini Index and Economic Freedom
df = df_gini.reset_index()
df = df.set_index(["year", "countries"])
df.head(5)
# With the (year, countries) MultiIndex, df.gini[2016] selects the 2016 cross
# section, so this is the per-country percentage change 2000 -> 2016
percentage_gini = (((df.gini[2016] / df.gini[2000] - 1) * 100))
percentage_gini
# Per-country percentage change in the EFW score 1980 -> 2016
percentage_efw = (((df.ECON[2016] / df.ECON[1980]) - 1) * 100)
percentage_efw
# Re-index by country only for the per-country lookups below
df = df.reset_index()
df = df.set_index("countries")
df.head(5)
# +
#Since the Gini data appears to be discontinued for some countries, I will calculate the percentage change using
#the first year for which there is available data for Gini and compare it to the latest Gini figure.
def Change(countries):
    """Print the percentage change in Gini Index and EFW score for one country,
    between its first and last year with available Gini data."""
    new = df[df['gini'].notnull()] #get only rows with gini figures (drop rows with NaNs in gini column)
    df1 = new.loc[countries,:] #select specific country
    df1 = df1.reset_index()
    x = df1.iloc[0].tolist() #first row: the earliest year with Gini data
    x_new = x[0:5] #keep the first five fields (assumed order: country, year, ..., gini, ECON — TODO confirm against df columns)
    y = df1.iloc[-1].tolist() #last row: the latest year with Gini data
    y_new = y[0:5] #keep the first five fields
    print("PERCENTAGE CHANGE FOR PERIOD", x_new[1], "-",y_new[1])
    change_gini = (((y_new[3] / x_new[3]) - 1) * 100) #calculate percentage change in Gini
    print("Change in Gini Index for",countries, ":" , round(change_gini,2))
    change_efw = (((y_new[4] / x_new[4]) - 1) * 100) #calculate percentage change in EFW
    print("Change in Economic Freedom for",countries, ":" , round(change_efw,2))
# -
# Print first-to-last-year percentage changes for selected countries
Change("Chile")
Change("Argentina")
Change("Honduras")
Change("Brazil")
Change("Colombia")
Change("Costa Rica")
Change("Uruguay")
Change("Nicaragua")
Change("Venezuela")
# #### Create new dataframe with changes in Gini and Economic Freedom
new_df = df.reset_index()
new_df = new_df[new_df["gini"].notnull()]
new_df = new_df.set_index(["countries", "year"])
new_df.head(15)
# Lists to collect each country's name and its percentage change in Gini/EFW
countries = []
gini = []
efw = []
#Iterate through each country to append the changes in Gini and Economic Freedom to the lists created (gini and efw)
for i in new_df.index.levels[0]:
    countries.append(i)
    country = new_df.loc[i]
    country = country.reset_index()
    x = country.iloc[0].tolist()  # first (earliest) row with Gini data
    y = country.iloc[-1].tolist()  # last (latest) row with Gini data
    change_g = (((y[2] / x[2]) - 1) * 100)  # positions 2/3 assumed to be gini/ECON — TODO confirm column order
    change_e = (((y[3] / x[3]) - 1) * 100)
    gini.append(change_g)
    efw.append(change_e)
# +
#Iterate through the lists with the percentage changes for Gini and Economic Freedom and change negative values to "decrease"
#and positive changes to "increase"
g = []
e = []
for n in gini:
    if n > 0:
        g.append("increase")
    else:
        g.append("decrease")  # note: a change of exactly 0 is labelled "decrease"
for f in efw:
    if f > 0:
        e.append("increase")
    else:
        e.append("decrease")
# -
tuples = list(zip(countries,g,e))
tuples
#Create dataframe from tuples
changes = pd.DataFrame(tuples, columns=['Country','Change in Gini', "Change in Economic Freedom"])
changes
#Count countries whose Gini decreased while their EFW score increased over the same period
changes[(changes["Change in Gini"] == "decrease") & (changes["Change in Economic Freedom"] == "increase")].count()
#Count countries where both Gini and EFW increased over the same period
changes[(changes["Change in Gini"] == "increase") & (changes["Change in Economic Freedom"] == "increase")].count()
#Count countries where both Gini and EFW decreased over the same period
changes[(changes["Change in Gini"] == "decrease") & (changes["Change in Economic Freedom"] == "decrease")].count()
#Count countries whose Gini increased while their EFW score decreased over the same period
changes[(changes["Change in Gini"] == "increase") & (changes["Change in Economic Freedom"] == "decrease")].count()
| Inequality LatAm.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .sh
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Bash
# language: bash
# name: bash
# ---
# ## Prepare reference
#
# Standard genomes are located in `/mnt/brick1/ref`.
#
# It's a good idea to symlink to the appropriate genome and/or index files from your project's `ref` directory.
#
# The following assumes the following tree for the project:
#
# ```
# :--Project
# |-ref
# |-data
# |-sample01
# |-sample02
# ...
# |-results
# |-sample01
# |-sample02
# ...
# |-notebooks<you're working from here>
# ```
# ### Change these values. If `bowtie2` is not on the `PATH` assign its full path to `$aligner`
# +
num_samples=6                 # number of samples to process (sample1..sample6)
index="../ref/MG1655"         # bowtie2 index prefix (symlink under the project's ref/)
sampleid="sample"             # directory/file name prefix for each sample
aligner=$(which bowtie2)      # full path to bowtie2; set manually if not on PATH
# -
# ## Align reads
#
# Assuming we are working with paired end reads and that `bowtie2` is somewhere on your `PATH`. If it's not either add it's location to the `PATH` variable or include the full path to `bowtie2` executable below:
# +
base_dir="../data"
# Align each sample's paired-end reads with bowtie2, streaming decompressed
# FASTQ through named pipes and converting SAM->BAM on the fly.
for i in $(seq 1 "$num_samples")
do
    sample_dir="$base_dir/${sampleid}${i}"
    result_dir="../results/${sampleid}${i}"
    if [ ! -d "$result_dir" ]; then
        echo "Creating $result_dir ..."
        mkdir -p "$result_dir"
    fi
    read1="$sample_dir/read1.fifo"
    read2="$sample_dir/read2.fifo"
    # Remove stale FIFOs left by an interrupted run, then recreate them;
    # mkfifo fails if the file already exists.
    rm -f "$read1" "$read2"
    mkfifo "$read1"
    mkfifo "$read2"
    # Decompress in the background; bowtie2 consumes the FIFOs as it aligns
    zcat "$sample_dir/R1.fastq.gz" > "$read1" &
    zcat "$sample_dir/R2.fastq.gz" > "$read2" &
    # Align using bowtie2 (30 threads) and pipe straight into samtools
    "$aligner" -p 30 -x "$index" \
        -1 "$read1" -2 "$read2" \
        | samtools view -bhS - > "$result_dir/${sampleid}${i}.bam"
    rm -f "$sample_dir"/*.fifo
done
# -
# ## Sort and index `.bam` files
#
# Same rationale applies to `samtools` below.
#
# Assuming `samtools` version > 1.5.
# +
base_dir="../data"
# Coordinate-sort every sample's BAM with 30 threads.
for (( i = 1; i <= num_samples; i++ )); do
    result_dir="../results/${sampleid}${i}"
    samtools sort "$result_dir/${sampleid}${i}.bam" -@ 30 -o "$result_dir/${sampleid}${i}_sorted.bam"
done
# +
# Build a .bai index for every sorted BAM (30 threads).
for (( i = 1; i <= num_samples; i++ )); do
    result_dir="../results/${sampleid}${i}"
    samtools index -@ 30 "$result_dir/${sampleid}${i}_sorted.bam"
done
| sessions/templates/Alignment template.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
#
# # Pipeline Anova SVM
#
#
# Simple usage of Pipeline that runs successively a univariate
# feature selection with anova and then a SVM of the selected features.
#
# Using a sub-pipeline, the fitted coefficients can be mapped back into
# the original feature space.
#
#
# +
from sklearn import svm
# sklearn.datasets.samples_generator was deprecated in scikit-learn 0.22 and
# removed in 0.24; import make_classification from sklearn.datasets directly.
from sklearn.datasets import make_classification
from sklearn.feature_selection import SelectKBest, f_regression
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
print(__doc__)
# import some data to play with: a 4-class problem with 20 features,
# of which only 3 are informative
X, y = make_classification(
    n_features=20, n_informative=3, n_redundant=0, n_classes=4,
    n_clusters_per_class=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
# ANOVA SVM-C
# 1) anova filter, take 3 best ranked features
anova_filter = SelectKBest(f_regression, k=3)
# 2) svm
clf = svm.LinearSVC()
anova_svm = make_pipeline(anova_filter, clf)
anova_svm.fit(X_train, y_train)
y_pred = anova_svm.predict(X_test)
print(classification_report(y_test, y_pred))
# map the fitted SVM coefficients back into the original 20-dim feature space
coef = anova_svm[:-1].inverse_transform(anova_svm['linearsvc'].coef_)
print(coef)
| 01 Machine Learning/scikit_examples_jupyter/feature_selection/plot_feature_selection_pipeline.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.0.3
# language: julia
# name: julia-1.0
# ---
# + [markdown] deletable=true editable=true
# # Negative Binomial: Compute false positive/negatives
#
# Currently `glmnet` does not support negative binomila regression. Therefore, we simply compute the number of false positive/negatives of negative binomial in the same cross validation setting as Normal/Bernoulli/Poisson.
# + deletable=true editable=true
# Add 4 worker processes so cross-validation can run in parallel
using Distributed
addprocs(4)
nprocs()
# + deletable=true editable=true
using MendelIHT
using SnpArrays
using DataFrames
using Distributions
using Random
using LinearAlgebra
using DelimitedFiles
using GLM
# + deletable=true editable=true
# Run one simulation replicate: simulate an n×p SNP matrix with k causal SNPs
# under distribution `d` with link `l`, choose the sparsity level by 3-fold
# cross-validation, refit IHT at that level, and return the tuple
# (false positives, false negatives).
function iht_negativebinomial(n::Int64, p::Int64, k::Int64, d::UnionAll, l::Link)
    #construct snpmatrix, covariate files, and true model b
    x, = simulate_random_snparray(n, p, undef)
    xbm = SnpBitMatrix{Float64}(x, model=ADDITIVE_MODEL, center=true, scale=true);
    z = ones(n, 1) # the intercept
    x_float = [convert(Matrix{Float64}, x, center=true, scale=true) z] #Float64 version of x
    # simulate response, true model b, and the correct non-0 positions of b
    y, true_b, correct_position = simulate_random_response(x, xbm, k, d, l)
    #specify path and folds
    num_folds = 3
    folds = rand(1:num_folds, size(x, 1));
    # candidate sparsity levels 1..50 for cross validation
    path = collect(1:50);
    #run IHT's cross validation routine
    mses = cv_iht_distributed(d(), l, x, z, y, 1, path, folds, num_folds, use_maf=false, debias=false, showinfo=false, parallel=true);
    iht_k_est = argmin(mses)  # sparsity level with the smallest CV error
    iht_result = L0_reg(x, xbm, z, y, 1, iht_k_est, d(), l, debias=false, init=false, use_maf=false)
    iht_beta = iht_result.beta
    # show estimated vs true coefficients at the causal positions
    compare_model = DataFrame(
        true_β = true_b[correct_position],
        IHT_β = iht_beta[correct_position])
    @show compare_model
    #compute true/false positives/negatives for IHT
    iht_tp = count(!iszero, iht_beta[correct_position])
    iht_fp = iht_k_est - iht_tp
    iht_fn = k - iht_tp
    println("IHT false positives = $iht_fp")
    println("IHT false negatives = $iht_fn" * "\n")
    return iht_fp, iht_fn
end
# + deletable=true editable=true
#simulate data with k true predictors, from distribution d and with link l.
n = 1000
p = 10000
k = 10
d = NegativeBinomial
l = LogLink()
#set random seed for reproducibility
Random.seed!(2019)
#run the function above 50 times, collecting false positive/negative counts
total_runs = 50
iht_false_positives = zeros(total_runs)
iht_false_negatives = zeros(total_runs)
for i in 1:total_runs
    println("current run = $i")
    iht_fp, iht_fn = iht_negativebinomial(n, p, k, d, l)
    iht_false_positives[i] = iht_fp
    iht_false_negatives[i] = iht_fn
end
# + deletable=true editable=true
# Hand-recorded per-run counts from the 50 runs above, averaged over 50 runs.
negativebinomial_iht_false_positives = (0+1+1+0+1+0+0+0+0+0+0+2+3+0+0+1+0+0+4+0+1+0+0+3+0+
    1+0+0+0+0+4+0+0+2+6+1+4+0+2+0+0+0+0+0+2+0+0+0+2+0)/ 50
# NOTE(review): despite the "bernoulli_" prefix this appears to hold the
# negative binomial false-negative counts (this notebook only runs the
# negative binomial simulation) — confirm and consider renaming.
bernoulli_iht_false_negatives = (2+8+3+7+3+4+5+5+4+6+3+4+2+3+4+5+4+4+6+2+6+5+9+5+6+
    2+6+7+7+6+7+4+5+5+5+8+4+8+2+3+9+9+5+5+5+8+2+4+4+9)/50
# number of runs that reportedly failed to converge
IHT_did_not_converge = 2
result = [negativebinomial_iht_false_positives; bernoulli_iht_false_negatives]
| figures/precision_recall/old2/precision_recall_negativebinomial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] papermill={"duration": 0.092568, "end_time": "2020-08-07T07:17:20.608866", "exception": false, "start_time": "2020-08-07T07:17:20.516298", "status": "completed"} tags=[]
# # PyCaret 2 Classification Example
# This notebook is created using PyCaret 2.0. Last updated : 31-07-2020
# + papermill={"duration": 0.127113, "end_time": "2020-08-07T07:17:20.829021", "exception": false, "start_time": "2020-08-07T07:17:20.701908", "status": "completed"} tags=[]
# check version
from pycaret.utils import version
version()
# + [markdown] papermill={"duration": 0.099711, "end_time": "2020-08-07T07:17:21.022676", "exception": false, "start_time": "2020-08-07T07:17:20.922965", "status": "completed"} tags=[]
# # 1. Data Repository
# + papermill={"duration": 7.02273, "end_time": "2020-08-07T07:17:28.141797", "exception": false, "start_time": "2020-08-07T07:17:21.119067", "status": "completed"} tags=[]
# %reload_ext autoreload
# %autoreload 2
# %matplotlib inline
# Importing display/HTML from IPython.core.display is deprecated;
# IPython.display is the supported public location.
from IPython.display import display, HTML
display(HTML("<style>.container { width:96% !important; }</style>"))# default notebook width is 75%
import os
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from imblearn.under_sampling import RandomUnderSampler
from tqdm import tqdm_notebook as tqdm
import pycaret
sns.set()
# always show all columns when displaying DataFrames
pd.set_option("display.max_columns", None)
# !pwd
sys.executable
# + papermill={"duration": 0.185137, "end_time": "2020-08-07T07:17:28.424215", "exception": false, "start_time": "2020-08-07T07:17:28.239078", "status": "completed"} tags=["parameters"]
# input data directory
data_dir = "../../../data/orig/home-credit-default-risk"
# output directory for saved models
output_dir = "model"
os.makedirs(output_dir, exist_ok=True)
# target (label) column
target = "TARGET"
# random seed
session_id = 123
# number of CV folds
fold = 5
# metric to optimize
optimize = "AUC"
# models to tune
choice_ms = ["catboost", "lightgbm"]#, "lda"]
#choice_ms = ['nb', 'lightgbm', "rf"] # for quick testing
# number of hyperparameter-tuning iterations
n_iter = 100
#n_iter = 3 # for quick testing
# compare_models() is slow, so it is gated behind this flag
is_compare_models = False
# whether to save the setup() CSVs (they take ~0.5 GB of disk)
is_save_setup_csv = False
#is_save_setup_csv = True
# columns excluded from training
ignore_features = ["SK_ID_CURR"]
# + papermill={"duration": 0.147959, "end_time": "2020-08-07T07:17:28.678994", "exception": false, "start_time": "2020-08-07T07:17:28.531035", "status": "completed"} tags=["injected-parameters"]
# Parameters
n_iter = 400
is_compare_models = False
is_save_setup_csv = False
# + [markdown] papermill={"duration": 0.122794, "end_time": "2020-08-07T07:17:28.916818", "exception": false, "start_time": "2020-08-07T07:17:28.794024", "status": "completed"} tags=[]
# # data load
# + papermill={"duration": 15.915309, "end_time": "2020-08-07T07:17:44.952599", "exception": false, "start_time": "2020-08-07T07:17:29.03729", "status": "completed"} tags=[]
# Load the Home Credit train/test CSVs and show a quick summary of each
df_train = pd.read_csv(
    os.path.join(data_dir, "application_train.csv"),
)
df_test = pd.read_csv(
    os.path.join(data_dir, "application_test.csv"),
)
print(df_train.info())
display(
    df_train.head().style.background_gradient(cmap="Pastel1")
)
display(df_train.describe().style.background_gradient(cmap="Pastel1"))
print(df_test.info())
display(df_test.head().style.background_gradient(cmap="Pastel1"))
display(df_test.describe().style.background_gradient(cmap="Pastel1"))
# + papermill={"duration": 0.190496, "end_time": "2020-08-07T07:17:45.264053", "exception": false, "start_time": "2020-08-07T07:17:45.073557", "status": "completed"} tags=[]
params = {"target": target,
          "session_id": session_id,
          "silent": True,
          "ignore_features": ignore_features,
          }

def imbalance_setup(df, target, params=params, fold=None, experiment_name=None):
    """Re-run pycaret setup() with under-sampling options.

    The imbalance options change the effective number of rows, so setup()
    has to be called again whenever they change.

    df              -- dataframe passed to setup()
    target          -- name of the label column in df
    params          -- base keyword arguments for setup()
    fold            -- "all" (no CV), 10, or 5; selects the under-sampling size
    experiment_name -- when given, enables mlflow logging under this name
    """
    # Work on a copy: the module-level `params` dict must not accumulate
    # keys (log_experiment / fix_imbalance / ...) across calls — mutating
    # the shared default silently changed the behavior of later calls.
    params = dict(params)
    # enable mlflow logging when requested
    if experiment_name is not None:
        params["log_experiment"] = True
        params["experiment_name"] = experiment_name
    strategy = None
    if fold == "all":
        # no CV: balance both classes down to the minority-class count
        n_min = df[target].value_counts().min()
        strategy = {0: n_min, 1: n_min}
    elif fold == 10:
        # maximum per-class count that fits 10-fold CV
        n_fold10 = 15639
        strategy = {0: n_fold10, 1: n_fold10}
    elif fold == 5:
        # maximum per-class count that fits 5-fold CV
        n_fold5 = 13901
        strategy = {0: n_fold5, 1: n_fold5}
    # apply random under-sampling via imblearn when a strategy was chosen
    if strategy is not None:
        params["fix_imbalance"] = True
        params["fix_imbalance_method"] = RandomUnderSampler(sampling_strategy=strategy, random_state=session_id)
    return setup(df, **params)
# + [markdown] papermill={"duration": 0.133949, "end_time": "2020-08-07T07:17:45.517494", "exception": false, "start_time": "2020-08-07T07:17:45.383545", "status": "completed"} tags=[]
# # 2. Initialize Setup
# + papermill={"duration": 0.17092, "end_time": "2020-08-07T07:17:45.833745", "exception": false, "start_time": "2020-08-07T07:17:45.662825", "status": "completed"} tags=[]
# from pycaret.classification import *
# help(setup)
# + papermill={"duration": 148.942231, "end_time": "2020-08-07T07:20:14.89738", "exception": false, "start_time": "2020-08-07T07:17:45.955149", "status": "completed"} tags=[]
# %%time
from pycaret.classification import *
if is_save_setup_csv:
_df_test = df_test.copy()
_df_test[target] = 0 # target列仮で入れる
clf1 = imbalance_setup(_df_test, target)
display(clf1[0].head(3))
# 一応前処理後のtest set保存しておく
pd.concat([clf1[0], clf1[1]], axis=1).to_csv(
os.path.join(output_dir, "test_setup.csv"), index=False
)
clf1 = imbalance_setup(df_train, target, fold=fold)
display(clf1[0].head(3))
if is_save_setup_csv:
# 一応前処理後のtrain set保存しておく
pd.concat([clf1[0], clf1[1]], axis=1).to_csv(
os.path.join(output_dir, "train_setup.csv"), index=False
)
# + papermill={"duration": 0.256342, "end_time": "2020-08-07T07:20:15.320508", "exception": false, "start_time": "2020-08-07T07:20:15.064166", "status": "completed"} tags=[]
# test
#lr = create_model('lr', fold=fold)
# + [markdown] papermill={"duration": 0.178935, "end_time": "2020-08-07T07:20:15.674529", "exception": false, "start_time": "2020-08-07T07:20:15.495594", "status": "completed"} tags=[]
# # 3. Compare Baseline
# + papermill={"duration": 0.254372, "end_time": "2020-08-07T07:20:16.092054", "exception": false, "start_time": "2020-08-07T07:20:15.837682", "status": "completed"} tags=[]
# %%time
if is_compare_models:
best_model = compare_models(sort=optimize, fold=fold)
# + [markdown] papermill={"duration": 0.182413, "end_time": "2020-08-07T07:20:16.44788", "exception": false, "start_time": "2020-08-07T07:20:16.265467", "status": "completed"} tags=[]
# # 4. Create Model
# + papermill={"duration": 0.276918, "end_time": "2020-08-07T07:20:16.892118", "exception": false, "start_time": "2020-08-07T07:20:16.6152", "status": "completed"} tags=[]
models()
# + papermill={"duration": 0.274837, "end_time": "2020-08-07T07:20:17.344014", "exception": false, "start_time": "2020-08-07T07:20:17.069177", "status": "completed"} tags=[]
models(type="ensemble").index.tolist()
# + papermill={"duration": 0.255242, "end_time": "2020-08-07T07:20:17.776441", "exception": false, "start_time": "2020-08-07T07:20:17.521199", "status": "completed"} tags=[]
#ensembled_models = compare_models(
# whitelist=models(type="ensemble").index.tolist(), fold=3
#)
# + [markdown] papermill={"duration": 0.171473, "end_time": "2020-08-07T07:20:18.127", "exception": false, "start_time": "2020-08-07T07:20:17.955527", "status": "completed"} tags=[]
# # 5. Tune Hyperparameters
# + papermill={"duration": 181.644647, "end_time": "2020-08-07T07:23:19.949977", "exception": false, "start_time": "2020-08-07T07:20:18.30533", "status": "completed"} tags=[]
# %%time
tune_models = []
for m in choice_ms:
m = create_model(m, fold=fold)
tuned_m = tune_model(
m,
fold=fold,
optimize=optimize,
n_iter=n_iter,
)
tune_models.append(tuned_m)
tuned_lightgbm = tune_models[1]
print(tune_models)
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "completed"} tags=[]
#help(finalize_model)
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "completed"} tags=[]
# %%time
# Refit on all data (no CV split, hold-out included) before saving.
# Bug fix: imbalance_setup requires the dataframe and target as positional
# arguments — calling it with fold=None alone raised TypeError.
clf1 = imbalance_setup(df_train, target, fold=None)
f_tune_models = []
for m, name in zip(tune_models, choice_ms):
    f_m = finalize_model(m)
    f_tune_models.append(f_m)
    save_model(f_m, model_name=os.path.join(output_dir, "pycaret_tuned_" + name))
print(f_tune_models)
# + [markdown] papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "completed"} tags=[]
# # 6. Ensemble Model は省略
# + [markdown] papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "completed"} tags=[]
# # 7. Blend Models
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "completed"} tags=[]
#help(blend_models)
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "completed"} tags=[]
# CatBoost Classifier raises an error with blend_models, so this cell is disabled
#clf1 = imbalance_setup(fold=fold)
#
#blender = blend_models(estimator_list=tune_models,
# fold=fold,
# optimize=optimize,
# method="soft",
# choose_better=True, # 精度改善しなかった場合、create_model()で作ったモデルを返す
# )
#save_model(blender, model_name=os.path.join(output_dir, "pycaret_blender"))
# + [markdown] papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "completed"} tags=[]
# # 8. Stack Models
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "completed"} tags=[]
#help(stack_models)
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "completed"} tags=[]
# Bug fix: imbalance_setup requires df and target as positional arguments;
# fold must still be given even with finalize=True.
clf1 = imbalance_setup(df_train, target, fold=fold)
# Stack all tuned models except the last, using the last as the meta model
stacker = stack_models(estimator_list=tune_models[:-1],
                       meta_model=tune_models[-1],
                       fold=fold,
                       optimize=optimize,
                       finalize=True,  # refit without CV split, hold-out included
                       )
save_model(stacker, model_name=os.path.join(output_dir, "pycaret_stacker"))
# + [markdown] papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "completed"} tags=[]
# # 9. Analyze Model
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "completed"} tags=[]
#from pycaret.classification import *
#help(plot_model)
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "completed"} tags=[]
plot_model(tune_models[1])
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "completed"} tags=[]
plot_model(tune_models[1], plot="confusion_matrix")
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "completed"} tags=[]
plot_model(tune_models[1], plot="boundary")
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "completed"} tags=[]
plot_model(tune_models[1], plot="feature") # catboostはエラーになる
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "completed"} tags=[]
# the "pr" plot alone takes an unusually long time, so it is commented out
#plot_model(tune_models[1], plot="pr")
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "completed"} tags=[]
plot_model(tune_models[1], plot="class_report")
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "completed"} tags=[]
evaluate_model(tune_models[1])
# + [markdown] papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "completed"} tags=[]
# # 10. Interpret Model
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "completed"} tags=[]
#catboost = create_model("catboost", cross_validation=False)
# + [markdown] papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "completed"} tags=[]
# ## Shap cannot be used in this Docker setup — it reportedly breaks the Docker image, so it is not installed
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "completed"} tags=[]
#interpret_model(catboost)
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "completed"} tags=[]
#interpret_model(catboost, plot="correlation")
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "completed"} tags=[]
#interpret_model(catboost, plot="reason", observation=12)
# + [markdown] papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "completed"} tags=[]
# # 11. AutoML()
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "completed"} tags=[]
# help(automl)
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "completed"} tags=[]
# #%%time
## なんかエラーになる。。。
#automl = automl(optimize=optimize)
#save_model(automl, model_name=os.path.join(output_dir, "pycaret_automl"))
#print(automl)
# + [markdown] papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "completed"} tags=[]
# # 12. Predict Model
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "completed"} tags=[]
#pred_holdouts = predict_model(f_tune_models[0])
#pred_holdouts.head()
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "completed"} tags=[]
#new_data = df_test.copy()
#predict_new = predict_model(f_tune_models[0], data=new_data)
#predict_new.head()
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "completed"} tags=[]
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "completed"} tags=[]
f_tune_models = []
for name in choice_ms:
loaded_model = load_model(os.path.join(output_dir, f"pycaret_tuned_{name}"))
f_tune_models.append(loaded_model)
stacker = load_model(os.path.join(output_dir, f"pycaret_stacker"))
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "completed"} tags=[]
def make_submit(df_test, model, data_dir: str, output_dir: str, csv_name: str):
    """Create a submission CSV: predict on df_test and write it in
    sample_submission row order as {output_dir}/{csv_name}.csv."""
    df_predict = predict_model(model, data=df_test)
    tem_csv = f"{data_dir}/sample_submission.csv"
    df_tem = pd.read_csv(tem_csv)
    # left-join onto the sample submission to preserve its row order;
    # pycaret's "Score" column becomes the TARGET probability
    df_sub = pd.merge(df_tem, df_predict, how="left", on="SK_ID_CURR")[["SK_ID_CURR", "Score"]]
    df_sub = df_sub.rename(columns={"Score": "TARGET"})
    df_sub.to_csv(f"{output_dir}/{csv_name}.csv", index=False)
    display(df_sub.head())
for m, name in zip(f_tune_models, choice_ms):
make_submit(df_test, m, data_dir, output_dir, f"{name}_submission")
#make_submit(df_test, blender, data_dir, output_dir, "blender_submission")
make_submit(df_test, stacker, data_dir, output_dir, "stacker_submission")
#make_submit(df_test, automl, data_dir, output_dir, "automl_submission")
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "completed"} tags=[]
# + [markdown] papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "completed"} tags=[]
# # 13. Save / Load Model
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "completed"} tags=[]
#save_model(best, model_name=os.path.join(output_dir, "pycaret_automl"))
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "completed"} tags=[]
#loaded_bestmodel = load_model(os.path.join(output_dir, "pycaret_automl"))
#print(loaded_bestmodel)
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "completed"} tags=[]
from sklearn import set_config
set_config(display="diagram")
# NOTE(review): `loaded_bestmodel` is only assigned in the commented-out cell
# above — as written this line raises NameError. Confirm the intended model.
loaded_bestmodel[0]
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "completed"} tags=[]
from sklearn import set_config
set_config(display="text")
# + [markdown] papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "completed"} tags=[]
# # 14. Deploy Model
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "completed"} tags=[]
#deploy_model(best, model_name="best-aws", authentication={"bucket": "pycaret-test"})
# + [markdown] papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "completed"} tags=[]
# # 15. Get Config / Set Config
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "completed"} tags=[]
X_train = get_config("X_train")
X_train.head()
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "completed"} tags=[]
get_config("seed")
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "completed"} tags=[]
from pycaret.classification import set_config
set_config("seed", 999)
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "completed"} tags=[]
get_config("seed")
# + [markdown] papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "completed"} tags=[]
# # 16. MLFlow UI
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "completed"} tags=[]
# # !mlflow ui
# + [markdown] papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "completed"} tags=[]
# # End
# Thank you. For more information / tutorials on PyCaret, please visit https://www.pycaret.org
| pycaret_v2/notebook/home-credit-default-risk/pycaret-2-classification-home-credit-default-risk.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] colab_type="text" id="view-in-github"
# <a href="https://colab.research.google.com/github/KamrulSh/Python_Practice/blob/master/P16_stack.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
#
# + [markdown] id="4t356i-fglCo"
# # stack implementation using list (method 1)
#
# + colab={"base_uri": "https://localhost:8080/"} id="UUmhGt92gDT2" outputId="bd31bcf4-c4d6-4327-a3ce-81b981771a40"
stack = []
# append() function to push an
# element onto the stack
stack.append('a')
stack.append('b')
print('Initial stack:')
print(stack)
# pop() function to pop
# elements from the stack in
# LIFO order
print('Elements poped from stack:')
print(stack.pop())
print(stack.pop())
print('Stack after elements are poped:')
print(stack)
# popping an empty list would raise IndexError:
# print(stack.pop())
# -
# ## stack implementation using list (method 2)
#
# +
class Stack:
    """LIFO stack backed by a Python list (top of stack = end of list)."""

    def __init__(self):
        self.stack = []

    # Push val onto the top and echo it back to the caller
    def push(self, val):
        self.stack.append(val)
        return val

    # The stack is empty exactly when the backing list has no elements
    def isEmpty(self):
        return len(self.stack) == 0

    # Remove and return the top element, or a message string when empty
    def pop(self):
        return ("No element in the Stack") if self.isEmpty() else self.stack.pop()

    # Return the top element without removing it, or a message string when empty
    def peek(self):
        return ("No element in the Stack") if self.isEmpty() else self.stack[-1]

    # Expose the raw backing list (bottom -> top)
    def print(self):
        return self.stack
# Exercise the list-backed Stack: push, inspect, pop past empty, peek
stack = Stack()
print("push:", stack.push(1))
print("push:", stack.push(2))
print(stack.print())
print("pop:", stack.pop())
print("pop:", stack.pop())
print("pop:", stack.pop())  # stack is already empty -> message string
print(stack.print())
print("peek:", stack.peek())  # empty -> message string
print("push:", stack.push(3))
print("push:", stack.push(4))
print(stack.print())
print("peek:", stack.peek())
# -
# ### stack implementation using list (method 3)
#
# +
class Stack:
    """Fixed-capacity stack on a preallocated list.

    `top` is the index of the current top element (-1 when empty).
    Overflow/underflow/empty conditions are reported by returning message
    strings, matching the other examples in this notebook.
    """

    def __init__(self, size):
        # Bug fix: the original stored the capacity in `self.size`, which
        # shadowed the `size()` method below and made `stack.size()` raise
        # TypeError ('int' object is not callable). The capacity now lives
        # in `self.capacity` so the method is reachable.
        self.stack = [None] * size
        self.capacity = size
        self.top = -1

    # Push val on top of the stack; report overflow when at capacity
    def push(self, val):
        if self.isFull():
            return ("Stack overflow")
        else:
            self.top += 1
            self.stack[self.top] = val
            return val

    # Stack is full when the capacity equals top + 1
    def isFull(self):
        return self.capacity == self.top + 1

    # Stack is empty when top is -1
    def isEmpty(self):
        return self.top == -1

    # Pop the top element, clearing its slot; report underflow when empty
    def pop(self):
        if self.isEmpty():
            return ("Stack underflow")
        else:
            topVal = self.stack[self.top]
            self.stack[self.top] = None
            self.top -= 1
            return topVal

    # Look at the top element without removing it
    def peek(self):
        if self.isEmpty():
            return ("Stack empty")
        else:
            return self.stack[self.top]

    # Number of elements currently on the stack
    def size(self):
        return self.top + 1

    # Expose the raw backing list (unused slots are None)
    def print(self):
        return self.stack
stack = Stack(3)
print("push:", stack.push(1))
print("push:", stack.push(2))
print(stack.print())
print("pop:", stack.pop())
print("pop:", stack.pop())
print(stack.print())
print("pop:", stack.pop())
print("peek:", stack.peek())
print("push:", stack.push(3))
print("push:", stack.push(4))
print("push:", stack.push(5))
print("push:", stack.push(6))
print(stack.print())
print("peek:", stack.peek())
# + [markdown] id="Xy1AWoK4hz9x"
# # **using collections.deque**
#
# + colab={"base_uri": "https://localhost:8080/"} id="BRPPArF4gxZd" outputId="bfa09157-ac9b-4f4e-aa7c-6de28f744313"
from collections import deque

# A deque supports O(1) pushes and pops at both ends; use the right
# end as the top of the stack.
stack = deque()
for item in (1, 2, 3):
    stack.append(item)
print(stack)
# pop() removes from the right end, i.e. LIFO order.
print("pop:", stack.pop())
# appendleft() inserts at the left (bottom) end of the deque.
stack.appendleft(11)
print(stack)
stack.append(12)
print(stack)
print("popleft:", stack.popleft())
print(stack)
# Peeking is just indexing the rightmost element.
print("peek:", stack[-1])
print("pop:", stack.pop())
print(stack)
print("popleft:", stack.popleft())
print(stack)
# + [markdown] id="rUDFj86FkVKN"
# # **using queue module**
#
# + colab={"base_uri": "https://localhost:8080/"} id="1eIVJG-VlOHv" outputId="b425d3c2-5dcd-4652-dab8-6718f94382d1"
from queue import LifoQueue

# LifoQueue is a thread-safe stack with an optional capacity bound.
stack = LifoQueue(maxsize=3)
# qsize() reports how many elements the stack currently holds.
print("Size:", stack.qsize())
# put() pushes an element onto the stack.
stack.put(1)
print("Full:", stack.full())
for item in (2, 3):
    stack.put(item)
print("Full:", stack.full())
print("Size:", stack.qsize())
# get() pops elements in LIFO order.
print("pop:", stack.get())
print("pop:", stack.get())
print("pop:", stack.get())
print("Empty:", stack.empty())
# + [markdown] id="Td8pE0BonR2v"
# # **using singly linked list**
#
# + colab={"base_uri": "https://localhost:8080/"} id="IpxaHAtgnVrL" outputId="be8dbed3-dfc6-4474-d2a4-ad5e16212ec6"
# Stack implemented on top of a singly linked list: the head of the
# list is the top of the stack, so push and pop are O(1).
class StackNode:
    """A single node holding a value and a link to the node beneath it."""

    def __init__(self, value):
        self.value = value
        self.next = None


class Stack:
    """LIFO stack whose top element is the head of a singly linked list."""

    def __init__(self):
        self.head = None
        self.size = 0

    def __str__(self):
        # Render the chain top-to-bottom, e.g. "3->2->1->None".
        parts = []
        node = self.head
        while node is not None:
            parts.append(str(node.value))
            node = node.next
        parts.append("None")
        return "->".join(parts)

    def getSize(self):
        # Number of elements currently stored.
        return self.size

    def isEmpty(self):
        return self.size == 0

    def peek(self):
        # Top value without removal, or a message when the stack is empty.
        if self.head is None:
            return "Empty stack"
        return self.head.value

    def push(self, value):
        # A new node always becomes the head of the chain.
        node = StackNode(value)
        node.next = self.head
        self.head = node
        self.size += 1

    def pop(self):
        # Unlink and return the head value; report when the stack is empty.
        if self.head is None:
            return "Empty stack"
        removed = self.head
        self.head = removed.next
        self.size -= 1
        return removed.value


# Driver Code
if __name__ == "__main__":
    stack = Stack()
    for i in range(1, 5):
        stack.push(i)
        print(f"Push: {i} | {stack}")
    print("Peek:", stack.peek())
    for _ in range(1, 6):
        remove = stack.pop()
        print(f"Pop: {remove} | {stack}")
    print("Peek:", stack.peek())
| data structures/DS3_stack.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] colab_type="text" id="DweYe9FcbMK_"
# ##### Copyright 2018 The TensorFlow Authors.
#
# + cellView="form" colab={} colab_type="code" id="AVV2e0XKbJeX"
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] colab_type="text" id="sUtoed20cRJJ"
# # Tải dữ liệu văn bản
# + [markdown] colab_type="text" id="1ap_W4aQcgNT"
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://www.tensorflow.org/tutorials/load_data/text"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />Xem trên TensorFlow.org</a>
# </td>
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/vi/tutorials/load_data/text.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Chạy trên Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/vi/tutorials/load_data/text.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />Xem mã nguồn trên GitHub</a>
# </td>
# <td>
# <a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/vi/tutorials/load_data/text.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Tải notebook</a>
# </td>
# </table>
# -
# Cộng đồng TensorFlow tại Việt Nam đã và đang dịch những tài liệu này từ nguyên bản tiếng Anh. Những bản dịch này được hoàn thiện dựa trên sự nỗ lực đóng góp từ cộng đồng lập trình viên sử dụng TensorFlow, và điều này có thể không đảm bảo được tính cập nhật của bản dịch đối với [Tài liệu chính thức bằng tiếng Anh](https://www.tensorflow.org/?hl=en) này. Nếu bạn có bất kỳ đề xuất nào nhằm cải thiện bản dịch này, vui lòng tạo Pull request đến kho chứa trên GitHub của [tensorflow/docs-l10n](https://github.com/tensorflow/docs-l10n). Để đăng ký dịch hoặc cải thiện nội dung bản dịch, các bạn hãy liên hệ và đặt vấn đề tại [docs-vi@tensorflow.org](https://groups.google.com/a/tensorflow.org/forum/#!forum/docs-vi).
# + [markdown] colab_type="text" id="NWeQAo0Ec_BL"
# Trong bài viết này chúng ta sẽ cùng tìm hiểu về cách sử dụng `tf.data.TextLineDataset` để tạo các mẫu dữ liệu từ tệp văn bản sẵn có. `TextLineDataset` được dùng để thiết kế một tập dữ liệu từ các tệp văn bản khác nhau, trong đó mỗi dòng trong văn bản gốc sẽ ứng với một mẫu trong tập dữ liệu (phù hợp với các văn bản được tổ chức theo dòng như thơ hoặc log lỗi).
#
# Ta sẽ sử dụng ba bản dịch tiếng Anh của tác phẩm Illiad được sáng tác bởi Homer để xây dựng mô hình cho phép xuất ra bản dịch tương ứng của từng dòng trong văn bản.
# + [markdown] colab_type="text" id="fgZ9gjmPfSnK"
# ## Chuẩn bị
#
# + colab={} colab_type="code" id="baYFZMW_bJHh"
import tensorflow as tf
import tensorflow_datasets as tfds
import os
# + [markdown] colab_type="text" id="YWVWjyIkffau"
# Ta sẽ sử dụng bản dịch của các dịch giả sau:
#
# - [<NAME>](https://en.wikipedia.org/wiki/William_Cowper) — [bản dịch](https://storage.googleapis.com/download.tensorflow.org/data/illiad/cowper.txt)
#
# - [<NAME>](https://en.wikipedia.org/wiki/Edward_Smith-Stanley,_14th_Earl_of_Derby) — [bản dịch](https://storage.googleapis.com/download.tensorflow.org/data/illiad/derby.txt)
#
# - [<NAME>](https://en.wikipedia.org/wiki/Samuel_Butler_%28novelist%29) — [bản dịch](https://storage.googleapis.com/download.tensorflow.org/data/illiad/butler.txt)
#
# Các văn bản được sử dụng đều đã được tiền xử lý dữ liệu cơ bản (xóa header/footer của tài liệu, số dòng và tiêu đề chương). Chúng ta sẽ phải tải xuống các tệp này.
# + colab={} colab_type="code" id="4YlKQthEYlFw"
# Download the three Illiad translations into the local Keras cache.
DIRECTORY_URL = 'https://storage.googleapis.com/download.tensorflow.org/data/illiad/'
FILE_NAMES = ['cowper.txt', 'derby.txt', 'butler.txt']
for name in FILE_NAMES:
    text_dir = tf.keras.utils.get_file(name, origin=DIRECTORY_URL+name)
# All files land in the same cache directory; keep it for later joins.
parent_dir = os.path.dirname(text_dir)
parent_dir
# + [markdown] colab_type="text" id="q3sDy6nuXoNp"
# ## Xây dựng tập dữ liệu từ văn bản
#
# Đầu tiên ta sẽ tạo ra các tập dữ liệu tương ứng với từng tệp văn bản.
#
# Về sau các tập dữ liệu này sẽ được gộp thành một tập lớn, do đó ta cần ghi chú lại tập dữ liệu gốc ứng với từng mẫu. Để làm được điều này, ta sẽ sử dụng `tf.data.Dataset.map` kết hợp với một hàm đánh nhãn. Khi truyền vào một mẫu, hàm sẽ trả về cặp dữ liệu `(example, label)` tương ứng.
# + colab={} colab_type="code" id="K0BjCOpOh7Ch"
def labeler(example, index):
    """Pair a text line with its source-file index as an int64 label."""
    return example, tf.cast(index, tf.int64)

labeled_data_sets = []
# Build one labeled dataset per translation file.
for i, file_name in enumerate(FILE_NAMES):
    lines_dataset = tf.data.TextLineDataset(os.path.join(parent_dir, file_name))
    # NOTE(review): the lambda closes over the loop variable `i`; presumably
    # tf.data traces it at map() time so each dataset keeps its own label —
    # confirm against the tf.data tracing semantics of the pinned TF version.
    labeled_dataset = lines_dataset.map(lambda ex: labeler(ex, i))
    labeled_data_sets.append(labeled_dataset)
# + [markdown] colab_type="text" id="M8PHK5J_cXE5"
# Gộp các tập đã dán nhãn thành một tập dữ liệu lớn.
# + colab={} colab_type="code" id="6jAeYkTIi9-2"
# Shuffle-buffer size and batch/test-split sizes for the combined dataset.
BUFFER_SIZE = 50000
BATCH_SIZE = 64
TAKE_SIZE = 5000
# + colab={} colab_type="code" id="Qd544E-Sh63L"
# Concatenate the three labeled datasets into one, then shuffle once;
# reshuffle_each_iteration=False keeps the later train/test split stable.
all_labeled_data = labeled_data_sets[0]
for labeled_dataset in labeled_data_sets[1:]:
    all_labeled_data = all_labeled_data.concatenate(labeled_dataset)
all_labeled_data = all_labeled_data.shuffle(
    BUFFER_SIZE, reshuffle_each_iteration=False)
# + [markdown] colab_type="text" id="r4JEHrJXeG5k"
# Để xem cặp `(example, label)` trả về, ta có thể sử dụng các hàm `tf.data.Dataset.take` and `print`. Giá trị trong thuộc tính `numpy` chính là giá trị của Tensor.
# + colab={} colab_type="code" id="gywKlN0xh6u5"
# Preview a few (text, label) tensor pairs from the combined dataset.
for ex in all_labeled_data.take(5):
    print(ex)
# + [markdown] colab_type="text" id="5rrpU2_sfDh0"
# ## Encode mỗi dòng văn bản
#
# Các mô hình học máy nhận đầu vào là số. Do đó để xây dựng mô hình cho dữ liệu văn bản, các chuỗi văn bản cần được chuyển thành chuỗi các số. Nói cách khác, ta cần ánh xạ mỗi từ phân biệt thành một số phân biệt.
#
# ### Xây dựng danh sách từ vựng
#
# Để xây dựng danh sách từ vựng, ta cần tách các chuỗi văn bản thành tập hợp các từ. Có nhiều cách để tách từ trong TensorFlow và Python. Tuy nhiên trong bài viết này, ta sẽ thực hiện như sau:
#
# 1. Xét chuỗi văn bản trong thuộc tính `numpy` của từng mẫu.
# 2. Sử dụng `tfds.features.text.Tokenizer` để tách chuỗi thành các token.
# 3. Đưa token thu được vào cấu trúc set trong Python để loại bỏ các token trùng lặp.
# 4. Lưu số lượng từ trong danh sách từ vựng để sử dụng về sau.
# + colab={} colab_type="code" id="YkHtbGnDh6mg"
# Build the vocabulary: tokenize every line eagerly (via .numpy()) and
# collect the unique tokens into a set.
tokenizer = tfds.features.text.Tokenizer()
vocabulary_set = set()
for text_tensor, _ in all_labeled_data:
    some_tokens = tokenizer.tokenize(text_tensor.numpy())
    vocabulary_set.update(some_tokens)
# Vocabulary size is reused later to size the embedding layer.
vocab_size = len(vocabulary_set)
vocab_size
# + [markdown] colab_type="text" id="0W35VJqAh9zs"
# ### Encode các mẫu dữ liệu
#
# Ta sẽ tạo encoder bằng cách truyền `vocabulary_set` vào `tfds.features.text.TokenTextEncoder`. Phương thức `encode` của encoder nhận đầu vào là một chuỗi văn bản và trả về là chuỗi các số.
# + colab={} colab_type="code" id="gkxJIVAth6j0"
# Encoder mapping each vocabulary token to a distinct integer id.
encoder = tfds.features.text.TokenTextEncoder(vocabulary_set)
# + [markdown] colab_type="text" id="v6S5Qyabi-vo"
# Thử thực hiện encode trên một chuỗi văn bản mẫu để kiểm tra kết quả trả về.
# + colab={} colab_type="code" id="jgxPZaxUuTbk"
# Sanity-check the encoder on the first example line.
example_text = next(iter(all_labeled_data))[0].numpy()
print(example_text)
# + colab={} colab_type="code" id="XoVpKR3qj5yb"
encoded_example = encoder.encode(example_text)
print(encoded_example)
# + [markdown] colab_type="text" id="p9qHM0v8k_Mg"
# Để sử dụng encoder với toàn bộ văn bản, ta cần "đóng gói" (wrap) nó vào trong một đối tượng là `tf.py_function` và truyền đối tượng này vào phương thức `map` của tập dữ liệu.
# + colab={} colab_type="code" id="HcIQ7LOTh6eT"
def encode(text_tensor, label):
    """Eagerly encode one text tensor into a list of token ids.

    Runs outside graph mode (invoked through tf.py_function below), which
    is why .numpy() is available on the tensor.
    """
    encoded_text = encoder.encode(text_tensor.numpy())
    return encoded_text, label
# + [markdown] colab_type="text" id="eES_Z1ia-Om-"
# Ta sẽ sử dụng `Dataset.map` để chạy hàm với từng mẫu trong tập dữ liệu. `Dataset.map` thực thi trên ở chế độ đồ thị:
#
# * Mỗi tensor trong đồ thị không chứa giá trị.
# * Ở chế độ đồ thị, ta chỉ có thể sử dụng toán tử (Ops) và hàm toán học (functions) trong TensorFlow.
#
# Do đó ta không thể truyền trực tiếp hàm đã viết vào `.map` mà phải thông qua `tf.py_function`. `tf.py_function` sẽ truyền tensor (bao gồm giá trị và phương thức `.numpy()` để truy cập giá trị đó) vào hàm đã viết.
#
# + colab={} colab_type="code" id="KmQVsAgJ-RM0"
def encode_map_fn(text, label):
    """Graph-compatible wrapper around `encode` for use with Dataset.map."""
    # py_func doesn't set the shape of the returned tensors.
    encoded_text, label = tf.py_function(encode,
                                         inp=[text, label],
                                         Tout=(tf.int64, tf.int64))
    # `tf.data.Datasets` work best if all components have a shape set
    # so set the shapes manually:
    encoded_text.set_shape([None])  # variable-length token sequence
    label.set_shape([])             # scalar label
    return encoded_text, label

all_encoded_data = all_labeled_data.map(encode_map_fn)
# + [markdown] colab_type="text" id="_YZToSXSm0qr"
# ## Chia tập dữ liệu thành batch huấn luyện và batch kiểm thử
#
# Ta sẽ sử dụng `tf.data.Dataset.take` và `tf.data.Dataset.skip` để chia tập dữ liệu thành một tập nhỏ dành cho việc kiểm thử và tập lớn hơn còn lại cho việc huấn luyện.
#
# Trước khi đưa vào mô hình, tập dữ liệu cần được tổ chức thành từng batch. Thông thường các mẫu dữ liệu thuộc cùng một batch sẽ có cùng kích thước với nhau. Tuy nhiên trong dữ liệu hiện có, mỗi mẫu có kích thước khác nhau do mỗi dòng trong văn bản gốc có số lượng từ khác nhau. Vì thế ta sẽ sử dụng `tf.data.Dataset.padded_batch` (thay vì `batch`) để tất cả các mẫu có cùng kích thước bằng cách đệm thêm cho chúng.
# + colab={} colab_type="code" id="r-rmbijQh6bf"
# Hold out the first TAKE_SIZE examples for testing; train on the rest.
# padded_batch pads each batch's sequences to a common length with zeros.
train_data = all_encoded_data.skip(TAKE_SIZE).shuffle(BUFFER_SIZE)
train_data = train_data.padded_batch(BATCH_SIZE)
test_data = all_encoded_data.take(TAKE_SIZE)
test_data = test_data.padded_batch(BATCH_SIZE)
# + [markdown] colab_type="text" id="Xdz7SVwmqi1l"
# Kể từ bây giờ, dữ liệu trong `test_data` và `train_data` không tổ chức theo dạng tập hợp các cặp `(example, label)` mà là tập hợp các batch. Mỗi batch là một cặp *(nhiều mẫu, nhiều nhãn)* được biễu diễn dưới dạng mảng.
#
# Ví dụ:
# + colab={} colab_type="code" id="kMslWfuwoqpB"
# Each batch is a (texts, labels) pair; inspect the first element of one batch.
sample_text, sample_labels = next(iter(test_data))
sample_text[0], sample_labels[0]
# + [markdown] colab_type="text" id="UI4I6_Sa0vWu"
# Số lượng từ trong danh sách từ vựng lúc này cần tăng lên 1 vì ta vừa bổ sung một token mới là số 0 được sử dụng trong quá trình đệm dữ liệu.
# + colab={} colab_type="code" id="IlD1Lli91vuc"
# Account for the padding token (id 0) introduced by padded_batch.
vocab_size += 1
# + [markdown] colab_type="text" id="K8SUhGFNsmRi"
# ## Xây dựng mô hình
# + colab={} colab_type="code" id="QJgI1pow2YR9"
# Sequential model: layers are applied in the order they are added.
model = tf.keras.Sequential()
# + [markdown] colab_type="text" id="wi0iiKLTKdoF"
# Tầng đầu tiên của mô hình sẽ chuyển các chuỗi văn bản được biểu diễn dưới dạng số thành các vector embedding dày đặc. Xem thêm tại [Hướng dẫn về word embeddings](../text/word_embeddings.ipynb).
# + colab={} colab_type="code" id="DR6-ctbY638P"
# Map token ids to 64-dimensional dense embedding vectors.
model.add(tf.keras.layers.Embedding(vocab_size, 64))
# + [markdown] colab_type="text" id="_8OJOPohKh1q"
# Tiếp theo sẽ là tầng [Bộ nhớ Ngắn hạn Dài - *Long Short-Term Memory (LSTM)*](http://colah.github.io/posts/2015-08-Understanding-LSTMs/) cho phép mô hình xác định ý nghĩa của một từ trong ngữ cảnh của nó. Bidirectional được bổ sung bên ngoài tầng LSTM giúp mô hình học được mối quan hệ giữa điểm dữ liệu được xét và các điểm trước và sau nó.
# + colab={} colab_type="code" id="x6rnq6DN_WUs"
# Bidirectional LSTM so each token sees both preceding and following context.
model.add(tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64)))
# + [markdown] colab_type="text" id="cdffbMr5LF1g"
# Cuối cùng ta sẽ cần một hoặc nhiều tầng kết nối dày đặc (*densely connected layer*), trong đó tầng cuối cùng sẽ đóng vai trò là tầng đầu ra. Tầng đầu ra sẽ trả về xác suất ứng với từng nhãn. Nhãn có xác suất cao nhất chính là giá trị tiên đoán của mô hình.
# + colab={} colab_type="code" id="QTEaNSnLCsv5"
# One or more dense layers.
# Edit the list in the `for` line to experiment with layer sizes.
for units in [64, 64]:
    model.add(tf.keras.layers.Dense(units, activation='relu'))
# Output layer. The first argument is the number of labels
# (one per translation source).
model.add(tf.keras.layers.Dense(3))
# + [markdown] colab_type="text" id="zLHPU8q5DLi_"
# Ta sẽ biên dịch mô hình với hàm mất mát là `sparse_categorical_crossentropy` (dành cho mô hình phân loại) và trình tối ưu hóa là `adam` (có nhiều lựa chọn, tuy nhiên `adam` là lựa chọn phổ biến nhất).
# + colab={} colab_type="code" id="pkTBUVO4h6Y5"
# from_logits=True because the output layer applies no softmax activation.
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
# + [markdown] colab_type="text" id="DM-HLo5NDhql"
# ## Huấn luyện mô hình
#
# Mô hình đã xây dựng chạy trên tập dữ liệu cho kết quả tương đối khả quan (khoảng 83%).
# + colab={} colab_type="code" id="aLtO33tNh6V8"
# Train for three epochs, validating against the held-out split each epoch.
model.fit(train_data, epochs=3, validation_data=test_data)
# + colab={} colab_type="code" id="KTPCYf_Jh6TH"
# Report final held-out loss and accuracy.
eval_loss, eval_acc = model.evaluate(test_data)
print('\nEval loss: {:.3f}, Eval accuracy: {:.3f}'.format(eval_loss, eval_acc))
| site/vi/tutorials/load_data/text.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Circos on Jupyter: Run Circos in your browser
#
# Welcome. Select a notebook below to get started.
# The arrangement is meant to mirror the documentation on the Circos site. If in doubt, start with the one at the top and work down.
#
# ## Start here the first time
# [Getting Circos Up and Running](Getting_Circos_Up_and_Running.ipynb)
#
#
# ### 1. Quick Guide tutorials
#
# [Quick Start: Part 1: Hello World](notebooks/Quick_Start_Part1.ipynb)
# [Quick Start: Part 2: Ticks & Labels](notebooks/Quick_Start_Part2.ipynb)
# [Quick Start: Part 3: Ideogram Selection, Scale, Color & Orientation](notebooks/Quick_Start_Part3.ipynb)
# [Quick Start: Part 4: Links & Rules](notebooks/Quick_Start_Part4.ipynb)
# [Quick Start: Part 5: Histograms](notebooks/Quick_Start_Part5.ipynb)
# [Quick Start: Part 6: Axes & Backgrounds](notebooks/Quick_Start_Part6.ipynb)
# [Quick Start: Part 7: Heat Maps & Colors](notebooks/Quick_Start_Part7.ipynb)
# [Quick Start: Part 8: Text](notebooks/Quick_Start_Part8.ipynb)
# [Quick Start: Part 9: Automation & Macros](notebooks/Quick_Start_Part9.ipynb)
#
#
#
# ### 2. Drawing Ideograms tutorials
#
# [Drawing Ideograms: Part 1: Ideograms](notebooks/Drawing_ideograms_Part1.ipynb)
# [Drawing Ideograms: Part 2: Karyotypes](notebooks/Drawing_ideograms_Part2.ipynb)
# [Drawing Ideograms: Part 3: Filtering](notebooks/Drawing_ideograms_Part3.ipynb)
# [Drawing Ideograms: Part 4: Ordering](notebooks/Drawing_ideograms_Part4.ipynb)
# [Drawing Ideograms: Part 5: Cropping](notebooks/Drawing_ideograms_Part5.ipynb)
# [Drawing Ideograms: Part 6: Spacing and Axis Breaks](notebooks/Drawing_ideograms_Part6.ipynb)
# [Drawing Ideograms: Part 7: Tags](notebooks/Drawing_ideograms_Part7.ipynb)
# [Drawing Ideograms: Part 8: Variable Radius](notebooks/Drawing_ideograms_Part8.ipynb)
# [Drawing Ideograms: Part 9: Labels](notebooks/Drawing_ideograms_Part9.ipynb)
# [Drawing Ideograms: Part 10: Progression and Orientation](notebooks/Drawing_ideograms_Part10.ipynb)
#
# ### 3. Highlights tutorials
#
# [Highlights: Part 1: Wedge Highlights](notebooks/Highlights_Part1.ipynb)
# [Highlights: Part 2: Highlight Parameters - Part I - Embedded in Data File](notebooks/Highlights_Part2.ipynb)
# [Highlights: Part 3: Highlight Parameters - Part II - Using z-depth](notebooks/Highlights_Part3.ipynb)
# [Highlights: Part 4: Highlight Parameters - Part III - Radial Position](notebooks/Highlights_Part4.ipynb)
# [Highlights: Part 5: Ideogram Highlights](notebooks/Highlights_Part5.ipynb)
# [Highlights: Part 6: Recipe 1 - Ideogram Highlights](notebooks/Highlights_Part6.ipynb)
# [Highlights: Part 7: Recipe 2 - Focusing on a Genome Region](notebooks/Highlights_Part7.ipynb)
# [Highlights: Part 8: Recipe 3 - Plot Axis Range Highlights](notebooks/Highlights_Part8.ipynb)
# [Highlights: Part 9: Drawing on Top of Data](notebooks/Highlights_Part9.ipynb)
#
# ### 4. Tick Marks, Grids, and Labels tutorials
#
# [Tick marks, grids, and labels : Part 1: Tick Marks - Basics](notebooks/TickMarks_Part1.ipynb)
# [Tick marks, grids, and labels : Part 2: Tick Marks - Margins](notebooks/TickMarks_Part2.ipynb)
# [Tick marks, grids, and labels : Part 3: Tick Marks - Label Margins](notebooks/TickMarks_Part3.ipynb)
# [Tick marks, grids, and labels : Part 4: Tick Marks - Offsets](notebooks/TickMarks_Part4.ipynb)
# [Tick marks, grids, and labels : Part 5: Grids](notebooks/TickMarks_Part5.ipynb)
# [Tick marks, grids, and labels : Part 6: Label Formats](notebooks/TickMarks_Part6.ipynb)
# [Tick marks, grids, and labels : Part 7: Relative Ticks](notebooks/TickMarks_Part7.ipynb)
# [Tick marks, grids, and labels : Part 8: Ticks at Specific Positions](notebooks/TickMarks_Part8.ipynb)
# [Tick marks, grids, and labels : Part 9: Tick Rings](notebooks/TickMarks_Part9.ipynb)
#
# ### 5. Links and Relationships tutorials
#
# [Links and relationships : Part 1: Drawing Basic Links](notebooks/Links_Part1.ipynb)
# [Links and relationships : Part 2: Link Geometry](notebooks/Links_Part2.ipynb)
# [Links and relationships : Part 3: Link Formatting](notebooks/Links_Part3.ipynb)
# [Links and relationships : Part 4: Link Rules - Part I](notebooks/Links_Part4.ipynb)
# [Links and relationships : Part 5: Link Rules - Part II](notebooks/Links_Part5.ipynb)
# [Links and relationships : Part 6: Link Rules - Part III](notebooks/Links_Part6.ipynb)
# [Links and relationships : Part 7: Link Rules - Part IV](notebooks/Links_Part7.ipynb)
# [Links and relationships : Part 8: Link Rules - Part V](notebooks/Links_Part8.ipynb)
# [Links and relationships : Part 9: Ribbons](notebooks/Links_Part9.ipynb)
# [Links and relationships : Part 10: Ribbon Twists](notebooks/Links_Part10.ipynb)
#
# ### 6. 2D Data Tracks tutorials
#
# [2D Data tracks : Part 1: Scatter Plots](notebooks/2d_data_tracks_Part1.ipynb)
# [2D Data tracks : Part 2: Line Plots](notebooks/2d_data_tracks_Part2.ipynb)
# [2D Data tracks : Part 3: Histograms](notebooks/2d_data_tracks_Part3.ipynb)
# [2D Data tracks : Part 4: Tiles](notebooks/2d_data_tracks_Part4.ipynb)
# [2D Data tracks : Part 5: Heat Maps](notebooks/2d_data_tracks_Part5.ipynb)
# [2D Data tracks : Part 6: Text—Basic](notebooks/2d_data_tracks_Part6.ipynb)
# [2D Data tracks : Part 7: Text—Stacking](notebooks/2d_data_tracks_Part7.ipynb)
# [2D Data tracks : Part 8: Text—Rules](notebooks/2d_data_tracks_Part8.ipynb)
# [2D Data tracks : Part 9: Glyphs—Part I](notebooks/2d_data_tracks_Part9.ipynb)
# [2D Data tracks : Part 10: Glyphs—Part II](notebooks/2d_data_tracks_Part10.ipynb)
# [2D Data tracks : Part 11: Connectors](notebooks/2d_data_tracks_Part11.ipynb)
# [2D Data tracks : Part 12: Putting It All Together](notebooks/2d_data_tracks_Part12.ipynb)
#
# ### 7. Axis Scaling tutorials
#
# [Axis Scaling: Part 1: Global Scale Adjustment](notebooks/AxisScaling_Part1.ipynb)
# [Axis Scaling: Part 2: Global Relative Scale Adjustment](notebooks/AxisScaling_Part2.ipynb)
# [Axis Scaling: Part 3: Adjusting Scale for Regions](notebooks/AxisScaling_Part3.ipynb)
# [Axis Scaling: Part 4: Creating Zoomed Regions](notebooks/AxisScaling_Part4.ipynb)
# [Axis Scaling: Part 5: Overlapping Zoomed Regions](notebooks/AxisScaling_Part5.ipynb)
# [Axis Scaling: Part 6: Smoothing Scale](notebooks/AxisScaling_Part6.ipynb)
# [Axis Scaling: Part 7: Combining Scales](notebooks/AxisScaling_Part7.ipynb)
# [Axis Scaling: Part 8: Drawing Data with Scale Adjustment](notebooks/AxisScaling_Part8.ipynb)
#
# ### 8. Recipes
#
# [Recipes: Part 1: Microbial Genome](notebooks/Recipes_Part1.ipynb)
# [Recipes: Part 2: Link Geometry - Detailed Bezier Control](notebooks/Recipes_Part2.ipynb)
# [Recipes: Part 3: Labeling Karyotype Bands](notebooks/Recipes_Part3.ipynb)
# [Recipes: Part 4: Image Maps](notebooks/Recipes_Part4.ipynb)
# [Recipes: Part 5: Image Transparency and Background](notebooks/Recipes_Part5.ipynb)
# [Recipes: Part 6: Complex Histograms](notebooks/Recipes_Part6.ipynb)
# [Recipes: Part 7: Variable Radius Link Ends](notebooks/Recipes_Part7.ipynb)
# [Recipes: Part 8: Stacked Histograms](notebooks/Recipes_Part8.ipynb)
# [Recipes: Part 9: Transparent Links](notebooks/Recipes_Part9.ipynb)
# [Recipes: Part 10: ID Fields](notebooks/Recipes_Part10.ipynb)
# [Recipes: Part 11: Heat Map Links](notebooks/Recipes_Part11.ipynb)
# [Recipes: Part 12: Inverted Links](notebooks/Recipes_Part12.ipynb)
# [Recipes: Part 13: Copy Number Data](notebooks/Recipes_Part13.ipynb)
# [Recipes: Part 14: Directed Links](notebooks/Recipes_Part14.ipynb)
# [Recipes: Part 15: Pattern Fills](notebooks/Recipes_Part15.ipynb)
# [Recipes: Part 16: Automating Tracks](notebooks/Recipes_Part16.ipynb)
# [Recipes: Part 17: Automating Heatmaps](notebooks/Recipes_Part17.ipynb)
# [Recipes: Part 18: Circular Stacked Bar Plots](notebooks/Recipes_Part18.ipynb)
# [Recipes: Part 19: Cortical Maps—Connectograms](notebooks/Recipes_Part19.ipynb)
# [Recipes: Part 20: Cell Cycle—Part 1](notebooks/Recipes_Part20.ipynb)
# [Recipes: Part 21: Cell Cycle—Part 2](notebooks/Recipes_Part21.ipynb)
# [Recipes: Part 22: Nature Cover Encode Diagram](notebooks/Recipes_Part22.ipynb)
# [Recipes: Part 23: Naming Names](notebooks/Recipes_Part23.ipynb)
#
# ### 9. Helper Tools tutorials
#
# [Helper Tools: Part 1: Generating Random Link Data](notebooks/HelperTools_Part1.ipynb)
# [Helper Tools: Part 2: Reordering Ideograms to Minimize Overlap](notebooks/HelperTools_Part2.ipynb)
# [Helper Tools: Part 3: Bundling Links](notebooks/HelperTools_Part3.ipynb)
# [Helper Tools: Part 4: Filtering Links](notebooks/HelperTools_Part4.ipynb)
# [Helper Tools: Part 5: Visualizing Tabular Data](notebooks/HelperTools_Part5.ipynb)
# [Helper Tools: Part 6: Generate Link Density Tracks](notebooks/HelperTools_Part6.ipynb)
# [Helper Tools: Part 7: Visualizing Categorical Data](notebooks/HelperTools_Part7.ipynb)
#
# ### 10. Image Maps tutorials
#
# [Image Maps: Part 1: Image Maps - Introduction and Clickable Ideogram](notebooks/ImageMaps_Part1.ipynb)
# [Image Maps: Part 2: Image Maps - Clickable Cytogenetic Bands](notebooks/ImageMaps_Part2.ipynb)
# [Image Maps: Part 3: Clickable Tick Marks](notebooks/ImageMaps_Part3.ipynb)
# [Image Maps: Part 4: Clickable Highlights and Data](notebooks/ImageMaps_Part4.ipynb)
#
# ### 11. Reference
#
# See [here](http://circos.ca/documentation/tutorials/reference/) for section #11 as there is no associated tutorial code to run here.
| index.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # NODDI-Watson
# *(Zhang et al. 2012)* proposed a model to estimate the dispersion of neurites (i.e. both axons and neuron dendrites), and is called Neurite Orientation Dispersion and Density Imaging (NODDI). It models dispersion for a single axon bundle using a Watson distribution $W(\kappa,\boldsymbol{\mu})$, that is a particular case of Bingham when $\kappa=\kappa_1=\kappa_2$. As such, it is incapable of modeling axon bending or fanning. The model separately models the CSF, restricted and hindered diffusion as a Gaussian Ball, Stick and Zeppelin model. NODDI models the signal as
# \begin{align}
# E^{\textrm{NODDI}}_{\textrm{Watson}}= \underbrace{f_{\textrm{CSF}}\overbrace{E_{\textrm{iso}}(\cdot|\lambda_{\textrm{CSF}})}^{\textrm{Ball}}}_{\textrm{CSF}}
# +\overbrace{W(\kappa,\boldsymbol{\mu})}^{\textrm{Watson}}\,*_{\mathbb{S}^2}\, \left[\underbrace{f_h\overbrace{E_{\textrm{h}}(\cdot|\lambda_\perp^{\textrm{tort}},\lambda_\parallel)}^{\textrm{Zeppelin}}}_{\textrm{Hindered Extra-Axonal}}+\underbrace{f_r\overbrace{E_r(\cdot|\lambda_\parallel)}^{\textrm{Stick}}}_{\textrm{Intra-Axonal}}\right].
# \end{align}
# To improve the stability of estimated model parameters, NODDI sets several constraints on model parameters:
# - isotropic diffusivity $\lambda_{\textrm{CSF}}$ is fixed to $3\cdot10^-9 m^2/s$
# - parallel diffusivity $\lambda_\parallel$ of the stick and zeppelin are the same and fixed to $1.7\cdot10^-9 m^2/s$
# - A tortuosity model is used to link the perpendicular diffusivity $\lambda_\perp^{\textrm{tort}}$ to the intra- and extra- axonal volume fraction $f_r$, $f_h$ and parallel diffusivity $\lambda_\parallel$.
#
# The only parameters that NODDI-Watson finally fits to the data are Watson parameters $\kappa$, $\mu$ and the volume fractions $f_{\textrm{CSF}}$, $f_h$ and $f_r$.
# # Using Dmipy to set up the NODDI-Watson model
# Setting up the NODDI-Watson model in Dmipy is straight-forward.
# First, we instantiate the model components for the non-dispersed components themselves: a Ball for the CSF, and the Stick and Zeppelin for the intra- and extra-axonal diffusion.
# Instantiate the three non-dispersed compartments: Ball (CSF),
# Stick (intra-axonal) and Zeppelin (hindered extra-axonal).
from dmipy.signal_models import cylinder_models, gaussian_models
ball = gaussian_models.G1Ball()
stick = cylinder_models.C1Stick()
zeppelin = gaussian_models.G2Zeppelin()
# We then Watson-disperse the stick and zeppelin together, making a representation for a dispersed single axon bundle
from dmipy.distributions.distribute_models import SD1WatsonDistributed
watson_dispersed_bundle = SD1WatsonDistributed(models=[stick, zeppelin])
# Then, we can set part of NODDI's model parameter links that we listed above. The bundle representation allows to set tortuous, equal and fixed parameters. First, we show the parameter names of the bundle.
# Inspect which parameters the dispersed bundle exposes for fitting.
watson_dispersed_bundle.parameter_names
# We can then set the tortuous $\lambda_\perp$ parameter, followed by the equal $\lambda_\parallel$ parameters of the stick and zeppelin, and finally we set the remaining $\lambda_\parallel$ to $1.7\times10^{-9}m^2/s$. Note that fixing or linking a parameter removes it permanently from the parameter list, meaning it cannot be used for subsequent parameter links. For this reason, we needed to do the tortuosity constraint before fixing $\lambda_\parallel$.
# Order matters: linking/fixing removes a parameter from the list, so the
# tortuosity constraint must be set before lambda_par is fixed.
watson_dispersed_bundle.set_tortuous_parameter('G2Zeppelin_1_lambda_perp','C1Stick_1_lambda_par','partial_volume_0')
watson_dispersed_bundle.set_equal_parameter('G2Zeppelin_1_lambda_par', 'C1Stick_1_lambda_par')
# Fix the shared parallel diffusivity to 1.7e-9 m^2/s.
watson_dispersed_bundle.set_fixed_parameter('G2Zeppelin_1_lambda_par', 1.7e-9)
# Finally, we put the model together with the scheme as follows.
# Combine the CSF ball with the dispersed bundle into the final NODDI model.
from dmipy.core.modeling_framework import MultiCompartmentModel
NODDI_mod = MultiCompartmentModel(models=[ball, watson_dispersed_bundle])
# You can see the names and cardinality of the to-be optimized parameters in the generated MultiCompartmentModel as follows.
NODDI_mod.parameter_names
# The last thing is to fix the diffusivity of the Ball compartment to static values. We can do that using the model.set_fixed_parameter(parameter_name, value) function.
# Fix CSF isotropic diffusivity to 3e-9 m^2/s.
NODDI_mod.set_fixed_parameter('G1Ball_1_lambda_iso', 3e-9)
# We can visualize the model as follows:
# Render a schematic of the assembled model to disk, then display it.
from IPython.display import Image
NODDI_mod.visualize_model_setup(view=False, cleanup=False)
Image('Model Setup.png')
# # Fitting NODDI to Human Connectome Project data
# To illustrate Dmipy's ease-of-use we also fit the our constructed model to a coronal slice of the HCP data. We will illustrate FODs in the red region of interest later.
# Load the coronal slice of Wu-Minn HCP data bundled with dmipy.
from dmipy.data import saved_data
scheme_hcp, data_hcp = saved_data.wu_minn_hcp_coronal_slice()
# +
import matplotlib.pyplot as plt
import matplotlib.patches as patches
# %matplotlib inline

# Show the b0 image with a red 20x20-voxel region of interest.
fig, ax = plt.subplots(1)
# NOTE(review): imshow's `origin` expects 'upper'/'lower'; origin=True relies
# on lenient handling in older matplotlib — confirm against the pinned version.
ax.imshow(data_hcp[:, 0, :, 0].T, origin=True)
rect = patches.Rectangle((70,70),20,20,linewidth=1,edgecolor='r',facecolor='none')
ax.add_patch(rect)
ax.set_axis_off()
ax.set_title('HCP coronal slice B0 with ROI');
# -
# ## Fit NODDI to HCP data
# Having pathos and numba installed, fitting the model to this coronal slice takes less than 10 minutes.
NODDI_fit_hcp = NODDI_mod.fit(
scheme_hcp, data_hcp, mask=data_hcp[..., 0]>0)
# ## Visualize results
# +
fitted_parameters = NODDI_fit_hcp.fitted_parameters
fig, axs = plt.subplots(2, 2, figsize=[15, 15])
axs = axs.ravel()
counter = 0
for name, values in fitted_parameters.items():
if values.squeeze().ndim != 2:
continue
cf = axs[counter].imshow(values.squeeze().T, origin=True, interpolation='nearest')
axs[counter].set_axis_off()
axs[counter].set_title(name)
fig.colorbar(cf, ax=axs[counter], shrink=0.7)
counter += 1
# -
# Above we see the familiar maps. Remember that partial_volume_0/1 represent fractions of models in the order they were given to the MultiCompartmentMicrostructureModel object: in this case, 0 represents the fraction of Ball (CSF), and the 1 represents the Watson Bundle (white matter). The `SD1WatsonDistributed_1_partial_volume_0` is the normalized volume fraction of the Stick **within the WatsonBundle**.
#
# To get the signal contribution of the Stick (intra-axonal) to the total signal, we must multiply `partial_volume_1 * SD1WatsonDistributed_1_partial_volume_0`. To get the signal contribution of the zeppelin (extra-axonal), we must multiply `partial_volume_1 * (1-SD1WatsonDistributed_1_partial_volume_0)`.
#
# In the bottom-right ODI map you can see that the concentration is highest in the corpus callosum and in coherent white matter bundles. Finally, the $\lambda_\parallel$ and $\lambda_{\textrm{iso}}$ maps are fixed to the values we gave them.
# +
# get total Stick signal contribution
vf_intra = (fitted_parameters['SD1WatsonDistributed_1_partial_volume_0'] *
fitted_parameters['partial_volume_1'])
# get total Zeppelin signal contribution
vf_extra = ((1 - fitted_parameters['SD1WatsonDistributed_1_partial_volume_0']) *
fitted_parameters['partial_volume_1'])
fig, axs = plt.subplots(1, 2, figsize=[15, 7])
axs = axs.ravel()
counter = 0
for name, values in zip(['Total vf_intra - Stick', 'Total vf_extra - Zeppelin'], [vf_intra, vf_extra]):
cf = axs[counter].imshow(values.squeeze().T, origin=True, interpolation='nearest')
axs[counter].set_axis_off()
axs[counter].set_title(name)
fig.colorbar(cf, ax=axs[counter], shrink=0.7)
counter += 1
# -
# ## Estimating Fiber Orientation Distributions (FODs)
# We estimate FODs on the right side of the corpus callosum and use the estimated concentration ODI as background.
from dipy.data import get_sphere
from dipy.viz import window, actor
sphere = get_sphere(name='symmetric724').subdivide()
# The next line produces the FOD sphere functions.
# The lower bound option artificially puts a lower bound on the ODI so they don't get too sharp.
# It does not affect the fitted_parameters themselves.
fods = NODDI_fit_hcp.fod(sphere.vertices, visual_odi_lower_bound=0.08)[70:90,: , 70:90]
import numpy as np
affine = np.eye(4)
volume_res = fitted_parameters['SD1WatsonDistributed_1_SD1Watson_1_odi'][70:90,: , 70:90]
volume_im = actor.slicer(volume_res[:, 0, :, None], interpolation='nearest', affine=affine, opacity=0.7)
ren = window.Renderer()
fod_spheres = actor.odf_slicer(
fods, sphere=sphere, scale=0.9, norm=False)
fod_spheres.display_extent(0, fods.shape[0]-1, 0, fods.shape[1]-1, 0, fods.shape[2]-1)
fod_spheres.RotateX(90)
fod_spheres.RotateZ(180)
fod_spheres.RotateY(180)
ren.add(fod_spheres)
ren.add(volume_im)
window.record(ren, size=[700, 700])
# +
import matplotlib.image as mpimg
img = mpimg.imread('fury.png')
plt.figure(figsize=[10, 10])
plt.imshow(img[100:-97, 100:-85])
plt.title('NODDI-Watson FODs with ODI background', fontsize=20)
plt.axis('off');
# -
# You can see the corpus callosum coming in the from the left and joining the centrum semiovale on the right. The intensity of the background shows ODI of the Watson distribution (inversely related to concentration $\kappa$). Notice it is low in the corpus callosum and higher in non-white matter areas. Notice that NODDI-Watson cannot estimate crossings, which are expected in the centrum semiovale on the right. This biases any comparison/estimation of volume fractions and concentrations in these areas.
from os import remove
remove('fury.png')
# ## Estimating Error Metrics: MSE and $R^2$
# Whenever you fit a model, it's always wise to check out the error of the model fit. Here we estimate the Mean Squared Error (MSE) and the $R^2$ coefficient of determination. In MSE the lower is better, while $R^2$ has an upper bound of one, which represents a perfect model fit.
# +
mse = NODDI_fit_hcp.mean_squared_error(data_hcp)
R2 = NODDI_fit_hcp.R2_coefficient_of_determination(data_hcp)
fig, axs = plt.subplots(1, 2, figsize=[15, 15])
cf = axs[0].imshow(mse.squeeze().T, origin=True, vmax=0.02, cmap='Greys_r')
axs[0].set_axis_off()
fig.colorbar(cf, ax=axs[0], shrink=0.33)
axs[0].set_title('Mean Squared Error', fontsize=20)
cf = axs[1].imshow(R2.squeeze().T, origin=True, vmin=0.6, cmap='Greys_r')
axs[1].set_axis_off()
fig.colorbar(cf, ax=axs[1], shrink=0.33)
axs[1].set_title('R2 - Coefficient of Determination', fontsize=20);
# -
# Ideally, you want the MSE and $R^2$ to show no structure of the underlying data - which would mean the model can at least fit the data equally well everywhere. Instead, in NODDI-Watson we can clearly see the structure of the different brain areas. This means that the model cannot fit the diffusion signal equally well in all brain areas, and that interpretation of model parameters will be biased when comparing them between these different areas.
#
# When doing these studies, ALWAYS verify whether differences in estimated parameters reflect actual tissue changes, or only reflect the limitations of the model itself.
# ## References
# - <NAME>, et al. "NODDI: practical in vivo neurite orientation dispersion and density imaging of the human brain." Neuroimage 61.4 (2012): 1000-1016.
| examples/example_noddi_watson.ipynb |
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .js
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: JavaScript
// language: javascript
// name: jslab
// ---
// # Ways to write HTML in react
// [x] JSX
// [x] React.createElement()
//
// # Props
// [x] They are passed to the component as a parameter
// [x] They should be immutable (They should not be changed directly)
//
// # Styled Components
// [x] They are a way to write CSS in JS
// [x] You can have props in the style
//
// # Hooks
// [x] They are a way to write state in JS
| React/basics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Calibration using graphs
# +
# %matplotlib inline
import os
import sys
results_dir = './results'
os.makedirs(results_dir, exist_ok=True)
from functools import partial
from dask import delayed
from distributed import progress
import dask.bag as bag
sys.path.append(os.path.join('..', '..'))
results_dir = './results'
os.makedirs(results_dir, exist_ok=True)
from matplotlib import pylab
pylab.rcParams['figure.figsize'] = (12.0, 12.0)
pylab.rcParams['image.cmap'] = 'rainbow'
import numpy
from astropy.coordinates import SkyCoord
from astropy import units as u
from astropy.wcs.utils import pixel_to_skycoord
from matplotlib import pyplot as plt
from arl.calibration.solvers import solve_gaintable
from arl.calibration.operations import apply_gaintable
from arl.data.data_models import Image
from arl.data.polarisation import PolarisationFrame
from arl.data.parameters import get_parameter
from arl.visibility.base import create_blockvisibility
from arl.skycomponent.operations import create_skycomponent
from arl.image.deconvolution import deconvolve_cube
from arl.image.operations import show_image
from arl.image.iterators import raster_iter
from arl.visibility.iterators import vis_timeslice_iter
from arl.util.testing_support import create_named_configuration
from arl.imaging import predict_2d, advise_wide_field
from arl.graphs.dask_init import get_dask_Client
from arl.graphs.graphs import create_invert_wstack_graph, create_predict_wstack_graph, \
create_selfcal_graph_list
from arl.graphs.generic_graphs import create_generic_image_graph
from arl.util.graph_support import create_simulate_vis_graph, \
create_predict_gleam_model_graph, create_corrupt_vis_graph, \
create_gleam_model_graph
from arl.pipelines.graphs import create_continuum_imaging_pipeline_graph
from arl.graphs.vis import simple_vis
import logging
log = logging.getLogger()
log.setLevel(logging.DEBUG)
log.addHandler(logging.StreamHandler(sys.stdout))
# -
c=get_dask_Client()
# We create a graph to make the visibility
# +
nfreqwin=3
ntimes=5
frequency=numpy.linspace(0.8e8,1.2e8,nfreqwin)
if nfreqwin > 1:
channel_bandwidth=numpy.array(nfreqwin*[frequency[1]-frequency[0]])
else:
channel_bandwidth=numpy.array([1e7])
times = numpy.linspace(-numpy.pi/3.0, numpy.pi/3.0, ntimes)
phasecentre=SkyCoord(ra=+30.0 * u.deg, dec=-60.0 * u.deg, frame='icrs', equinox='J2000')
vis_graph_list=create_simulate_vis_graph('LOWBD2-CORE',
frequency=frequency,
channel_bandwidth=channel_bandwidth,
times=times,
phasecentre=phasecentre)
# -
# Find the optimum values for wide field imaging
wprojection_planes=1
advice=advise_wide_field(vis_graph_list[0].compute(), guard_band_image=4.0, delA=0.02,
wprojection_planes=wprojection_planes)
vis_slices = advice['vis_slices']
# Now make a graph to fill with a model drawn from GLEAM. We then add phase errors of off 1 radian rms to each station. We will compute this graph in order not to confuse it's processing with the imaging.
corrupted_vis_graph_list = create_predict_gleam_model_graph(vis_graph_list,
frequency=[frequency[len(frequency)//2]],
channel_bandwidth=[channel_bandwidth[len(frequency)//2]],
c_predict_graph=create_predict_wstack_graph,
vis_slices=vis_slices)
corrupted_vis_graph_list = create_corrupt_vis_graph(corrupted_vis_graph_list,
phase_error=1.0)
corrupted_vis_graph_list=c.compute(corrupted_vis_graph_list)
# Now make a graph to construct the LSM. The LSM is drawn from GLEAM as well but only includes sources brighter than 1 Jy
LSM_graph=create_gleam_model_graph(vis_graph_list[len(vis_graph_list)//2],
frequency=[frequency[len(frequency)//2]],
channel_bandwidth=[channel_bandwidth[len(frequency)//2]],
flux_limit=1.0).compute()
# Now make a dirty image to see the effect of the phase errors introduced
dirty_graph = create_invert_wstack_graph(corrupted_vis_graph_list, LSM_graph,
vis_slices=vis_slices, dopsf=False)
future=c.compute(dirty_graph)
dirty=future.result()[0]
show_image(dirty, title='No selfcal')
plt.show()
# First make a selfcal graph in which the different Visibility's are selfcal'ed independently. We will look at the graph for just one Visibility.
selfcal_vis_graph_list = create_selfcal_graph_list(corrupted_vis_graph_list, LSM_graph,
c_predict_graph=create_predict_wstack_graph,
vis_slices=vis_slices,
global_solution=False)
simple_vis(selfcal_vis_graph_list[0])
# Now make a global solution. Note that all Visibilities are now coupled.
selfcal_vis_graph_list = create_selfcal_graph_list(corrupted_vis_graph_list, LSM_graph,
c_predict_graph=create_predict_wstack_graph,
vis_slices=vis_slices,
global_solution=True)
simple_vis(selfcal_vis_graph_list[0])
# The graph for making the dirty image now shows a global synchronisation point. We alleviate this by only sending averaged visibilities to the gather step before averaging over the solution interval, the model visibility is divided out. Only the gaintable is sent back for application.
dirty_graph = create_invert_wstack_graph(selfcal_vis_graph_list, LSM_graph, facets=2,
vis_slices=vis_slices, dopsf=False)
simple_vis(dirty_graph)
future=c.compute(dirty_graph)
dirty=future.result()[0]
show_image(dirty, title='With selfcal')
plt.show()
c.shutdown()
| examples/arl/calibration-pipelines.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Showcase ROUNDING across MSAccess, Python, Matlab & C#
#
# ## Linux dependencies for this notebook:
# - *octave*: install it with:
#
# sudo apt install octave
#
#
# - *dotnet-core*:
#   - Follow [these `apt`(*Debian, Ubuntu*) instructions](https://dotnet.microsoft.com/download/linux-package-manager/debian9/sdk-current).
# to install .NET SDK from MS repository:
# - Source `/etc/profile` (OR append `~/.dotnet/tools` in *PATH*, OR login & logout).
# - Install REPL for *C#*:
#
# dotnet tool install -g dotnet-script
#
# ## INPUT numbers to be rounded
# Interesting numbers taken from: https://stackoverflow.com/a/45424214/548792
a = [0.49999999999999994, 5000000000000001.0, -2.4, 2.4]
#a = [0.499999999999994, 500000000000001.0, -2.4, 2.4] Access input
a
# ## *MSAccess* behavior
# While *Access* internally stores floats as `doubles` (8-bytes) <br>
# **BUT...** the GUI only allows floats with 15 decimals, and not 17 needed to [represent IEEE 754 doubles](https://en.wikipedia.org/wiki/Double-precision_floating-point_format#IEEE_754_double-precision_binary_floating-point_format:_binary64)
# (15 decimals are enough to be preserved when passed through an IEEE 754 double).
#
#
# ```
# Input Out1 Out2 Out3
# 0.499999999999994 0 0.999999999999994 0.5 ## Input had 15 decimals (instead of 17)!!
# 500000000000001 500000000000001 500000000000002 500000000000001 ## Input had 15 digits (instead of 16)!!
# -2.4 -2 -1.9 -2.4
# 2.4 2 2.9 2.4
#
#
# WHERE:
# - Out1: int(Input + 0.5)
# - Out2: Input + 0.5
# - Out3: int(10 * Input + 0.5) / 10
# ```
# COPIED manually from `Out1`, above
r_access = [0, 500000000000001, -2, 2]
# ## Python floats
import math
# %precision 17
r_python = [math.floor(n + 0.5) for n in a]
r_python
# ## Numpy float64
# +
import numpy as np
## Note that `%precision` magic also modifies numpy's precision.
np.set_printoptions(precision=17, floatmode='fixed', suppress=True)
# -
aa = np.array(a)
r_numpy = np.floor(aa + 0.5)
r_numpy
# ## Python DECIMALS
# +
from decimal import Decimal as D, ROUND_HALF_UP
def round_dec(n):
    """Round ``n`` to the nearest integer using Decimal HALF_UP tie-breaking.

    Returns the result as a float so it can sit alongside the other
    implementations' outputs in the comparison table.
    """
    quantum = D('1')  # quantize to zero decimal places, i.e. an integer
    rounded = D(n).quantize(quantum, rounding=ROUND_HALF_UP)
    return float(rounded)
r_decimals = [round_dec(n) for n in a]
r_decimals
# -
# ## Matlab (Octave)
# + magic_args="octave --out r_octave" language="script"
# format long
# a = [0.49999999999999994, 5000000000000001.0, -2.4, 2.4];
# %a = [0.499999999999994, 500000000000001.0, -2.4, 2.4]; % Access input
# disp(sprintf(repmat('%0.17g ', 1, length(a)), floor(a + 0.5)))
# -
r_matlab = [float(n) for n in r_octave.strip().split(' ')]
r_matlab
# ## .Net Core C#
# +
import os
tmpfile = f"/tmp/rounding-{os.getpid()}.csx"
# -
# %%writefile $tmpfile
double [] a = {0.49999999999999994, 5000000000000001.0, -2.4, 2.4};
//double [] a = {0.499999999999994, 500000000000001.0, -2.4, 2.4}; // Access input
for(int i = 0; i < a.GetLength(0); i++)
Console.WriteLine(Math.Floor(a[i] + 0.5).ToString("F"));
# r_dotnet = !dotnet-script $tmpfile
# !rm $tmpfile
r_dotnet = [float(n) for n in r_dotnet]
r_dotnet
# # COMPARE
# +
import pandas as pd
df = pd.DataFrame(np.array([r_access, r_python, r_numpy, r_decimals, r_matlab, r_dotnet]).T,
columns="access python numpy decimals matlab dotnet".split(),
index=a)
display(df)
# -
# ## Comments:
# - All implementations but "decimals" fail to reproduce *access* on number 0.49999999999999994. <br>
# - *Access* works this way because it accepts only "decimals(15)" as input (not IEEE 754 doubles that require 17 decimals).
# - If the same input as *access* is given to all implementations (by uncommenting the lines), then they all agree.
# - For all the rest numbers, all implementations work fine with `floor(n + 0.5)` function.
| Notebooks/Rounding.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#First and Foremost, load packages I need and the data I selected for this project, hotel booking data gathered from July 2015
#to August 2017
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_squared_error
from sklearn.feature_selection import RFE
from sklearn import preprocessing
import seaborn as sns
from sklearn import metrics
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
# %matplotlib inline
df = pd.read_csv("./hotel_bookings.csv")
df.head()
# -
# ## Exploratory Analysis
df.shape
df.describe()
df.columns
#Heat Map to give me an overview of relationship between each numerical variables
sns.heatmap(df.corr(), annot=False);
# ## Business Questions:
# #### 1. How does the rate fluctuate during the whole year? Is the trend the same for Resort and City Hotel?
# #### 2. What are some major difference between 2 types of hotel
# #### 3. Predict Cancelation Probability
# ## 1. When is the best time to take annual leave for Travel?
#To get the hotel rate trend for the whole year, the best approach with this dataset is to build a proper date column
#for time series visualization. Since month names may not be parsed consistently by every package, I map each month
#name to its number and then generate the date variable from year, month and day.
month = df.arrival_date_month.map({'January':1, 'February':2, 'March':3, 'April':4, 'May':5, 'June':6, 'July':7, 'August':8, \
'September':9, 'October':10, 'November':11, 'December':12})
df.insert(5, 'month', month)
arrival_date = pd.to_datetime((df.arrival_date_year*10000+df.month*100+df.arrival_date_day_of_month).apply(str),format='%Y%m%d')
df.insert(3, 'arrival_date', arrival_date)
df.head()
#Then I only collected data I need for current subject
df_rate_trend = df.groupby(by = ['hotel','arrival_date'])['adr'].mean().reset_index(0)
Trend_City_Hotel = df_rate_trend[df_rate_trend.hotel == 'City Hotel']
Trend_Resort_Hotel = df_rate_trend[df_rate_trend.hotel == 'Resort Hotel']
df_rate_trend
def rate_trend_visual(df, hotel_type):
    '''
    Plot (and save) the daily-rate trend line chart for one hotel type.

    INPUT
    df - pandas dataframe with a 'hotel' column and an 'adr' (average daily
         rate) column, indexed by arrival date
    hotel_type - string naming the hotel type to plot: 'Resort Hotel' or
                 'City Hotel'

    OUTPUT
    Line chart showing the daily rate trend (July 2015 - July 2017) for the
    given hotel type
    One saved chart image '<hotel_type>.png' in the current repository, for
    later use in a post or presentation
    '''
    # Bug fix: filter the dataframe that was passed in; the original ignored
    # the df argument and read the module-level global df_rate_trend instead.
    df = df[df.hotel == hotel_type]
    sns.set(rc={'figure.figsize': (10, 4)})
    ax = df['adr'].plot(linewidth=1)
    ax.set_ylabel('Average Daily Rate')
    ax.set_xlabel('Customer Arrival Date')
    ax.set_title('2015-2017 ' + hotel_type + ' Daily Rate Trend')
    ax.figure.savefig(hotel_type + ".png");
#Now use rate trend visualization function to generate charts
rate_trend_visual(df_rate_trend, 'City Hotel')
#Resort yearly rate trend
rate_trend_visual(df_rate_trend, 'Resort Hotel')
# ## 2. Major Difference between 2 types of hotel
#Select Key features I want to know deeper between two hotels, First, I select all numeric variables
df.select_dtypes(include=['number']).columns
#Maybe tease out 'agent', 'company', 'arrival_date_year', 'arrival_date_week_number', 'month', 'arrival_date_day_of_month'
col_name = list(set(df.select_dtypes(include=['number']).columns) - set(['agent', 'company', 'arrival_date_year', \
'arrival_date_week_number', 'month', 'arrival_date_day_of_month']))
#Now I finish preparing my data
df_compare = df[['hotel'] + col_name]
df_compare.head()
df_compare = df_compare.groupby(by='hotel').mean().transpose()
#Compare the difference between the 2 hotels
df_compare['Difference'] = (df_compare['Resort Hotel'] - df_compare['City Hotel'])/df_compare['City Hotel']
df_compare.style.bar(subset=['Difference'], align='mid', color=['#d65f5f', '#5fba7d']).format({'Difference': "{:.2%}"})
# ## 3. Will You Cancel Your Hotel Booking?
# ### Predict whether customers will cancel their booking using Logistic Regression and Decision Tree; Estmate performance of each model.
#Exploratory Analysis for the whole dataset
df.is_canceled.value_counts()
#Calculate the cancellation rate directly from the data: is_canceled is 0/1,
#so its mean is the fraction of cancelled bookings. (The original hard-coded
#the counts 44224 and 75166 copied from the previous cell's output, which
#silently goes stale if the data changes.)
df.is_canceled.mean()
#Data Selection and Cleaning for modeling
#Only 4 columns contain missing values. After analysis, 2 of them, agent and company, are not needed in my model; the
#other 2 appear to be missing through data entry errors rather than deliberately, so I decided to keep only the
#necessary columns for the model and drop the rows with missing data.
df.isnull().sum()
#Drop all missing values in 'children' and 'country' because in this case, missing values exist because of data entry problems
#For Company and agent column, I just deleted the whole column since they're just ID, making no sense for modeling
df.dropna(subset = ['children','country'], inplace = True)
df.shape
#Select Numerical Variables First and create a new dataframe; Create dummy variables for categorical variables then;
#Tease out Agent and Company, which are just ID numbers.
Num_col = list(set(df.select_dtypes(include=['number']).columns) - set(['agent', 'company']))
df_new = df[Num_col]
#Tease out reservation_status and reservation_status_date since they happen always after customers cancel bookings, I assume
#it will not affect cancellation rate
Cat_col = list(set(df.select_dtypes(include=['object']).columns) - set(['reservation_status_date', 'reservation_status']))
#Now add dummy variables
for n in Cat_col:
df_new = pd.concat([df_new, pd.get_dummies(df[n], prefix=n, prefix_sep='_', drop_first=True)], axis = 1)
df_new.shape
# +
#Now ths data is ready! It's time for modeling!
#Split into explanatory and response variables
X = df_new.drop('is_canceled', axis=1)
X = preprocessing.normalize(X)
y = df_new['is_canceled']
#Split into train and test
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
#USE Recursive Feature Elimination in Sklearn to get the best features
logreg = LogisticRegression()
rfe = RFE(logreg)
rfe = rfe.fit(X_train, y_train)
print(rfe.support_)
print(rfe.ranking_);
# +
#Now rfe helps me select the best features
X = df_new.drop('is_canceled', axis=1)
X_Col = X.columns[rfe.support_]
X = X[X_Col]
X = preprocessing.normalize(X)
#Split into train and test
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
#Implement Logistic Regression Model
logreg = LogisticRegression()
logreg.fit(X_train, y_train)
y_test_preds = logreg.predict(X_test)
#r2_score(y_test, y_test_preds)
#metrics.accuracy_score(y_test, y_test_preds)
"The accuracy for logistic model was {} on {} values.".format(metrics.accuracy_score(y_test, y_test_preds), len(y_test))
# -
#This is a roc curve function used to evaluate model performance
def roc_curve_f(y_test, y_test_preds, y_pred_prob, model_name):
'''
INPUT
y_test - pandas series or array including testing y values
y_test_preds - pandas series or array including predicted y values using testing x
y_pred_prob - pandas series or array including predicted y probability values using testing x
model_name - string to show which model was implemented for prediction
OUTPUT
ROC Curve to demonstrate model performance
One saved chart picture in current repository
'''
sns.set(rc={'figure.figsize':(6, 4)})
sns.set(style="white")
sns.set(style="whitegrid", color_codes=True)
logit_roc_auc = roc_auc_score(y_test, y_test_preds)
fpr, tpr, thresholds = roc_curve(y_test, y_pred_prob)
plt.figure()
plt.plot(fpr, tpr, label= model_name + '(area = %0.2f)' % logit_roc_auc)
plt.plot([0, 1], [0, 1],'r--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
plt.legend(loc="lower right")
plt.savefig('Log_ROC')
plt.show()
#Then observe model performance for logistic regression
y_pred_prob = logreg.predict_proba(X_test)[:,1]
roc_curve_f(y_test, y_test_preds, y_pred_prob, 'Logistic Regression')
#Then I tried Decision Tree to see whether I can get a better model
X = df_new.drop('is_canceled', axis=1)
#X = preprocessing.normalize(X)
y = df_new['is_canceled']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
clf_gini = DecisionTreeClassifier(criterion = "gini", random_state = 100, max_depth = 20, min_samples_leaf=4)
clf_gini.fit(X_train, y_train)
y_test_preds = clf_gini.predict(X_test)
metrics.accuracy_score(y_test, y_test_preds)
"The accuracy for Decision Tree Classifier was {} on {} values.".format(metrics.accuracy_score(y_test, y_test_preds), \
len(y_test))
#Then I tried Random Forest Classifier to see whether I can get a better model
from sklearn.ensemble import RandomForestClassifier
X = df_new.drop('is_canceled', axis=1)
y = df_new['is_canceled']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
clf_gini = RandomForestClassifier(criterion = "gini", random_state = 100, max_depth = 20, min_samples_leaf=4)
clf_gini.fit(X_train, y_train)
y_test_preds = clf_gini.predict(X_test)
metrics.accuracy_score(y_test, y_test_preds)
"The accuracy for random Forest Classifier was {} on {} values.".format(metrics.accuracy_score(y_test, y_test_preds), \
len(y_test))
#Get the most important features through Random Forest results.
#Rank the features by Gini importance and list the top 30, most important first.
#(The original index-based ranking returned only 29 features - ranks start at 1
#while range(30) spans 0-29 - listed them in column order rather than importance
#order, and list.index() mapped tied importance values back to the wrong column.)
importance = clf_gini.feature_importances_
top30_idx = np.argsort(importance)[::-1][:30]
[X.columns[i] for i in top30_idx]
#Then observe model performance for Random Forest
y_pred_prob = clf_gini.predict_proba(X_test)[:,1]
roc_curve_f(y_test, y_test_preds, y_pred_prob, 'Random Forest Classifier')
| Hotel_Bookings.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Demonstrating the Workflow of the Online_IO Class: Online Gradient Descent
#
# ### <NAME>, *AMSC PhD, UMD-College Park*
#
# We will demonstrate through the Markdown and code blocks below the workflow of the `Online_IO` class for the `BMPS_online_GD` online inverse optimization option. We will be replicating an experiment from Bärmann, <NAME>, & Schneider's 2018 paper, "An Online-Learning Approach to Inverse Optimization" [1]. The algorithm is also from [1]; our contribution is the implementation, and the construction of the `Online_IO` class.
#
# We provide more detailed explanation regarding the set up of `Online_IO` in the DCZ Jupyter notebook. In this notebook, we will take users through the experiment for [1].
#
# Environment set up comes first! We then load in the experiment data.
# +
import sys
sys.path.insert(0,"C:\\Users\\StephanieAllen\\Documents\\1_AMSC663\\Repository_for_Code")
import math
import copy
import time
import pdb #for debugging
import numpy as np
import pyomo.environ as pyo
from pyomo.opt import SolverFactory
from pyomo.opt import SolverStatus, TerminationCondition
import matplotlib.pyplot as plt #http://www.scipy-lectures.org/intro/matplotlib/matplotlib.html#simple-plot
import pickle
from online_IO_files.online_IO_source.online_IO import Online_IO #importing the GIO class for testing
### Uncomment the line below to generate new data ###
#from online_IO_files.experiments.experiment_BMPS_consumer_behavior_gen_data import num_samples
num_samples = 500
# +
### Loading Data ###
loading_data_info = open("BMPS_p_t.pickle","rb")
p_t_samples = pickle.load(loading_data_info)
loading_data_info.close()
#pdb.set_trace()
loading_data_info2 = open("BMPS_x_t.pickle","rb")
x_t_samples = pickle.load(loading_data_info2)
loading_data_info2.close()
loading_data_info3 = open("BMPS_RHS_t.pickle","rb")
RHS_t_samples = pickle.load(loading_data_info3)
loading_data_info3.close()
loading_data_info4 = open("BMPS_true_utility_vec.pickle","rb")
true_u_vec = pickle.load(loading_data_info4)
true_u_vec = -1*true_u_vec #since we are minimizing the negative
#so the guesses that come back will be negative utility values
loading_data_info4.close()
print("Demonstrating that random values were generated for p_t")
print("p_40",p_t_samples[40])
print("p_80",p_t_samples[80])
print("p_120",p_t_samples[120])
print("p_200",p_t_samples[200])
print("*******************************")
print("Demonstrating that we get different x_t")
print("x_40",np.transpose(x_t_samples[40]))
print("x_80",np.transpose(x_t_samples[80]))
print("x_120",np.transpose(x_t_samples[120]))
print("x_200",np.transpose(x_t_samples[200]))
print("********************")
print("RHS",RHS_t_samples[40])
print("RHS",RHS_t_samples[80])
print("RHS",RHS_t_samples[120])
print("RHS",RHS_t_samples[200])
# -
# We set up the optimization model below from the experiment defined in [1]. Remember, we just have to define the index sets and the parameters!
# +
##### Set up FORWARD Model #######
#a. Sets and Vars
cb_model_BMPS = pyo.ConcreteModel()
cb_model_BMPS.varindex = pyo.RangeSet(1,50)
cb_model_BMPS.x = pyo.Var(cb_model_BMPS.varindex,domain=pyo.NonNegativeReals)
cb_model_BMPS.numvars = pyo.Param(initialize=50)
cb_model_BMPS.eqindex = pyo.RangeSet(1,1)
#b. Parameters
cb_model_BMPS.p_t = pyo.Param(cb_model_BMPS.eqindex,\
cb_model_BMPS.varindex,initialize=0) #do not need to define mutability here
cb_model_BMPS.RHS = pyo.Param([1],initialize={1:0}) #initializing with the value
cb_model_BMPS.uvec = pyo.Param(cb_model_BMPS.varindex,\
initialize=-0.1) #initializing a bit below 0 so that dont have constant objective function issue
# -
# Unlike for the DCZ experiment, we have to define **another** `pyomo` model to represent feasible region for the possible parameterizations of the objective function. We call this the set $\mathcal{C}$ in the documentation. [1]
#
# We noted in the documentation that we had to flip our bounds to be negative because, in converting the maximization problem to a minimization problem, we had to start looking at the negative parameterization of the objective function. See documentation for more details.
#
# Finally, note that the variable HAS to be named $c$ in this `pyomo` model.
# +
##### Set Up Feasible Region Model for us #####
feasible_c_region = pyo.ConcreteModel()
feasible_c_region.varindex = pyo.RangeSet(1,50)
feasible_c_region.c = pyo.Var(feasible_c_region.varindex)
##### Placing Constraints Upon c #####
def less_than_zero(model, i):
    # Upper bound on the feasible set C: every (negated-utility)
    # coefficient c[i] must be non-positive.
    coefficient = model.c[i]
    return coefficient <= 0
feasible_c_region.less_than_zero_constraint = pyo.Constraint(feasible_c_region.varindex,\
rule=less_than_zero)
def greater_than_negative_one(model, i):
    # Lower bound on the feasible set C: every coefficient c[i]
    # must be at least -1.
    lower_bound = -1
    return lower_bound <= model.c[i]
feasible_c_region.greater_than_negative_one_constraint = pyo.Constraint(feasible_c_region.varindex,\
rule=greater_than_negative_one)
# -
# Having defined our `pyomo` models, we can now define an instance of the `Online_IO` class and call the `initialize_IO_method` with the algorithm specification as "BMPS_online_GD."
# +
##### Step 1: Create an Instance of the Online_IO Class #####
##### & Initiate the BMPS_online_GD Algorithm #####
online_cb = Online_IO(cb_model_BMPS,Qname='None',cname='uvec',Aname='p_t',\
bname='RHS',Dname='None',fname='None',dimQ=(0,0),dimc=(50,1),\
dimA=(1,50),dimD=(0,0),binary_mutable=[0,0,1,1,0,0],non_negative=1,\
feasible_set_C=feasible_c_region,var_bounds=(0,1))
online_cb.initialize_IO_method("BMPS_online_GD",alg_specific_params={'diam_flag':0})
# -
# Then we run the algorithm! For BMPS, this requires three steps [1]:
# * Call the `next_iteration` method with a 1 for the argument `part_for_BMPS`. This will take care of the projection to $\mathcal{C}$. [1]
# * Call the `receive_data` method to provide the $p_t$ and $x_t$ for this iteration.
# * Call `next_iteration` again at the end with 2 for the argument `part_for_BMPS` to finish out the algorithm.
# +
##### Steps 2-4: Run the Algorithm! #####
# One BMPS online-GD pass per sample. Each iteration is split into two
# `next_iteration` calls with the data update (`receive_data`) in between.
#Need to save some data
c_t_dict_vecs = {}    # learned cost vector c_t, keyed by iteration index
xbar_dict_vecs = {}   # subproblem solution xbar_t, keyed by iteration index
for i in range(1,num_samples+1):
    ### Step 2: "Project onto C" (from BMPS 2018) ###
    online_cb.next_iteration(part_for_BMPS=1) #we have to break the "next iteration"
    #of the BMPS_online_GD algorithm into two parts, since "project onto F"
    #(from BMPS 2018) comes before the data update step
    ### Step 3: Update Subproblem with p_t and obtain "expert solution x_t" (from BMPS 2018) ###
    #For p_t, we pass in a dictionary with keys as the names of the parameters
    #that we are updating and the items attached to the keys as the dictionaries
    #containing the data.
    online_cb.receive_data(p_t={"p_t":p_t_samples[i],"RHS":RHS_t_samples[i]},x_t=x_t_samples[i])
    ## The next few statements do some basic checks. They aren't comprehensive, but ##
    ## they are useful to showcase because they indicate some approaches to debugging pyomo models ##
    ## See documentation for sources regarding the pyomo package (and some sites I found helpful) ##
    # Asserting that the parameters p_t and RHS_t both gets put into the BMPS_subproblem #
    assert online_cb.BMPS_subproblem.A.extract_values() == p_t_samples[i]
    assert online_cb.BMPS_subproblem.b.extract_values() == RHS_t_samples[i]
    # Doing Some constraint Checking #
    for constr in online_cb.BMPS_subproblem.component_objects(pyo.Constraint):
        assert constr._constructed == True, "Error in constraint construction (body)"
        for c in constr:
            # Every constraint must have at least one finite side (lower or upper).
            lb = pyo.value(constr[c].lower)
            ub = pyo.value(constr[c].upper)
            assert ((lb is not None) or (ub is not None)), "Error in constraint construction (LHS/RHS)"
    ### Step 4: Finish out the Iteration by "solving subproblem", "performing gradient ###
    ### descent step", and calculating the learning rate (BMPS, 2018)
    online_cb.next_iteration(part_for_BMPS=2)
    ## Save some data ##
    # deepcopy so later iterations cannot mutate the stored vectors
    c_t_dict_vecs[i] = copy.deepcopy(online_cb.c_t_BMPS)
    xbar_dict_vecs[i] = copy.deepcopy(online_cb.xbar_t_BMPS)
# -
# Then we make some graphs based upon the error formulas provided by BMPS [1]. Note that the constant parameter for the error bound has to be set manually for each run, since we do not yet have a principled way of choosing it. See the documentation for more information.
# +
##### Step 4: Graph BMPS 2018 Error Measures #####
# Error measures from BMPS [1], each reported as a running (time) average:
#   objective error: c_t^T (x_t - xbar_t)  -- learned cost on the solution gap
#   solution error:  u^T  (x_t - xbar_t)   -- true cost on the solution gap
#   total error:     objective error - solution error
true_u_vec = np.reshape(true_u_vec,(50,1))
BMPS_obj_error = np.zeros((num_samples,))
BMPS_sol_error = np.zeros((num_samples,))
BMPS_total_error = np.zeros((num_samples,))
for i in range(1,num_samples+1): #cannot directly compare graph to paper bc we use 1/sqrt(t) - might need to run for more iterations?
    BMPS_obj_error[i-1] = np.dot(np.transpose(c_t_dict_vecs[i]),(x_t_samples[i]-xbar_dict_vecs[i]))
    BMPS_sol_error[i-1] = np.dot(np.transpose(true_u_vec),(x_t_samples[i]-xbar_dict_vecs[i])) #since we made u_vec negative, signs line up
    BMPS_total_error[i-1] = BMPS_obj_error[i-1] - BMPS_sol_error[i-1]
# Turn per-iteration errors into running averages: cumsum(e)_t / t.
BMPS_obj_error = np.divide(np.cumsum(BMPS_obj_error),np.arange(1,num_samples+1))
BMPS_sol_error = np.divide(np.cumsum(BMPS_sol_error),np.arange(1,num_samples+1))
BMPS_total_error = np.divide(np.cumsum(BMPS_total_error),np.arange(1,num_samples+1))
#################################
# Figure 1: objective error. (Label fixed: code computes c_t^T (x_t - xbar_t).)
error_graph1 = plt.plot(np.arange(1,num_samples+1),BMPS_obj_error)
plt.xlabel('Iterations (t)',fontsize=20)
plt.ylabel('Objective error: c_t^T (x_t-xbar_t)',fontsize=20)
plt.title('BMPS Objective Error',fontsize=20)
plt.figure()
# Figure 2: solution error.
error_graph2 = plt.plot(np.arange(1,num_samples+1),BMPS_sol_error)
plt.xlabel('Iterations (t)',fontsize=20)
plt.ylabel('Solution error: u_true^T (x_t-xbar_t)',fontsize=20)
plt.title('BMPS Solution Error',fontsize=20)
plt.figure()
# Figure 3: total error against the O(1/sqrt(t)) regret bound of BMPS 2018.
# `constant` has to be tuned per run (no principled setting implemented yet).
constant = 8
bound_func = lambda x: constant*(1/(np.sqrt(x))) #following the lead of BMPS 2018
error_graph3 = plt.plot(np.arange(1,num_samples+1),BMPS_total_error,label="total_error")
error_graph3 = plt.plot(np.arange(1,num_samples+1),bound_func(np.arange(1,num_samples+1)),'--',\
    label="regret bound = constant*(1/sqrt(t))")
plt.legend()
plt.xlabel('Iterations (t)',fontsize=20)
plt.ylabel('Total error',fontsize=20)
plt.title('BMPS Total Error',fontsize=20)
plt.figure()
# Figure 4: same curves with a log-scaled y axis. (Label fixed: the bound
# curve is still constant/sqrt(t); only the axis is logged.)
error_graph3 = plt.semilogy(np.arange(1,num_samples+1),BMPS_total_error,label="total_error")
error_graph3 = plt.semilogy(np.arange(1,num_samples+1),bound_func(np.arange(1,num_samples+1)),'--',\
    label="regret bound = constant*(1/sqrt(t))")
plt.legend()
plt.xlabel('Iterations (t)',fontsize=20)
plt.ylabel('Total error (logged)',fontsize=20)
plt.title('BMPS Total Error (Logged on Y axis)',fontsize=20)
# -
# ### Selected References (see documentation for more)
#
# [1] Bärmann, Andreas, <NAME>, <NAME>, and <NAME>. "An Online-Learning Approach to Inverse Optimization." arXiv preprint arXiv:1810.12997 (2018).
| online_IO_files/jupyter_notebook_demo/demo_Online_IO_class_BMPS_option.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
pwd
# +
import os, json

def _read_split_file(path):
    """Read one ReaSCAN split file (JSON content) and return the parsed dict.

    Uses a context manager so the file handle is closed; the original
    `json.load(open(...))` pattern leaked the descriptor.
    """
    print(f"Reading dataset from file: {path}...")
    with open(path, "r") as f:
        return json.load(f)

# Load the three paradigm test splits and report their test-set sizes.
p1_test_path_to_data = "../../ReaSCAN-v1.0/ReaSCAN-compositional-p1-test/data-compositional-splits.txt"
p1_test_data = _read_split_file(p1_test_path_to_data)
print(len(p1_test_data["examples"]["test"]))

p2_test_path_to_data = "../../ReaSCAN-v1.0/ReaSCAN-compositional-p2-test/data-compositional-splits.txt"
p2_test_data = _read_split_file(p2_test_path_to_data)
print(len(p2_test_data["examples"]["test"]))

p3_test_path_to_data = "../../ReaSCAN-v1.0/ReaSCAN-compositional-p3-test/data-compositional-splits.txt"
p3_test_data = _read_split_file(p3_test_path_to_data)
print(len(p3_test_data["examples"]["test"]))
# -
# Load the main ReaSCAN compositional split (contains the training set).
ReaSCAN_path_to_data = "../../ReaSCAN-v1.0/ReaSCAN-compositional/data-compositional-splits.txt"
print(f"Reading dataset from file: {ReaSCAN_path_to_data}...")
with open(ReaSCAN_path_to_data, "r") as f:  # close the handle (was leaked)
    ReaSCAN_data = json.load(f)

# Test 1: every command in the p1/p2/p3 test splits must also occur among the
# training commands.
train_command_set = {example["command"] for example in ReaSCAN_data["examples"]["train"]}
for test_data in (p1_test_data, p2_test_data, p3_test_data):
    for example in test_data["examples"]["test"]:
        assert example["command"] in train_command_set
print("Test-1 Passed")
import hashlib

# Hash every training example (md5 of its JSON serialization) and verify that
# no two training examples are byte-identical.
train_example_hash = set([])
for example in ReaSCAN_data["examples"]["train"]:
    example_hash_object = hashlib.md5(json.dumps(example).encode('utf-8'))
    train_example_hash.add(example_hash_object.hexdigest())
assert len(train_example_hash) == len(ReaSCAN_data["examples"]["train"])
# +
def _hash_examples(examples):
    """Return the set of md5 hex digests (one per JSON-serialized example).

    Asserts that all examples are distinct, i.e. the digest set has the same
    cardinality as the example list.
    """
    digests = {hashlib.md5(json.dumps(ex).encode('utf-8')).hexdigest() for ex in examples}
    assert len(digests) == len(examples)
    return digests

# De-duplication check within each paradigm test split.
p1_test_example_hash = _hash_examples(p1_test_data["examples"]["test"])
p2_test_example_hash = _hash_examples(p2_test_data["examples"]["test"])
p3_test_example_hash = _hash_examples(p3_test_data["examples"]["test"])
# +
# Count exact-duplicate examples (by md5 digest) shared between each paradigm
# test split and the training set; set intersection replaces the original
# per-hash membership loops. Counts should be 0 if the splits are clean.
p1_test_dup_count = len(p1_test_example_hash & train_example_hash)
p2_test_dup_count = len(p2_test_example_hash & train_example_hash)
p3_test_dup_count = len(p3_test_example_hash & train_example_hash)
# -
print(f"p1_test_dup_count={p1_test_dup_count}")
print(f"p2_test_dup_count={p2_test_dup_count}")
print(f"p3_test_dup_count={p3_test_dup_count}")
def get_example_hash_set(split):
    """Load a ReaSCAN split's test examples and return their md5 digest set.

    Args:
        split: folder suffix of the split, e.g. "a1" for
            ReaSCAN-compositional-a1.

    Returns:
        Set of md5 hex digests, one per test example. Asserts that every
        example hashes uniquely (no duplicates inside the split).
    """
    split_test_path_to_data = f"../../ReaSCAN-v1.0/ReaSCAN-compositional-{split}/data-compositional-splits.txt"
    print(f"Reading dataset from file: {split_test_path_to_data}...")
    with open(split_test_path_to_data, "r") as f:  # close the handle (was leaked)
        split_test_data = json.load(f)
    examples = split_test_data["examples"]["test"]
    digests = {hashlib.md5(json.dumps(ex).encode('utf-8')).hexdigest() for ex in examples}
    assert len(digests) == len(examples)
    return digests
# +
# Build the test-example hash set for each held-out generalization split
# (what each split holds out is verified cell by cell below).
a1_hash = get_example_hash_set("a1")
a2_hash = get_example_hash_set("a2")
a3_hash = get_example_hash_set("a3")
b1_hash = get_example_hash_set("b1")
b2_hash = get_example_hash_set("b2")
c1_hash = get_example_hash_set("c1")
c2_hash = get_example_hash_set("c2")
# -
# Count exact-duplicate examples (by md5 digest) shared between each held-out
# split and the training set; set intersection replaces the original loops.
# All counts should be 0 if the splits are clean.
a1_dup_count = len(a1_hash & train_example_hash)
a2_dup_count = len(a2_hash & train_example_hash)
a3_dup_count = len(a3_hash & train_example_hash)
print(f"a1_dup_count={a1_dup_count}")
print(f"a2_dup_count={a2_dup_count}")
print(f"a3_dup_count={a3_dup_count}")
b1_dup_count = len(b1_hash & train_example_hash)
b2_dup_count = len(b2_hash & train_example_hash)
print(f"b1_dup_count={b1_dup_count}")
print(f"b2_dup_count={b2_dup_count}")
c1_dup_count = len(c1_hash & train_example_hash)
c2_dup_count = len(c2_hash & train_example_hash)
print(f"c1_dup_count={c1_dup_count}")
print(f"c2_dup_count={c2_dup_count}")
# Split a1 check: every test command contains the held-out phrase
# "yellow,square", and that phrase never occurs in a training command.
split_test_path_to_data = f"../../ReaSCAN-v1.0/ReaSCAN-compositional-a1/data-compositional-splits.txt"
print(f"Reading dataset from file: {split_test_path_to_data}...")
split_test_data = json.load(open(split_test_path_to_data, "r"))
for example in split_test_data["examples"]["test"]:
    assert "yellow,square" in example["command"]
for example in ReaSCAN_data["examples"]["train"]:
    assert "yellow,square" not in example["command"]
# Split a2 check: when the command itself does not mention "red,square", at
# least one placed object referenced by the derivation must be a red square.
split_test_path_to_data = f"../../ReaSCAN-v1.0/ReaSCAN-compositional-a2/data-compositional-splits.txt"
print(f"Reading dataset from file: {split_test_path_to_data}...")
split_test_data = json.load(open(split_test_path_to_data, "r"))
# Map each derivation pattern onto the placed-object keys it references.
_DERIVATION_TO_OBJS = {
    "$OBJ_0": ("0",),
    "$OBJ_0 ^ $OBJ_1": ("0", "1"),
    "$OBJ_0 ^ $OBJ_1 & $OBJ_2": ("0", "1", "2"),
}
for example in split_test_data["examples"]["test"]:
    if "red,square" not in example["command"]:
        obj_keys = _DERIVATION_TO_OBJS.get(example["derivation"])
        if obj_keys is not None:
            placed = example['situation']['placed_objects']
            assert any(placed[k]['object']['shape'] == "square" for k in obj_keys)
            assert any(placed[k]['object']['color'] == "red" for k in obj_keys)
# Partition the a2 test commands by *why* they involve a red square:
# phrase in the command (setcom), red-square target (settar), red-square
# first relational object (setrel1), or second relational object (setrel2).
setcom, settar, setrel1, setrel2 = set([]), set([]), set([]), set([])
totalset = set([])
for example in split_test_data["examples"]["test"]:
    command = example["command"]
    derivation = example["derivation"]
    placed = example['situation']['placed_objects']
    totalset.add(command)

    def _is_red_square(key):
        # True when placed object `key` is a red square.
        return (placed[key]['object']['shape'] == "square"
                and placed[key]['object']['color'] == "red")

    if "red,square" in command:
        setcom.add(command)
    elif "$OBJ_0" in derivation and _is_red_square('0'):
        settar.add(command)
    elif "$OBJ_1" in derivation and _is_red_square('1'):
        setrel1.add(command)
    elif "$OBJ_2" in derivation and _is_red_square('2'):
        setrel2.add(command)
    else:
        raise Exception("Not supposed to happen")
# The four category sets must jointly cover every distinct command.
print(f"{len(totalset)}, {len(setcom)}, {len(settar)}, {len(setrel1)}, {len(setrel2)}")
print(f"{len(totalset)}, {len(setcom | settar | setrel1 | setrel2)}")
assert len(totalset) == len(setcom | settar | setrel1 | setrel2)
setcom
settar
setrel1
setrel2
# Training-side counterpart of the a2 check: no training command contains
# "red,square", and no placed object referenced by a derivation is a red square.
all_train_commands = set([])
for example in ReaSCAN_data["examples"]["train"]:
    all_train_commands.add(example["command"])
    assert "red,square" not in example["command"]
    placed = example['situation']['placed_objects']
    if example["derivation"] == "$OBJ_0":
        referenced = ("0",)
    elif example["derivation"] == "$OBJ_0 ^ $OBJ_1":
        referenced = ("0", "1")
    elif example["derivation"] == "$OBJ_0 ^ $OBJ_1 & $OBJ_2":
        referenced = ("0", "1", "2")
    else:
        referenced = ()
    for key in referenced:
        obj = placed[key]['object']
        assert not (obj['shape'] == "square" and obj['color'] == "red")
# Display: commands shared between training and the a2 test split (if any).
all_train_commands.intersection(totalset)
# Split a3 check: every test command mentions a "small ... cylinder" phrase
# (optionally colored), and no training command does.
_SMALL_CYLINDER_PHRASES = ("small,cylinder", "small,red,cylinder",
                           "small,blue,cylinder", "small,yellow,cylinder",
                           "small,green,cylinder")
split_test_path_to_data = f"../../ReaSCAN-v1.0/ReaSCAN-compositional-a3/data-compositional-splits.txt"
print(f"Reading dataset from file: {split_test_path_to_data}...")
with open(split_test_path_to_data, "r") as f:  # close the handle (was leaked)
    split_test_data = json.load(f)
for example in split_test_data["examples"]["test"]:
    assert any(phrase in example['command'] for phrase in _SMALL_CYLINDER_PHRASES)
for example in ReaSCAN_data["examples"]["train"]:
    assert not any(phrase in example['command'] for phrase in _SMALL_CYLINDER_PHRASES)

# Load split b1 for the concept co-occurrence checks below.
split_test_path_to_data = f"../../ReaSCAN-v1.0/ReaSCAN-compositional-b1/data-compositional-splits.txt"
print(f"Reading dataset from file: {split_test_path_to_data}...")
with open(split_test_path_to_data, "r") as f:
    split_test_data = json.load(f)
# +
from collections import namedtuple, OrderedDict

# Scan the training set and collect: per-concept occurrence counts, plus the
# per-example co-occurrence tuples of object concepts and relation concepts.
seen_command_structs = {}
seen_concepts = {} # add in seen concepts, so we can select concepts that are seen, but new composites!
seen_object_co = set([])   # sorted tuples of object concepts per example
seen_rel_co = set([])      # sorted tuples of relation concepts per example
for example_selected in ReaSCAN_data["examples"]["train"]:
    rel_map = OrderedDict((tuple(ele[0]), ele[1]) for ele in example_selected["relation_map"])
    # NOTE(review): example_struct is built but never consumed below; kept so
    # that missing keys still raise, exactly as in the original cell.
    example_struct = OrderedDict({
        'obj_pattern_map': example_selected["object_pattern_map"],
        'rel_map': rel_map,
        'obj_map': example_selected["object_expression"],
        'grammer_pattern': example_selected['grammer_pattern'],
        'adverb': example_selected['adverb_in_command'],
        'verb': example_selected['verb_in_command']
    })
    obj_concepts = list(example_selected["object_expression"].values())
    rel_concepts = list(rel_map.values())
    for concept in obj_concepts + rel_concepts:
        seen_concepts[concept] = seen_concepts.get(concept, 0) + 1
    seen_object_co.add(tuple(sorted(obj_concepts)))
    seen_rel_co.add(tuple(sorted(rel_concepts)))
# -
seen_rel_co
# +
# Same concept / co-occurrence collection as for training, but over the b1
# test split.
test_seen_command_structs = {}
test_seen_concepts = {} # add in seen concepts, so we can select concepts that are seen, but new composites!
test_seen_object_co = set([])
test_seen_rel_co = set([])
for example_selected in split_test_data["examples"]["test"]:
    rel_map = OrderedDict((tuple(ele[0]), ele[1]) for ele in example_selected["relation_map"])
    example_struct = OrderedDict({
        'obj_pattern_map': example_selected["object_pattern_map"],
        'rel_map': rel_map,
        'obj_map': example_selected["object_expression"],
        'grammer_pattern': example_selected['grammer_pattern'],
        'adverb': example_selected['adverb_in_command'],
        'verb': example_selected['verb_in_command']
    })  # built but not consumed; kept for parity with the training-scan cell
    obj_concepts = list(example_selected["object_expression"].values())
    rel_concepts = list(rel_map.values())
    for concept in obj_concepts + rel_concepts:
        test_seen_concepts[concept] = test_seen_concepts.get(concept, 0) + 1
    test_seen_object_co.add(tuple(sorted(obj_concepts)))
    test_seen_rel_co.add(tuple(sorted(rel_concepts)))
# -
# Check: every relation co-occurrence in the b1 test split also appears in
# training (the bare expressions display the sets in the notebook).
test_seen_rel_co
test_seen_rel_co.intersection(seen_rel_co)
assert len(test_seen_rel_co.intersection(seen_rel_co)) == len(test_seen_rel_co)
test_seen_object_co.intersection(seen_object_co)
test_seen_object_co
# Load split b2 and recompute the test-side concept / co-occurrence sets.
split_test_path_to_data = f"../../ReaSCAN-v1.0/ReaSCAN-compositional-b2/data-compositional-splits.txt"
print(f"Reading dataset from file: {split_test_path_to_data}...")
split_test_data = json.load(open(split_test_path_to_data, "r"))
# +
test_seen_command_structs = {}
test_seen_concepts = {} # add in seen concepts, so we can select concepts that are seen, but new composites!
test_seen_object_co = set([])
test_seen_rel_co = set([])
for example_selected in split_test_data["examples"]["test"]:
    rel_map = OrderedDict((tuple(ele[0]), ele[1]) for ele in example_selected["relation_map"])
    example_struct = OrderedDict({
        'obj_pattern_map': example_selected["object_pattern_map"],
        'rel_map': rel_map,
        'obj_map': example_selected["object_expression"],
        'grammer_pattern': example_selected['grammer_pattern'],
        'adverb': example_selected['adverb_in_command'],
        'verb': example_selected['verb_in_command']
    })  # built but not consumed; kept for parity with the training-scan cell
    obj_concepts = list(example_selected["object_expression"].values())
    rel_concepts = list(rel_map.values())
    for concept in obj_concepts + rel_concepts:
        test_seen_concepts[concept] = test_seen_concepts.get(concept, 0) + 1
    test_seen_object_co.add(tuple(sorted(obj_concepts)))
    test_seen_rel_co.add(tuple(sorted(rel_concepts)))
# -
test_seen_rel_co
# +
# Recompute the training-side sets (same logic as the earlier training-scan
# cell), then verify that every object co-occurrence in the b2 test split was
# already seen in training.
seen_command_structs = {}
seen_concepts = {} # add in seen concepts, so we can select concepts that are seen, but new composites!
seen_object_co = set([])
seen_rel_co = set([])
for example_selected in ReaSCAN_data["examples"]["train"]:
    rel_map = OrderedDict((tuple(ele[0]), ele[1]) for ele in example_selected["relation_map"])
    example_struct = OrderedDict({
        'obj_pattern_map': example_selected["object_pattern_map"],
        'rel_map': rel_map,
        'obj_map': example_selected["object_expression"],
        'grammer_pattern': example_selected['grammer_pattern'],
        'adverb': example_selected['adverb_in_command'],
        'verb': example_selected['verb_in_command']
    })  # built but not consumed; kept for parity with the original cell
    obj_concepts = list(example_selected["object_expression"].values())
    rel_concepts = list(rel_map.values())
    for concept in obj_concepts + rel_concepts:
        seen_concepts[concept] = seen_concepts.get(concept, 0) + 1
    seen_object_co.add(tuple(sorted(obj_concepts)))
    seen_rel_co.add(tuple(sorted(rel_concepts)))
# -
test_seen_rel_co.intersection(seen_rel_co)
seen_rel_co
assert len(test_seen_object_co) == len(test_seen_object_co.intersection(seen_object_co))
# Split c1 check: every test example uses the 4-object derivation, whose
# command contains exactly two "and" conjunctions.
split_test_path_to_data = f"../../ReaSCAN-v1.0/ReaSCAN-compositional-c1/data-compositional-splits.txt"
print(f"Reading dataset from file: {split_test_path_to_data}...")
with open(split_test_path_to_data, "r") as f:  # close the handle (was leaked)
    split_test_data = json.load(f)
for example in split_test_data["examples"]["test"]:
    assert example["derivation"] == "$OBJ_0 ^ $OBJ_1 & $OBJ_2 & $OBJ_3"
    assert (example["command"].count("and")) == 2

# Split c2 check: every test example uses the 3-object "that is" chain,
# whose command contains exactly two "that,is" phrases.
split_test_path_to_data = f"../../ReaSCAN-v1.0/ReaSCAN-compositional-c2/data-compositional-splits.txt"
print(f"Reading dataset from file: {split_test_path_to_data}...")
with open(split_test_path_to_data, "r") as f:
    split_test_data = json.load(f)
for example in split_test_data["examples"]["test"]:
    assert example["derivation"] == "$OBJ_0 ^ $OBJ_1 ^ $OBJ_2"
    assert (example["command"].count("that,is")) == 2

# Training examples must use only the three shallow derivations.
for example in ReaSCAN_data["examples"]["train"]:
    assert "$OBJ_0 ^ $OBJ_1 ^ $OBJ_2" not in example["derivation"]
    assert "$OBJ_0 ^ $OBJ_1 & $OBJ_2 & $OBJ_3" not in example["derivation"]
    assert "$OBJ_0" == example["derivation"] or "$OBJ_0 ^ $OBJ_1" == example["derivation"] or "$OBJ_0 ^ $OBJ_1 & $OBJ_2" == example["derivation"]
| code/dataset/verify_splits_local.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] nbsphinx="hidden"
# [Index](Index.ipynb) - [Back](Widget Basics.ipynb) - [Next](Output Widget.ipynb)
# -
# # Widget List
import ipywidgets as widgets
# + [markdown] slideshow={"slide_type": "slide"}
# ## Numeric widgets
# -
# There are many widgets distributed with ipywidgets that are designed to display numeric values. Widgets exist for displaying integers and floats, both bounded and unbounded. The integer widgets share a similar naming scheme to their floating point counterparts. By replacing `Float` with `Int` in the widget name, you can find the Integer equivalent.
# ### IntSlider
# - The slider is displayed with a specified, initial `value`. Lower and upper bounds are defined by `min` and `max`, and the value can be incremented according to the `step` parameter.
# - The slider's label is defined by `description` parameter
# - The slider's `orientation` is either 'horizontal' (default) or 'vertical'
# - `readout` displays the current value of the slider next to it. The options are **True** (default) or **False**
# - `readout_format` specifies the format function used to represent slider value. The default is '.2f'
#
# Bounded integer slider; parameters are documented in the markdown above.
widgets.IntSlider(
    value=7,
    min=0,
    max=10,
    step=1,
    description='Test:',
    disabled=False,
    continuous_update=False,  # only fire value events when the handle is released
    orientation='horizontal',
    readout=True,
    readout_format='d'
)
# + [markdown] slideshow={"slide_type": "slide"}
# ### FloatSlider
# -
# Float counterpart of IntSlider; note the float step and readout format.
widgets.FloatSlider(
    value=7.5,
    min=0,
    max=10.0,
    step=0.1,
    description='Test:',
    disabled=False,
    continuous_update=False,
    orientation='horizontal',
    readout=True,
    readout_format='.1f',
)
# An example of sliders **displayed vertically**.
widgets.FloatSlider(
    value=7.5,
    min=0,
    max=10.0,
    step=0.1,
    description='Test:',
    disabled=False,
    continuous_update=False,
    orientation='vertical',  # only difference from the slider above
    readout=True,
    readout_format='.1f',
)
# ### FloatLogSlider
# The `FloatLogSlider` has a log scale, which makes it easy to have a slider that covers a wide range of positive magnitudes. The `min` and `max` refer to the minimum and maximum exponents of the `base`, and the `value` refers to the actual value of the slider.
# Log-scale slider: min/max are exponents of `base`; `value` is the actual value.
widgets.FloatLogSlider(
    value=10,
    base=10,
    min=-10, # min exponent of base
    max=10, # max exponent of base
    step=0.2, # exponent step
    description='Log Slider'
)
# ### IntRangeSlider
# Two-handle slider selecting an integer interval; `value` is a [low, high] pair.
widgets.IntRangeSlider(
    value=[5, 7],
    min=0,
    max=10,
    step=1,
    description='Test:',
    disabled=False,
    continuous_update=False,
    orientation='horizontal',
    readout=True,
    readout_format='d',
)
# ### FloatRangeSlider
# Float counterpart of IntRangeSlider.
widgets.FloatRangeSlider(
    value=[5, 7.5],
    min=0,
    max=10.0,
    step=0.1,
    description='Test:',
    disabled=False,
    continuous_update=False,
    orientation='horizontal',
    readout=True,
    readout_format='.1f',
)
# ### IntProgress
# Read-only progress bar; update `value` programmatically to animate it.
widgets.IntProgress(
    value=7,
    min=0,
    max=10,
    description='Loading:',
    bar_style='', # 'success', 'info', 'warning', 'danger' or ''
    style={'bar_color': 'maroon'},
    orientation='horizontal'
)
# + [markdown] slideshow={"slide_type": "slide"}
# ### FloatProgress
# -
# Float counterpart of IntProgress.
widgets.FloatProgress(
    value=7.5,
    min=0,
    max=10.0,
    description='Loading:',
    bar_style='info',
    style={'bar_color': '#ffff00'},
    orientation='horizontal'
)
# The numerical text boxes that impose some limit on the data (range, integer-only) impose that restriction when the user presses enter.
#
# ### BoundedIntText
# Integer text entry clamped to [min, max].
widgets.BoundedIntText(
    value=7,
    min=0,
    max=10,
    step=1,
    description='Text:',
    disabled=False
)
# + [markdown] slideshow={"slide_type": "slide"}
# ### BoundedFloatText
# -
# Float text entry clamped to [min, max].
widgets.BoundedFloatText(
    value=7.5,
    min=0,
    max=10.0,
    step=0.1,
    description='Text:',
    disabled=False
)
# ### IntText
# Unbounded integer text entry.
widgets.IntText(
    value=7,
    description='Any:',
    disabled=False
)
# + [markdown] slideshow={"slide_type": "slide"}
# ### FloatText
# -
# Unbounded float text entry.
widgets.FloatText(
    value=7.5,
    description='Any:',
    disabled=False
)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Boolean widgets
# -
# There are three widgets that are designed to display a boolean value.
# ### ToggleButton
# Boolean toggle rendered as a push button.
widgets.ToggleButton(
    value=False,
    description='Click me',
    disabled=False,
    button_style='', # 'success', 'info', 'warning', 'danger' or ''
    tooltip='Description',
    icon='check' # (FontAwesome names without the `fa-` prefix)
)
# + [markdown] slideshow={"slide_type": "slide"}
# ### Checkbox
# - `value` specifies the value of the checkbox
# - `indent` parameter places an indented checkbox, aligned with other controls. Options are **True** (default) or **False**
#
# -
widgets.Checkbox(
    value=False,
    description='Check me',
    disabled=False,
    indent=False
)
# ### Valid
#
# The valid widget provides a read-only indicator.
widgets.Valid(
    value=False,
    description='Valid!',
)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Selection widgets
# -
# There are several widgets that can be used to display single selection lists, and two that can be used to select multiple values. All inherit from the same base class. You can specify the **enumeration of selectable options by passing a list** (options are either (label, value) pairs, or simply values for which the labels are derived by calling `str`).
#
# <div class="alert alert-info">
# Changes in *ipywidgets 8*:
#
# Selection widgets no longer accept a dictionary of options. Pass a list of key-value pairs instead.
# </div>
# + [markdown] slideshow={"slide_type": "slide"}
# ### Dropdown
# -
# Dropdown with plain string options (label and value coincide).
widgets.Dropdown(
    options=['1', '2', '3'],
    value='2',
    description='Number:',
    disabled=False,
)
# The following is also valid, displaying the words `'One', 'Two', 'Three'` as the dropdown choices but returning the values `1, 2, 3`.
# (label, value) pairs: displayed labels differ from the returned values.
widgets.Dropdown(
    options=[('One', 1), ('Two', 2), ('Three', 3)],
    value=2,
    description='Number:',
)
# + [markdown] slideshow={"slide_type": "slide"}
# ### RadioButtons
# -
widgets.RadioButtons(
    options=['pepperoni', 'pineapple', 'anchovies'],
#     value='pineapple', # Defaults to 'pineapple'
#    layout={'width': 'max-content'}, # If the items' names are long
    description='Pizza topping:',
    disabled=False
)
# #### With dynamic layout and very long labels
# Wrap the RadioButtons in a Box with a separate Label so long option labels
# can use the full width.
widgets.Box(
    [
        widgets.Label(value='Pizza topping with a very long label:'),
        widgets.RadioButtons(
            options=[
                'pepperoni',
                'pineapple',
                'anchovies',
                'and the long name that will fit fine and the long name that will fit fine and the long name that will fit fine '
            ],
            layout={'width': 'max-content'}
        )
    ]
)
# + [markdown] slideshow={"slide_type": "slide"}
# ### Select
# -
# Single-selection list box.
widgets.Select(
    options=['Linux', 'Windows', 'OSX'],
    value='OSX',
    # rows=10,
    description='OS:',
    disabled=False
)
# ### SelectionSlider
# Slider over a discrete list of options rather than a numeric range.
widgets.SelectionSlider(
    options=['scrambled', 'sunny side up', 'poached', 'over easy'],
    value='sunny side up',
    description='I like my eggs ...',
    disabled=False,
    continuous_update=False,
    orientation='horizontal',
    readout=True
)
# ### SelectionRangeSlider
#
# The value, index, and label keys are 2-tuples of the min and max values selected. The options must be nonempty.
import datetime
dates = [datetime.date(2015, i, 1) for i in range(1, 13)]
options = [(i.strftime('%b'), i) for i in dates]  # (month-abbrev label, date value)
widgets.SelectionRangeSlider(
    options=options,
    index=(0, 11),  # initially select the full Jan..Dec range
    description='Months (2015)',
    disabled=False
)
# + [markdown] slideshow={"slide_type": "slide"}
# ### ToggleButtons
# -
# Mutually-exclusive button group (one option active at a time).
widgets.ToggleButtons(
    options=['Slow', 'Regular', 'Fast'],
    description='Speed:',
    disabled=False,
    button_style='', # 'success', 'info', 'warning', 'danger' or ''
    tooltips=['Description of slow', 'Description of regular', 'Description of fast'],
#     icons=['check'] * 3
)
# ### SelectMultiple
# Multiple values can be selected with <kbd>shift</kbd> and/or <kbd>ctrl</kbd> (or <kbd>command</kbd>) pressed and mouse clicks or arrow keys.
widgets.SelectMultiple(
    options=['Apples', 'Oranges', 'Pears'],
    value=['Oranges'],
    #rows=10,
    description='Fruits',
    disabled=False
)
# + [markdown] slideshow={"slide_type": "slide"}
# ## String widgets
# -
# There are several widgets that can be used to display a string value. The `Text`, `Textarea`, and `Combobox` widgets accept input. The `HTML` and `HTMLMath` widgets display a string as HTML (`HTMLMath` also renders math). The `Label` widget can be used to construct a custom control label.
# + [markdown] slideshow={"slide_type": "slide"}
# ### Text
# -
widgets.Text(
value='Hello World',
placeholder='Type something',
description='String:',
disabled=False
)
# ### Textarea
widgets.Textarea(
value='Hello World',
placeholder='Type something',
description='String:',
disabled=False
)
# ### Combobox
widgets.Combobox(
# value='John',
placeholder='Choose Someone',
options=['Paul', 'John', 'George', 'Ringo'],
description='Combobox:',
ensure_option=True,
disabled=False
)
# ### Password
#
# The `Password` widget hides user input on the screen. **This widget is not a secure way to collect sensitive information because:**
#
# + The contents of the `Password` widget are transmitted unencrypted.
# + If the widget state is saved in the notebook the contents of the `Password` widget is stored as plain text.
widgets.Password(
value='password',
placeholder='Enter password',
description='Password:',
disabled=False
)
# + [markdown] slideshow={"slide_type": "slide"}
# ### Label
#
# The `Label` widget is useful if you need to build a custom description next to a control using similar styling to the built-in control descriptions.
# -
widgets.HBox([widgets.Label(value="The $m$ in $E=mc^2$:"), widgets.FloatSlider()])
# ### HTML
widgets.HTML(
value="Hello <b>World</b>",
placeholder='Some HTML',
description='Some HTML',
)
# ### HTML Math
widgets.HTMLMath(
value=r"Some math and <i>HTML</i>: \(x^2\) and $$\frac{x+1}{x-1}$$",
placeholder='Some HTML',
description='Some HTML',
)
# ## Image
# Read the image bytes inside a context manager so the file handle is
# closed promptly (the original `open()` was never closed — a leak).
with open("images/WidgetArch.png", "rb") as file:
    image = file.read()
widgets.Image(
    value=image,   # raw bytes of the image
    format='png',
    width=300,
    height=400,
)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Button
# -
# A push button; register a click callback with button.on_click(fn).
button = widgets.Button(
    description='Click me',
    disabled=False,
    button_style='', # 'success', 'info', 'warning', 'danger' or ''
    tooltip='Click me',
    icon='check' # (FontAwesome names without the `fa-` prefix)
)
button
# The `icon` attribute can be used to define an icon; see the [fontawesome](https://fontawesome.com/icons) page for available icons.
# A callback function `foo` can be registered using `button.on_click(foo)`. The function `foo` will be called when the button is clicked with the button instance as its single argument.
# ## Output
#
# The `Output` widget can capture and display stdout, stderr and [rich output generated by IPython](http://ipython.readthedocs.io/en/stable/api/generated/IPython.display.html#module-IPython.display). For detailed documentation, see the [output widget examples](https://ipywidgets.readthedocs.io/en/latest/examples/Output Widget.html).
# ## Play (Animation) widget
# The `Play` widget is useful to perform animations by iterating on a sequence of integers with a certain speed. The value of the slider below is linked to the player.
play = widgets.Play(
    value=50,
    min=0,
    max=100,
    step=1,
    interval=500,  # milliseconds between steps
    description="Press play",
    disabled=False
)
slider = widgets.IntSlider()
# jslink keeps the two values in sync client-side (no kernel round-trip).
widgets.jslink((play, 'value'), (slider, 'value'))
widgets.HBox([play, slider])
# ## Date picker
#
# For a list of browsers that support the date picker widget, see the [MDN article for the HTML date input field](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/input/date#Browser_compatibility).
widgets.DatePicker(
    description='Pick a Date',
    disabled=False
)
# ## Time picker
#
# For a list of browsers that support the time picker widget, see the [MDN article for the HTML time input field](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/input/time#Browser_compatibility).
widgets.TimePicker(
    description='Pick a Time',
    disabled=False
)
# ## Datetime picker
#
# For a list of browsers that support the datetime picker widget, see the [MDN article for the HTML datetime-local input field](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/input/datetime-local#Browser_compatibility). For the browsers that do not support the datetime-local input, we try to fall back on displaying separate date and time inputs.
#
# ### Time zones
#
# There are two points worth to note with regards to timezones for datetimes:
# - The browser always picks datetimes using *its* timezone.
# - The kernel always gets the datetimes in the default system timezone of the kernel (see https://docs.python.org/3/library/datetime.html#datetime.datetime.astimezone with `None` as the argument).
#
# This means that if the kernel and browser have different timezones, the default string serialization of the timezones might differ, but they will still represent the same point in time.
widgets.DatetimePicker(
    description='Pick a Time',
    disabled=False
)
# ## Color picker
widgets.ColorPicker(
    concise=False,  # False shows the color name/value next to the swatch
    description='Pick a color',
    value='blue',
    disabled=False
)
# ## File Upload
#
# The `FileUpload` allows to upload any type of file(s) into memory in the kernel.
widgets.FileUpload(
    accept='', # Accepted file extension e.g. '.txt', '.pdf', 'image/*', 'image/*,.pdf'
    multiple=False # True to accept multiple files upload else False
)
# The upload widget exposes a `value` attribute that contains the files uploaded. The value attribute is a tuple with a dictionary for each uploaded file. For instance:
#
# ```python
# uploader = widgets.FileUpload()
# display(uploader)
#
# # upload something...
#
# # once a file is uploaded, use the `.value` attribute to retrieve the content:
# uploader.value
# #=> (
# #=> {
# #=> 'name': 'example.txt',
# #=> 'type': 'text/plain',
# #=> 'size': 36,
# #=> 'last_modified': datetime.datetime(2020, 1, 9, 15, 58, 43, 321000, tzinfo=datetime.timezone.utc),
# #=> 'content': <memory at 0x10c1b37c8>
# #=> },
# #=> )
# ```
#
# Entries in the dictionary can be accessed either as items, as one would any dictionary, or as attributes:
#
# ```
# uploaded_file = uploader.value[0]
# uploaded_file["size"]
# #=> 36
# uploaded_file.size
# #=> 36
# ```
#
# The contents of the file uploaded are in the value of the `content` key. They are a [memory view](https://docs.python.org/3/library/stdtypes.html#memory-views):
#
# ```python
# uploaded_file.content
# #=> <memory at 0x10c1b37c8>
# ```
#
# You can extract the content to bytes:
#
# ```python
# uploaded_file.content.tobytes()
# #=> b'This is the content of example.txt.\n'
# ```
#
# If the file is a text file, you can get the contents as a string by [decoding it](https://docs.python.org/3/library/codecs.html):
#
# ```python
# import codecs
# codecs.decode(uploaded_file.content, encoding="utf-8")
# #=> 'This is the content of example.txt.\n'
# ```
#
# You can save the uploaded file to the filesystem from the kernel:
#
# ```python
# with open("./saved-output.txt", "wb") as fp:
# fp.write(uploaded_file.content)
# ```
#
# To convert the uploaded file into a Pandas dataframe, you can use a [BytesIO object](https://docs.python.org/3/library/io.html#binary-i-o):
#
# ```python
# import io
# import pandas as pd
# pd.read_csv(io.BytesIO(uploaded_file.content))
# ```
#
# If the uploaded file is an image, you can visualize it with an [image](#Image) widget:
#
# ```python
# widgets.Image(value=uploaded_file.content.tobytes())
# ```
#
# <div class="alert alert-info">
# Changes in *ipywidgets 8*:
#
# The `FileUpload` changed significantly in ipywidgets 8:
#
# - The `.value` traitlet is now a list of dictionaries, rather than a dictionary mapping the uploaded name to the content. To retrieve the original form, use `{f["name"]: f.content.tobytes() for f in uploader.value}`.
# - The `.data` traitlet has been removed. To retrieve it, use `[f.content.tobytes() for f in uploader.value]`.
# - The `.metadata` traitlet has been removed. To retrieve it, use `[{k: v for k, v in f.items() if k != "content"} for f in w.value]`.
# </div>
#
# <div class="alert alert-warning">
# Warning: When using the `FileUpload` Widget, uploaded file content might be saved in the notebook if widget state is saved.
# </div>
# ## Controller
#
# The `Controller` allows a game controller to be used as an input device.
# Binds the first connected gamepad (index 0) as an input device.
widgets.Controller(
    index=0,
)
# ## Container/Layout widgets
#
# These widgets are used to hold other widgets, called children. Each has a `children` property that may be set either when the widget is created or later.
# ### Box
items = [widgets.Label(str(i)) for i in range(4)]
widgets.Box(items)
# ### HBox
items = [widgets.Label(str(i)) for i in range(4)]
widgets.HBox(items)
# ### VBox
# Two vertical columns laid out side by side.
items = [widgets.Label(str(i)) for i in range(4)]
left_box = widgets.VBox([items[0], items[1]])
right_box = widgets.VBox([items[2], items[3]])
widgets.HBox([left_box, right_box])
# ### GridBox
#
# This box uses the HTML Grid specification to lay out its children in two dimensional grid. The example below lays out the 8 items inside in 3 columns and as many rows as needed to accommodate the items.
items = [widgets.Label(str(i)) for i in range(8)]
widgets.GridBox(items, layout=widgets.Layout(grid_template_columns="repeat(3, 100px)"))
# ### Accordion
accordion = widgets.Accordion(children=[widgets.IntSlider(), widgets.Text()], titles=('Slider', 'Text'))
accordion
# ### Tabs
#
# In this example the children are set after the tab is created. Titles for the tabs are set in the same way they are for `Accordion`.
tab_contents = ['P0', 'P1', 'P2', 'P3', 'P4']
children = [widgets.Text(description=name) for name in tab_contents]
tab = widgets.Tab()
tab.children = children
tab.titles = [str(i) for i in range(len(children))]
tab
# ### Stacked
#
# The `Stacked` widget can have multiple children widgets as for `Tab` and `Accordion`, but only shows one at a time depending on the value of ``selected_index``:
button = widgets.Button(description='Click here')
slider = widgets.IntSlider()
stacked = widgets.Stacked([button, slider])
stacked # will show only the button
# This can be used in combination with another selection-based widget to show different widgets depending on the selection:
dropdown = widgets.Dropdown(options=['button', 'slider'])
# The dropdown's index client-side drives which stacked child is visible.
widgets.jslink((dropdown, 'index'), (stacked, 'selected_index'))
widgets.VBox([dropdown, stacked])
# ### Accordion, Tab, and Stacked use `selected_index`, not value
#
# Unlike the rest of the widgets discussed earlier, the container widgets `Accordion` and `Tab` update their `selected_index` attribute when the user changes which accordion or tab is selected. That means that you can both see what the user is doing *and* programmatically set what the user sees by setting the value of `selected_index`.
#
# Setting `selected_index = None` closes all of the accordions or deselects all tabs.
# In the cells below try displaying or setting the `selected_index` of the `tab` and/or `accordion`.
tab.selected_index = 3
accordion.selected_index = None
# ### Nesting tabs and accordions
#
# Tabs and accordions can be nested as deeply as you want. If you have a few minutes, try nesting a few accordions or putting an accordion inside a tab or a tab inside an accordion.
#
# The example below makes a couple of tabs with an accordion children in one of them
tab_nest = widgets.Tab()
tab_nest.children = [accordion, accordion]
tab_nest.titles = ('An accordion', 'Copy of the accordion')
tab_nest
# + [markdown] nbsphinx="hidden"
# [Index](Index.ipynb) - [Back](Widget Basics.ipynb) - [Next](Output Widget.ipynb)
# docs/source/examples/Widget List.ipynb
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import keras
import sys
import os
from keras.datasets import reuters
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Embedding
from keras.layers import Bidirectional, TimeDistributed, GRU
from keras.layers import LSTM, Input, Reshape, Concatenate, Flatten,Convolution1D
from keras.layers import Conv1D, Conv2D, GlobalMaxPooling1D, MaxPooling1D, MaxPool2D
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.optimizers import Adam, SGD
from keras.models import Model
from keras.callbacks import ModelCheckpoint, EarlyStopping
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from sklearn import metrics
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import StratifiedKFold, cross_val_score
import itertools
import pandas as pd
import spacy
import nltk
import re
from tqdm import tqdm
from os.path import expanduser, exists
import matplotlib.pyplot as plt
import seaborn as sns
from pylab import rcParams
# %matplotlib inline
sns.set(style='whitegrid', palette='muted', font_scale=1.5)
# pd.set_option('display.max_colwidth', 300) #widen pandas rows display
# +
import sys
# Add the project folder to sys.path so data_util can be imported.
sys.path.append('/home/paperspace/sonic/fakeNews/')
from data_util import get_fake_or_real_news, get_politifact, get_fact_fake
# Load one of the fake-news datasets (swap the commented lines to switch).
x_train, x_test, y_train, y_test, word_index, labels, num_classes, X, y = get_fake_or_real_news()
# x_train, x_test, y_train, y_test, word_index, labels, num_classes, X, y = get_politifact()
# x_train, x_test, y_train, y_test, word_index, labels, num_classes, X, y = get_fact_fake()
# Inverse vocabulary: integer id -> token; id 0 is reserved for padding.
vocabulary_inv = dict((v, k) for k, v in word_index.items())
print('Found %s unique tokens.' % len(vocabulary_inv))
vocabulary_inv[0] = "<PAD/>"
# Data Preparation
print("Load data...")
# x_train, y_train, x_test, y_test, vocabulary_inv = load_data(data_source)
# x_train, y_train, x_test, y_test, vocabulary_inv = load_data_hansard(data_train)
# Bug fix: `sequence_length` was read below before ever being assigned,
# raising NameError on a fresh kernel. Default it to the data's width.
if 'sequence_length' not in globals():
    sequence_length = x_test.shape[1]
if sequence_length != x_test.shape[1]:
    print("Adjusting sequence length for actual size")
    sequence_length = x_test.shape[1]
print("x_train shape:", x_train.shape)
print("x_test shape:", x_test.shape)
print("Vocabulary Size: {:d}".format(len(vocabulary_inv)))
# x_train[:10]
# x_train[:10]
# +
def plot_history(history):
    """Plot the train/validation accuracy and loss curves of a Keras fit.

    history: the History object returned by model.fit(); its `.history`
    dict must contain 'acc', 'val_acc', 'loss' and 'val_loss'.
    """
    # list all data in history
    print(history.history.keys())
    # One figure per metric pair: accuracy first, then loss.
    for train_key, val_key, label in (('acc', 'val_acc', 'accuracy'),
                                      ('loss', 'val_loss', 'loss')):
        plt.plot(history.history[train_key])
        plt.plot(history.history[val_key])
        plt.title('model ' + label)
        plt.ylabel(label)
        plt.xlabel('epoch')
        plt.legend(['train', 'test'], loc='upper left')
        plt.show()
def plot_confusion_matrix(model, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """Print a classification report and plot the confusion matrix.

    Normalization can be applied by setting `normalize=True`.

    Reads module-level globals `x_test` and `y_test` (one-hot labels),
    plus `data_train` for the class names.
    NOTE(review): `data_train` is never assigned in this file's visible
    code (only referenced in commented-out loaders), so this call will
    NameError on a fresh kernel — verify before use.
    """
    y_pred = model.predict(x_test)
    # print(y_pred[:10])
    # print(y_test[:10])
    # one-hot -> integer class labels
    class_labels = np.argmax(y_test, axis=1)
    # print(class_labels[:10])
    # print(y_pred.argmax(axis=1))
    print(metrics.classification_report(class_labels, y_pred.argmax(axis=1),
                                        target_names=data_train['label'].unique(), digits=3))
    cm = confusion_matrix(class_labels,
                          y_pred.argmax(axis=1))
    if normalize:
        # Row-normalize so each true class sums to 1.
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)
    # plt.figure(figsize=(12,8))
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    # Annotate each cell; use white text on dark cells for contrast.
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.show()
# -
# # Embedding
# +
# Load pretrained GloVe vectors into a dict: token -> 300-d float32 vector.
# (A redundant second `embeddings_index = {}` initialization was removed.)
KERAS_DATASETS_DIR = expanduser('~/.keras/datasets/')
GLOVE_FILE = 'glove.840B.300d.txt'
print("Processing", GLOVE_FILE)
embeddings_index = {}
with open(KERAS_DATASETS_DIR + GLOVE_FILE, encoding='utf-8') as f:
    for line in f:
        values = line.split(' ')
        word = values[0]
        # everything after the token is the embedding vector
        embedding = np.asarray(values[1:], dtype='float32')
        embeddings_index[word] = embedding
print('Word embeddings: %d' % len(embeddings_index))
# -
def get_embedding_layer():
    """Build a frozen Keras Embedding layer initialized from GloVe vectors.

    Reads module-level globals: `word_index` (token -> id),
    `embeddings_index` (token -> vector), and `EMBEDDING_DIM` /
    `MAX_SEQUENCE_LENGTH`.
    NOTE(review): neither EMBEDDING_DIM nor MAX_SEQUENCE_LENGTH is
    assigned anywhere in this file's visible code — they must come from
    an earlier session; verify before running.
    """
    count = 0
    # Rows default to uniform random in [-1, 0); rows for words found in
    # GloVe are overwritten below. (The original comment claiming missing
    # words are "all-zeros" was incorrect — they stay random.)
    embedding_matrix = np.random.uniform(-1,0,(len(word_index) + 1, EMBEDDING_DIM))
    for word, i in word_index.items():
        embedding_vector = embeddings_index.get(word)
        if embedding_vector is not None:
            embedding_matrix[i] = embedding_vector
            count += 1
    print('Word embeddings: %d' % len(embeddings_index))
    print('found number of tokens in embedding space: ', count)
    # trainable=False freezes the pretrained vectors during training.
    embedding_layer = Embedding(len(word_index) + 1,
                                EMBEDDING_DIM,
                                weights=[embedding_matrix],
                                input_length=MAX_SEQUENCE_LENGTH,
                                trainable=False)
    return embedding_layer
# +
# # %%time
# from keras.layers import TimeDistributed
# from keras.layers import RepeatVector
# epochs=50
# batch_size=128
# # configure problem
# n_features = 50
# n_timesteps_in = 5
# n_timesteps_out = 2
# def encoder_decoder_baseline():
# # define model
# model = Sequential()
# model.add(get_embedding_layer())
# model.add(LSTM(150, dropout=0.2, recurrent_dropout=0.2))
# # model.add(LSTM(150, input_shape=(n_timesteps_in, n_features)))
# model.add(RepeatVector(n_timesteps_in))
# model.add(LSTM(150, return_sequences=True))
# model.add(TimeDistributed(Dense(MAX_SEQUENCE_LENGTH, activation='softmax')))
# model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
# return model
# model = encoder_decoder_baseline()
# print(model.summary())
# network_hist = model.fit(x_train, y_train,
# batch_size=batch_size,
# validation_data=(x_test, y_test),
# epochs=epochs,
# verbose=1,
# validation_split=0.2)
# score = model.evaluate(x_test, y_test,
# batch_size=batch_size, verbose=1)
# print('Test loss:', score[0])
# print('Test accuracy:', score[1])
# print(score)
# plot_history(network_hist)
# # Plot normalized confusion matrix
# plot_confusion_matrix(model, classes=labels, title='Confusion matrix')
# +
# %%time
# Very hopeful... work on this
# https://github.com/keras-team/keras/blob/master/examples/pretrained_word_embeddings.py
epochs=50
batch_size=128
# train a 1D convnet with global maxpooling
def word_embedding_model():
    """Three-stage Conv1D/pooling stack over the frozen GloVe embedding.

    Returns a compiled functional Model (rmsprop, categorical crossentropy).
    """
    print('building word embedding model')
    tokens_in = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
    features = get_embedding_layer()(tokens_in)
    # Two conv + pool stages, then a final conv with global pooling.
    for _ in range(2):
        features = Conv1D(64, 5, activation='relu')(features)
        features = MaxPooling1D(5)(features)
    features = Conv1D(64, 5, activation='relu')(features)
    features = GlobalMaxPooling1D()(features)
    features = Dense(64, activation='relu')(features)
    outputs = Dense(num_classes, activation='softmax')(features)
    net = Model(tokens_in, outputs)
    net.compile(loss='categorical_crossentropy',
                optimizer='rmsprop',
                metrics=['acc'])
    return net
# Build, train and evaluate the conv net defined above.
model = word_embedding_model()
print(model.summary())
# NOTE(review): both validation_data and validation_split are passed;
# Keras uses validation_data and ignores the split — confirm intent.
network_hist = model.fit(x_train, y_train,
                         batch_size=batch_size,
                         validation_data=(x_test, y_test),
                         epochs=epochs,
                         verbose=1,
                         validation_split=0.2)
score = model.evaluate(x_test, y_test,
                       batch_size=batch_size, verbose=1)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
print(score)
print(model.metrics_names)
plot_history(network_hist)
# Plot normalized confusion matrix
plot_confusion_matrix(model, classes=labels, title='Confusion matrix')
# +
# %%time
# https://github.com/keras-team/keras/blob/master/examples/imdb_lstm.py
# 84% 85%
# epochs = 20
# batch_size = 64
def imbd_lstm():
    """Single-layer LSTM classifier over the frozen GloVe embedding.

    Returns a compiled Sequential model. The original constructed an Adam
    optimizer that was never used (the model compiles with 'RMSprop');
    that dead local has been removed.
    """
    print('Build LSTM model...')
    model = Sequential()
    model.add(get_embedding_layer())
    model.add(LSTM(128, dropout=0.2, recurrent_dropout=0.2))
    # NOTE(review): sigmoid + categorical_crossentropy is unusual for
    # multi-class output; softmax is the conventional pairing — confirm.
    model.add(Dense(num_classes, activation='sigmoid'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='RMSprop',
                  metrics=['accuracy'])
    return model
# Train and evaluate the LSTM; `batch_size`/`epochs` come from earlier cells.
model = imbd_lstm()
print(model.summary())
print('Train...')
# NOTE(review): validation_data and validation_split are both passed;
# Keras uses validation_data and ignores the split — confirm intent.
network_hist = model.fit(x_train, y_train,
                         batch_size=batch_size, epochs=epochs,
                         verbose=1, validation_data=(x_test, y_test),
                         validation_split=0.2)
score, acc = model.evaluate(x_test, y_test,
                            batch_size=batch_size)
print('Test score:', score)
print('Test accuracy:', acc)
plot_history(network_hist)
# Plot normalized confusion matrix
plot_confusion_matrix(model, classes=labels, title='Confusion matrix')
# +
# https://richliao.github.io/supervised/classification/2016/12/26/textclassifier-RNN/
# 81%
# epochs=10
batch_size=128
def getRNN_classifier():
    """Bidirectional LSTM (128 units per direction) text classifier.

    Returns a compiled functional Model (rmsprop, categorical crossentropy).
    """
    inputs = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
    embedded = get_embedding_layer()(inputs)
    encoded = Bidirectional(LSTM(128))(embedded)
    outputs = Dense(num_classes, activation='softmax')(encoded)
    clf = Model(inputs, outputs)
    clf.compile(loss='categorical_crossentropy',
                optimizer='rmsprop',
                metrics=['acc'])
    return clf
# Train and evaluate the bidirectional LSTM.
model = getRNN_classifier()
print("model fitting - Bidirectional LSTM")
model.summary()
# NOTE(review): validation_data and validation_split are both passed;
# Keras uses validation_data and ignores the split — confirm intent.
network_hist = model.fit(x_train, y_train,
                         batch_size=batch_size,
                         validation_data=(x_test, y_test),
                         epochs=epochs,
                         verbose=1,
                         validation_split=0.2)
score = model.evaluate(x_test, y_test,
                       batch_size=batch_size, verbose=1)
print('Test score:', score[0])
print('Test accuracy:', score[1])
plot_history(network_hist)
# Plot normalized confusion matrix
plot_confusion_matrix(model, classes=labels, title='Confusion matrix')
# +
# %%time
# https://github.com/Theo-/sentiment-analysis-keras-conv/blob/master/train_keras.py
def sentiment_keras():
    """Stacked Conv1D classifier (64/32/16 filters, width 3) over GloVe.

    Convolutional model (3x conv, flatten, 2x dense); returns a compiled
    Sequential model (rmsprop, categorical crossentropy).
    """
    net = Sequential()
    net.add(get_embedding_layer())
    # Three same-padded convolutions with shrinking filter counts.
    for n_filters in (64, 32, 16):
        net.add(Convolution1D(n_filters, 3, padding='same'))
    net.add(Flatten())
    net.add(Dropout(0.2))
    net.add(Dense(180, activation='sigmoid'))
    net.add(Dropout(0.2))
    net.add(Dense(num_classes, activation='sigmoid'))
    net.compile(loss='categorical_crossentropy',
                optimizer='rmsprop',
                metrics=['acc'])
    return net
# Train and evaluate the stacked-conv model.
model = sentiment_keras()
print(model.summary())
# NOTE(review): validation_data and validation_split are both passed;
# Keras uses validation_data and ignores the split — confirm intent.
network_hist = model.fit(x_train, y_train,
                         batch_size=batch_size,
                         validation_data=(x_test, y_test),
                         epochs=epochs, verbose=1,
                         validation_split=0.2)
score = model.evaluate(x_test, y_test,
                       batch_size=batch_size, verbose=1)
print('Test score:', score[0])
print('Test accuracy:', score[1])
plot_history(network_hist)
# Plot normalized confusion matrix
plot_confusion_matrix(model, classes=labels, title='Confusion matrix')
# +
# %%time
# https://github.com/keras-team/keras/blob/master/examples/imdb_cnn.py
# 81% 82%
# set parameters:
batch_size = 256
filters = 250
kernel_size = 3
hidden_dims = 250
# epochs = 10
def imbd_cnn():
    """1D convnet with global max pooling (adapted from Keras imdb_cnn).

    Returns a compiled Sequential model. Two fixes vs. the original:
    the unused Adam instance was removed (it compiles with 'RMSprop'),
    and the stale comment claiming a "single unit ... sigmoid" output —
    copied from the binary imdb example — was corrected: this model ends
    in num_classes units with softmax.
    """
    print('Build model...')
    model = Sequential()
    # we start off with an efficient embedding layer which maps
    # our vocab indices into embedding_dims dimensions
    # model.add(get_embedding_layer())
    model.add(Embedding(len(word_index) + 1, EMBEDDING_DIM, input_length=MAX_SEQUENCE_LENGTH))
    model.add(Dropout(0.2))
    # we add a Convolution1D, which will learn filters
    # word group filters of size filter_length:
    model.add(Conv1D(filters,
                     kernel_size,
                     padding='valid',
                     activation='relu',
                     strides=1))
    # we use max pooling:
    model.add(GlobalMaxPooling1D())
    # We add a vanilla hidden layer:
    model.add(Dense(hidden_dims))
    model.add(Dropout(0.2))
    model.add(Activation('relu'))
    # Project onto num_classes outputs and squash with a softmax.
    model.add(Dense(num_classes))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='RMSprop',
                  metrics=['accuracy'])
    return model
# Train and evaluate the imdb-style CNN.
model = imbd_cnn()
print(model.summary())
print('Training...')
# NOTE(review): validation_split and validation_data are both passed;
# Keras uses validation_data and ignores the split — confirm intent.
network_hist = model.fit(x_train, y_train,
                         batch_size=batch_size,
                         epochs=epochs, validation_split=0.2,
                         validation_data=(x_test, y_test))
score, acc = model.evaluate(x_test, y_test,
                            batch_size=batch_size)
print('Test score:', score)
print('Test accuracy:', acc)
plot_history(network_hist)
# Plot normalized confusion matrix
plot_confusion_matrix(model, classes=labels, title='Confusion matrix')
# +
# %%time
# https://github.com/keras-team/keras/blob/master/examples/imdb_cnn_lstm.py
# 85% - 82% -85% 86%
# Convolution
kernel_size = 5
filters = 64
pool_size = 4
# LSTM
lstm_output_size = 70
# Training
batch_size = 128
# epochs = 20
def LSTMModel():
    """Conv1D front-end followed by an LSTM (from Keras imdb_cnn_lstm).

    Returns a compiled Sequential model. The original constructed an Adam
    optimizer that was never used (it compiles with 'RMSprop'); that dead
    local has been removed.
    """
    print('Build model...')
    model = Sequential()
    model.add(get_embedding_layer())
    model.add(Dropout(0.2))
    model.add(Conv1D(filters,
                     kernel_size,
                     padding='valid',
                     activation='relu',
                     strides=1))
    model.add(MaxPooling1D(pool_size=pool_size))
    model.add(LSTM(lstm_output_size))
    model.add(Dense(num_classes))
    # NOTE(review): sigmoid + categorical_crossentropy is unusual for
    # multi-class output; softmax is the conventional pairing — confirm.
    model.add(Activation('sigmoid'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='RMSprop',
                  metrics=['accuracy'])
    return model
# Train and evaluate the CNN+LSTM model.
model = LSTMModel()
print(model.summary())
print('Train...')
# NOTE(review): validation_split and validation_data are both passed;
# Keras uses validation_data and ignores the split — confirm intent.
network_hist = model.fit(x_train, y_train,
                         batch_size=batch_size,
                         epochs=epochs, validation_split=0.2,
                         validation_data=(x_test, y_test))
score, acc = model.evaluate(x_test, y_test, batch_size=batch_size)
print('Test score:', score)
print('Test accuracy:', acc)
plot_history(network_hist)
# Plot normalized confusion matrix
plot_confusion_matrix(model, classes=labels, title='Confusion matrix')
# +
# %%time
# https://github.com/bhaveshoswal/CNN-text-classification-keras/blob/master/model.py
# 84% - %83 - 83%
vocabulary_size = len(word_index)
# embedding_dim = 100
filter_sizes = [3,4,5]
num_filters = 512
drop = 0.5
# epochs = 20
# batch_size = 10
def CNNTextClassification():
    """Multi-width 2D-convolution text classifier.

    Runs parallel Conv2D branches (filter heights 3/4/5 over the full
    embedding width), max-pools each over time, concatenates, and
    classifies with a softmax. Returns a compiled functional Model (Adam).

    Bug fix: the original built a ModelCheckpoint here but never passed it
    to fit(), so it had no effect — it has been removed. Wire one into
    fit(callbacks=[...]) at the call site if checkpointing is wanted.
    """
    print("Creating Model...")
    inputs = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
    embedding_layer = get_embedding_layer()
    embedding = embedding_layer(inputs)
    # Add a trailing channels dim so Conv2D slides over (words x dims).
    reshape = Reshape((MAX_SEQUENCE_LENGTH, EMBEDDING_DIM, 1))(embedding)
    # One conv + max-pool branch per filter height.
    pooled = []
    for size in filter_sizes:
        conv = Conv2D(num_filters, kernel_size=(size, EMBEDDING_DIM),
                      padding='valid', kernel_initializer='normal', activation='relu')(reshape)
        pooled.append(MaxPool2D(pool_size=(MAX_SEQUENCE_LENGTH - size + 1, 1),
                                strides=(1,1), padding='valid')(conv))
    concatenated_tensor = Concatenate(axis=1)(pooled)
    flatten = Flatten()(concatenated_tensor)
    dropout = Dropout(drop)(flatten)
    output = Dense(units=num_classes, activation='softmax')(dropout)
    # this creates a model that includes
    model = Model(inputs=inputs, outputs=output)
    adam = Adam(lr=1e-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    model.compile(optimizer=adam,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
# Train and evaluate the multi-width CNN.
model = CNNTextClassification()
print(model.summary())
print("Traning Model...")
network_hist = model.fit(x_train, y_train, batch_size=batch_size,
                         epochs=epochs, verbose=1,
                         validation_data=(x_test, y_test))  # starts training
score, acc = model.evaluate(x_test, y_test, batch_size=batch_size)
print('Test score:', score)
print('Test accuracy:', acc)
plot_history(network_hist)
# Plot normalized confusion matrix
plot_confusion_matrix(model, classes=labels, title='Confusion matrix')
# +
# Keras convolutional model
# 80% 85% 85%
# batch_size = 10
epochs = 30
def keras_conv():
model = Sequential()
model.add(get_embedding_layer())
model.add(Conv1D(32, kernel_size=3, activation='elu', padding='same'))
model.add(Conv1D(32, kernel_size=3, activation='elu', padding='same'))
model.add(Conv1D(32, kernel_size=3, activation='elu', padding='same'))
model.add(Conv1D(32, kernel_size=3, activation='elu', padding='same'))
model.add(Dropout(0.25))
model.add(Conv1D(32, kernel_size=2, activation='elu', padding='same'))
model.add(Conv1D(32, kernel_size=2, activation='elu', padding='same'))
model.add(Conv1D(32, kernel_size=2, activation='elu', padding='same'))
model.add(Conv1D(32, kernel_size=2, activation='elu', padding='same'))
model.add(Dropout(0.25))
model.add(Flatten())
# model.add(Dense(256, activation='tanh'))
model.add(Dense(256, activation='sigmoid'))
model.add(Dropout(0.5))
model.add(Dense(units=num_classes, activation='softmax'))
adam = Adam(lr=0.0001, decay=1e-6)
# Compile the model
model.compile(loss='categorical_crossentropy',
optimizer=adam,
metrics=['accuracy'])
return model
# Bug fix: the result was assigned to a misspelled name (`mdoel`), so the
# summary/fit below silently reused whatever `model` was left over from a
# previous cell instead of the freshly built conv net.
model = keras_conv()
print(model.summary())
print('Training...')
# Fit the model with early stopping on stalled validation improvement.
network_hist = model.fit(x_train, y_train,
                         batch_size=batch_size,
                         shuffle=True,
                         epochs=epochs,
                         validation_data=(x_test, y_test),
                         callbacks=[EarlyStopping(min_delta=0.00025, patience=2)])
score, acc = model.evaluate(x_test, y_test, batch_size=batch_size)
print('Test score:', score)
print('Test accuracy:', acc)
plot_history(network_hist)
# Plot normalized confusion matrix
plot_confusion_matrix(model, classes=labels, title='Confusion matrix')
# +
# %%time
# simple
# epochs = 20
# 84%, 82%, 83%
def simple_model():
    """Baseline: frozen embedding -> Flatten -> Dense(num_classes).

    Returns a compiled Sequential model (Adam, lr=1e-4).
    """
    # define model ('simpel' typo in the status message fixed)
    print('simple model')
    model = Sequential()
    model.add(get_embedding_layer())
    model.add(Flatten())
    # NOTE(review): sigmoid + categorical_crossentropy is unusual for
    # multi-class output; softmax is the conventional pairing — confirm.
    model.add(Dense(num_classes, activation='sigmoid'))
    adam = Adam(lr=1e-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    # compile the model
    model.compile(optimizer=adam,
                  loss='categorical_crossentropy', metrics=['acc'])
    return model
# Train and evaluate the flatten-dense baseline.
model = simple_model()
print(model.summary())
print('Training...')
# Fit the model
network_hist = model.fit(x_train, y_train,
                         batch_size=batch_size,
                         shuffle=True,
                         epochs=epochs,
                         validation_data=(x_test, y_test))
score, acc = model.evaluate(x_test, y_test, batch_size=batch_size)
print('Test score:', score)
print('Test accuracy:', acc)
plot_history(network_hist)
# Plot normalized confusion matrix
plot_confusion_matrix(model, classes=labels, title='Confusion matrix')
# +
# https://richliao.github.io/supervised/classification/2016/12/26/textclassifier-RNN/
# 83% 83% 86%
epochs = 20
def RNN_model():
    """Bidirectional LSTM (100 units per direction) text classifier.

    Same architecture family as getRNN_classifier but with 100-unit cells.
    Returns a compiled functional Model (rmsprop, categorical crossentropy).
    """
    inputs = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
    embedded = get_embedding_layer()(inputs)
    hidden = Bidirectional(LSTM(100))(embedded)
    outputs = Dense(num_classes, activation='softmax')(hidden)
    net = Model(inputs, outputs)
    net.compile(loss='categorical_crossentropy',
                optimizer='rmsprop',
                metrics=['acc'])
    return net
# Train and evaluate the 100-unit bidirectional LSTM.
model = RNN_model()
print("model fitting - Bidirectional LSTM")
print(model.summary())
network_hist = model.fit(x_train, y_train,
                         batch_size=batch_size,
                         shuffle=True,
                         epochs=epochs,
                         validation_data=(x_test, y_test))
score, acc = model.evaluate(x_test, y_test, batch_size=batch_size)
print('Test score:', score)
print('Test accuracy:', acc)
plot_history(network_hist)
# Plot normalized confusion matrix
plot_confusion_matrix(model, classes=labels, title='Confusion matrix')
# +
def create_model():
    """Embedding -> MLP classifier built with the Sequential API.

    Returns:
        A compiled Keras model producing ``num_classes`` sigmoid outputs.
    """
    # create model
    embedding_layer = get_embedding_layer()
    # (removed dead code: an unused Input/embedded_sequences pair left over
    # from a functional-API draft — the Sequential model below never used it)
    model = Sequential()
    model.add(embedding_layer)
    model.add(Dense(128, input_dim=MAX_SEQUENCE_LENGTH, activation='relu'))
    model.add(Dropout(0.2))
    model.add(Dense(64, activation='relu'))
    model.add(Flatten())
    model.add(Dense(num_classes, activation='sigmoid'))
    # Compile model
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    # print(model.summary())
    return model
# +
# Convolution hyper-parameters for the Conv1D+GRU model below
kernel_size = 5
filters = 64
pool_size = 4
# LSTM
# recurrent layer width (used for the GRU despite the name)
lstm_output_size = 70
def GRUModel():
    """Conv1D + max-pool + GRU classifier over embedded token sequences.

    Uses the module-level ``filters``, ``kernel_size``, ``pool_size`` and
    ``lstm_output_size`` hyper-parameters.

    Returns:
        A compiled Keras ``Sequential`` model with ``num_classes`` sigmoid
        outputs, trained with RMSprop.
    """
    print('Build model...')
    model = Sequential()
    model.add(get_embedding_layer())
    model.add(Dropout(0.2))
    model.add(Conv1D(filters,
                     kernel_size,
                     padding='valid',
                     activation='relu',
                     strides=1))
    model.add(MaxPooling1D(pool_size=pool_size))
    model.add(GRU(lstm_output_size))
    model.add(Dense(num_classes))
    model.add(Activation('sigmoid'))
    # (removed dead code: an Adam instance was constructed here but never
    # passed to compile — the model has always trained with RMSprop)
    model.compile(loss='categorical_crossentropy',
                  optimizer='RMSprop',
                  metrics=['accuracy'])
    return model
# Instantiate the GRU model and print its layer summary.
model = GRUModel()
print(model.summary())
# +
from sklearn.model_selection import StratifiedKFold, cross_val_score
from sklearn.metrics import classification_report, f1_score, precision_score, recall_score
import itertools
from tqdm import tqdm
from sklearn.metrics import confusion_matrix
def test(model, data):
    """Evaluate ``model`` on held-out data and return macro-averaged metrics.

    Args:
        model: trained Keras classifier exposing ``predict_classes``.
        data: tuple ``(x_test, y_test)`` where ``y_test`` is one-hot encoded.

    Returns:
        Tuple ``(f1, precision, recall)`` of macro-averaged scores.
    """
    x_test, y_test = data
    print('-' * 50)
    # (removed dead code: a model.predict() call whose result was immediately
    # overwritten by predict_classes below)
    # NOTE(review): predict_classes was removed in TF >= 2.6; if upgrading,
    # replace with np.argmax(model.predict(x_test), axis=1).
    y_pred = model.predict_classes(x_test)
    true_labels = np.argmax(y_test, 1)
    f1 = f1_score(true_labels, y_pred, average='macro')
    precision = precision_score(true_labels, y_pred, average='macro')
    recall = recall_score(true_labels, y_pred, average='macro')
    return f1, precision, recall
# +
# %%time
# 10-fold stratified cross-validation of the GRU model; collects macro
# F1/precision/recall per fold and saves the table to CSV.
epochs = 10
batch_size=64
print(X.shape)
print(Y.shape)
# evaluate using 10-fold cross validation
# NOTE(review): split uses lowercase `y` while training uses one-hot `Y` —
# StratifiedKFold needs 1-D labels, so `y` is presumably the integer label
# vector from an earlier cell; confirm both refer to the same samples.
skf = StratifiedKFold(n_splits=10, random_state=42, shuffle=True)
models = {
# 'not_learning' : not_learning() ,
# 'create_model' : create_model(),
# 'getRNN_classifier' : getRNN_classifier(),
# 'simple_model' : simple_model(),
# 'ker/as_conv' : keras_conv(),
# 'RNN_model' : RNN_model(),
# 'CNNTextClassification' : CNNTextClassification(),
# 'LSTMModel' : LSTMModel(),
# 'word_embedding_model' : word_embedding_model(),
# 'sentiment_keras' : sentiment_keras(),
# 'imbd_lstm' : imbd_lstm(),
# 'GRUModel' : GRUModel(),
# 'imbd_cnn' : imbd_cnn()
}
# def getModel(name):
# if(not_learning):
# return not_learning()
# else if('create_model' ):
# return create_model()
# else if('getRNN_classifier' ):
# return getRNN_classifier()
# else if('simple_model'):
# return simple_model()
# else if('keras_conv' ):
# return keras_conv()
# else if('RNN_model' ):
# return RNN_model(),
# else if('CNNTextClassification' ):
# # 'CNNTextClassification' : CNNTextClassification(),
# else if('LSTMModel'):
# return LSTMModel()
# else if('word_embedding_model' ):
# return word_embedding_model(),
# else if('sentiment_keras' ):
# return sentiment_keras(),
# else if('imbd_lstm' ):
# return imbd_lstm(),
# else if('GRUModel' ):
# 'GRUModel' : GRUModel(),
# else if('create_model' ):
# # 'imbd_cnn' : imbd_cnn()
score = []
for train_index, test_index in tqdm(skf.split(X, y)):
    # print("TRAIN:", train_index, "TEST:", test_index)
    x_train, x_test = X[train_index], X[test_index]
    y_train, y_test = Y[train_index], Y[test_index]
    # a fresh, untrained model per fold so folds stay independent
    model = GRUModel()
    model.fit(x_train, y_train,
        batch_size=batch_size,
        epochs=epochs, verbose=0,
        validation_data=(x_test, y_test))
    # result = model.evaluate(x_test, y_test, batch_size=batch_size)
    f1, percision, recall = test(model=model, data=(x_test, y_test))
    score.append({'f1': f1, 'percision' : percision, 'recall' : recall})
    print('F1: ', f1, ' percision: ', percision, ' recall: ', recall)
    # print(score_temp)
    # score.append([name, score_temp])
# print('average percision over kfolds:', np.average([x['percision'] for x in score_temp]))
print('average f1 over kfolds:', np.average([x['f1'] for x in score]))
print('average percision over kfolds:', np.average([x['percision'] for x in score]))
print('average recall over kfolds:', np.average([x['recall'] for x in score]))
# print(score)
# persist the per-fold metrics table
df_result = pd.DataFrame(score)
df_result.head()
df_result.to_csv('data/NNResults.csv')
# +
df_result.plot()
df_result
# -
# Fixed: ``score`` is a list of dicts (built in the k-fold loop above), so
# indexing with x[1] raised KeyError. Report the macro-F1 average instead.
print('average score over kfolds:', np.mean([x['f1'] for x in score]))
| NNExperiments_FakeNews.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Can you Beat the Bot?
#
#
#
# This notebook pits you, the human (?), against a reward-hacking and meta-learning machine agent.
# +
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:95% !important; }</style>"))
import os
from random import shuffle
import numpy as np
import torch
import time
from carle.env import CARLE
from carle.mcl import CornerBonus, SpeedDetector, PufferDetector, AE2D, RND2D
from game_of_carle.agents.harli import HARLI
from game_of_carle.agents.carla import CARLA
from game_of_carle.agents.grnn import ConvGRNN
from game_of_carle.agents.toggle import Toggle
import bokeh
import bokeh.io as bio
from bokeh.io import output_notebook, show, curdoc
from bokeh.plotting import figure
from bokeh.layouts import column, row
from bokeh.models import TextInput, Button, Paragraph
from bokeh.models import ColumnDataSource
from bokeh.events import DoubleTap, Tap
import matplotlib.pyplot as plt
my_cmap = plt.get_cmap("viridis")
output_notebook()
# +
# Environment/agent configuration for the human-vs-HARLI game.
device_string = "cpu"
obs_dim = 32   # CA universe height/width
act_dim = 6    # clickable action sub-grid height/width
agent = HARLI(device=device_string, obs_dim=obs_dim, act_dim=act_dim)
# collect all saved HARLI glider policies from ../policies/
policy_list = []
directory_list = os.listdir("../policies/")
for filename in directory_list:
    if "HARLI" in filename and "glider" in filename:
        policy_list.append(os.path.join("..", "policies", filename))
policy_list.sort()
# instantiate CARLE with a speed detection wrapper
env = CARLE(height=obs_dim, width=obs_dim, action_height=act_dim, action_width=act_dim, device=device_string)
env = SpeedDetector(env)
# set rules
# this agent was trained on B3/S023, B3/S236, B3/S237, and B3/S238
# NOTE(review): my_rules appears unused below — the rulesets actually played
# are assigned inside modify_doc's start(); confirm before removing.
my_rules = "B3678/S34678"
# load the first (alphabetically) policy's parameters into the agent
agent.set_params(np.load(policy_list[0]))
print(f"{len(policy_list)} HARLI mobility policies found.")
# +
def modify_doc(doc):
    """Assemble the interactive 'human vs HARLI' Bokeh application.

    Builds the CA-universe image, reward trace and agent-weight plots plus
    the control buttons, then alternates bot and human episodes over a
    shuffled list of rulesets driven by a Bokeh periodic callback. Shared
    game state lives in module-level globals so the nested callbacks can
    read and mutate it.
    """
    def agent_off():
        # Hand control over to the human player: slow the tick period,
        # restart the ruleset sequence and restore the run/pause controls.
        global agent_on
        global rule_index
        global my_period
        my_period = 512
        agent_on = False
        rule_index = 0
        reset_next_ruleset()
        # Fixed: was `button_go.label == "Run >"` — a no-op comparison
        # instead of an assignment, so the button kept its old label.
        button_go.label = "Run >"
        doc.add_root(control_layout)
        p.title = "Human's Turn"
    def summary_screen():
        # Final comparison screen: bar chart plus per-ruleset score table.
        global p_bar
        if button_go.label != "Run >":
            doc.remove_periodic_callback(doc.session_callbacks[0])
            button_go.label = "Run >"
        my_width = 0.25
        x = [0, 2, 4 ]
        x2 = [elem+my_width*2 for elem in x]
        # reward per step; max(100, steps) guards against tiny step counts
        harli_top = [elem[1] / max([100, elem[2]]) for elem in harli_scores]
        human_top = [elem[1] / max([100, elem[2]]) for elem in human_scores]
        p_bar = figure(plot_width=3*256, plot_height=3*256, title="BAR PLOT!")
        p_bar.vbar(x, width=0.5, top=harli_top, color=[[elem * 255 for elem in my_cmap(.35)]]*3, legend_label="BOT")
        p_bar.vbar(x2, width=0.5, top=human_top,color=[[elem * 255 for elem in my_cmap(.75)]]*3, legend_label="HMN ")
        p_bar.vbar([6], width=0, top=0.0)
        doc.add_root(p_bar)
        doc.add_root(message1)
        doc.add_root(message2)
        doc.add_root(message3)
        doc.add_root(message4)
        doc.add_root(message5)
        doc.add_root(message6)
        human_average = np.mean([elem[1] / max([100, elem[2]]) for elem in human_scores])
        harli_average = np.mean([elem[1] / max([100, elem[2]]) for elem in harli_scores])
        message1.text = "**************************************************** REWARDS per step ****************************************************"
        message2.text = "__rules___________________________________________human_____________________________________HARLI__"
        message3.text = f"{harli_scores[0][0]}_____________________________________{human_scores[0][1] / max([100, human_scores[0][2]]):.4f}"\
            f"_____________________________________{harli_scores[0][1] / max([100, harli_scores[0][2]]):.4f}"
        message4.text = f"{harli_scores[1][0]}_____________________________________{human_scores[1][1] / max([100, human_scores[1][2]]):.4f}"\
            f"_____________________________________{harli_scores[1][1] / max([100, harli_scores[1][2]]):.4f}"
        message5.text = f"{harli_scores[2][0]}_____________________________________{human_scores[2][1] / max([100, human_scores[2][2]]):.4f}"\
            f"_____________________________________{harli_scores[2][1] / max([100, harli_scores[2][2]]):.4f}"
        message6.text = f"Average__________________________________________{human_average:.4f}_____________________________________{harli_average:.4f}"
        doc.remove_root(display_layout)
        doc.remove_root(button_go)
        doc.remove_root(control_layout)
        doc.remove_root(rule_layout)
        doc.remove_root(message_layout)
        doc.add_root(button_start_over)
    def update():
        # One simulation tick: advance the env, stream the plots, and step
        # through the (bot turn, human turn) x ruleset schedule.
        global obs
        global stretch_pixel
        global action
        global agent_on
        global my_step
        global max_steps
        global rule_index
        global rewards
        global reward_sum
        if agent_on and rule_index == len(rules) and my_step >= max_steps:
            # bot finished the last ruleset -> hand over to the human
            harli_scores.append((rules[rule_index-1], reward_sum, my_step))
            agent_off()
        elif not(agent_on) and rule_index == len(rules) and my_step >= max_steps:
            # human finished the last ruleset -> show the summary screen
            human_scores.append((rules[rule_index-1], reward_sum, my_step))
            summary_screen()
        elif agent_on and my_step >= max_steps:
            harli_scores.append((rules[rule_index-1], reward_sum, my_step))
            reset_next_ruleset()
        elif not(agent_on) and my_step >= max_steps:
            human_scores.append((rules[rule_index-1], reward_sum, my_step))
            reset_next_ruleset()
        else:
            obs, r, d, i = env.step(action)
            rewards = np.append(rewards, r.cpu().numpy().item())
            if agent_on:
                action = agent(obs)
            else:
                # human actions are applied once, then cleared
                action = torch.zeros_like(action)
            padded_action = stretch_pixel/2 + env.inner_env.action_padding(action).squeeze()
            my_img = (padded_action*2 + obs.squeeze()).cpu().numpy()
            my_img[my_img > 3.0] = 3.0
            # (removed a dead no-op expression statement that recomputed
            # the image without using the result)
            new_data = dict(my_image=[my_img])
            my_weights = agent.get_weights().reshape(dim_wh, dim_ww)
            new_weights = dict(my_image=[my_weights])
            new_line = dict(x=[my_step], y=[r.cpu().numpy().item()])
            source.stream(new_data, rollover=1)
            source_plot.stream(new_line, rollover=2000)
            source_weights.stream(new_weights, rollover=1)
            my_step += 1
            reward_sum += r.item()
            turn_msg = "Bot's turn: " if agent_on else "Human's turn: "
            message.text = f"{turn_msg}step {my_step}, reward: {r.item():.4f}, mean reward per step: {(reward_sum/my_step):.4f} \n"\
                f" rule index = {rule_index}"
        return
    def go():
        # Toggle the periodic simulation callback on/off.
        if button_go.label == "Run >":
            doc.add_periodic_callback(update, my_period)
            button_go.label = "Pause"
        else:
            doc.remove_periodic_callback(doc.session_callbacks[0])
            button_go.label = "Run >"
        # clear any intro-screen widgets and show the run controls
        doc.remove_root(button_start)
        doc.remove_root(message1)
        doc.remove_root(message2)
        doc.remove_root(message3)
        doc.remove_root(message4)
        doc.remove_root(message5)
        doc.add_root(control_layout)
    def reset_next_ruleset():
        # Reset env/agent/plot state and advance to the next ruleset.
        global obs
        global action
        global stretch_pixel
        global my_step
        global rewards
        global reward_sum
        global rule_index
        global human_scores
        global harli_scores
        reward_sum = 0.0
        my_step = 0
        new_line = dict(x=[my_step], y=[0])
        obs = env.reset()
        # marker pixel keeps the image color scale stretched to [0, 3]
        stretch_pixel = torch.zeros_like(obs).squeeze()
        stretch_pixel[0,0] = 3
        agent.reset()
        if agent_on:
            action = agent(obs)
        else:
            action = torch.zeros_like(action)
        padded_action = stretch_pixel/2 + env.inner_env.action_padding(action).squeeze()
        my_img = (padded_action*2 + obs.squeeze()).cpu().numpy()
        my_img[my_img > 3.0] = 3.0
        new_data = dict(my_image=[my_img])
        my_weights = agent.get_weights().reshape(dim_wh, dim_ww)
        new_weights = dict(my_image=[my_weights])
        source.stream(new_data, rollover=1)
        source_plot.stream(new_line, rollover=2)
        source_weights.stream(new_weights, rollover=1)
        message.text = f"step {my_step} \n"\
            f"{policy_list[0]}"
        rewards = np.array([0])
        source_plot.stream(new_line, rollover=1)
        source.stream(new_data, rollover=8)
        env.rules_from_string(rules[rule_index])
        rule_index += 1
        # pause at the start of each of the human's rulesets
        if (button_go.label == "Pause") and not(agent_on) and rule_index != 0:
            go()
    def start():
        # (Re)initialize a full match: shuffle the rulesets, reset scores
        # and build the intro screen. The bot plays first.
        global my_period
        global my_step
        global rule_index
        global max_steps
        global human_scores
        global harli_scores
        global rules
        global p_bar
        global agent_on
        rule_index = 0
        max_steps = 128
        agent_on = True
        harli_scores = []
        human_scores = []
        rules = ["B3___/S23___", \
                 "B368_/S245__", \
                 "B3678/S34678"]
        shuffle(rules)
        p_bar = figure(plot_width=3*256, plot_height=3*256, title="BAR PLOT!")
        doc.remove_root(message1)
        doc.remove_root(message2)
        doc.remove_root(message3)
        doc.remove_root(message4)
        doc.remove_root(message5)
        doc.remove_root(message6)
        doc.remove_root(p_bar)
        doc.add_root(display_layout)
        doc.remove_root(button_start_over)
        #doc.add_root(rule_layout)
        doc.add_root(message_layout)
        doc.add_root(message1)
        doc.add_root(message2)
        doc.add_root(message3)
        doc.add_root(message4)
        doc.add_root(message5)
        reset_next_ruleset()
        message.text = ""
        message1.text = f"Can you beat the bot?"
        message2.text = f" You are tasked with maximizing the mean reward displayed in the top right by clicking cells in the {act_dim} by {act_dim} 'action space' in the center of the grid on the left."
        message3.text = f" The grid universe will cycle through {len(rules)} different shuffled rulesets, your only guide is the reward signal!"
        message4.text = f" Your adversary has never trained on these rulesets either, and starts from a random initialization of weights."
        message5.text = f" Click 'Go!' to get started, HARLI will go first. ___ (I'm sorry this text is small)"
        doc.add_root(button_start)
        p.title = "Bot's Turn"
    def faster():
        # Halve the tick period (floor 32 ms); the double go() restarts the
        # periodic callback so the new period takes effect.
        global my_period
        my_period = max([my_period * 0.5, 32])
        go()
        go()
    def slower():
        # Double the tick period (cap 8192 ms) and restart the callback.
        global my_period
        my_period = min([my_period * 2, 8192])
        go()
        go()
    def human_toggle(event):
        # Tap handler: flip the clicked cell inside the central action
        # sub-grid — only while it is the human's turn.
        global action
        if not(agent_on):
            coords = [np.round(env.height*event.y/256-0.5), np.round(env.width*event.x/256-0.5)]
            offset_x = (env.height - env.action_height) / 2
            offset_y = (env.width - env.action_width) / 2
            coords[0] = coords[0] - offset_x
            coords[1] = coords[1] - offset_y
            coords[0] = np.uint8(np.clip(coords[0], 0, env.action_height-1))
            # NOTE(review): clamps both axes with action_height; presumably
            # action_width was intended here (harmless while the grid is
            # square) — confirm before changing.
            coords[1] = np.uint8(np.clip(coords[1], 0, env.action_height-1))
            action[:, :, coords[0], coords[1]] = 1.0 * (not(action[:, :, coords[0], coords[1]]))
            padded_action = stretch_pixel/2 + env.inner_env.action_padding(action).squeeze()
            my_img = (padded_action*2 + obs.squeeze()).cpu().numpy()
            my_img[my_img > 3.0] = 3.0
            new_data = dict(my_image=[my_img])
            source.stream(new_data, rollover=8)
    # Shared state mutated by the callbacks above is declared global here.
    global obs
    global my_period
    global agent_on
    global action
    global reward_sum
    global max_steps
    global rule_index
    global human_scores
    global harli_scores
    dim_wh = 24
    dim_ww = 24
    obs = env.reset()
    my_weights = agent.get_weights().reshape(dim_wh, dim_ww)
    # figures: main universe, reward trace, agent weights, summary bars
    p = figure(plot_width=3*256, plot_height=3*256, title="CA Universe")
    p_plot = figure(plot_width=int(1.25*256), plot_height=int(1.25*256), title="'Reward'")
    p_weights = figure(plot_width=int(1.255*256), plot_height=int(1.25*256), title="Weights")
    p_bar = figure(plot_width=3*256, plot_height=3*256, title="BAR PLOT!")
    reward_sum = 0.0
    my_period = 32
    agent_on = True
    action = torch.zeros(1, 1, env.action_height, env.action_width)
    source = ColumnDataSource(data=dict(my_image=[obs.squeeze().cpu().numpy()]))
    source_plot = ColumnDataSource(data=dict(x=np.arange(1), y=np.arange(1)*0))
    source_weights = ColumnDataSource(data=dict(my_image=[my_weights]))
    img = p.image(image='my_image',x=0, y=0, dw=256, dh=256, palette="Magma256", source=source)
    line_plot = p_plot.line(line_width=3, color="firebrick", source=source_plot)
    img_w = p_weights.image(image='my_image',x=0, y=0, dw=240, dh=240, palette="Magma256", source=source_weights)
    button_go = Button(sizing_mode="stretch_width", label="Run >")
    button_slower = Button(sizing_mode="stretch_width",label="<< Slower")
    button_faster = Button(sizing_mode="stretch_width",label="Faster >>")
    button_next_ruleset = Button(sizing_mode="stretch_width", label="Next ruleset")
    button_start = Button(sizing_mode="stretch_width", label="Go!")
    button_start_over = Button(sizing_mode="stretch_width", label="Go again!")
    message = Paragraph(default_size=900)
    message1 = Paragraph(default_size=900)
    message2 = Paragraph(default_size=900)
    message3 = Paragraph(default_size=900)
    message4 = Paragraph(default_size=900)
    message5 = Paragraph(default_size=900)
    message6 = Paragraph(default_size=900)
    p.on_event(Tap, human_toggle)
    button_start_over.on_click(start)
    button_go.on_click(go)
    # NOTE(review): the "Go!" button is wired to go(), not start() — start()
    # has already run once at the bottom of this function, so this resumes
    # rather than reinitializes; confirm this is the intended flow.
    button_start.on_click(go)
    button_faster.on_click(faster)
    button_slower.on_click(slower)
    button_next_ruleset.on_click(reset_next_ruleset)
    control_layout = row(button_slower, button_go, button_faster)
    rule_layout = row(button_next_ruleset)
    display_layout = row(p, column(p_plot, p_weights))
    message_layout = row(message)
    start()
show(modify_doc)  # launch the Bokeh application inline in the notebook
| notebooks/human_vs_harli.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Crop single cell images in the HPA dataset
#
# This notebook shows an approach to crop images to obtain single-cell level images from the Human Protein Atlas (HPA)-Single Cell Classification competition. Functions and other ideas were used from other Kaggle notebooks listed below.
#
# ## 1 Transform images in the training set to single-cell images
# For a single image and using the cell segmentations mask from CellSegmentator we can generate a RGB image for each cell in the image as follows:
#
# 1. Segment image using CellSegmentator.
# 2. Add bounding boxes using cell mask from the first step.
# 3. Crop using the bounding boxes.
#
# Repeating this process for all images generates a new dataset containing single-cell level images for each original image. Additionally we should generate a new .csv file containing the new single-cell identifiers and their labels.
#
# ## 2 General comments
# In this notebook only the first 10 images in the train set were used to demonstrate how the cropping process works. However, the train set is so large that it will require more computing power to complete the whole task in a reasonable time. Some image quality evaluation could be implemented; for example, images that are too small or that only contain a small area of a single cell could be removed. Maybe this is not the most efficient way to complete this task, so any suggestions or comments are always welcome.
#
#
# Reference Notebooks:
#
# (1) [HPA_segmentation_and_BBOXES](https://www.kaggle.com/philipjamessullivan/hpa-segmentation-and-bboxes) by philipjamessullivan and acqua.
#
# (2) [Crop images using bounding box](https://www.kaggle.com/whizzkid/crop-images-using-bounding-box) by <NAME>.
#
# (3) [pretrained ResNet34 with RGBY (0.460 public LB)](https://www.kaggle.com/iafoss/pretrained-resnet34-with-rgby-0-460-public-lb) by Iafoss.
#
#
#
# +
# libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import glob
import warnings
import os,gc,cv2
import shutil
import random
from tqdm.notebook import tqdm
from PIL import Image, ImageDraw
# %matplotlib inline
warnings.filterwarnings('ignore')
# HPA single-cell image segmentation
# !pip install https://github.com/CellProfiling/HPA-Cell-Segmentation/archive/master.zip
import hpacellseg.cellsegmentator as cellsegmentator
from hpacellseg.utils import label_cell, label_nuclei
# -
# directories
DIR = '../input/hpa-single-cell-image-classification/'
os.listdir(DIR)
# csv files:
CSV_PATH=os.path.join(DIR,'train.csv')
# image files:
IMG_FOLDER_PATH=os.path.join(DIR, 'train/')
# create new directories in the output directories for masks and cropped images
CROPS = './sc_cropped_img_HPA/'
if not os.path.exists(CROPS):
    os.makedirs(CROPS)
# directory to save dataframe containing new single cell image identifiers and labels
NEW_DF='./train_sc_df.csv'
# read id/label csv to array
id_labels_array=pd.read_csv(CSV_PATH)
# fix labels (convert to arrays)
# "1|4|7" -> [1, 4, 7]
id_labels_array["Label"]=id_labels_array["Label"].apply(lambda x:list(map(int, x.split("|"))))
# create list of all ids from the id_labels_array
ids_images=(id_labels_array["ID"]).tolist()
# create dictionary of all unique labels from the id_labels_array
labels=id_labels_array.set_index('ID').T.to_dict('list')
# fix the dictionary format (2d arrays to 1D arrays)
# (the comprehension deliberately rebinds `labels` per item to unwrap the
# single-element lists produced by to_dict('list'))
labels = {num: labels[0] for num, labels in labels.items()}
# +
# Define CellSegmentator class
# [source: https://github.com/CellProfiling/HPA-Cell-Segmentation]
#-----------------------------------------------------------
# [1] path to the nuclei model weights:
NUC_MODEL = './nuclei-model.pth'
#-----------------------------------------------------------
# [2] path to the cell model weights:
CELL_MODEL = './cell-model.pth'
#-----------------------------------------------------------
# [3] scale_factor: determines how much the images should be
# scaled before being fed to the models. For HPA Cell images,
# a value of 0.25 (default) is good.
#-----------------------------------------------------------
# [4] device: Inform Torch which device to put the model on.
#Valid values are ‘cpu’ or ‘cuda’ or pointed cuda device
# like 'cuda:0’. Defaults to cuda.
#-----------------------------------------------------------
# [5] padding: If True, add some padding before feeding the
# images to the neural networks. This is not required but
# can make segmentations, especially cell segmentations,
# more accurate. Defaults to False. Note: If you have issues
# running the segmentation due to image dimensions, setting
# padding to True may help.
#-----------------------------------------------------------
# [6] multi_channel_model: If True, use the pretrained
# three-channel version of the model. Having this set to
# True gives you better cell segmentations but requires
# you to give the model endoplasmic reticulum images as
# part of the cell segmentation. Otherwise, the version
# trained with only two channels, microtubules and nuclei,
# will be used. Defaults to True
#-----------------------------------------------------------
# Build the CellSegmentator using the weight paths/options documented above.
segmentator = cellsegmentator.CellSegmentator(
    NUC_MODEL,
    CELL_MODEL,
    scale_factor=0.25,
    device="cpu",
    padding=True,
    multi_channel_model=True,
)
# +
# functions:
def open_rgby(img_id, img_dir):
    '''Load the four filter images (red, green, blue, yellow) of one sample
    and stack them into a single H x W x 4 float array scaled to [0, 1].
    ---------------------------------
    Arguments:
        img_id -- image file identifier
        img_dir -- the path containing all image files
    Returns:
        stacked (RGBY) image as a float32 ndarray'''
    channels = []
    for color in ['red', 'green', 'blue', 'yellow']:
        path = os.path.join(img_dir, img_id + '_' + color + '.png')
        raw = cv2.imread(path, cv2.IMREAD_UNCHANGED)
        channels.append(raw.astype(np.float32) / 255)
    return np.stack(channels, axis=-1)
def generate_single_mask(img_id, img_dir):
    '''Run the HPA CellSegmentator on one image and return its cell mask.
    ---------------------------------
    Arguments:
        img_id -- image file identifier
        img_dir -- the path containing all image files
    Returns:
        cell_mask as a uint8 ndarray (0 = background, k = k-th cell)'''
    base = os.path.join( img_dir + img_id )
    # microtubules (red), endoplasmic reticulum (yellow), nuclei (blue)
    microtubules = np.asarray(Image.open(base + "_red.png"))
    er = np.asarray(Image.open(base + "_yellow.png"))
    nuclei = np.asarray(Image.open(base + "_blue.png"))
    nuc_pred = segmentator.pred_nuclei([nuclei])
    cell_pred = segmentator.pred_cells([
        [microtubules],
        [er],
        [nuclei] ])
    # label_cell also returns a nuclei mask, which this pipeline discards
    _, cell_mask = label_cell(nuc_pred[0], cell_pred[0])
    return np.uint8(cell_mask)
def bbox(mask, cell_id):
    '''Bounding box of one cell within a segmentation mask.
    ---------------------------------
    Arguments:
        mask -- cell mask for single image as ndarray
        cell_id -- cell identifier (integer) from HPA CellSegmentator
    Returns:
        bounding-box coordinates: xmin, ymin, xmax, ymax'''
    rows, cols = np.where(mask == cell_id)
    return np.min(cols), np.min(rows), np.max(cols), np.max(rows)
def crop_sc_imgs(imgid_list, dtrain, img_dir, out_dir, csv_dir):
    '''Crop every segmented cell out of each listed image and save the
    crops plus a new ID/Label dataframe.
    ---------------------------------
    Arguments:
        imgid_list -- image file identifiers to process
        dtrain -- train set dataframe containing image indentifiers and labels
        img_dir -- the path containing all image files
        out_dir -- path for saving single-cell cropped images
        csv_dir -- path to save new train set dataframe
    Side effects:
        writes one PNG per detected cell into out_dir and a CSV of
        (single-cell ID, label) rows to csv_dir.
    '''
    new_ids = []
    new_labels = []
    for imgid in tqdm(imgid_list):
        img = open_rgby(img_id=imgid, img_dir=img_dir)
        # image-level multi-labels are inherited by every cell crop
        df_img = dtrain[ dtrain["ID"]==imgid ]
        cell_label = df_img["Label"].iloc[0]
        # image cell mask
        cell_mask = generate_single_mask(img_id=imgid, img_dir=img_dir)
        # unique cell ids; discard() (not remove()) so an image without any
        # background pixel cannot raise KeyError
        cell_ids = set(np.ravel(cell_mask))
        cell_ids.discard(0)
        for cell_id in cell_ids:
            cell_img_id = imgid + '_sc_' + str(cell_id)
            new_ids.append(cell_img_id)
            new_labels.append(cell_label)
            xmin, ymin, xmax, ymax = bbox(cell_mask, cell_id)
            # NOTE(review): the slice excludes the max row/column; use
            # ymax+1 / xmax+1 if the last pixel line of each cell matters.
            new_img = img[ymin:ymax, xmin:xmax]
            # rescale the [0, 1] float image back to uint8
            new_img = cv2.convertScaleAbs(new_img, alpha=(255.0))
            # fixed: proper two-argument os.path.join instead of string
            # concatenation inside a single-argument join
            cv2.imwrite(os.path.join(out_dir, cell_img_id + '.png'), new_img)
    new_df = pd.DataFrame({'ID': new_ids, 'Label': new_labels})
    new_df.to_csv(csv_dir, index=False)
# -
# reads train set dataframe again
train_df=pd.read_csv(CSV_PATH)
# select only 10 images and use the function
images=ids_images[0:10]
crop_sc_imgs(imgid_list=images, dtrain=train_df, img_dir=IMG_FOLDER_PATH, out_dir=CROPS, csv_dir=NEW_DF)
# checks old and new dataframes
new_train_df=pd.read_csv(NEW_DF)
print( train_df.head(), '\n' )
print( new_train_df.head(), '\n' )
# plot a randomly selected single-cell cropped image
sc_imgs=glob.glob( os.path.join(CROPS + '*.png') )
random_img=random.sample(sc_imgs, 1)[0]
ex_sc_img=cv2.imread(random_img)
plt.imshow(ex_sc_img)
plt.axis('off')
plt.title('single-cell image example')
plt.show()
| Kaggle_notebooks/hpa-challenge-2021_generate-crop-sc-imgs-hpa.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="pJ-w7K4eu6SK"
# # Logistic regression
#
# In this exercise you will train a logistic regression model via gradient descent in two simple scenarios.
#
# The general setup is as follows:
# * we are given a set of pairs $(x, y)$, where $x \in R^D$ is a vector of real numbers representing the features, and $y \in \{0,1\}$ is the target,
# * for a given $x$ we model the probability of $y=1$ by $h(x):=g(w^Tx)$, where $g$ is the sigmoid function: $g(z) = \frac{1}{1+e^{-z}}$,
# * to find the right $w$ we will optimize the so called logarithmic loss: $J(w) = -\frac{1}{n}\sum_{i=1}^n y_i \log{h(x_i)} + (1-y_i) \log{(1-h(x_i))}$,
# * with the loss function in hand we can improve our guesses iteratively:
# * $w_j^{t+1} = w_j^t - \text{step_size} \cdot \frac{\partial J(w)}{\partial w_j}$,
# * we can end the process after some predefined number of epochs (or when the changes are no longer meaningful).
# + [markdown] id="xt2z7CdJu6SQ"
# Let's start with the simplest example - linearly separated points on a plane.
# + id="Wg_d38Fou6SU"
# %matplotlib inline
import numpy as np
np.random.seed(123)
# these parametrize the line
# decision boundary: a*x0 + b*x1 + c = 0
a = 0.3
b = -0.2
c = 0.001
# 1/0 mapping
# labels a point 1 when it lies on the negative side of the (noisy) line
def lin_rule(x, noise=0.):
    return int(a * x[0] + b * x[1] + c + noise < 0.)
# Just for plotting
# returns x1 as a function of x0 along the line a*x0 + b*x1 + c = 0
def get_y_fun(a, b, c):
    def y(x):
        return - x * a / b - c / b
    return y
lin_fun = get_y_fun(a, b, c)
# + id="ZZEHHKP8u6Si" colab={"base_uri": "https://localhost:8080/"} outputId="08534a0d-4376-43cd-b939-67fbbfcb3987"
# Training data
# n points uniform in [-range_points, range_points]^2, labeled by the noisy line rule
n = 500
range_points = 1
sigma = 0.05
X = np.array(range_points * 2 * (np.random.rand(n, 2) - 0.5))
y = np.array([lin_rule(x, sigma * np.random.normal()) for x in X])
print(X[:10])
print(y[:10])
# + [markdown] id="CoTCKl3Yu6St"
# Let's plot the data.
# + id="qc99EecDu6Sw" colab={"base_uri": "https://localhost:8080/", "height": 663} outputId="621282b2-c9c0-4771-f58e-733b923d497a"
import matplotlib.pyplot as plt
import seaborn as sns
range_plot = 1.1
h = .002
plt.figure(figsize=(11,11))
plt.scatter(X[:, 0], X[: , 1], c=y)
# overlay the true decision boundary
_x = np.linspace(-range_plot, range_plot, 1000)
_y = lin_fun(_x)
plt.plot(_x, _y)
# + [markdown] id="vq3J7fZpu6S4"
# Now, let's implement and train a logistic regression model.
# + id="sZRi38V1qmiF"
import numpy as np
def g(z):
    """Sigmoid function, applied element-wise: 1 / (1 + exp(-z))."""
    return 1.0 / (1.0 + np.exp(-z))
def cost(y, h):
    """Mean logarithmic (binary cross-entropy) loss of predictions h
    against targets y, both 1-D arrays of equal length."""
    assert len(y) == len(h)
    positive_term = np.dot(y.transpose(), np.log(h))
    negative_term = np.dot((1 - y).transpose(), np.log(1 - h))
    return -1/len(y) * (positive_term + negative_term)
# + id="Lw-eg0x0u6S6"
import numpy as np
class LogisticRegression:
    """Binary logistic regression trained by batch gradient descent.

    After ``fit`` the learned parameter vector (bias first) is available
    as ``self._theta``. Relies on the module-level ``g`` (sigmoid) and
    ``cost`` (log-loss) helpers.
    """

    def _prepare_X(self, X):
        # Prepend a column of ones so the first weight acts as the bias.
        return np.c_[np.ones(X.shape[0]), X]

    def _h(self, X, theta):
        # Predicted probability P(y=1 | x) for each row of X.
        return g(np.dot(X, theta))

    def fit(self, X, y, n_epochs=10000, alpha=0.25):
        """Run gradient descent and return the per-epoch loss history."""
        n = len(y)
        X = self._prepare_X(X)
        theta = np.zeros(X.shape[1])
        losses = [cost(y, self._h(X, theta))]
        for _ in range(n_epochs):
            gradient = np.dot(X.transpose(), self._h(X, theta) - y)
            theta = theta - alpha/n * gradient
            losses.append(cost(y, self._h(X, theta)))
        self._theta = theta
        return losses

    def predict(self, X):
        """Hard 0/1 predictions using a 0.5 probability threshold."""
        X = self._prepare_X(X)
        return (g(np.dot(X, self._theta)) >= 0.5).astype(int)
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="mstxg9rNkNev" outputId="37b9a065-64a9-4bbb-aff6-1a76d779221e"
# Fit on the linearly separable data and plot the loss curve.
regr = LogisticRegression()
losses = regr.fit(X, y)
plt.plot(losses)
plt.show()
# + [markdown] id="BextVVMWu6TB"
# Let's visually assess our model. We can do this by using our estimates for $a,b,c$.
# + id="odWHQD9Au6TE" colab={"base_uri": "https://localhost:8080/", "height": 663} outputId="91c5c5d5-1dfa-4fff-869e-e6f9cdd0a1b3"
plt.figure(figsize=(11,11))
# theta is [bias, w0, w1] -> interpreted as line parameters c, a, b
[c, a, b] = regr._theta
lin_fun2 = get_y_fun(a, b, c)
_y2 = lin_fun2(_x)
plt.figure(figsize=(11,11))
plt.scatter(X[:, 0], X[: , 1], c=y)
# plot true boundary and the learned one together
plt.plot(_x, _y, _x, _y2)
plt.show()
# + [markdown] id="u43DFWVFu6TO"
# Let's now complicate the things a little bit and make our next problem nonlinear.
# + id="qNCns_WIu6TS"
# Parameters of the ellipse
s1 = 1.
s2 = 2.
r = 0.75
m1 = 0.15
m2 = 0.125
# 1/0 mapping, checks whether we are inside the ellipse
def circle_rule(x, noise=0.):
    return int(s1 * (x[0] - m1) ** 2 + s2 * (x[1] - m2) ** 2 + noise < r ** 2.)
# + id="H91RdYcOu6Tb" colab={"base_uri": "https://localhost:8080/"} outputId="36d1c7ce-47e2-4ac6-81fb-8bb86be11f73"
# Training data
# same uniform sampling as before, labeled by the noisy ellipse rule
n = 500
range_points = 1
sigma = 0.1
X = np.array(range_points * 2 * (np.random.rand(n, 2) - 0.5))
y = np.array([circle_rule(x, sigma * np.random.normal()) for x in X])
print(X[:10])
print(y[:10])
# + [markdown] id="1keKZp-su6Tl"
# Let's plot the data.
# + id="_5qQnZLBu6Tr" colab={"base_uri": "https://localhost:8080/", "height": 663} outputId="88accde6-c7fb-487a-d8ca-268381520ad7"
# Plot the ground-truth region (mesh) with the sampled points on top.
range_plot = 1.1
h = .005
plt.figure(figsize=(11,11))
xx, yy = np.meshgrid(np.arange(-range_plot, range_plot, h), np.arange(-range_plot, range_plot, h))
Z = np.array(list(map(circle_rule, np.c_[xx.ravel(), yy.ravel()])))
Z = Z.reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
plt.scatter(X[:, 0], X[: , 1], c=y)
# + [markdown] id="qMwKzVQZu6Tw"
# Now, let's train a logistic regression model to tackle this problem. Note that we now need a nonlinear decision boundary.
# + [markdown] id="Kcnc848fu6Tx"
# Hint:
# <sub><sup><sub><sup><sub><sup>
# Use feature engineering.
# </sup></sub></sup></sub></sup></sub>
# + id="shlIfIw7hDtC"
def prep_fe(X):
    """Feature engineering: append the element-wise square of every column.

    For an (n, d) input this returns an (n, 2d) array whose first d columns
    are X and whose last d columns are X**2, giving the linear model access
    to quadratic terms (needed for an elliptical decision boundary).

    Vectorized replacement for the original per-column loop; output layout
    (original columns first, then their squares in the same order) is
    unchanged.
    """
    X = np.asarray(X)
    return np.c_[X, X * X]
# + id="cPINtZzou6T0" colab={"base_uri": "https://localhost:8080/", "height": 265} outputId="029272a0-cbbe-40d3-96fd-b8568fafb39f"
X_fe = prep_fe(X)
losses = regr.fit(X_fe, y, alpha=0.9)
plt.plot(losses)
plt.show()
# + [markdown] id="8nYLJvI4u6T7"
# Let's visually assess our model.
#
# Contrary to the previous scenario, converting our weights to parameters of the ground truth curve may not be straightforward. It's easier to just provide predictions for a set of points in $R^2$.
# + id="8vn13Nfuu6T9" colab={"base_uri": "https://localhost:8080/"} outputId="95c34518-601f-45d8-fc5b-277f07c67ecf"
range_plot = 1.1
h = .005
xx, yy = np.meshgrid(np.arange(-range_plot, range_plot, h), np.arange(-range_plot, range_plot, h))
X_plot = np.c_[xx.ravel(), yy.ravel()]
print(X_plot)
print(X_plot.shape)
X_plot_fe = prep_fe(X_plot)
preds = regr.predict(X_plot_fe)
# + id="cE_jWcRZu6UG" colab={"base_uri": "https://localhost:8080/", "height": 663} outputId="07004d15-3cb1-4d96-b355-e135b7415b2b"
plt.figure(figsize=(11,11))
Z = preds
Z = np.array(Z).reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
Z = np.array(list(map(circle_rule, X_plot)))
Z = Z.reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, alpha=0.1, cmap=plt.cm.Paired)
plt.scatter(X[:, 0], X[:, 1], c=y)
| Deep neural networks/Exercise_4_Logistic_Regression_P3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Credits
# TensorFlow translation of [Lasagne tutorial](https://github.com/DeepLearningDTU/02456-deep-learning/blob/master/week5/lab51_AE.ipynb). Thanks to [skaae](https://github.com/skaae), [casperkaae](https://github.com/casperkaae) and [larsmaaloee](https://github.com/larsmaaloee).
# # Dependencies and supporting functions
# Load the dependencies and supporting functions by running the code block below.
from __future__ import division, print_function
import matplotlib
import matplotlib.pyplot as plt
from IPython.display import Image, display, clear_output
# %matplotlib nbagg
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import sklearn.datasets
import tensorflow as tf
from tensorflow.python.framework.ops import reset_default_graph
# # Auto-encoders 101
# In this notebook you will implement a simple auto-encoder (AE). We assume that you are already familiar with the basics of neural networks. We'll start by defining an AE similar to the one used for the finetuning step by [<NAME> and <NAME>](https://www.cs.toronto.edu/~hinton/science.pdf). We'll experiment with the AE setup and try to run it on the MNIST dataset. There has been a wide variety of research into the field of auto-encoders and the technique that you're about to learn is very simple compared to recent advances (e.g. [the Ladder network](https://arxiv.org/abs/1507.02672) and [VAEs](https://arxiv.org/abs/1312.6114)). However, the basic idea stays the same.
#
# AEs are used within unsupervised learning, in which you do not have a target $y$. Instead it *encodes* an input $x$ into a latent state $z$ and decodes $z$ into a reconstruction $\hat{x}$. This way the parameters of the network can be optimized w.r.t. the difference between $x$ and $\hat{x}$. Depending on the input distribution, the difference can be measured in various ways, e.g. mean squared error (MSE). In many applications the auto-encoder will find an internal state of each data point corresponding to a feature. So if we are to model the MNIST dataset, one could expect that the internal state would correspond to a digit-class and/or the shape.
#
# *The exercises are found at the bottom of the notebook*
# ## MNIST
# First let us load the MNIST dataset and plot a few examples. We only load a limited amount of classes to speed up training.
# +
from sklearn.utils import shuffle
# To speed up training we'll only work on a subset of the data containing only the numbers 0, 1.
data = np.load('../lab1_FFN/mnist.npz')
num_classes = 2
# Collect, per split, the indices of all examples belonging to the first
# `num_classes` digit classes. The lists are built class by class, so they
# are ordered by class until shuffled below.
idxs_train = []
idxs_valid = []
idxs_test = []
for i in range(num_classes):
    idxs_train += np.where(data['y_train'] == i)[0].tolist()
    idxs_valid += np.where(data['y_valid'] == i)[0].tolist()
    idxs_test += np.where(data['y_test'] == i)[0].tolist()
x_train = data['X_train'][idxs_train].astype('float32')
# Since this is unsupervised, the targets are only used for validation.
targets_train = data['y_train'][idxs_train].astype('int32')
# Shuffle so classes are interleaved rather than grouped (fixed seed for reproducibility).
x_train, targets_train = shuffle(x_train, targets_train, random_state=1234)
x_valid = data['X_valid'][idxs_valid].astype('float32')
targets_valid = data['y_valid'][idxs_valid].astype('int32')
x_test = data['X_test'][idxs_test].astype('float32')
targets_test = data['y_test'][idxs_test].astype('int32')
print("training set dim(%i, %i)." % x_train.shape)
print("validation set dim(%i, %i)." % x_valid.shape)
print("test set dim(%i, %i)." % x_test.shape)
# -
#plot a few MNIST examples
idx = 0
canvas = np.zeros((28*10, 10*28))
for i in range(10):
for j in range(10):
canvas[i*28:(i+1)*28, j*28:(j+1)*28] = x_train[idx].reshape((28, 28))
idx += 1
plt.figure(figsize=(7, 7))
plt.axis('off')
plt.imshow(canvas, cmap='gray')
plt.title('MNIST handwritten digits')
# ### Building the model
# When defining the model the latent layer $z$ must act as a bottleneck of information. We initialize the AE with 1 hidden layer in the encoder and decoder using relu units as non-linearities. The latent layer has a dimensionality of 2 in order to make it easy to visualise. Since $x$ are pixel intensities that are normalized between 0 and 1, we use the sigmoid non-linearity to model the reconstruction.
from tensorflow.contrib.layers import fully_connected
from tensorflow.python.ops.nn import relu, sigmoid
# +
# define in/output size
num_features = x_train.shape[1]
# reset graph
reset_default_graph()
# define the model
x_pl = tf.placeholder(tf.float32, [None, num_features], 'x_pl')
l_enc = fully_connected(inputs=x_pl, num_outputs=128, activation_fn=relu, scope='l_enc')
l_z = fully_connected(inputs=l_enc, num_outputs=2, activation_fn=None, scope='l_z') # None indicates a linear output.
l_dec = fully_connected(inputs=l_z, num_outputs=128, activation_fn=relu, scope='l_dec')
l_out = fully_connected(inputs=l_dec, num_outputs=num_features, activation_fn=sigmoid) # iid pixel intensities between 0 and 1.
# -
# Following we define the TensorFlow functions for training and evaluation.
# +
# calculate loss
loss_per_pixel = tf.square(tf.sub(l_out, x_pl))
loss = tf.reduce_mean(loss_per_pixel, name="mean_square_error")
# if you want regularization
#reg_scale = 0.0005
#regularize = tf.contrib.layers.l2_regularizer(reg_scale)
#params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
#reg_term = sum([regularize(param) for param in params])
#loss += reg_term
# define our optimizer
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.25)
# make training op for applying the gradients
train_op = optimizer.minimize(loss)
# -
# test the forward pass
_x_test = np.zeros(shape=(32, num_features))
# initialize the Session
sess = tf.Session()
# test the forward pass
sess.run(tf.initialize_all_variables())
feed_dict = {x_pl: _x_test}
res_forward_pass = sess.run(fetches=[l_out], feed_dict=feed_dict)
print("l_out", res_forward_pass[0].shape)
# In the training loop we sample each batch and evaluate the error, latent space and reconstructions every epoch.
# +
batch_size = 100
num_epochs = 100
num_samples_train = x_train.shape[0]
num_batches_train = num_samples_train // batch_size
num_samples_valid = x_valid.shape[0]
num_batches_valid = num_samples_valid // batch_size
updates = []
train_loss = []
valid_loss = []
cur_loss = 0
plt.figure(figsize=(12, 24))
try:
    for epoch in range(num_epochs):
        # Forward -> backprop -> parameter update over random mini-batches.
        cur_loss = []
        for i in range(num_batches_train):
            # Sample a batch of unique training examples (no replacement within a batch).
            idxs = np.random.choice(range(x_train.shape[0]), size=(batch_size), replace=False)
            x_batch = x_train[idxs]
            # Fetch the train op plus the loss, reconstructions and latent codes.
            fetches_train = [train_op, loss, l_out, l_z]
            feed_dict_train = {x_pl: x_batch}
            # Run the complete backprop pass for this batch.
            res_train = sess.run(fetches_train, feed_dict_train)
            _, batch_loss, train_out, train_z = tuple(res_train)
            cur_loss += [batch_loss]
        # Record the mean training loss for this epoch and the cumulative
        # number of samples seen (used as the x-axis of the error plot).
        train_loss += [np.mean(cur_loss)]
        updates += [batch_size*num_batches_train*(epoch+1)]
        # Evaluate loss, reconstructions and latent codes on the full validation set.
        fetches_eval = [loss, l_out, l_z]
        feed_dict_eval = {x_pl: x_valid}
        res_valid = sess.run(fetches_eval, feed_dict_eval)
        eval_loss, eval_out, eval_z = tuple(res_valid)
        valid_loss += [eval_loss]
        if epoch == 0:
            # Skip plotting on the very first epoch (only one data point so far).
            continue
        # -- Plot 1: train/valid error curves over updates --
        plt.subplot(num_classes+1,2,1)
        plt.title('Error')
        plt.legend(['Train Error', 'Valid Error'])
        plt.xlabel('Updates'), plt.ylabel('Error')
        plt.plot(updates, train_loss, color="black")
        plt.plot(updates, valid_loss, color="grey")
        plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0))
        plt.grid('on')
        # -- Plot 2: 2-D latent space scatter, colored by digit class --
        plt.subplot(num_classes+1,2,2)
        plt.cla()
        plt.title('Latent space')
        plt.xlabel('z0'), plt.ylabel('z1')
        color = iter(plt.get_cmap('brg')(np.linspace(0, 1.0, num_classes)))
        for i in range(num_classes):
            clr = next(color)
            plt.scatter(eval_z[targets_valid==i, 0], eval_z[targets_valid==i, 1], c=clr, s=5., lw=0, marker='o', )
        plt.grid('on')
        # -- Plots 3+: per-class 10x10 canvases of inputs vs. reconstructions --
        c=0
        for k in range(3, 3 + num_classes*2, 2):
            plt.subplot(num_classes+1,2,k)
            plt.cla()
            plt.title('Inputs for %i' % c)
            plt.axis('off')
            idx = 0
            canvas = np.zeros((28*10, 10*28))
            for i in range(10):
                for j in range(10):
                    canvas[i*28:(i+1)*28, j*28:(j+1)*28] = x_valid[targets_valid==c][idx].reshape((28, 28))
                    idx += 1
            plt.imshow(canvas, cmap='gray')
            plt.subplot(num_classes+1,2,k+1)
            plt.cla()
            plt.title('Reconstructions for %i' % c)
            plt.axis('off')
            idx = 0
            canvas = np.zeros((28*10, 10*28))
            for i in range(10):
                for j in range(10):
                    canvas[i*28:(i+1)*28, j*28:(j+1)*28] = eval_out[targets_valid==c][idx].reshape((28, 28))
                    idx += 1
            plt.imshow(canvas, cmap='gray')
            c+=1
        # Write the figure to disk and refresh the notebook display in place.
        plt.savefig("out51.png")
        display(Image(filename="out51.png"))
        clear_output(wait=True)
except KeyboardInterrupt:
    # Let the user interrupt training manually without killing the session.
    pass
# -
# ### Exercise 1 - Analyzing the AE
# 1. The above implementation of an AE is very simple.
# - *Experiment with the number of layers and non-linearities in order to improve the reconstructions.*
# - *What happens with the network when we change the non-linearities in the latent layer (e.g. sigmoid)?*
# - *Try to increase the number of digit classes in the training set and analyze the results.*
# - *Test different optimization algorithms and decide whether you should use regularizers*.
#
# 2. Currently we optimize w.r.t. mean squared error.
# - *Find another error function that could fit this problem better.*
# - *Evaluate whether the error function is a better choice and explain your findings.*
#
# 3. Complexity of the bottleneck.
# - *Increase the number of units in the latent layer and train.*
# - *Visualize by using [PCA](http://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA.html) or [t-SNE](http://scikit-learn.org/stable/modules/generated/sklearn.manifold.TSNE.html).*
# ### Exercise 2 - Adding classification (for the ambitious)
# The above training has been performed unsupervised. Now let us assume that we only have a fraction of labeled data points from each class (implemented below). As we know, semi-supervised learning can be utilized by combining unsupervised and supervised learning. Now you must analyze whether a trained AE from the above exercise can aid a classifier.
#
# 1. Build a simple classifier (like the ones from week1) where you:
# - *Train on the labeled dataset and evaluate the results.*
# 2. Build a second classifier and train on the latent output $z$ of the AE.
# 3. Build a third classifier and train on the reconstructions of the AE.
# 4. Evaluate the classifiers against each other and implement a model that improves the classification by combining the input, latent output and reconstruction.
# +
# Generate a subset of labeled data points
num_labeled = 10 # You decide on the size of the fraction...
def onehot(t, num_classes):
    """One-hot encode the integer label array t into an (n, num_classes) matrix."""
    encoded = np.zeros((t.shape[0], num_classes))
    encoded[np.arange(t.shape[0]), np.asarray(t).ravel()] = 1
    return encoded
idxs_train_l = []
for i in range(num_classes):
idxs = np.where(targets_train == i)[0]
idxs_train_l += np.random.choice(idxs, size=num_labeled).tolist()
x_train_l = x_train[idxs_train_l]
targets_train_l = targets_train[idxs_train_l]
print("labeled training set dim(%i, %i)." % x_train_l.shape)
plt.figure(figsize=(12, 7))
for i in range(num_classes*num_labeled):
im = x_train_l[i].reshape((28, 28))
plt.subplot(1, num_classes*num_labeled, i + 1)
plt.imshow(im, cmap='gray')
plt.axis('off')
| tensorflow/tensorflow-tutorial-3/lab5_AE/lab5_AE.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 模块导入
import pandas as pd
import numpy as np
import json
from keras.utils import np_utils
data = pd.read_csv("data/train_clean.csv")
# # 连续值特征数据处理
def continuous_features(data):
    """Return the continuous columns, z-score normalized, plus the raw ip column.

    Output is an ndarray with columns [time, resolution_ratio, ip]; the first
    two are standardized column-wise with pandas' sample std (ddof=1).
    """
    cont = data[["time", "resolution_ratio"]]
    cont = cont.apply(lambda col: (col - col.mean()) / col.std())
    cont["ip"] = data["ip"]
    return np.array(cont)
continuous_train = continuous_features(data)
continuous_train.shape
# # one-hot特征数据处理
def one_hot_features(data):
    """Build one-hot encodings for the low-cardinality categorical columns.

    The number of categories for each feature is taken from
    data/index_json.json when available; otherwise it falls back to the
    maximum observed value in `data`. Returns the horizontally stacked
    one-hot matrix for the six categorical features.
    """
    def make_one_hot(feature, one_hot_data):
        # Prefer the category count recorded in the index file; fall back to
        # the data itself when the feature is missing (KeyError) or its entry
        # has no length (TypeError). The original bare `except:` also caught
        # KeyboardInterrupt/SystemExit, which we must not swallow.
        try:
            n = len(index_json[feature])
        except (KeyError, TypeError):
            n = int(data[feature].max())
        return np_utils.to_categorical(one_hot_data, n + 1)
    with open("data/index_json.json", "r") as f:
        index_json = json.load(f)
    one_hot_set = {}
    for i in ["apptype", "dvctype", "ntt", "carrier", "orientation", "lan"]:
        one_hot_set[i] = make_one_hot(i, np.uint8(data[i].values.reshape(len(data[i]), 1)))
    # Stack the per-feature one-hot blocks side by side into one matrix.
    one_hot_train = np.hstack(tuple(one_hot_set.values()))
    return one_hot_train
one_hot_train = one_hot_features(data)
one_hot_train.shape
# # embedding特征数据处理
def embedding_features(data):
    """Select the high-cardinality categorical columns destined for embedding layers."""
    cols = ["pkgname", "adunitshowid", "mediashowid", "city",
            "adidmd5", "imeimd5", "openudidmd5", "macmd5",
            "model", "osv"]
    return np.array(data[cols])
embedding_train = embedding_features(data)
embedding_train.shape
# # 整合
Y_train = data["label"]
Y_train.shape
unembedding_train = np.hstack((continuous_train, one_hot_train))
unembedding_train.shape
| .ipynb_checkpoints/data_process-checkpoint.ipynb |
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .cpp
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: C++14
// language: C++14
// name: xeus-cling-cpp14
// ---
// # `xtensor` basics usage features
//
// A brief overview of xtensor basic usage. Most of the examples are provided by the [official documentation](https://xtensor.readthedocs.io/en/latest/).
// Including the necessary libs:
// +
#include <iostream>
#include <typeinfo>
#include "xtensor/xarray.hpp"
#include "xtensor/xio.hpp" // this one is necessary for a proper display in notebooks
#include "xtensor/xview.hpp"
#include "xtensor/xmath.hpp"
// -
// ## Basic `xarray` manipulations
// Declaring arrays:
xt::xarray<double> arr1 {
{1, 2, 3},
{2, 5, 7},
{2, 5, 7}
};
arr1
xt::xarray<double> arr2{
{5, 6, 7}
};
// Below we perform a sum of the second line from `arr1` with `arr2`:
// +
xt::xarray<double> result = xt::view(arr1, 1) + arr2;
std::cout << result << std::endl;
// -
// Be aware about the use of views. C++ `rvalues` are temporary, so the combination with `xexpression`s could be dangerous:
// +
auto result_2 = xt::view(arr1, 1) + arr2;
std::cout << result_2 << std::endl;
// -
// This occurs because `result_2` is not a `xarray<T>` object! It's inferred as `xexpression` (which does not hold any value):
std::cout << typeid(result_2).name();
// So it doesn't work as one might expect, because evaluation of intermediate results is avoided. A proper way to fix it could be:
// +
xt::xarray<double> arr1_1 = xt::view(arr1, 1);
auto result_3 = arr1_1 + arr2;
std::cout << result_3 << std::endl;
// -
// Another interesting thing we can do is reshaping. Let's declare another array. Now, an array with integer entries (note the Template input):
xt::xarray<int> arr3 {
{1, 2, 3, 4, 5, 6, 7, 8, 9}
};
// `xt::array<T>` has a convenience method for reshaping, which is used as follows:
// +
arr3.reshape({3, 3});
std::cout << arr3;
// -
// It is very similar to `numpy.ndarray` counterpart. Note that the new shape must match the size of original array entries.
// Access index is pretty straightforward. Let's consider `arr1`:
std::cout << arr1(0, 1);
arr1
// Or, with an 1D array:
arr3
// +
xt::xarray<int> arr4 {
{1, 2, 3, 4}
};
std::cout << arr4(2);
// -
// Now, let's use an universal function (`xt::pow`) to demonstrate the broadcasting feature. Firstly, let's declare two arrays with different template inputs and sizes:
// +
xt::xarray<double> arr5 {
{1.0, 2.0, 3.0}
};
xt::xarray<unsigned int> arr6 {
{4, 5, 6, 7}
};
// -
// Now, we transform `arr6` in a column array:
arr6.reshape({4, 1});
arr6
// Note that reshaping is performed inline! What happens if we use `xt::pow` with these two arrays? Let's see:
// +
xt::xarray<double> result_4 = xt::pow(arr5, arr6);
std::cout << result_4;
// -
// Nice! However, be aware that the dimensions must be compatible. For example, if `arr6.reshape({1, 4})` was performed, the broadcasting wouldn't proceed.
// ## Expressions and lazy evaluation
// In order to understand the conceptions, let's declare 3 new arrays:
// +
xt::xarray<double> x {
{2, 2, 4}
};
xt::xarray<double> y {
{1, 0, 1}
};
xt::xarray<double> z {
{0.3, 0.5, 0.7}
};
// -
// ### Expressions
// An expression can be built by operating on the arrays, with the type inferred by `auto`, such as
auto f = x + y * xt::sin(z);
// You can use it to compose in a more complex expression:
auto f2 = y + 2 * xt::cos(f);
// The expression avoids evaluating intermediate results, which improves performance to nearly what a hand-written loop would achieve. If you want to evaluate it, just assign it to an `lvalue` variable (better said, to a container):
// +
xt::xarray<double> result_eval = y + 2 * xt::cos(f);
std::cout << result_eval;
// -
// ### Lazy evaluation
// This feature allows to compute the expression results only for the indices of interest. Let's consider now larger arrays:
// +
#include "xtensor/xrandom.hpp" // to use pseudo-random generator
// Note that this is an expression
auto random_basis = xt::random::rand<double>({100000, 1});
xt::xarray<double> a = random_basis;
xt::xarray<double> b = random_basis;
// -
// Note that `a` and `b` are large, with 100000 entries. If we want only a certain input, we can evaluate only at the right index. Consider that we have the following expression:
auto f_large = xt::cos(a) + xt::sin(b);
// If we only want two values, say, at 1200 and 2500, we only need to evaluate:
double first_res = f_large(1200);
// and
double second_res = f_large(2500);
// Only the above two values have been computed!
// ### Forcing evaluation
// If, for any reason, you need to evaluate an `xexpression`, you can use `xt::eval`. It can return an rvalue to a newly allocated container or a reference to a container. In order to avoid copies, you should use `auto&&`, the universal reference, on the LHS. For example:
xt::xarray<double> array_a = {1, 2, 3};
xt::xarray<double> array_b = {3, 2, 1};
// and we define the following xexpression (which is not evaluated):
auto calculation = array_a + array_b;
// Now we can get an `xarray` rvalue container as:
// +
auto&& container_1 = xt::eval(calculation);
std::cout << container_1;
// -
// ### Expression interface
// All `xexpression`s in `xtensor` provide the following convenience interfaces:
// #### Array storage (main) informations
// * `dimension()`: returns the number of dimensions;
//
// * `shape()`: returns the shape of the expression;
//
// * `size()`: returns the amount of entries;
// +
#include <vector>
using array_type = xt::xarray<double>;
using shape_type = array_type::shape_type;
shape_type shape = {3, 2, 4};
array_type arr7(shape);
arr7 = xt::random::rand<double>(shape);
// -
// Let's check the dimension:
std::cout << arr7.dimension();
// Now the shape (a little more tricky):
// +
const shape_type& s = arr7.shape();
auto res_check = s == shape;
// -
res_check
// Be aware that the method `shape()` does not return a readable shape format as in `numpy`. See below:
arr7.shape()
// which is a memory reference (not sure, I need to confirm it).
// Finally, `.size()` informs the amount of entries a `xarray` has:
arr7.size()
// #### Accessing
// Let's check all the `arr7` entries to ensure that everything is alright:
arr7
// If we want to look at the index $(x, y, z)$, where $x$, $y$, and $z$ denotes a block, the line within a block and the column within a block, respectively. For example, to access $(1, 1, 1)$, we can use the method `.at()`:
arr7.at(1, 1, 1)
// or, say, the entry $(0, 1, 3)$:
arr7.at(0, 1, 3)
// You can simply do:
arr7(0, 1, 3)
// which omitted the method. There is also the `operator[]`, which can be used in forms:
arr7[23]
// In this case, we use `arr[i]`, where `i` would be the index in the flattened array fashion. We can also define a multi-index as below:
// +
std::vector<size_t> idx = {0, 1, 3};
std::cout << arr7[idx];
// -
// Other forms can be used. Look at the [official documentation](https://xtensor.readthedocs.io/en/latest/expression.html) for further details.
// #### Iterators
// Under construction...
// ## Closing words
// Almost all `numpy` features can be performed by `xtensor` lib. An one-to-one equivalence can be checked [here](https://xtensor.readthedocs.io/en/latest/numpy.html).
| xtensor/notebooks/xtensor-basics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Initial Processing & EDA
#
# By: <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
#
# **6 CSV files are created from this notebook**:
#
# `df` $\rightarrow$ `files/output/organizations_merged.csv`
# - Organizations with Pledge 1% tag information
#
# `df_industry` $\rightarrow$ `files/output/organizations_by_category_group.csv`
# - Organizations' `uuid` with 46 indicator columns that correspond to their `category_groups_list`
#
# `p1_fund_rnds` $\rightarrow$ `files/output/p1_funding_rounds.csv`
# - Funding rounds with Pledge 1% tag information for the funded companies
#
# `p1_invest` $\rightarrow$ `files/output/p1_investments.csv`
# - Investments with Pledge 1% tag information for the funded companies
#
# `p1_invest_prtnr` $\rightarrow$ `files/output/p1_investment_partners.csv`
# - Partner investments with Pledge 1% tag information for the funded companies
#
# `p1_jobs` $\rightarrow$ `files/output/p1_jobs.csv`
# - Jobs with Pledge 1% tag information for the affiliated companies
# +
'''Importing basic data analysis packages'''
import numpy as np
import pandas as pd
import csv
import warnings
import os
import time
import math
warnings.filterwarnings('ignore')
'''Plotting packages'''
import matplotlib.pyplot as plt
# %matplotlib inline
import seaborn as sns
sns.set(font_scale=1.3)
'''Function: memory reduction of dataframe'''
def reduce_mem_usage(df, verbose=True):
    """Downcast each numeric column of df to the smallest dtype that fits its range.

    Integer columns are tried against int8/int16/int32/int64 in order; float
    columns against float16/float32/float64. Mutates df in place and returns
    it. When `verbose`, prints the memory saved.
    """
    numeric_dtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
    mem_before = df.memory_usage().sum() / 1024**2
    for col in df.columns:
        dtype = df[col].dtypes
        if dtype not in numeric_dtypes:
            continue
        lo, hi = df[col].min(), df[col].max()
        if str(dtype)[:3] == 'int':
            # Pick the first (narrowest) integer type whose limits strictly contain the range.
            for candidate in (np.int8, np.int16, np.int32, np.int64):
                limits = np.iinfo(candidate)
                if lo > limits.min and hi < limits.max:
                    df[col] = df[col].astype(candidate)
                    break
        else:
            if lo > np.finfo(np.float16).min and hi < np.finfo(np.float16).max:
                df[col] = df[col].astype(np.float16)
            elif lo > np.finfo(np.float32).min and hi < np.finfo(np.float32).max:
                df[col] = df[col].astype(np.float32)
            else:
                df[col] = df[col].astype(np.float64)
    mem_after = df.memory_usage().sum() / 1024**2
    if verbose:
        print('Mem. usage decreased to {:5.2f} Mb ({:.1f}% reduction)'.format(
            mem_after, 100 * (mem_before - mem_after) / mem_before))
    return df
# -
# # Load Data
#
# Use `tar -xvzf 20200908_bulk_export.tar.gz` to unzip Crunchbase export (for Windows)
#
# I saved Crunchbase CSVs under `/files/csv/` and placed them in my `.gitignore` file.
#
# I saved created CSVs under `/files/output/` and placed them in my `.gitignore` file.
#
# Check out summary of data from Crunchbase export <a href='https://data.crunchbase.com/docs/daily-csv-export'>here</a>.
# +
###########################
# Pledge 1% Company UUIDs #
###########################
print('='*100)
p1 = pd.read_csv('files/p1.csv')
print('PLEDGE 1%/p1 cols: {}\nSHAPE: {}'.format(p1.columns.to_list(), p1.shape))
p1 = reduce_mem_usage(p1)
#################
# Organizations #
#################
print('='*100)
org = pd.read_csv('files/csv/organizations.csv')
print('ORGANIZATION/org cols: {}\nSHAPE: {}'.format(org.columns.to_list(), org.shape))
org = reduce_mem_usage(org)
#############
# Financial #
#############
print('='*100)
fund_rnds = pd.read_csv('files/csv/funding_rounds.csv')
print('FUNDING ROUNDS/fund_rnds cols: {}\nSHAPE: {}'.format(fund_rnds.columns.to_list(), fund_rnds.shape))
fund_rnds = reduce_mem_usage(fund_rnds)
invest = pd.read_csv('files/csv/investments.csv')
print('\nINVESTMENTS/invest cols: {}\nSHAPE: {}'.format(invest.columns.to_list(), invest.shape))
invest = reduce_mem_usage(invest)
invest_prtnr = pd.read_csv('files/csv/investment_partners.csv')
print('\nPARTNER INVESTMENTS/invest_prtnr cols: {}\nSHAPE: {}'.format(invest_prtnr.columns.to_list(), invest_prtnr.shape))
invest_prtnr = reduce_mem_usage(invest_prtnr)
########
# Jobs #
########
print('='*100)
jobs = pd.read_csv('files/csv/jobs.csv')
print('JOBS/jobs cols: {}\nSHAPE: {}'.format(jobs.columns.to_list(), jobs.shape))
jobs = reduce_mem_usage(jobs)
print('='*100)
# -
# ### Check out heads of relevant dataframes, remove unusable columns
#
# #### Pledge 1% -- `p1`
#
# ['uuid', 'p1_tag', 'p1_date']
#
# #### Organizations -- `org`
# ['uuid', 'name', 'type', 'rank', 'roles', 'country_code', 'region', 'status', 'category_groups_list', 'total_funding_usd', 'founded_on', 'closed_on', 'employee_count', 'primary_role']
#
# #### Funding Rounds -- `fund_rnds`
#
# ['uuid', 'investment_type', 'announced_on', 'raised_amount_usd', 'post_money_valuation_usd', 'investor_count','lead_investor_uuids','lead_investor_count', 'org_uuid', 'org_name']
#
# #### Investments -- `invest `
#
# ['uuid', 'funding_round_uuid', 'investor_uuid', 'investor_name', 'investor_type', 'is_lead_investor']
#
# #### Partner Investments -- `invest_prtnr`
#
# ['uuid', 'funding_round_uuid', 'investor_uuid', 'investor_name', 'partner_uuid', 'partner_name'
#
# #### Jobs -- `jobs`
#
# ['uuid', 'person_uuid', 'person_name', 'org_uuid', 'org_name', 'started_on', 'ended_on', 'is_current', 'title', 'job_type']
# +
# Update dataframe columns
org = org[['uuid', 'name', 'type', 'rank', 'roles', 'country_code', 'region', 'status',
'category_groups_list', 'total_funding_usd', 'founded_on', 'closed_on',
'employee_count', 'primary_role']]
fund_rnds['lead_investor_count']=fund_rnds['lead_investor_uuids'].str.split(',').apply(lambda x: float(len(x)) if ~np.any(pd.isnull(x)) else 0)
fund_rnds = fund_rnds[['uuid', 'investment_type', 'announced_on', 'raised_amount_usd', 'post_money_valuation_usd', 'investor_count','lead_investor_uuids', 'lead_investor_count', 'org_uuid', 'org_name']]
invest = invest[['uuid', 'funding_round_uuid', 'investor_uuid', 'investor_name', 'investor_type', 'is_lead_investor']]
invest_prtnr = invest_prtnr[['uuid', 'funding_round_uuid', 'investor_uuid', 'investor_name', 'partner_uuid', 'partner_name']]
jobs = jobs[['uuid', 'person_uuid', 'person_name', 'org_uuid', 'org_name', 'started_on', 'ended_on', 'is_current', 'title', 'job_type']]
p1.head(2)
# -
org.head(2)
fund_rnds.head(2)
invest.head(2)
invest_prtnr.head(2)
jobs.head(2)
# # Start data processing for merged dataframe (`df` = `org`+`p1`)
# +
# Merge p1 and org dataframes on the organization uuid
df = pd.merge(org, p1, how='outer', on='uuid')
# Convert Boolean to binary
df['p1_tag'] = df['p1_tag'].apply(lambda x: 1 if x == True else 0)
p1['p1_tag'] = 1
# Convert employee_count 'unknown' to np.nan to get accurate missing value count
df['employee_count'] = df['employee_count'].apply(lambda x: np.NaN if x == 'unknown' else x)
##############
# Timestamps #
##############
# Convert to datetime objects
df['p1_date'] = pd.to_datetime(df['p1_date'])
p1['p1_date'] = pd.to_datetime(p1['p1_date'])
# Get OutOfBoundsDatetime error if do not coerce for CB native timestamp columns
df['founded_on'] = pd.to_datetime(df['founded_on'], errors='coerce')
df['closed_on'] = pd.to_datetime(df['closed_on'], errors='coerce')
jobs['started_on'] = pd.to_datetime(jobs['started_on'], errors='coerce')
jobs['ended_on'] = pd.to_datetime(jobs['ended_on'], errors='coerce')
# Reduce storage for numerical features
df = reduce_mem_usage(df, verbose=False)
# Create new pledge1 dataframe that sorts by chronological order that the company took the pledge
pledge1 = df[df['p1_tag'] == 1].sort_values('p1_date')
print()
print('OUTPUT df=p1+org TO CSV `files/output/organizations_merged.csv`\n')
df.to_csv('files/output/organizations_merged.csv', )
df_num_missing = df.isna().sum()/len(df)
output = df_num_missing.to_string(float_format=lambda x: "{:.2f}%".format(x*100))
print('MISSING CB VALUES BY PERCENTAGE\n')
print(output)
print()
# Category Group list
# Merge category_list column into one string, convert to unique list
groups = list(set(df.category_groups_list.str.cat(sep=',').split(',')))
print('Number of Unique Category Groups:',len(groups))
print()
# Missing Percentages for rank, school, category groups, and closed
print('CB data with a SCHOOL value: {:.2f}%'.format(df[df['primary_role']=='school'].shape[0]/df.shape[0]*100))
print('CB data with a CLOSED value: {:.2f}%'.format(df[df['status']=='closed'].shape[0]/df.shape[0]*100))
# -
# ### Note on Crunchbase Missing Data
#
# Are there any features below we would like to impute?
# - `total_funding_usd` (88.25%)
# - `employee_count` (27.58%)
# - `rank` (4.13%)
#
# ### Note on Feature Engineering
#
# `employee_count` --> Ordinal
# - 1-10
# - 11-50
# - 51-100
# - 101-250
# - 251-500
# - 501-1000
# - 1001-5000
# - 5001-10000
# - 10000+
#
# `primary_role` --> Nominal
# - company
# - investor
# - school
#
# `category_groups_list` --> 46 Indicator Columns
#
# ### Note on Data Removal
#
# #### Excluding companies:
#
# #### Without a `rank` response. A possible proxy for "prominence"/size/stage of a company?
#
# <a href="https://about.crunchbase.com/blog/crunchbase-rank-trend-score/">More on Rank Score</a>
#
# Crunchbase Rank is a dynamic ranking for all entities (i.e., Companies, Organizations, and Schools) in the Crunchbase dataset. It measures the prominence of an entity.
#
# The Crunchbase Rank algorithm takes many signals into account including the number of connections a profile has, the level of community engagement, funding events, news articles, and acquisitions.
#
# A company’s Rank is fluid and subject to rising and decaying over time with time-sensitive events. Events such as product launches, funding events, leadership changes, and news affect a company’s Crunchbase Rank.
#
# #### With a school affiliation (`primary_role`)
#
# There are 25 Pledge 1% companies with 'school' as its `primary_role`, and 13,679 (1.21%) in Crunchbase.
#
# #### Without a `category_groups_list` response
#
# 8.72% are missing in the Crunchbase data.
#
# #### With a closed `status`
#
# There are 124 Pledge 1% companies that are 'closed', and 36,777 (3.25%) in Crunchbase.
#
# #### Other questions to explore:
# - Are there other boundaries based on Pledge 1% EDA?
# - How to sample from list of non-P1 companies? We need to make sure there is a proportional mix of positive/negative classifications in final train/dev/test dataset.
#
# # Pledge 1% EDA Questions
#
# 1. Timeline of pledges?
#
# 2. How soon after founding date was pledge taken?
#
# 3. How do the Pledge 1% and Crunchbase companies vary by employee count?
#
# 4. How do the Pledge 1% and Crunchbase companies vary by industry categories?
#
# 5. How do the Pledge 1% and Crunchbase companies vary by country?
#
# 6. How large is the Pledge 1% network?
# #### `pledge1` dataframe
pledge1.head(2)
# #### `df` dataframe
df.head(2)
# ### Review Pledge 1% Missing Org Data
# Fraction of missing values per column (isna().mean() == isna().sum()/len).
missing_fraction = pledge1.isna().mean()
print('MISSING PLEDGE 1% VALUES BY PERCENTAGE\n')
print(missing_fraction.to_string(float_format=lambda frac: "{:.2f}%".format(frac * 100)))
# ### 1) Timeline of pledges?
# +
# Create cumulative total column (temporary helper; deleted after plotting)
pledge1['cumsum'] = pledge1.p1_tag.cumsum()
_, ax = plt.subplots(figsize=(12, 6))
# Lineplot: running total of pledges over time
sns.lineplot(x='p1_date',y='cumsum',data=pledge1)
# Histogram: pledges per date bin, each bar annotated with its count
counts,bins,_ = plt.hist(pledge1.p1_date,bins=30)
for c,b in zip(counts, bins):
    plt.gca().text(b + 0.15, c, str(int(c)), fontsize='small') # +0.15 to center text
# Labels
ax.set_xlim(pd.Timestamp('2014-10-01'),pd.Timestamp('2020-11-01'))
ax.set_title('Cumulative sum of pledge companies over time')
ax.set_xlabel('Pledge taken date')
ax.set_ylabel('Total')
plt.show()
# Drop the helper column so it does not linger on pledge1
del pledge1['cumsum']
# -
# ### 2) How soon after founding date was pledge taken? Create `diff_date` column
#
# Found negative values for 178 companies.
# +
# Years between pledge date and founding date. Negative values mean the
# recorded pledge date precedes founded_on (likely a data-quality issue).
pledge1['diff_date'] = ((pledge1.p1_date - pledge1.founded_on).dt.days)/365
print('SUMMARY STATS\nNumber of Years Difference Between Pledge Taken Date and Founded Date\n')
print(pledge1.diff_date.describe(),'\n')
print('Number of companies with a negative difference:',pledge1[pledge1['diff_date'] < 0].shape[0])
print('Number of companies with a difference greater than 50:',pledge1[pledge1['diff_date'] > 50].shape[0],'\n')
# More established, older P1 companies (pledged more than a century after founding)
print('The more established, older Pledge 1% Companies:\n')
print(pledge1[pledge1['diff_date'] > 100][['name', 'diff_date']].sort_values('diff_date', ascending=False).head(10).to_string(index=False))
# +
# Company age at pledge time vs. pledge date; dashed reference lines mark
# the outlier thresholds discussed above and the overall mean.
_, ax = plt.subplots(figsize=(12, 6))
# Scatterplot
sns.scatterplot(x='p1_date', y='diff_date', data=pledge1)
# Labels
ax.set_xlim(pd.Timestamp('2014-10-01'),pd.Timestamp('2020-11-01'))
ax.set_title('The number years later that pledge was taken')
ax.set_xlabel('Pledge taken date')
ax.set_ylabel('Number of years')
# Yellow line for years > 50
ax.axhline(y=50, linewidth=1, color='y', ls='--')
# Blue line for mean
ax.axhline(y=pledge1.diff_date.mean(), linewidth=1, color='b', ls='--')
# Red line for years < 0
ax.axhline(y=0, linewidth=1, color='r', ls='--')
# Plot
plt.show()
# -
# #### Same plot but for 178 negative values for `diff_date`
# +
# Zoom in on the problem rows: pledges recorded before the founding date.
_, ax = plt.subplots(figsize=(12, 6))
# Scatterplot
sns.scatterplot(x='p1_date', y='diff_date', data=pledge1[pledge1['diff_date']<0])
# Labels
ax.set_xlim(pd.Timestamp('2014-10-01'),pd.Timestamp('2020-11-01'))
ax.set_title('The number years later that pledge was taken')
ax.set_xlabel('Pledge taken date')
ax.set_ylabel('Number of years')
# Red line for years < 0
ax.axhline(y=0, linewidth=1, color='r', ls='--')
# Plot
plt.show()
# -
# ### 3) How do the Pledge 1% and Crunchbase organizations vary by `employee_count`?
# +
# Exclude rows that have NaN employee_count.
# Use .copy() so the helper `count` column below is added to an independent
# frame rather than a boolean-mask view of `df`/`pledge1` (the original code
# triggered SettingWithCopyWarning, and under pandas copy-on-write the write
# would not be guaranteed to stick).
pledge1_employee = pledge1[~pledge1['employee_count'].isna()].copy()
df_employee = df[~df['employee_count'].isna()].copy()
# Create count column to sum over
df_employee['count'] = 1
# Groupby: pledges per bucket (P1) and company count per bucket (Crunchbase)
pledge1_employee = pledge1_employee.groupby(['employee_count'])['p1_tag'].sum().sort_values(ascending=False).reset_index()
df_employee = df_employee.groupby(['employee_count'])['count'].sum().sort_values(ascending=False).reset_index()
# Barplots: side-by-side comparison, shared y axis
_, ax = plt.subplots(nrows=1, ncols=2, figsize=(12, 12), sharey=True)
sns.barplot(x='p1_tag', y='employee_count', data=pledge1_employee, orient='h', ax=ax[0])
sns.barplot(x='count', y='employee_count', data=df_employee, orient='h', ax=ax[1])
# Labels
ax[0].set_title('Pledge Companies by Employee Count\n')
ax[0].xaxis.set_ticks_position('top')
ax[0].set_xlabel('Count')
ax[0].set_ylabel('Employee Count')
ax[1].set_title('Crunchbase Companies by Employee Count\n')
ax[1].xaxis.set_ticks_position('top')
ax[1].set_xlabel('Count')
ax[1].set_ylabel('')
# Plot
plt.show()
del df_employee, pledge1_employee
# -
# ### 4) How do the Pledge 1% and Crunchbase organizations vary by `category_groups_list`?
#
# Create 46 industry indicator columns from `category_groups_list`
#
# #### Pledge 1% data: `pledge1_industry` dataframe
# +
# Build a 0/1 indicator matrix: one row per P1 org, one column per category group.
# Start timer
start=time.time()
# Create a new dataframe with a column for each unique category group, fill those with zeros
pledge1_industry = pd.concat([pledge1[~pledge1['category_groups_list'].isna()][['uuid']],pd.DataFrame(columns=groups)]).fillna(0)
# Clean up, reset index
pledge1_industry.reset_index(inplace=True, drop=True)
# Combine all category_groups_list values into a list
Categories = pledge1['category_groups_list'][~pledge1['category_groups_list'].isna()].to_list()
# Populate the matrix
for i in range(pledge1_industry.shape[0]): # By each organization
    row_category_list = Categories[i].split(',') # For each category group
    for industry in row_category_list:
        # Single .loc write instead of the previous chained indexing
        # (pledge1_industry[industry][i] = 1), which raises
        # SettingWithCopyWarning and is not guaranteed to write through
        # under pandas copy-on-write.
        pledge1_industry.loc[i, industry] = 1
# Create total row & column
pledge1_industry.loc['total',:] = pledge1_industry.sum(axis = 0) # row
pledge1_industry.loc[:,'total'] = pledge1_industry.sum(axis = 1) # column
# Replace concatenated string field at the bottom of the dataframe
pledge1_industry.iloc[pledge1_industry.shape[0]-1,0] = 'total'
# Sort columns of a dataframe in descending order, by Total row
rslt_df = pledge1_industry[pledge1_industry.columns.to_list()[1:-1]].sort_values(by ='total', axis=1, ascending=False)
# Update dataframe with sorted columns, keeping uuid and total columns in place
pledge1_industry = pd.concat([pledge1_industry[['uuid']],rslt_df,pledge1_industry[['total']]], axis=1)
# End timer
print('This took {:.4f} seconds to run.'.format(time.time()-start))
# Create plotting dataframe
# Pull totals from bottom row
pledge1_industry_totals = pledge1_industry.iloc[pledge1_industry.shape[0]-1,:].to_list()[1:-1]
# Create sorted columns
groups_sorted = pledge1_industry.columns[1:-1].to_list()
# Check it out
pledge1_industry.head(2)
# -
# #### Crunchbase data:`df_industry` dataframe
# +
###########################################################################################
# STEP 1: Create empty matrix, columns in same order as the Pledge 1% industry dataframe. #
###########################################################################################
# Start timer
start=time.time()
# Create a column for each unique category group, fill with zeros
df_industry = pd.concat([df[~df['category_groups_list'].isna()][['uuid']],pd.DataFrame(columns=groups_sorted)]).fillna(0)
# Clean up, reset index
df_industry.reset_index(inplace=True, drop=True)
# End timer
print('STEP 1 takes {:.4f} seconds to run.\n'.format(time.time()-start))
################################################################################################
# STEP 2: The dataframe is too large; loop over segments of 10K rows to populate empty matrix. #
################################################################################################
# Start timer
start=time.time()
# For print statements
counter = 1
# For looping
iteration = 0
loop = 10000
# Empty list to append 10K dataframes into
frames = []
# Combine all category_groups_list values into a list
Categories = df['category_groups_list'][~df['category_groups_list'].isna()].to_list()
# Start loop
print('STEP 2 Counting to',math.ceil(df['category_groups_list'][~df['category_groups_list'].isna()].shape[0]/loop),'--', end=' ')
# Continue until you've iterated through entire dataframe
while iteration < df['category_groups_list'][~df['category_groups_list'].isna()].shape[0]: # ~1M in size
    # Create temporary sub-list of categories
    Categories_temp = Categories[iteration:iteration+loop]
    # Copy the slice so the writes below land on an independent frame.
    # The original mutated an .iloc view through chained assignment
    # (df_temp[industry][iteration+i] = 1), which raises
    # SettingWithCopyWarning and can silently fail to write.
    df_temp = df_industry.iloc[iteration:iteration+loop,:].copy()
    # Populate the matrix
    for i in range(df_temp.shape[0]): # By each organization
        row_category_list = Categories_temp[i].split(',') # For each category group
        for industry in row_category_list:
            df_temp.loc[iteration+i, industry] = 1 # Update to 1 (index labels survive the slice)
    # Output progress of loop
    print(counter,end=' ')
    # Update counter and iterations
    counter += 1
    iteration += loop
    # Append new dataframe to list
    frames.append(df_temp)
# End timer
print('STEP 2 takes {:.4f} seconds to run.\n'.format(time.time()-start))
# Combine all dataframes together.
df_industry = pd.concat(frames)
#####################################
# STEP 3: Add total column and row. #
#####################################
# CREATE TOTAL COLUMN
print('STEP 3 / ROW Counting to',math.ceil(df_industry.shape[0]/20000),'--',end=' ')
# Start timer
start=time.time()
# Sum the indicator columns of every row. The 'total' column does not exist
# yet, so the slice must be 1: (everything after uuid); the previous 1:-1
# slice silently dropped the last category column from every row total.
totals_column = np.zeros(df_industry.shape[0])
for i in range(df_industry.shape[0]):
    if i % 20000 == 0:
        print(int((i/20000)+1), end=' ')  # progress tick every 20K rows
    totals_column[i] = df_industry.iloc[i,1:].values.sum()
df_industry['total'] = totals_column.astype('int64')
# End timer
print('This takes {:.4f} seconds to run.\n'.format(time.time()-start))
# CREATE TOTAL ROW
print('STEP 3 / COLUMN --', end=' ')
# Start timer
start=time.time()
# Column sums for every column except uuid (includes the new 'total' column)
df_industry_totals = np.zeros(len(df_industry.columns[1:]))
for i in range(len(df_industry.columns)-1):
    df_industry_totals[i] = df_industry.iloc[:,i+1].values.sum()
# Write the totals row by explicit column labels; the previous
# df_industry.loc['total',1:] relied on a deprecated positional fallback for
# integer slices on object (string) column indexes.
df_industry.loc['total', df_industry.columns[1:]] = df_industry_totals
# End timer
print('This takes {:.4f} seconds to run.\n'.format(time.time()-start))
# Replace concatenated string field at the bottom of the dataframe
df_industry.iloc[df_industry.shape[0]-1,0] = 'total'
# Save to CSV
print('OUTPUT df_industry TO CSV `files/output/organizations_by_category_group.csv`')
df_industry.to_csv('files/output/organizations_by_category_group.csv', index=False)
# -
# ### ((( START HERE IF UPLOADING FROM CSV ))) -- `df_industry` dataframe
# Reload the persisted indicator matrix so the notebook can resume here
# without re-running the expensive STEP 1-3 build above.
df_industry = pd.read_csv('files/output/organizations_by_category_group.csv')
print('SHAPE:',df_industry.shape)
df_industry = reduce_mem_usage(df_industry, verbose=False)
df_industry.tail(2)
# #### Spot check that indicator columns match the original dataframe values for `category_groups_list`
for test_index in [9,50,65,70,71,132,136,138]:
    # Categories as listed in the raw column for this row
    IN_DATA = df['category_groups_list'][~df['category_groups_list'].isna()].to_list()[test_index].split(',')
    # Categories whose indicator column is 1 for this row
    IN_MATRIX = df_industry.iloc[test_index,:-1][df_industry.iloc[test_index,:]==1].index.to_list()
    if 'total' in IN_MATRIX:
        IN_MATRIX.remove('total')
    IN_DATA.sort()
    IN_MATRIX.sort()
    if IN_DATA == IN_MATRIX:
        print('SUCCESS! INDEX {} MATCHES'.format(test_index))
    else:
        # Previously a mismatch produced no output at all; report it so a
        # broken indicator matrix cannot pass the spot check unnoticed.
        print('FAILURE! INDEX {} MISMATCH: {} vs {}'.format(test_index, IN_DATA, IN_MATRIX))
del IN_DATA, IN_MATRIX
# #### Plot comparisons -- `pledge1_industry_plt` & `df_industry_plt` dataframes
# +
# Create plotting dataframes
# NOTE(review): these totals are hardcoded, presumably copied from a prior
# run of the `pledge1_industry` cell above -- they must be regenerated
# whenever the upstream Pledge 1% data changes. TODO confirm provenance.
pledge1_industry_totals = [3546, 1868, 1525, 1077, 1005, 949, 922, 920, 832, 784, 767,
                           759, 724, 591, 581, 485, 446, 417, 389, 369, 343, 317,
                           265, 260, 253, 245, 226, 203, 195, 188, 176, 175, 174,
                           170, 159, 136, 133, 129, 125, 115, 111, 107, 90, 81, 79, 75]
# Crunchbase totals come from the 'total' row of the loaded indicator matrix
df_industry_totals = df_industry.iloc[-1,:].to_list()[1:-1]
groups_sorted = df_industry.columns.to_list()[1:-1]
pledge1_industry_plt = pd.DataFrame.from_dict({'Category':groups_sorted, 'Count':pledge1_industry_totals})
df_industry_plt = pd.DataFrame.from_dict({'Category':groups_sorted, 'Count':df_industry_totals})
# Barplots: side-by-side category comparison, shared y axis
_, ax = plt.subplots(nrows=1, ncols=2, figsize=(12, 16), sharey=True)
sns.barplot(y='Category', x='Count', data=pledge1_industry_plt, orient='h', ax=ax[0])
sns.barplot(y='Category', x='Count', data=df_industry_plt, orient='h', ax=ax[1])
# Labels
ax[0].set_title('Pledge Companies by Category\n')
ax[0].xaxis.set_ticks_position('top')
ax[0].set_ylabel('')
ax[1].set_title('Crunchbase Companies by Category\n')
ax[1].xaxis.set_ticks_position('top')
ax[1].set_ylabel('')
plt.show()
# Free the large intermediates; they are not used again below
del pledge1_industry_plt, df_industry_plt, df_industry, pledge1_industry
# -
# ### 5) How do the Pledge 1% and Crunchbase organizations vary by `country_code`?
#
# #### Plot comparisons -- `pledge1_countries` & `df_countries` dataframes
# +
# Create plotting dataframes of top 25 countries
pledge1_countries = pledge1[~pledge1['country_code'].isna()]
pledge1_countries = pledge1_countries.groupby(['country_code'])['p1_tag'].sum().sort_values(ascending=False).reset_index()
pledge1_countries = pledge1_countries[pledge1_countries['p1_tag'] >= 25]
top_25_p1_countries = pledge1_countries['country_code'].to_list()
# .copy() so the helper `count` column is added to an independent frame, not
# a boolean-mask view of `df` (the original triggered SettingWithCopyWarning
# and could lose the write under pandas copy-on-write).
df_countries = df[~df['country_code'].isna()].copy()
df_countries['count'] = 1
df_countries = df_countries.groupby(['country_code'])['count'].sum().sort_values(ascending=False).reset_index()
# Align Crunchbase counts to the same country order as the Pledge 1% list
df_countries = df_countries.set_index('country_code').reindex(top_25_p1_countries).reset_index()
# Barplots
_, ax = plt.subplots(nrows=1, ncols=2, figsize=(12, 12), sharey=True)
sns.barplot(x='p1_tag', y='country_code', data=pledge1_countries, orient='h', ax=ax[0])
sns.barplot(x='count', y='country_code', data=df_countries, orient='h', ax=ax[1])
# Labels
ax[0].set_title('Pledge Companies by Country\n')
ax[0].xaxis.set_ticks_position('top')
ax[0].set_ylabel('Country Code')
ax[0].set_xlabel('Count')
ax[1].set_title('Crunchbase Companies by Country\n')
ax[1].xaxis.set_ticks_position('top')
ax[1].set_xlabel('Count')
ax[1].set_ylabel('')
plt.show()
del pledge1_countries, df_countries, pledge1
# -
# ### 6) How large is the Pledge 1% network?
#
# - Investors in Pledge 1% organizations
# - Partner investors in Pledge 1% organizations
# - Current job affiliations with Pledge 1% organizations
#
# #### Create `p1_fund_rnds`, `p1_invest`, `p1_invest_prtnr`, `p1_jobs` dataframes
# +
print('STEP 1: Outer merge Pledge 1% and funding rounds to get Pledge 1% funding rounds dataframe')
# Merge funding rounds and p1 dataframes on the organization uuid.
# org_uuid is a temporary join key copied from p1.uuid and dropped again below.
p1['org_uuid'] = p1['uuid']
p1_fund_rnds = pd.merge(fund_rnds, p1, how='outer', on='org_uuid')
# uuid_x is the funding round's own uuid; uuid_y duplicates p1.uuid, so drop it
p1_fund_rnds = p1_fund_rnds.drop(['uuid_y'],axis=1).rename(columns={'uuid_x':'funding_round_uuid'})
p1=p1.drop(['org_uuid'],axis=1)
# The outer merge keeps P1 orgs with no rounds; drop those (no round uuid)
p1_fund_rnds = p1_fund_rnds[~p1_fund_rnds['funding_round_uuid'].isna()].reset_index(drop=True)
p1_fund_rnds.to_csv('files/output/p1_funding_rounds.csv', index=False)
print('OUTPUT p1_fund_rnds TO CSV `files/output/p1_funding_rounds.csv`\n')
print('STEP 2: Outer merge investments and Pledge 1% funding rounds to get Pledge 1% investments dataframe')
# Merge invest and p1_fund_rnds dataframes on the funding round uuid
p1_invest = pd.merge(invest, p1_fund_rnds, how='outer', on='funding_round_uuid')
p1_invest = p1_invest.rename(columns={'uuid':'investment_uuid'})
# Keep only rows that correspond to an actual investment record
p1_invest = p1_invest[~p1_invest['investment_uuid'].isna()].reset_index(drop=True)
p1_invest.to_csv('files/output/p1_investments.csv', index=False)
print('OUTPUT p1_invest TO CSV `files/output/p1_investments.csv`\n')
print('STEP 3: Outer merge partner investments and Pledge 1% funding rounds to get Pledge 1% partner investments dataframe')
# Merge invest_prtnr and p1_fund_rnds dataframes on the funding round uuid
p1_invest_prtnr = pd.merge(invest_prtnr, p1_fund_rnds, how='outer', on='funding_round_uuid')
p1_invest_prtnr = p1_invest_prtnr.rename(columns={'uuid':'investment_uuid'})
p1_invest_prtnr = p1_invest_prtnr[~p1_invest_prtnr['investment_uuid'].isna()].reset_index(drop=True)
p1_invest_prtnr.to_csv('files/output/p1_investments_partner.csv', index=False)
print('OUTPUT p1_invest_prtnr TO CSV `files/output/p1_investments_partner.csv`\n')
print('STEP 4: Outer merge Pledge 1% and jobs to get Pledge 1% jobs dataframe')
# Merge jobs and p1 dataframes on the organization uuid (same temporary-key trick)
p1['org_uuid'] = p1['uuid']
p1_jobs = pd.merge(jobs,p1,how='outer',on='org_uuid')
p1=p1.drop(['org_uuid'],axis=1)
p1_jobs = p1_jobs.drop(['uuid_y'],axis=1).rename(columns={'uuid_x':'job_uuid'})
p1_jobs = p1_jobs[~p1_jobs['job_uuid'].isna()].reset_index(drop=True)
p1_jobs.to_csv('files/output/p1_jobs.csv', index=False)
print('OUTPUT p1_jobs TO CSV `files/output/p1_jobs.csv`')
# p1_fund_rnds was only needed as an intermediate join table
del p1_fund_rnds
# -
# ### Print out comparisons between size of Crunchbase and Pledge 1% networks
# +
# Import the merged Pledge 1% dataframes persisted earlier, shrinking dtypes
# on the way in.
df = reduce_mem_usage(pd.read_csv('files/output/organizations_merged.csv'), verbose=False)
p1_invest = reduce_mem_usage(pd.read_csv('files/output/p1_investments.csv'), verbose=False)
p1_invest_prtnr = reduce_mem_usage(pd.read_csv('files/output/p1_investments_partner.csv'), verbose=False)
p1_jobs = reduce_mem_usage(pd.read_csv('files/output/p1_jobs.csv'), verbose=False)
# Coerce every date column back to datetime (CSV round-trip loses the dtype;
# unparseable values become NaT).
date_columns = (
    (df, ['p1_date', 'founded_on', 'closed_on']),
    (p1_invest, ['p1_date', 'announced_on']),
    (p1_invest_prtnr, ['p1_date', 'announced_on']),
    (p1_jobs, ['p1_date', 'started_on', 'ended_on']),
)
for frame, columns in date_columns:
    for column in columns:
        frame[column] = pd.to_datetime(frame[column], errors='coerce')
from datetime import datetime
def size_of_network(date, df, invest, invest_prtnr, jobs):
    """Print a snapshot of the Crunchbase vs. Pledge 1% network as of `date`.

    Reports counts of companies, investors, investments and people that were
    "active" on the snapshot date. Output format is unchanged from the
    original inline version; the repeated filter expressions are hoisted
    into named masks so each rule is defined exactly once.

    Parameters
    ----------
    date : str or datetime-like -- snapshot date, coerced via pd.Timestamp.
    df : organizations DataFrame (founded_on/closed_on/p1_date as datetime).
    invest : investments DataFrame.
    invest_prtnr : partner-investments DataFrame.
    jobs : jobs DataFrame.
    """
    date = pd.Timestamp(date)
    # An organization is alive at `date` if founded on/before it and either
    # closed afterwards or never closed (NaT closed_on).
    org_alive = (df['founded_on'] <= date) & ((df['closed_on'] >= date) | (pd.isnull(df['closed_on'])))
    org_pledged = df['p1_date'] <= date
    is_company = df['primary_role'] == 'company'
    is_investor = df['primary_role'] == 'investor'
    # Investment masks
    inv_done = invest['announced_on'] <= date
    inv_org = invest['investor_type'] == 'organization'
    inv_p1 = invest['p1_tag'] == 1
    prt_done = invest_prtnr['announced_on'] <= date
    prt_p1 = invest_prtnr['p1_tag'] == 1
    # A job is active at `date` if started on/before it and not yet ended
    job_active = (jobs['started_on'] <= date) & ((jobs['ended_on'] >= date) | (pd.isnull(jobs['ended_on'])))
    job_p1 = (jobs['p1_tag'] == 1) & (jobs['p1_date'] <= date)

    def n_unique(series, mask):
        # len(unique()) matches the original semantics (NaN counts once).
        return len(series[mask].unique())

    print('\nAS OF {}:\n'.format(date.strftime('%B %d, %Y').upper()))
    print('='*20)
    print('NUMBER OF COMPANIES')
    print('-'*20)
    print('\nCrunchbase: {:,}'.format(n_unique(df['uuid'], org_alive & is_company)))
    print('Pledge 1%: {:,}\n'.format(n_unique(df['uuid'], org_alive & is_company & org_pledged)))
    print('='*20)
    print('NUMBER OF INVESTORS')
    print('-'*20)
    print('\nCrunchbase: {:,}'.format(n_unique(df['uuid'], org_alive & is_investor)))
    print('Pledge 1%: {:,}\n'.format(n_unique(df['uuid'], org_alive & is_investor & org_pledged)))
    print('='*20)
    print('NUMBER OF INVESTMENTS')
    print('-'*20)
    print('\nIN ALL OF CRUNCHBASE')
    print('Company Investments: {:,} ({:,} Unique Investors)'.format(
        n_unique(invest['investment_uuid'], inv_done & inv_org),
        n_unique(invest['investor_uuid'], inv_done & inv_org)))
    print('Partner Investments: {:,} ({:,} Unique investors)'.format(
        n_unique(invest_prtnr['investment_uuid'], prt_done),
        n_unique(invest_prtnr['partner_uuid'], prt_done)))
    print('\nIN PLEDGE 1% ORGANIZATIONS')
    print('Company Investments: {:,} ({:,} Unique Investors)'.format(
        n_unique(invest['investment_uuid'], inv_done & inv_org & inv_p1),
        n_unique(invest['investor_uuid'], inv_done & inv_org & inv_p1)))
    print('Partner Investments: {:,} ({:,} Unique Investors)\n'.format(
        n_unique(invest_prtnr['investment_uuid'], prt_done & prt_p1),
        n_unique(invest_prtnr['partner_uuid'], prt_done & prt_p1)))
    print('='*20)
    print('NUMBER OF PEOPLE')
    print('-'*20)
    print('\nCRUNCHBASE')
    print('Executives: {:,}'.format(n_unique(jobs['person_uuid'], (jobs['job_type'] == 'executive') & job_active)))
    print('Board Members: {:,}'.format(n_unique(jobs['person_uuid'], (jobs['job_type'] == 'board_member') & job_active)))
    print('Advisors: {:,}'.format(n_unique(jobs['person_uuid'], jobs['job_type'].isin(['advisor','board_observer']) & job_active)))
    print('\nPLEDGE 1%')
    print('Executives: {:,}'.format(n_unique(jobs['person_uuid'], (jobs['job_type'] == 'executive') & job_active & job_p1)))
    print('Board Members: {:,}'.format(n_unique(jobs['person_uuid'], (jobs['job_type'] == 'board_member') & job_active & job_p1)))
    print('Advisors: {:,}'.format(n_unique(jobs['person_uuid'], jobs['job_type'].isin(['advisor','board_observer']) & job_active & job_p1)))
# -
# Network snapshots: early in the Pledge 1% program vs. five years later
size_of_network('2015-09-08', df, p1_invest, p1_invest_prtnr, p1_jobs)
size_of_network('2020-09-08', df, p1_invest, p1_invest_prtnr, p1_jobs)
| 1_5_EDA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import matplotlib.pyplot as plt
import seaborn as sns
import pandas_profiling as pdp
import pandas as pd
import numpy as np
from scripts import project_functions as pf
# Load the raw music dataset and run the project's cleaning pipeline.
# (pandas_profiling appears unused in this excerpt -- TODO confirm.)
df=pf.load_and_process("../data/raw/tcc_ceds_music.csv")
# -
# **Research Questions**
#
# ---
# 1. How has the prevalence of different genres changed over time?
# 2. Lyrical themes of genres over the decades
# - What is the most violent genre for each decade?
# - What is the most romantic genre for each decade?
# - What is the most depressing/sad genre and how has this changed over time?
# 3. How has the instrumental sound of music in general changed over time?
#
#
#
# # Research Question 1: How has the prevalence of different genres changed over time?
# ---
# Genre counts per decade: one bar group per decade, one colour per genre
sns.countplot(data=df,x="Decade",hue="genre",palette = "colorblind")
# ### Observations
# ---
# - pop is most significant genre for every decade except 1950s
# - in 1950s jazz was most popular
# - 1960s sees the origin of reggae, and a huge increase of country and pop
# - 1970s Same hierarchy as 1960s, large increase of rock
# - 1980s most genres remain the same, rock still rising, first appearance of hip hop
# - 1990s reggae continue to grow, most other genres stay somewhat constant
# - 2000s large drop in country, large increase in hip hop
# - 2010s hip hop has huge increase
# Divide each genre up into years to get a clearer look.
# Note: plots only include years where the genre was active.
# Each call is its own notebook cell so each renders a separate figure.
sns.countplot(data=df.loc[df["genre"]=="pop"], x="Year",hue="genre")
sns.countplot(data=df.loc[df["genre"]=="country"], x="Year",hue="genre")
sns.countplot(data=df.loc[df["genre"]=="blues"], x="Year",hue="genre")
sns.countplot(data=df.loc[df["genre"]=="jazz"], x="Year",hue="genre")
sns.countplot(data=df.loc[df["genre"]=="reggae"], x="Year",hue="genre")
sns.countplot(data=df.loc[df["genre"]=="rock"], x="Year",hue="genre")
sns.countplot(data=df.loc[df["genre"]=="hip hop"], x="Year",hue="genre")
# ## Question 1 Conclusion
# ---
# The observation collected through this research question is regarding genre prevalence and frequency within the data. The overall most common genre through the decades is overbearingly pop music, with the one exception being the 1950s. This is no surprise and mirrors the general popularity of pop music, as pop tends to be the dominant genre since its origin. In the 1950s, however, the most frequent genres are blues and jazz; once again this is expected as these genres tend to be credited with the creation of rock and pop. Most genres seem to be prevalent within the data set over the decades in which they were most relevant, with the one exception being that country seemed to top rock through the 1970s to 1990s. The first usage of hip hop in the data set was in the 1980s, which is accurate as around this time hip hop was just getting started.
#
# It is worth noting that the frequency of each genre is due to the sample size used in the data set. Even though the creator of the data set aimed to synchronize the real-life popularity of each genre within each year with the sample size, there could still remain some error in the sample size not being entirely accurate.
#
# # Research Question 2: Lyrical themes of genres over the decades
# ---
# ## Research Question 2.1: What is the most violent genre for each decade?
# Violence score per decade (with default confidence-interval bands)
sns.lineplot(data=df, x="Decade", y="violence", hue = "genre", )
# Using the violence category:
# most direct category for describing violence,
# based mostly on the use of violent keywords.
# Same plot without confidence intervals for a cleaner read.
sns.lineplot(data=df, x="Decade", y="violence", hue = "genre", ci = None, palette="bright" )
# ### Observations
# ---
# - Music has generally got more violent over time
# - From 1950 to 1960 Rock was the most violent
# - From 1960 to mid 1970 Raggae was the most violent
# - Mid 1970 to now Rock is the most violent
# - Least violent has almost always gone to country
# - Blues has had a constantly increasing violence right under rock
# - Hip-hop was surprisingly constantly low in violence
#
#
#
# Using the obscene category:
# not necessarily a description of violence, however there may be some correlation.
sns.lineplot(data=df, x="Decade", y="obscene", hue = "genre", ci = None, palette="bright" )
# ### Observations
# ---
# - Hip Hop was way above any other genre
# - Remaining genres were pretty similar
# - Reggae in second place but still much lower than hip hop
# - pop has gotten much worse over time
# - Least obscene genres are country, jazz, and blues
# ## Research Question 2.2: What is the most romantic genre for each decade?
#Using the romance category
sns.lineplot(data=df, x="Decade", y="romantic", hue = "genre", ci = None, palette="bright" )
# ### Observations
# ---
# - Romance is probably most accurate category
# - Romance in music has gone down over time
# - Most genres are pretty similar and follow similar trends
# - Hip hop has lowest romantic score
# - Jazz, country, blues are most romantic over time
#
#
#Using the like/girls category
#Possible correlation with romance
plot=sns.lineplot(data=df, x="Decade", y="like/girls", hue = "genre", ci = None, palette="bright" ,legend = True)
# ### Observations
# ---
# - Not necessarily related to romance
# - Hip Hop had peak in the 1980s then dropped down to the lowest in the 2000 to 2020
# - Most consistent genres with higher girl/like score are blues and pop
# - Country score has increased over time
#Using dating category
#Possible correlation with romance
sns.lineplot(data=df, x="Decade",ci=None, y="dating", hue = "genre", palette = "bright")
# ### Observations
# ---
# - In dating category blues has had the highest score through the decades
# - Most other genres have had very minimal change over time
# - Pop and jazz are relatively higher, but still much less than country
# - Hip hop and rock have had the lowest overall score in dating
# ## Research Question 2.3: What is the most depressing/sad genre and how has this changed over time?
# Using the valence category.
# Valence is the measure of happiness, so it has an
# inverse relationship with depressing/sad lyrics.
sns.lineplot(data=df, x="Decade",ci=None, y="valence", hue = "genre", palette = "bright")
# Plot the inverse of valence (valence_inverse = 1 - valence): higher values
# mean less happy songs, giving a better visualisation of how depressing/sad
# lyrics have trended over the decades (should correlate with sadness).
dfInverse = df.assign(valence_inverse=lambda frame: 1 - frame["valence"])
sns.lineplot(data=dfInverse, x="Decade", y="valence_inverse", hue="genre", ci=None, palette="bright")
# ### Observations
# ---
# - Opposite of valence, measure of least happy
# - Reggae is consistently least happy, followed by hip hop
# - Reggae and hip hop are much lower than remaining genres
# - Jazz was least happy until 1970 when rock became the least happy
# - After 1970 jazz was still a close second
#
# Using the sadness category (direct keyword-based measure)
sns.lineplot(data=df, x="Decade",ci=None, y="sadness", hue = "genre", palette = "bright")
# ### Observations
# ---
# - Hip hop and reggae have lowest sadness score
# - Country and pop have highest sadness until 2000
# - Jazz and rock are in the middle but still have a higher score
# - After the year 2000 jazz had the highest sadness score
# ## Question 2 Conclusion
# ---
# The first category discussed in the context of how music has changed over time is violence. Violence was described using the violence and obscene categories. The prevailing trend is that generally music has become increasingly violent over time with rock as the most consistently violent genre. Within the violence category it is not surprising that rock has a higher violence score as the genre historically has much more rebellious, aggressive, and angry topics and themes, especially from the 1970s onward. It is also worth noting that metal and its subgenres are grouped in with rock and metal can be stereotypically violent. The major surprise of the violence category is the low score of hip-hop. Hip hop tends to have a pattern of being violent, especially in the early years with the rise of gangster rap and rap feuds. Hip hop however does have a high score within the obscene category, which is expected, although there is no clear correlation between obscene and violence in the remaining genres.
#
# The next topic researched are the most romantic genres, the categories used were romance, like/girls, and dating. The most accurate category to look at would be romance, with like/girls and dating not entirely related but may show possible correlations. The three most romantic genres seem to be jazz, country, and blues, with the highest score alternating within these three. Most genres aren’t too far beneath except for hip hop, which has a consistently low score. The higher romance score of country and blues is also supported by like/girls and dating, where they tend to hold a relatively higher score.
#
# The last topic to discuss is what is the most depressing/sad genre, the categories used for this analysis were sadness and the inverse of valence. Valence is the measure of happiness, so the inverse used would be a measure for how unhappy a song is. Both the inverse of valence and sadness show very similar trends. Most genres tend to hold a relatively higher sadness and depressing score, with the exception being reggae and hip hop. Reggae and hip hop have a much lower score then most of the genres, which would mean they are the least depressing. The low score of reggae and hip hop is expected as reggae tends to be more laid back and uplifting, while hip hop can have more of a party like or motivating themes. The remaining genres tend to switch around in which is the most depressing/sad. For most genres the sadness has remained somewhat constant over the decades.
#
# Of all the data collected over the decades it is important to recognize a possible margin of error which. This error could arise from the limited sample data used, or the AI which scored each song could have incorrectly scored keywords. With this error it is difficult to really contrast data with similar scores in categories.
#
# # Research Question 3: How has the instrumental sound of music in general changed over time?
# ---
#General plot of instrumental changes over time
# One x-variable (Decade) paired with three audio features; each feature
# becomes its own column of the grid, rendered as a line plot.
plot = sns.PairGrid(data =df,x_vars =["Decade"],y_vars = ["danceability", "loudness", "acousticness"])
plot.map(sns.lineplot)
# ### Observations
# ---
# - Danceability and loudness have gone up over the years
# - Acousticness has gone down, then somewhat leveled out around 1980s
# - Most plots are somewhat linear and show little variation in their trend
#Genre specific plot of danceability
# ci=None disables the bootstrap confidence band so only the mean line per genre is drawn.
# NOTE(review): `ci` is deprecated in newer seaborn in favour of errorbar=None -- confirm installed version.
sns.lineplot(data=df, x="Decade",ci=None, y="danceability", hue = "genre", palette = "bright")
#Genre specific plot of loudness
sns.lineplot(data=df, x="Decade",ci=None, y="loudness", hue = "genre", palette = "bright")
#Genre specific plot of acousticness (original comment incorrectly said "danceability")
sns.lineplot(data=df, x="Decade",ci=None, y="acousticness", hue = "genre", palette = "bright")
# ### Observations
# ---
# - Plots tend to follow the previous trend of the overall sound change
# - Danceability genre plots are mostly linear, with the overall plot having a steep upwards trend
# - Hip hop and reggae started in later decades, this should be taken into account when considering the overall plots
# ## Question 3 Conclusion
# ---
#
# The instrumental aspects of music have changed dramatically over time. This research question looks at the change in danceability, loudness, and acousticness over the decades. For danceability, the overall nongenre-specific plot shows an upwards trend starting from 1950 that is nearly linear. This would suggest that music has gotten more danceable; however, when looking at the genre-specific plot of danceability the slopes are mostly flat and horizontal. Since the nongenre-specific plot takes the average of all the genres, it is important to notice that reggae and hip hop first appeared in later decades and have a significantly higher danceability score compared to the other genres. This would mean that while most of the genres have relatively flat slopes, as reggae and hip hop are added the nongenre-specific average would increase. This creates the appearance of an upwards trend in the nongenre-specific plot when in fact new genres were simply being introduced and danceability has remained somewhat constant. This issue has a minimal to nonexistent effect on loudness and acousticness.
#
# Loudness shows a steep upwards trend in both the genre specific and nongenre specific plots. I would speculate this is due to the advancements in music technology, where artists can create a louder, more dense sound through machines or computers. This is also most likely the cause of the downwards trend in acousticness. The downwards trend in acousticness is seen in both the genre specific and nongenre specific plots, verifying it to hold true. I speculate this is also due to technological advancements in the music industry creating less reliance on actual acoustic instrumentations. As well as this could be due to the more electronic sound that is popular in most modern genres.
#
| analysis/.ipynb_checkpoints/Analysis of Research Questions-checkpoint.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# # read.csv
#
# `utils` package tự động load khi `R` session khởi động, bạn có thể import **CSV files** bằng hàm `read.csv()`.
#
# 1. Sử dụng `read.csv()` để import file "[swimming_pools.csv](swimming_pools.csv)" thành data frame `pools`.
# 2. In cấu trúc data frame bằng hàm `str()`.
# Import the CSV with read.csv(); defaults treat row 1 as the header.
pools <- read.csv('swimming_pools.csv')
# Check the structure of pools
str(pools)
# # stringsAsFactors
#
# `stringsAsFactors` cho R biết khi nào nên convert dữ liệu thành dạng Factor, trong tất cả các hàm import của `utils`, mặc định là `stringsAsFactors = TRUE`
#
# Nếu ta set `stringsAsFactors = FALSE` thì tất cả dữ liệu được import sẽ giữ nguyên là string, có kiểu là `character`
# Import swimming_pools.csv correctly: pools
# stringsAsFactors = FALSE keeps text columns as plain character vectors
# instead of converting them to factors.
pools <- read.csv('swimming_pools.csv', stringsAsFactors=FALSE)
# Check the structure of pools
str(pools)
# # read.delim & read.table
#
# 1. `read.delim()` đọc dữ liệu bất kỳ thành data table, dựa vào dấu phân cách `sep` mà tách các cột.
# 2. `read.table` đọc bất kỳ dữ liệu nào có dạng tabular.
#
# Hai hàm này tương đối giống nhau, `read.table` mặc định tham số `header = FALSE` (không dùng dòng đầu tiên làm tên cột) và `sep=""`.
#
# Ghi chú: `head()` dùng để hiển thị `n` dòng đầu tiên của data.table
#
# # summary
#
# Sử dụng hàm `summary()` để thống kê miêu tả nhanh về data frame.
# The file has no header row, so set header=FALSE
# (otherwise the first data row would be consumed as column names).
hotdogs <- read.delim('hotdogs.txt', sep='\t', header=FALSE)
# Preview the first rows, then show quick descriptive statistics.
head(hotdogs)
summary(hotdogs)
# # file.path()
#
# dùng `file.path()` để tạo đường dẫn.
# Build a platform-independent path "data/hotdogs.txt".
path <- file.path("data", "hotdogs.txt")
path
# +
# Import the hotdogs.txt file: hotdogs
# read.table defaults to header = FALSE; column names are supplied explicitly.
hotdogs <- read.table(path,
sep = "\t",
col.names = c("type", "calories", "sodium"))
# Call head() on hotdogs
head(hotdogs)
# -
# # Lọc which.min và which.max
#
# Tìm ra hotdogs có ít calories nhất, nhiều sodium nhất.
# which.min()/which.max() return the ROW INDEX of the extreme value,
# so indexing with it yields the full record, not just the number.
min.calo <- hotdogs[which.min(hotdogs$calories), ]
min.calo
max.sodium <- hotdogs[which.max(hotdogs$sodium), ]
max.sodium
# # colClasses
#
# Tham số `colClasses` là một vector chỉ định kiểu dữ liệu cho từng cột; nếu phần tử tương ứng của một cột là `"NULL"` thì cột đó sẽ bị loại bỏ khỏi data.frame.
# +
# Edit the colClasses argument to import the data correctly: hotdogs2
# Edit the colClasses argument to import the data correctly: hotdogs2
# "NULL" drops the second column (calories) entirely, so hotdogs2 keeps
# only the factor `type` and numeric `sodium` columns.
hotdogs2 <- read.delim("hotdogs.txt", header = FALSE,
col.names = c("type", "calories", "sodium"),
colClasses = c("factor", "NULL", "numeric"))
# Display structure of hotdogs2
str(hotdogs2)
# -
| 1-import-data/import-data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OneHotEncoder
# Preprocessing for numerical data
# strategy='constant' fills missing numeric values with the default fill value (0).
numerical_transformer = SimpleImputer(strategy='constant')
# Preprocessing for categorical data
# Impute with the most frequent category, then one-hot encode; categories
# unseen during fit are ignored at transform time rather than raising.
categorical_transformer = Pipeline(steps=[
    ('imputer', SimpleImputer(strategy='most_frequent')),
    ('onehot', OneHotEncoder(handle_unknown='ignore'))
])
# Bundle preprocessing for numerical and categorical data
# numerical_cols / categorical_cols are defined in an earlier notebook cell.
preprocessor = ColumnTransformer(
    transformers=[
        ('num', numerical_transformer, numerical_cols),
        ('cat', categorical_transformer, categorical_cols)
    ])
# +
from sklearn.ensemble import RandomForestRegressor
# 100 trees with a fixed seed so results are reproducible across runs.
model = RandomForestRegressor(n_estimators=100, random_state=0)
# +
from sklearn.metrics import mean_absolute_error
# Bundle preprocessing and modeling code in a pipeline
my_pipeline = Pipeline(steps=[('preprocessor', preprocessor),
                              ('model', model)
                              ])
# Preprocessing of training data, fit model
# X_train / y_train / X_valid / y_valid come from an earlier notebook cell.
my_pipeline.fit(X_train, y_train)
# Preprocessing of validation data, get predictions
preds = my_pipeline.predict(X_valid)
# Evaluate the model (mean absolute error; lower is better)
score = mean_absolute_error(y_valid, preds)
print('MAE:', score)
# -
# ## [info](https://www.kaggle.com/alexisbcook/pipelines)
| Data preprocessing/pipelines.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: python3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
from matplotlib import style
style.use('fivethirtyeight')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import datetime as dt
# # Reflect Tables into SQLAlchemy ORM
# Python SQL toolkit and Object Relational Mapper
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func, inspect
# create engine to hawaii.sqlite
# create engine to hawaii.sqlite
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
# View all of the classes that automap found
Base=automap_base()
Base.prepare(engine, reflect=True)
Base.classes.keys()
# Bind the two reflected tables to ORM classes.
Station=Base.classes.station
Measurement=Base.classes.measurement
session=Session(engine)
inspector=inspect(engine)
# Raw-SQL peek at the station table contents.
stations=engine.execute('SELECT * FROM Station')
print(stations.keys())
stations.fetchall()
# List column names and SQL types of the Station table.
columns=inspector.get_columns('Station')
for column in columns:
    print(column['name'], column['type'])
# Raw-SQL peek at the first 15 measurement rows.
measurements=engine.execute('SELECT * FROM Measurement LIMIT 15')
print(measurements.keys())
measurements.fetchall()
# List column names and SQL types of the Measurement table.
columns=inspector.get_columns('Measurement')
for column in columns:
    print(column['name'], column['type'])
# # Exploratory Precipitation Analysis
# Find the most recent date in the data set.
# Find the most recent date in the data set.
latest_date=(session.query(Measurement.date)
             .order_by(Measurement.date.desc())
             .first())
latest_date
# Date one year before the latest observation (2017-08-23).
# NOTE(review): 366 days reaches back one extra day -- confirm 365 wasn't intended.
year_ago_date=dt.date(2017, 8, 23) - dt.timedelta(days=366)
print('Query Date:', year_ago_date)
# Daily MAXIMUM precipitation over the final year of data.
year_prcp=(session.query(Measurement.date,func.max(Measurement.prcp))
           .filter(func.strftime('%Y-%m-%d',Measurement.date) > year_ago_date)
           .group_by(Measurement.date)
           .all())
year_prcp
prcp_df=pd.DataFrame(year_prcp, columns=['date', 'prcp'])
prcp_df.set_index('date',inplace=True)
prcp_df.head(10)
# Returns a sorted copy for display; prcp_df itself is left in query order.
prcp_df.sort_values('date')
# +
plt.rcParams['figure.figsize']=(15,7)
# Line plot of daily max precipitation; x tick positions land roughly every two months.
prcp_df.plot(linewidth=2,alpha=1,rot=0,
             xticks=(0,60,120,180,240,300,365),
             color='xkcd:deep aqua')
plt.xlim(-5,370)
plt.ylim(-0.4,7)
plt.yticks(size=14)
plt.xticks(fontsize=14)
# Hide the legend -- there is only a single series.
plt.legend('',frameon=False)
plt.xlabel('Date',fontsize=16,color='black',labelpad=20)
plt.ylabel('Precipitation (in)',fontsize=16,color='black',labelpad=20)
plt.title('Daily Maximum Precipitation for One Year\nHonolulu, Hawaii',fontsize=20,pad=40)
plt.show()
# -
# All (date, prcp) observations from the final year of data.
year_prcp_stats=(session.query(Measurement.date, Measurement.prcp)
                 .filter(Measurement.date > year_ago_date)
                 .all())
year_prcp_stats
year_prcp_stats_df=pd.DataFrame(year_prcp_stats, columns=['date', 'prcp'])
year_prcp_stats_df
# BUG FIX: dropna() returns a new frame; the original call discarded the
# result, so rows with missing precipitation were never actually removed.
year_prcp_stats_df = year_prcp_stats_df.dropna()
# Summary statistics (count/mean/std/quantiles) of the cleaned data.
year_prcp_stats_df.describe()
# # Exploratory Station Analysis
# Design a query to calculate the total number stations in the dataset
# Design a query to calculate the total number stations in the dataset
total_stations=session.query(Station).count()
print(f'There are {total_stations} stations at Honolulu, Hawaii.')
# Stations ranked by number of measurement rows (most active first).
station_activity=(session.query(Measurement.station,func.count(Measurement.station))
                  .group_by(Measurement.station)
                  .order_by(func.count(Measurement.station).desc())
                  .all())
station_activity
# +
# Min / max / average observed temperature for the most active station.
tobs=[Measurement.station,
      func.min(Measurement.tobs),
      func.max(Measurement.tobs),
      func.avg(Measurement.tobs)]
most_active_st=(session.query(*tobs)
                .filter(Measurement.station=='USC00519281')
                .all())
most_active_st
most_active_st_temp=pd.DataFrame(most_active_st, columns=['station', 'min_temp',
                                                          'max_temp', 'avg_temp'])
most_active_st_temp.set_index('station', inplace=True)
most_active_st_temp
# -
# One year of temperature observations for the most active station.
year_tobs=(session.query(Measurement.date,(Measurement.tobs))
           .filter(func.strftime(Measurement.date) > year_ago_date)
           .filter(Measurement.station=='USC00519281')
           .all())
year_tobs
# +
# Histogram of the observed temperatures (12 bins).
tobs_df=pd.DataFrame(year_tobs)
tobs_df.set_index('date',inplace=True)
plt.rcParams['figure.figsize']=(10,7)
plt.hist(tobs_df['tobs'],bins=12,alpha=0.6,edgecolor='xkcd:light gray',
         linewidth=1,color='xkcd:deep aqua')
plt.title('Temperature Observation Aug 2016 - Aug 2017\nHonolulu, Hawaii',fontsize=20,pad=40)
plt.xlabel('Temperature (F)',fontsize=16,color='black',labelpad=20)
plt.ylabel('Frequency',fontsize=16,color='black',labelpad=20)
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
plt.ylim(0,70)
plt.show()
# -
# # Close session
# Close Session
# Release the SQLAlchemy session's database connection.
session.close()
| climate_starter.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from selenium import webdriver

# Launch a local Chrome browser via the bundled chromedriver and fetch the
# rendered ranking page.
driver = webdriver.Chrome('./chromedriver')
url = 'https://youtube-rank.com/board/bbs/board.php?bo_table=youtube'
driver.get(url)
html = driver.page_source

import pandas as pd

# pandas can also read the ranking table straight from the page's HTML tables.
df1 = pd.read_html('https://youtube-rank.com/board/bbs/board.php?bo_table=youtube')
df1[0]

from bs4 import BeautifulSoup as bs

soup = bs(html,'html.parser')
contents = soup.select('tr.aos-init')

# Inspect a single row first to confirm the selectors work.
content = contents[0]
category = content.select('p.category')[0]
category.text.strip()
title = content.select('h1 > a[href*="board"]')[0]
title.text.strip()

# Per-channel view count, video count, and subscriber count cells.
view = soup.select('tbody td.view_cnt')
video = soup.select('tbody td.video_cnt')
hit = soup.select('tbody td.hit strong')
len(view)

# BUG FIX: build the (category, title) rows BEFORE constructing df2 -- the
# original notebook referenced `a` several cells before it was defined.
a = []
for ct in contents:
    category = ct.select('p.category')[0]
    title = ct.select('h1 > a[href*="board"]')[0]
    a.append([category.text.strip(),title.text.strip()])

df2 = pd.DataFrame(a,columns = ['category','title'])
df2.to_excel('./saves/youtube-rank.xls',index = False)

b = []
for i in range(len(hit)):
    b.append([view[i].text.strip(),video[i].text.strip(),hit[i].text.strip()])

df3 = pd.DataFrame(b,columns = ['view','video','조회수'])
df2
# Join the text columns with the numeric columns side by side and save.
df4 = pd.concat([df2,df3],axis=1)
df4
df4.to_excel('./saves/youtube-rank2.xls',index = False)
| scraping_selenium_youtube-rank.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="9nmlyDFUBQN2" colab_type="code" outputId="c129ecb7-09da-40fd-bddf-5259b3ae8d2f" executionInfo={"status": "ok", "timestamp": 1583820907945, "user_tz": -60, "elapsed": 15482, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg9CKN6kAPbVUuWwItPTh9PRMMlXj44Eo68IiMptw=s64", "userId": "02016148975437561947"}} colab={"base_uri": "https://localhost:8080/", "height": 641}
# !pip install --upgrade tables
# !pip install eli5
# !pip install xgboost
# !pip install hyperopt
# + id="ex0ixtVgXTBW" colab_type="code" outputId="4f3627e1-5770-48b6-c0eb-ab79b7194d8f" executionInfo={"status": "ok", "timestamp": 1583821037290, "user_tz": -60, "elapsed": 3725, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg9CKN6kAPbVUuWwItPTh9PRMMlXj44Eo68IiMptw=s64", "userId": "02016148975437561947"}} colab={"base_uri": "https://localhost:8080/", "height": 167}
import pandas as pd
import numpy as np
from sklearn.metrics import mean_absolute_error as mae
from sklearn.model_selection import cross_val_score, KFold
import xgboost as xgb
from hyperopt import hp, fmin, tpe, STATUS_OK
import eli5
from eli5.sklearn import PermutationImportance
# + id="eAOOiQgcXvc1" colab_type="code" outputId="0f42dbfc-b293-4fc9-8ddd-d13bbabe955b" executionInfo={"status": "ok", "timestamp": 1583821044563, "user_tz": -60, "elapsed": 1046, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg9CKN6kAPbVUuWwItPTh9PRMMlXj44Eo68IiMptw=s64", "userId": "02016148975437561947"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
# cd drive/My Drive/Colab Notebooks/Data-Workshop/Matrix_part_two/Data-Workshop-Cars/
# + id="aPEN0rxFX0F1" colab_type="code" colab={}
# Load the pre-scraped car listings (HDF5 file produced by an earlier notebook).
df = pd.read_hdf('data/car.h5')
# + id="uxiRs12iX5Jp" colab_type="code" outputId="75f383a8-1137-4a88-f728-faf80dec4ad1" executionInfo={"status": "ok", "timestamp": 1583821052543, "user_tz": -60, "elapsed": 1705, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg9CKN6kAPbVUuWwItPTh9PRMMlXj44Eo68IiMptw=s64", "userId": "02016148975437561947"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
df.shape
# + id="0Y3NUmsLX7it" colab_type="code" colab={}
# Factorize the features: add integer-coded copies of every column under a
# '__cat' suffix so tree models can consume the categorical data.
SUFFIX_CAT = '__cat'
for feat in df.columns:
    if isinstance(df[feat][0], list): continue # skip list-valued cells; factorize() would fail on them
    factorized_values = df[feat].factorize()[0] # integer code per distinct value; NaN becomes -1
    if SUFFIX_CAT in feat:
        df[feat] = factorized_values # column is already a '__cat' column: overwrite in place
    else:
        # NOTE(review): this appends columns while iterating df.columns --
        # appears to work because the iterator snapshots the index; confirm.
        df[feat + SUFFIX_CAT] = factorized_values
# + id="NK99JefZYA0y" colab_type="code" colab={}
# Parse numeric columns out of their string forms; 'None' maps to a -1 sentinel.
df['param_rok-produkcji'] =df['param_rok-produkcji'].map(lambda x : -1 if str(x) == 'None' else int(x))  # production year
df['param_moc'] = df['param_moc'].map(lambda x: -1 if str(x) == 'None' else int(x.split(' ')[0]))  # engine power: "140 KM" -> 140
df['param_pojemność-skokowa'] = df['param_pojemność-skokowa'].map(lambda x: -1 if str(x) == 'None' else int(str(x).split('cm')[0].replace(' ','')))  # displacement: "1 998 cm3" -> 1998
# + id="woV0cUWwYRx-" colab_type="code" colab={}
def run_model(model, feats):
    """Cross-validate `model` on the notebook-global `df`.

    Uses 3-fold CV scored by negative mean absolute error and returns
    the (mean, std) of the fold scores.
    """
    features = df[feats].values
    target = df['price_value'].values
    cv_scores = cross_val_score(model, features, target, cv=3, scoring='neg_mean_absolute_error')
    return cv_scores.mean(), cv_scores.std()
# + id="YvxKiQdiZHF2" colab_type="code" colab={}
# Feature set: factorized categorical params plus the parsed numeric columns.
feats =['param_napęd__cat','param_rok-produkcji','param_stan__cat','param_skrzynia-biegów__cat','param_faktura-vat__cat','param_moc','param_marka-pojazdu__cat','feature_kamera-cofania__cat',
        'param_typ__cat','param_pojemność-skokowa','seller_name__cat',
        'feature_wspomaganie-kierownicy__cat',
        'param_model-pojazdu__cat',
        'param_wersja__cat',
        'param_kod-silnika__cat',
        'feature_system-start-stop__cat',
        'feature_asystent-pasa-ruchu__cat',
        'feature_czujniki-parkowania-przednie__cat',
        'feature_łopatki-zmiany-biegów__cat',
        'feature_regulowane-zawieszenie__cat' ]
# + id="5voqqOqYZIgq" colab_type="code" outputId="1eb02d91-753c-4b61-cbfd-2cca0d84cf9c" executionInfo={"status": "ok", "timestamp": 1583609763184, "user_tz": -60, "elapsed": 13784, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg9CKN6kAPbVUuWwItPTh9PRMMlXj44Eo68IiMptw=s64", "userId": "02016148975437561947"}} colab={"base_uri": "https://localhost:8080/", "height": 84}
# Baseline XGBoost configuration evaluated before hyper-parameter tuning.
xgb_params = {
    'max_depth' : 5,
    'n_estimators' : 50,
    'learning_rate': 0.1,
    'seed':0
}
model = xgb.XGBRegressor(**xgb_params)
run_model(model, feats)
# + id="eIiU1pO9ZYQI" colab_type="code" colab={}
# + [markdown] id="AnsWKD1Kainu" colab_type="text"
# ## HyperOpt
# + id="36jATDmbakzZ" colab_type="code" outputId="425da0c9-e548-4fd5-8d23-85aadb77e635" executionInfo={"status": "ok", "timestamp": 1583611143745, "user_tz": -60, "elapsed": 1372500, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg9CKN6kAPbVUuWwItPTh9PRMMlXj44Eo68IiMptw=s64", "userId": "02016148975437561947"}} colab={"base_uri": "https://localhost:8080/", "height": 877}
# optimizer
def obj_func(params):
    """Hyperopt objective: cross-validate XGBoost with `params`, minimise |mean MAE|."""
    print('Training with parametres: ')
    print(params)
    mean_mae, score_std = run_model(xgb.XGBRegressor(**params), feats)
    # run_model returns negative MAE, so take the absolute value as the loss.
    return {'loss': np.abs(mean_mae), 'status' : STATUS_OK}
# space
# Search space over learning rate, tree depth and row/column subsampling.
xgb_reg_params = {
    'learning_rate' : hp.choice( 'learning_rate', np.arange(0.05, 0.31, 0.05)),
    'max_depth' : hp.choice('max_depth', np.arange(5, 16, 1, dtype=int)),
    'subsample' : hp.quniform('subsample', 0.5, 1, 0.05),
    'colsample_bytree':hp.quniform('colsample_bytree', 0.5, 1, 0.05),
    'objective' : 'reg:squarederror',
    'n_estimators' : 100,
    'seed' : 0,
}
# run
# 25 evaluations of TPE search; return_argmin=False returns the actual
# parameter values rather than choice indices.
best = fmin(obj_func, xgb_reg_params, algo=tpe.suggest, max_evals=25, return_argmin=False)
# + id="JVcd949FEpqD" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="409ff3cc-b451-4d4a-f43f-e44042048cc2" executionInfo={"status": "ok", "timestamp": 1583821190598, "user_tz": -60, "elapsed": 74705, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg9CKN6kAPbVUuWwItPTh9PRMMlXj44Eo68IiMptw=s64", "userId": "02016148975437561947"}}
# wining result
# Best parameters found by the hyperopt search above, hard-coded so the
# notebook can be re-run without repeating the 25-trial search.
xgb_params = {
    'colsample_bytree': 0.9500000000000001,
    'learning_rate': 0.15000000000000002,
    'max_depth': 11,
    'n_estimators': 100,
    'objective': 'reg:squarederror',
    'seed': 0,
    'subsample': 0.65
}
model = xgb.XGBRegressor(**xgb_params)
run_model(model, feats)
# + id="ot24OgtxIA_6" colab_type="code" colab={}
# fitting the model
# Reduced ten-feature list used for the final fit and manual prediction below.
feats =['param_napęd__cat','param_rok-produkcji','param_stan__cat','param_skrzynia-biegów__cat','param_faktura-vat__cat','param_moc','param_marka-pojazdu__cat','feature_kamera-cofania__cat',
        'param_typ__cat','param_pojemność-skokowa']
xgb_params = {
    'colsample_bytree': 0.9500000000000001,
    'learning_rate': 0.15000000000000002,
    'max_depth': 11,
    'n_estimators': 100,
    'objective': 'reg:squarederror',
    'seed': 0,
    'subsample': 0.65
}
model = xgb.XGBRegressor(**xgb_params)
# Fit on the FULL dataset (no hold-out) -- the CV above already estimated error.
X = df[feats].values
y = df['price_value'].values
fitting = model.fit(X, y)
# + id="zhV-_6IO7LoF" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="10d2cdb5-8115-4a86-9c7f-280e8dc1ae78" executionInfo={"status": "ok", "timestamp": 1583827436096, "user_tz": -60, "elapsed": 783, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg9CKN6kAPbVUuWwItPTh9PRMMlXj44Eo68IiMptw=s64", "userId": "02016148975437561947"}}
# Real car data, encoded with the same factorized parameters the model was trained on.
X = [1,2009, 1, 0, 0, 200, 3, 0, 4, 2000 ]
# drive type (front) = 1
# production year = 2009
# condition (used) = 1
# gearbox: manual = 0
# VAT invoice: no = 0
# engine power = 200
# make: Volkswagen = 3
# rear-view camera: no = 0
# body type: kombi (estate) = 4
# engine displacement = 2000
# BUG FIX: predict() expects a 2D array of shape (n_samples, n_features);
# the original passed the bare 1D list, which raises an error in the
# scikit-learn API of XGBoost. Wrap the single sample in an outer array.
model.predict(np.array([X]))
| Part2_day5-param_optimization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_braket
# language: python
# name: python3
# ---
# # Bring your own containers to Braket Jobs
# Amazon Braket has pre-configured containers for executing Amazon Braket Hybrid Jobs, which are sufficient for many use cases involving the Braket SDK and PennyLane. However, if we want to use custom packages outside the scope of pre-configured containers, we will need to supply a custom-built container. In this tutorial, we show how to use Braket Job to train a quantum machine learning model using BYOC (Bring Your Own Container).
# <div class="alert alert-block alert-info">
# <b>Note:</b> Building the docker image in this notebook may require 4GB of memory. If you plan to use an Amazon Braket notebook instance to run this notebook, it is suggested to use a larger notebook instance, for example ml.t3.large.
# </div>
# ## 1 Prepare algorithm script
# This section introduces the algorithm script for a text sentiment classifier. The complete script for this notebook is [here](algorithm_script.py). We will not go through the script line by line. Instead, we highlight the part that is important for understanding the Braket Job.
# ### Problem setup
# The quantum machine learning problem we are targeting is to classify the positive and negative sentiments of a sentence. We use four sentences in this example:
#
# "I eat a banana every day." <br>
# "Bananas are not for her." <br>
# "Banana shakes are delicious." <br>
# "How can you like bananas?" <br>
#
# The first and the third sentence have positive sentiment on bananas, which are labeled as +1. The second and the fourth sentence have negative sentiment, which are labeled as -1. To input the sentence to a quantum machine learning model, we use spaCy package to embed the sentences into 1D vectors.
# %%capture captured
# !pip install ipywidgets
# !pip install spacy_sentence_bert
# +
import spacy_sentence_bert
# Sentence-BERT language model: embeds each sentence into a fixed-length vector.
nlp = spacy_sentence_bert.load_model("xx_distiluse_base_multilingual_cased_v2")
banana_string = ["I eat a banana every day.",
                 "Bananas are not for her.",
                 "Banana shakes are delicious.",
                 "How can you like bananas?"
                ]
banana_embedding = [nlp(d) for d in banana_string]
# One embedding vector per sentence; labels: +1 positive, -1 negative sentiment.
data = [d.vector for d in banana_embedding]
label = [1, -1, 1, -1]
# -
# With the "xx_distiluse_base_multilingual_cased_v2" language model, each data point is now a vector with length 512. See the [spaCy page](https://spacy.io/universe/project/spacy-sentence-bert) for details. Note that the size of the embedding vectors depends on the language model. When choosing a different model, we would expect a different shape of embedding.
# Show the embedding size (512 for this language model, per the text above).
for d in data:
    print("data size: {}".format(d.shape))
# ### Quantum machine learning model
# We choose [Circuit-centric quantum classifiers (CCQC)](https://arxiv.org/abs/1804.00633) as our quantum model. The figure below shows an example CCQC circuit with 7 qubits. The data (classical embedding from the language model) is input to the circuit as the initial state via [amplitude encoding](https://arxiv.org/abs/1803.07128). After the initial state are two entanglement layers. The first layer entangles each qubit with its nearest neighbor, and the second layer with each qubit's third nearest neighbor. A rotation gate is then applied to the first qubit. Finally, the measurement is only done on the first qubit. The classification criterion is only based on this measurement. If the measurement is positive, it predicts a positive sentiment (+1); otherwise, it predicts a negative one (-1).
#
# 
# We use PennyLane as our machine learning framework. To use Braket managed simulators or QPUs, we set the device name to be "braket.aws.qubit". The device_arn will be passed to the algorithm script as the environment variable <code>os.environ["AMZN_BRAKET_DEVICE_ARN"]</code>; this variable is set when creating the job. For details about options of Braket devices in <code>qml.device</code>, see [Amazon Braket-Pennylane Plugin](https://github.com/aws/amazon-braket-pennylane-plugin-python). The quantum model is packaged in a CCQC class in the [algorithm script](algorithm_script.py).
# ### Monitor metrics and record results
# We can monitor the progress of the hybrid job in the Amazon Braket console. <code>log_metric</code> records the metrics so that we can view the training progress in the "Monitor" console tab. <code>save_job_result</code> allows us to view the result in the console and in the <code>job</code> variable.
# ## 2 Prepare custom container
# When we submit a quantum job, Amazon Braket starts a job instance based on EC2 and spins up a container to run our script. The environment is defined by the provided container instead of the local console where the job is submitted. If no container image is specified when submitting a job, the default container is the base Braket container. See the [developer guide](https://docs.aws.amazon.com/braket/index.html) for the configuration of the base container.
#
# Amazon Braket Jobs provides three pre-configured containers for different use cases. See the [developer guide](https://docs.aws.amazon.com/braket/index.html) for the configuration of pre-configured containers. In this example, the spaCy package is not supported in any of the three containers. One option is to install the package through <code>pip</code> at the beginning of the algorithm script.
# +
# from pip._internal import main as pipmain
# pipmain(["install", "spacy"])
# -
# When the problem size is small, we can manage to use <code>pip</code> to configure the environment. However, for large-scale applications, we expect that this method would quickly become infeasible. Braket Job provides the "bring your own container (BYOC)" option to help you manage the environment of your hybrid job. BYOC not only allows us to define what Python packages are available, but to configure those settings that are hard to do by <code>pip</code> or Python alone. In the following, we go through the steps of building our own container.
# ### Preparation 1: Docker
# To build and upload our custom container, we must have [Docker](https://docs.docker.com/get-docker/) installed. Amazon Braket Notebook Instance has Docker pre-installed. This step can be skipped if you are using the terminal of a Braket Notebook Instance.
#
# ### Preparation 2: Dockerfile
# A Dockerfile defines the environment and the software in the containers. We can start with the base Braket container and add packages according to our needs. For our quantum text classifier, we use the Dockerfile below. The first line in the dockerfile specifies the container template. We build our container upon the base Braket container. The rest of the file is to install the required packages (PennyLane and SpaCy etc.).
# !cat dockerfile
# ### Preparation 3: Initial script
# An initial script is the script that will be executed when a container starts. For this example notebook, we build our container based on the base Braket container which already includes an initial script to run Braket Jobs. Therefore, we do not need to provide an initial script. The initial script configures the paths for container image and for user code. It sets up a container and downloads the algorithm script to run in the container. It also handles errors and logs error messages. See the developer guide for more information about the initial script associated with the base Braket container.
#
# ### Preparation 4: Create ECR
# Amazon Elastic Container Registry (ECR) is a fully managed Docker container registry. Follow the [instructions](https://docs.aws.amazon.com/AmazonECR/latest/userguide/repository-create.html) to create a "private" repository using ECR console. For this example, we name our repository "amazon-braket-my-qtc" (my quantum text classifier).
#
# Alternatively, following these [instructions](https://docs.aws.amazon.com/cli/latest/reference/ecr/create-repository.html), we can also create an ECR repository using the AWS CLI.
#
# <b>Important</b>: The Amazon Braket managed policies only grant read access to repositories with the prefix `amazon-braket`. <b>In order to create (and later, push to) a repository, or to access repositories which are not prefixed with `amazon-braket`, you will need to attach additional permissions to your IAM identity.</b> If you are running this notebook on an Amazon Braket notebook instance, you may attach the [AmazonEC2ContainerRegistryFullAccess](https://docs.aws.amazon.com/AmazonECR/latest/userguide/security-iam-awsmanpol.html#security-iam-awsmanpol-AmazonEC2ContainerRegistryFullAccess) policy to the IAM role you specified when creating the notebook instance.
#
# <b>Important: By default, Braket Jobs can only access repositories with names beginning in `amazon-braket`.</b> If you would like to access a repository with a different prefix, you will need to pass an IAM role with access to that repository using the `role_arn` argument of the `AmazonQuantumJob.create()` function.
# +
# # !aws ecr create-repository --repository-name amazon-braket-my-qtc
# -
# Now that we have finished the prerequisites, it's time to build our container!
# ### Action 1: Log into AWS CLI and Docker
# If you haven't already, follow the [instructions](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-quickstart.html) to configure your AWS credentials using the AWS CLI. Then, run the following snippet in the terminal to log into Docker. Replace all <XXX> below with your own credentials. You will see "Login Succeeded" twice when it's done.
#
# <code>aws ecr get-login-password --region us-west-2 | docker login --username AWS --password-stdin 2<PASSWORD>.dkr.ecr.us-west-2.amazonaws.com
# aws ecr get-login-password --region <YOUR_AWS_REGION> | docker login --username AWS --password-stdin <YOUR_ACCOUNT_ID>.dkr.ecr.<YOUR_AWS_REGION>.amazonaws.com</code>
#
# Note that the commands log into Docker twice. The first command logs into the public registry containing the Braket base container that you will base your Docker image on. The second command logs into your private registry.
#
# If the terminal does not support interactive login, you can also run the following commands to log in.
#
# <code>docker login -u AWS -p $(aws ecr get-login-password --region us-west-2) 292282985366.dkr.ecr.us-west-2.amazonaws.com
# docker login -u AWS -p $(aws ecr get-login-password --region <YOUR_AWS_REGION>) <YOUR_ACCOUNT_ID>.dkr.ecr.<YOUR_AWS_REGION>.amazonaws.com</code>
# ### Action 2: Build and push the image
# Go to the folder containing your Dockerfile. Run the lines below to build and push the image to your ECR. Remember to replace all <XXX> in the code with your own credentials. When it completes, you will see all layers are pushed in the terminal, and a new image will appear in our ECR console under the "amazon-braket-my-qtc" repository. If running into memory error when building an image due to the size of the language model, you can increase the memory limit in Docker.<br>
# <code>docker build -t dockerfile .
# docker tag dockerfile:latest <YOUR_ACCOUNT_ID>.dkr.ecr.<YOUR_AWS_REGION>.amazonaws.com/amazon-braket-my-qtc:latest
# docker push <YOUR_ACCOUNT_ID>.dkr.ecr.<YOUR_AWS_REGION>.amazonaws.com/amazon-braket-my-qtc:latest
# </code>
# Once the container image is uploaded, it is ready to be used in a Braket Job!
# ### The build-and-push shell script
# The above procedure walks you through the steps to create a container image. This procedure can be automated in a shell script. The [example script](build_and_push.sh) is provided in the same folder of this notebook. The script automatically formulates the commands to build and to push the container image to the ECR repository you assign. If the repository does not exist, it creates one. To use this shell script, the IAM identity that runs the script requires permissions to create the repository and push the image to the repository. If your IAM identity doesn't have these permissions, you can attach the [AmazonEC2ContainerRegistryFullAccess](https://docs.aws.amazon.com/AmazonECR/latest/userguide/security-iam-awsmanpol.html#security-iam-awsmanpol-AmazonEC2ContainerRegistryFullAccess) policy or select only the relevant permissions if you require granular control.
#
# Assuming "amazon-braket-my-qtc" is your repository name, all you need to do is to prepare the Dockerfile and run the command<br>
# <code>sh build_and_push.sh amazon-braket-my-qtc
# </code>
# in the terminal. Alternatively you can uncomment and run the following cell:
# +
# # !sh build_and_push.sh amazon-braket-my-qtc
# -
# ## 3 Submit your job
# Now that we have prepared an algorithm script and the container for the job, we can submit the hybrid job to AWS using <code>AwsQuantumJob.create</code>. Remember to provide the container we just created via the <code>image_uri</code> keyword.
# <div class="alert alert-block alert-warning">
# <b>Caution:</b> The job that is created below will take a long time to run (roughly 90 minutes) and will incur simulation costs up to $40 for running tasks on the SV1 managed simulator. Please uncomment and run the job only if you are comfortable with the time and cost.
# </div>
# +
# from braket.aws import AwsQuantumJob
# image_uri = "<aws_account_id>.dkr.ecr.<your_region>.amazonaws.com/amazon-braket-my-qtc:latest"
# job = AwsQuantumJob.create(
# device="arn:aws:braket:::device/quantum-simulator/amazon/sv1",
# source_module="algorithm_script.py",
# entry_point="algorithm_script:main",
# wait_until_complete=False,
# job_name="my-aws-job",
# image_uri=image_uri,
# )
# -
# ## 4 Evaluate results
# Once training is completed, we can evaluate how our quantum model performs. First, we initialize the CCQC model.
# +
# Rebuild the CCQC classifier locally so we can run inference with the trained weights.
from algorithm_script import CCQC
device="arn:aws:braket:::device/quantum-simulator/amazon/sv1"  # SV1 on-demand simulator ARN
qml_model=CCQC(nwires = 9, device=device)  # 9-wire circuit model (definition in algorithm_script.py)
# -
# We then retrieve the trained weights from <code>job.result()</code>.
# The cell completes when the job finishes. It may take 90 minutes.
# Blocks until the hybrid job completes (may take ~90 minutes), then pulls the trained weights from its output dict.
weights = job.result()['weights']
# If we lose the <code>job</code> variable, we can always retrieve it by its arn which can be found in the Braket console.
# +
# job_arn = "your-job-arn"
# job = AwsQuantumJob(job_arn)
# -
# Using the trained weights, we can make the prediction for each sentence with the <code>predict</code> function of our model. See the [algorithm script](algorithm_script.py) for definitions.
# Print each training sentence alongside its true label and the model's prediction.
for idx in range(4):
    sentence = banana_string[idx]
    print(sentence)
    prediction = qml_model.predict(*weights, data=data[idx])
    print("label: {} predict:{}".format(label[idx], prediction))
    print()
# We can also test our quantum model on a sentence it has not seen.
# Evaluate the model on a sentence it has not seen during training.
test_string = "A banana a day keeps the doctor away."
test_data = nlp(test_string).vector  # sentence embedding (assumes `nlp` pipeline was loaded earlier -- TODO confirm)
test_label = 1  # ground-truth class for this sentence
print(test_string)
pred = qml_model.predict(*weights, data=test_data)
print("label: {} predict:{}".format(test_label, pred))
# ## Summary
# In this notebook, we demonstrated a use case that requires Python packages not supported by any of the pre-configured containers provided by Braket Jobs. We have learned the steps to prepare a Dockerfile and build our own container that supports our use case. Using Braket Jobs with BYOC provides the flexibility of defining custom environments and the convenience of switching between environments for different applications.
| examples/hybrid_jobs/3_Bring_your_own_container/bring_your_own_container.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Project : Holiday weather
#
#
# There is nothing I like better than taking a holiday. In this project I am going to use the historic weather data from the Weather Underground for London to try to predict two good weather weeks to take off as holiday. Of course the weather in the summer of 2016 may be very different to 2014 but it should give some indication of when would be a good time to take a summer break.
#
# ## Getting the data
#
# Weather Underground keeps historical weather data collected in many airports around the world. Right-click on the following URL and choose 'Open Link in New Window' (or similar, depending on your browser):
#
# http://www.wunderground.com/history
#
# When the new page opens start typing 'LHR' in the 'Location' input box and when the pop up menu comes up with the option 'LHR, United Kingdom' select it and then click on 'Submit'.
#
# When the next page opens with London Heathrow data, click on the 'Custom' tab and select the time period From: 1 January 2014 to: 31 December 2014 and then click on 'Get History'. The data for that year should then be displayed further down the page.
#
# You can copy each month's data directly from the browser to a text editor like Notepad or TextEdit, to obtain a single file with as many months as you wish.
#
# Weather Underground has changed in the past the way it provides data and may do so again in the future.
# I have therefore collated the whole 2014 data in a provided CSV file in the project folder (note: this notebook actually loads the equivalent Moscow dataset, 'Moscow_SVO_2014.csv', below).
#
# Now load the CSV file into a dataframe making sure that any extra spaces are skipped:
# +
import warnings
# Silence pandas FutureWarnings so the notebook output stays readable.
warnings.simplefilter('ignore', FutureWarning)
import pandas as pd
# NOTE(review): the narrative above refers to 'London_2014.csv', but this cell
# loads Moscow (SVO) data -- confirm which dataset is intended.
# skipinitialspace=True drops spaces after delimiters so column names are clean.
moscow = pd.read_csv('Moscow_SVO_2014.csv', skipinitialspace=True)
# -
# ## Cleaning the data
# First we need to clean up the data. I'm not going to make use of `'WindDirDegrees'` in my analysis, but you might in yours so we'll rename `'WindDirDegrees< br />'` to `'WindDirDegrees'`.
# Inspect the raw frame before cleaning.
moscow.head()
# The scraped header carries a stray HTML tag; normalise the column name.
moscow = moscow.rename(columns={'WindDirDegrees<br />' : 'WindDirDegrees'})
# remove the `<br />` html line breaks from the values in the `'WindDirDegrees'` column.
# BUGFIX: str.rstrip('<br />') strips any trailing run of the characters
# {'<', 'b', 'r', ' ', '/', '>'} rather than the literal suffix, which can
# silently mangle values; str.replace removes exactly the '<br />' substring.
moscow['WindDirDegrees'] = moscow['WindDirDegrees'].str.replace('<br />', '', regex=False)
# and change the values in the `'WindDirDegrees'` column to `float64`:
moscow['WindDirDegrees'] = moscow['WindDirDegrees'].astype('float64')
# We definitely need to change the values in the `'Date'` column into values of the `datetime64` date type.
moscow['Date'] = pd.to_datetime(moscow['Date'])
# We also need to change the index from the default to the `datetime64` values in the `'Date'` column so that it is easier to pull out rows between particular dates and display more meaningful graphs:
moscow.index = moscow['Date']
# ## Finding a summer break
#
# According to meteorologists, summer extends for the whole months of June, July, and August in the northern hemisphere and the whole months of December, January, and February in the southern hemisphere. So as I'm in the northern hemisphere I'm going to create a dataframe that holds just those months using the `datetime` index, like this:
# Meteorological summer in the northern hemisphere: 1 June - 31 August.
from datetime import datetime
summer = moscow.loc[datetime(2014,6,1) : datetime(2014,8,31)]
# I now look for the days with warm temperatures.
summer[summer['Mean TemperatureC'] >= 23]  # days whose daily mean reached 23 C or more
# Summer 2014 in Moscow: there are about 13 days with temperatures of 23 Celsius or higher. Best to see a graph of the temperature and look for the warmest period.
#
# So next we tell Jupyter to display any graph created inside this notebook:
# %matplotlib inline
# Now let's plot the `'Mean TemperatureC'` for the summer:
summer['Mean TemperatureC'].plot(grid=True, figsize=(10,5))
# Well looking at the graph the second half of July into 1st half of August looks good for mean temperatures over 22.5 degrees C so let's also put precipitation on the graph too:
summer[['Mean TemperatureC', 'Precipitationmm']].plot(grid=True, figsize=(10,5))
# The second half of July into 1st half of August is still looking good, with no heavy rain. Let's have a closer look by just plotting mean temperature and precipitation for July and August
# Narrow the window to 21 July - 14 August for a detailed look.
july_aug = summer.loc[datetime(2014,7,21) : datetime(2014,8,14)]
july_aug[['Mean TemperatureC', 'Precipitationmm']].plot(grid=True, figsize=(10,5))
# July ending(from 27th) into 1st half of August looks pretty good, the 7th to 8th and 13th to 14th are days when the mean temperature drops below 20 degrees.
# ## Conclusions
#
# The graphs have shown the volatility of a Moscow summer, but a couple of weeks were found when the weather wasn't too bad in 2014. Of course this is no guarantee that the weather pattern will repeat itself in future years. To make a sensible prediction we would need to analyse the summers for many more years. By the time you have finished this course you should be able to do that.
| Odomero_WT_21_174/Data Analysis-Pandas-2/Project_.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: RPyc-Python
# language: Python
# name: rpyc
# ---
# ---
#
# title: Linux UART 的使用
#
# keywords: maixpy3, UART, Linux
#
# desc: maixpy3 doc: UART 的使用
#
# ---
#
# ## UART 使用教程
#
# 在 Linux 系统中,串口是以设备的形式存在的,所使用的方式和原来的单片机方式有所不同。这是系统标准的 UART 通讯,和 Linux 系统中的串口操作相似。下面以 MaixII-Dock 为例子,来简单的简述一下如何使用 UART。
#
# ### 准备
# 通过查看开发板的管脚定义图,确定需要使用的 UART 通道。下面的代码是使用 MaixII-Dock 的 UART-1 通道
#
# 
#
# ### 开始
import serial
ser = serial.Serial("/dev/ttyS1",115200) # open UART-1 at 115200 baud
print('serial test start ...')
# BUGFIX: corrected the typo "Wrold" -> "World" in the greeting sent over the wire.
ser.write(b"Hello World !!!\n") # send the greeting to the peer device
# Echo loop: read one newline-terminated line at a time and write it straight back.
while True:
    ser.setDTR(True)   # assert DTR/RTS around each transfer
    ser.setRTS(True)
    tmp = ser.readline()  # blocks until a full line arrives
    print(tmp)
    ser.write(tmp)        # echo the received bytes back
    ser.setDTR(False)
    ser.setRTS(False)
# 代码中的 `/dev/ttyS1` 是指串口通道1,不同的开发板,串口的表示方式不一样,请自行查看对应开发板的串口表达方式
#
# 这是标准 Python 的串口库,更多的使用查看 [Python serial](https://pypi.org/project/pyserial/)
#
# ## UART 用途
#
# 这是操作系统的标准 UART,可以和单片机进行串口通讯,也可以对带有串口协议的设备、外设通讯。
| docs/soft/maixpy3/zh/usage/hardware/UART.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.0 64-bit (''anaconda3'': virtualenv)'
# language: python
# name: python37064bitanaconda3virtualenvdc59f3b7c1d64353bf7dcc6d7e32f36c
# ---
# +
# Import Modules
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
#modeling tools
import statsmodels.api as sm
import lightgbm as lgb
from sklearn.tree import DecisionTreeRegressor, DecisionTreeClassifier
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier, RandomForestRegressor
from sklearn.metrics import mean_squared_error, confusion_matrix, auc, roc_auc_score, roc_curve, log_loss, make_scorer
from sklearn.model_selection import cross_val_score, train_test_split, GridSearchCV
# %matplotlib inline
plt.style.use('dark_background')
# %load_ext autoreload
# %autoreload 2
pd.set_option('display.min_row', 15)
pd.set_option('display.max_column', 300)
pd.set_option('display.max_colwidth', 300)
# +
# Load the raw collisions, the pre-cleaned frame, the traffic-circle inventory,
# and the merged SDOT/WSDOT dataset.
original = pd.read_csv('../data/Collisions.csv')
clean_data = pd.read_pickle('../data/processed/cleaned_data.pkl')
circles = pd.read_csv('../data/Traffic_Circles.csv')
all_sdot = pd.read_csv('../data/processed/all_sdot_merged_with_wsdot_080820.csv', low_memory=False)
# -
all_sdot.head()
# ## Traffic Circles
# no null values
circles.info()
circles.head(2)
# Lower-case the column names for consistent access below.
circles.columns = circles.columns.map(str.lower)
# +
# collisions that happened at a traffic circle: 1449
# some traffic circles had more, so about 593 traffic circles make up the 1613 accidents
# NOTE(review): the two counts quoted above (1449 vs 1613) disagree -- re-check which is correct.
circle_accidents = clean_data[clean_data['INTKEY'].isin(circles['intkey'])].copy()
# -
# ## Choose columns to merge
circle_accidents.columns = circle_accidents.columns.map(str.lower)
# reset index to extract year
circle_accidents.reset_index(inplace=True)
# IDIOM: the .dt accessor extracts date parts for the whole column at once,
# replacing the slower per-row .apply(lambda x: x.year) calls; values are identical.
circle_accidents['year'] = circle_accidents['Datetime'].dt.year
circle_accidents['month'] = circle_accidents['Datetime'].dt.month
circle_accidents['hour'] = circle_accidents['Datetime'].dt.hour
circle_accidents.head(2)
circle_accidents['crosswalkkey'].value_counts()
circle_accidents.columns
# +
'''
KEEP COLUMNS: [['intkey', 'location', 'year', 'month', 'hour', 'pedcount', 'pedcylcount',
                'vehcount', 'injuries', 'seriousinjuries', 'fatalities', 'inattentionind_y']]
'''
# Keep only the columns needed for the circle-level aggregation.
temp = circle_accidents[['intkey', 'location', 'year', 'month', 'hour', 'pedcount', 'pedcylcount', 'vehcount',
                         'injuries', 'seriousinjuries', 'fatalities', 'inattentionind_y']].copy()
# +
# group by intkey and year to keep accidents that happen at intersections each year
# index is reset to be about to use intkey to merge
circle_acc = temp.groupby(['intkey', 'year', 'month', 'hour']).sum().reset_index()
circle_acc.head()
# -
circle_acc.shape
# +
# merge collisions data with circles_acc to obtain dataset of all circles with labels of collisions or none
# Left join keeps every traffic circle; circles with no collision get NaNs.
circles_collisions = circles.merge(circle_acc, how='left', on='intkey')
# +
# intkey has duplicates for the different years
circles_collisions[circles_collisions['intkey'] == 36723]
# +
# about 76.06% of traffic circles that had collisions resulted in an injury
print('Percent of traffic circles with incidents: ',
      round((~circles_collisions['year'].isnull()).sum() / (len(circles_collisions['year']))*100 , 2),'%')
# -
circles_collisions.isnull().sum()
# +
# Drop the columns with all missing values and fill collisions information with 0 for not having collisions
# decide what to do with year
# fill_cols = [['pedcount', 'pedcylcount', 'vehcount', 'injuries',
#               'seriousinjuries', 'fatalities', 'inattentionind_y']]
# NOTE(review): this fills NaNs with the *string* '0' (later cast to float via
# astype), and passes axis=1 which is irrelevant for a scalar fill -- confirm
# numeric 0 was not intended.
circles_collisions.fillna('0', inplace=True, axis=1)
circles_collisions.drop(columns=['maintenance_agreement', 'overridecomment'], inplace=True)
# -
circles_collisions.isnull().sum()
# ## EDA and OHE
circles_collisions.head()
# - X and Y is not the actual coordinates. *drop*
# - OBJECTID not needed, there are 2 other unique identifiers. *drop*
# - COMPKEY is all unique. could be different matches than INTKEY
# - UNITID is simply another identifier. *drop*
# - CONDITION has 4 values, one is nothing, check those 2 row out.
# - CONDITION_ASSESSMENT_DATE can be dropped since there is another. *drop*
# - CURRENT_STATUS and OWNERSHIP can be dropped, only one value. *drop*
# - PRIMARYDISTRICTCD can be OHE, could be useful for separating by area
# - SECONDARYDISTRICTCD can be dropped, basically all missing. *drop*
# - COMPTYPE can be dropped, all the same. intersections have different types? circles is 16? *drop*
# - CURRENT_STATUS_DATE mainly same year, not useful. drop
# - INSTALL_DATE can be taken for its year just to see how many are built and when
# - INSTALL_PURPOSE has way too much missing. however, check out rows with 'SAFETY'
# - all with 'SAFETY' and 'CALMING' has basically all had accidents. But then again, most traffic circles do (about 56%)
# - INTKEY needs for merging with intersections
# - LANDSCAPING, SDOT_INITIATED, SEWER_ACCESS_COVER, TRCSIZE, SURVEY_MONUMENT is very imbalanced but has only 2 values
# - SDOT_INITIATED and SEWER_ACCESS_COVER can be dropped
# - deeper look into LANDSCAPING, TRCSIZE and SURVEY_MONUMENT before dropping
# - MAINTAINED_BY, NO_LAND_REASON, OVERRIDEYN, OWNERSHIP_DATE *drop*
# - TRCSHAPE has 4 values, 1 empty. can make into circle vs non-circle
# - UNITTYPE only 1 value. *drop*
# - SHAPE_LNG and SHAPE_LAT can be used for very precise merging
# Columns judged uninformative in the notes above (identifiers, constants,
# mostly-missing fields).
to_drop = ['x', 'y', 'objectid', 'unitid', 'current_status_date', 'condition_assessment_date', 'current_status', 'ownership',
           'secondarydistrictcd', 'comptype', 'install_purpose', 'sdot_initiated', 'sewer_access_cover',
           'maintained_by', 'no_land_reason', 'overrideyn', 'ownership_date', 'unittype']
circles_collisions.drop(columns=to_drop, inplace=True)
circles_collisions.head(2)
# ### Extract dates
# change to datetime first for easy extraction
# IDIOM: pd.to_datetime(series) parses the whole column vectorised instead of
# invoking the parser once per element through .apply().
circles_collisions['install_date'] = pd.to_datetime(circles_collisions['install_date'])
# +
# a lot of them installed in different years. could be useful. keep.
circles_collisions['install_date'].dt.year.value_counts()
# -
circles_collisions['installed'] = circles_collisions['install_date'].dt.year
# +
# drop the install_date and current_status_date column
circles_collisions.drop(columns=['install_date'], inplace=True)
# -
circles_collisions.head(2)
# ### Encode binary columns
# Map the 'Y'/'N' flags to 1/0 integers.
circles_collisions[['landscaping', 'survey_monument']] = (circles_collisions[['landscaping', 'survey_monument']] == 'Y').astype(int)
# ### Check column with empty value (but not NaN)
circles_collisions[circles_collisions['trcshape'] == ' ']
# +
# can fill trcshape based on majority shape built in same year
# in 1970, most are circles
# in 2009, most are circles but can check by district. most are circles in 2009, district 5
circles_collisions['trcshape'] = circles_collisions['trcshape'].apply(lambda x: 'CRC' if x == ' ' else x)
# -
circles_collisions['trcshape'].value_counts()
circles_collisions[circles_collisions['condition'] == ' '].head()
# +
# can fill condition based on majority checked in year and installed in same year
# all district installed and current_status_date in 1970 is in good condition
circles_collisions['condition'] = circles_collisions['condition'].apply(lambda x: 'GOOD' if x == ' ' else x)
# -
circles_collisions['condition'].value_counts()
circles_collisions.columns
# +
# reorder columns
# decided to not keep compkey
circles_collisions = circles_collisions[['intkey', 'shape_lat', 'shape_lng', 'unitdesc','primarydistrictcd',
                                         'installed', 'landscaping', 'survey_monument', 'trcsize', 'trcshape',
                                         'condition','year', 'month', 'hour', 'pedcount', 'pedcylcount',
                                         'vehcount', 'injuries','seriousinjuries', 'fatalities',
                                         'inattentionind_y']]
# -
circles_collisions.groupby('intkey').count()
# +
# circles_collisions.to_csv('../data/processed/circles_collisions.csv')
# -
# ## EDA: Useful Columns
# +
# decide which columns aren't useful
circles_collisions.columns
# -
circles_collisions.info()
# Make sure numeric columns are not object type.
# +
numeric_cols = {'year':'int', 'pedcount':'float', 'pedcylcount':'float', 'vehcount':'float',
                'injuries':'float','seriousinjuries':'float', 'fatalities':'float', 'inattentionind_y':'float'}
circles_collisions = circles_collisions.astype(numeric_cols)
# -
circles_collisions.head()
circles_collisions.info()
# +
# Identifier/coordinate columns excluded from the modelling frame.
eliminate = ['unitdesc', 'intkey', 'shape_lng', 'shape_lat']
data = circles_collisions.drop(columns=eliminate).copy()
# -
data.head()
# NOTE(review): passing the Series positionally is deprecated in newer seaborn;
# prefer sns.countplot(x=data['injuries']) -- confirm the seaborn version used.
sns.countplot(data['injuries'])
data.groupby('primarydistrictcd').sum()
# # Circles Severity Metric
| notebooks/cindy_trafficcrc.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import matplotlib.pyplot as plt
import scipy.optimize as sopt
from pysimu import ode2numba, ssa
from ipywidgets import *
# -
# %matplotlib notebook
# ## System definition
# +
S_base = 100.0e6  # base power [VA]
U_base = 20e3  # base voltage [V]
Z_base = U_base**2/S_base  # base impedance [ohm]
r_km = 0.127 # ohm/km
x_km = 0.113 # ohm/km
lenght = 1.0  # line length in km (variable name keeps the original misspelling; callers use it below)
R = r_km*lenght/Z_base  # per-unit line resistance
X = x_km*lenght/Z_base  # per-unit line reactance
Z = R +1j*X  # per-unit line impedance
Y = 1.0/Z  # per-unit line admittance
G_s_inf, B_s_inf = Y.real, Y.imag  # conductance/susceptance toward the infinite bus
# Model definition consumed by pysimu's ode2numba code generator:
#   'params'  -- machine/network constants,
#   'f'       -- differential equations (one string per state derivative),
#   'g'       -- algebraic equations as 'name@expression' pairs,
#   'u'       -- inputs, 'y'/'y_ini' -- algebraic variables, 'h' -- outputs.
sys = { 't_end':20.0,'Dt':0.01,'solver':'forward-euler', 'decimation':10, 'name':'ds_ini',
       'models':[{'params':
                   {'H':6.0,
                    'X_s': 0.3,
                    'R_s': 0.1,
                    'K_p' : 1.0,
                    'T_pi' : 10.0,
                    'K_q':1.0,
                    'T_q':1.0,
                    'K_d':1.0,
                    'Omega_b' : 2*np.pi*60,
                    'B_s0':0.0,
                    'G_s_inf':G_s_inf,
                    'theta_inf': 0.0,
                    'K_a':200.0,
                    'K_stab':10,
                    'B_s_inf':B_s_inf,
                    'G_s0':0.0,
                    'V_inf':1.0
                    },
        'f':[
             'ddelta = Omega_b*(omega - 1)',
             'domega = 1.0/(2*H)*(p_m - p_e - K_d*(omega - 1))',
             'dxi_q = epsilon_q'
            ],
        'g':['ur@-ur + V_s*cos(theta_s)', # polar to real
             'ui@-ui + V_s*sin(theta_s)', # polar to imag
             'cosu@-cosu +ur/V_s', # ideal PLL
             'sinu@-sinu +ui/V_s', # ideal PLL
             'v_s_d@-v_s_d + ur*cosu + ui*sinu', # original park
             'v_s_q@-v_s_q - ur*sinu + ui*cosu', # original park
             'epsilon_q@-epsilon_q + Q_s_ref - Q_s',
             'p_m@p_m - p_m_0',
             'e @ -e + K_q*(epsilon_q + xi_q/T_q) ', #
             'e_d@ e_d - e*cos(delta) ', # V
             'e_q@ e_q - e*sin(delta) ', # V
             'i_s_d@ -e_d + R_s*i_s_d - X_s*i_s_q + v_s_d', # VSC or SYM equation
             'i_s_q@ -e_q + R_s*i_s_q + X_s*i_s_d + v_s_q', # VSC or SYM equation
             'p_e@-p_e+ i_s_d*e_d + i_s_q*e_q', # active power equation
             'P_s@-P_s+ i_s_d*v_s_d + i_s_q*v_s_q', # active power equation
             'Q_s@-Q_s+ i_s_d*v_s_q - i_s_q*v_s_d', # reactive power equation
             'V_s@(G_s0 + G_s_inf)*V_s**2 - V_inf*(G_s_inf*cos(theta_s - theta_inf) + B_s_inf*sin(theta_s - theta_inf))*V_s - P_s',
             'theta_s@(-B_s0 - B_s_inf)*V_s**2 + V_inf*(B_s_inf*cos(theta_s - theta_inf) - G_s_inf*sin(theta_s - theta_inf))*V_s - Q_s',
            ],
        'u':{'p_m_0':0.8,'Q_s_ref':0.1},
        'y':['ur','ui','cosu','sinu','v_s_d','v_s_q','epsilon_q','p_m','e','e_d','e_q','i_s_d','i_s_q','p_e','P_s','Q_s','V_s','theta_s'],
        'y_ini':['ur','ui','cosu','sinu','v_s_d','v_s_q','epsilon_q','p_m','e','e_d','e_q','i_s_d','i_s_q','p_e','P_s','Q_s','V_s','theta_s'],
        'h':[
             'omega'
            ]}
        ],
    # NOTE(review): the perturbation targets 'V_ref', which is not among the
    # declared inputs in 'u' above -- confirm the intended variable name.
    'perturbations':[{'type':'step','time':1.0,'var':'V_ref','final':1.01} ]
    }
x,f = ode2numba.system(sys) ;  # generates the ds_ini module imported in the next cell
# -
import ds_ini
# Instantiate the model class generated by ode2numba above.
syst = ds_ini.ds_ini_class()
x0 = np.ones(syst.N_x+syst.N_y)  # initial guess: ones for all state and algebraic variables
x0[0] = 0  # except the first state (delta, per the 'f' list above)
# Root-solve the combined model equations for a steady operating point.
s = sopt.fsolve(syst.run_problem,x0 )
print(s)
s[7]
# +
# Set up an empty eigenvalue scatter plot that update() redraws in place.
fig,axes = plt.subplots(nrows=1)
points = axes.plot([],[],'o')
axes.set_xlim(-5,1)
axes.set_ylim(-5,5)
axes.grid(True)
def Jac(x):
    """Assemble the full Jacobian [[Fx, Fy], [Gx, Gy]] from the model struct.

    Note: defined for reference; not called elsewhere in this cell.
    """
    J=np.vstack((np.hstack((syst.struct[0].Fx,syst.struct[0].Fy)),np.hstack((syst.struct[0].Gx,syst.struct[0].Gy))))
    return J
def update(p_m_0 = 0.9, Q_s_ref=0.0, K_d=0.0):
    """Recompute the equilibrium for the given set points and redraw the eigenvalue map.

    p_m_0   -- mechanical power set point
    Q_s_ref -- reactive power reference
    K_d     -- damping coefficient
    """
    syst.struct[0].p_m_0 = p_m_0
    syst.struct[0].Q_s_ref = Q_s_ref
    syst.struct[0].K_d = K_d
#     if T_pi <0.001: T_pi = 0.001
#     syst.struct[0].T_pi = T_pi
    # NOTE(review): this x0 is immediately overwritten by the next line.
    x0 = np.vstack([syst.struct[0].x, syst.struct[0].y])
    x0 = np.ones(syst.N_x+syst.N_y)
    #x0[0,0] = 0.0
    # NOTE(review): 'frime' is computed but never used below.
    frime = np.vstack((syst.struct[0].f,syst.struct[0].g))
    s = sopt.fsolve(syst.run_problem,x0 )
    # Write the solved state/algebraic values back into the model struct.
    syst.struct[0].x[:,0] = s[0:syst.N_x]
    syst.struct[0].y[:,0] = s[syst.N_x:(syst.N_x+syst.N_y)]
    #print(np.linalg.det(syst.struct[0].Gy))
    e,v = np.linalg.eig(ssa.eval_A(syst))
    points[0].set_xdata(e.real)
    points[0].set_ydata(e.imag/np.pi/2)  # imaginary part converted from rad/s to Hz
    delta = np.rad2deg(syst.struct[0].x[0,0])
    V_s = syst.struct[0].y[-2,0]
    print(f'delta = {delta:.2f}, V_s = {V_s:.2f}, zeta = {-100*e[0].real/abs(e[0]):.2f} %')
    fig.canvas.draw()
update()
# -
# Interactive exploration of the eigenvalue map as the operating point changes.
# NOTE(review): Q_s_ref's initial value=0.8 lies outside its slider range
# [-0.5, 0.5], and K_d's value=0.8 differs from update()'s default 0.0 --
# ipywidgets will clamp/adjust these; confirm the intended initial values.
interact(update, p_m_0 =widgets.FloatSlider(min=0.0,max=1.2,step=0.1,value=0.8, continuous_update=True),
                Q_s_ref =widgets.FloatSlider(min=-0.5,max=0.5,step=0.1,value=0.8, continuous_update=False),
                K_d =widgets.FloatSlider(min=0.0,max=50.0,step=0.1,value=0.8, continuous_update=False),
                );
| examples/notebooks/ds_template.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# + [markdown] ein.tags="worksheet-0" slideshow={"slide_type": "-"}
# # Variability in the Arm Endpoint Stiffness
# + [markdown] ein.tags="worksheet-0" slideshow={"slide_type": "-"}
# In this notebook, we will calculate the feasible endpoint stiffness of a
# simplified arm model for an arbitrary movement. The calculation of the feasible
# muscle forces and the generation of the movement is presented in
# feasible_muscle_forces.ipynb. The steps are as follows:
#
# 1. Generate a movement using task space projection
# 2. Calculate the feasible muscle forces that satisfy the movement
# 3. Calculate the feasible endpoint stiffness
# + ein.hycell=false ein.tags="worksheet-0" slideshow={"slide_type": "-"}
# notebook general configuration
# %load_ext autoreload
# %autoreload 2
# imports and utilities
import numpy as np
import sympy as sp
from IPython.display import display, Image
sp.interactive.printing.init_printing()
import logging
logging.basicConfig(level=logging.INFO)
# plot
# %matplotlib inline
# Star import keeps the notebook terse: subplots() and rcParams below come from pyplot.
from matplotlib.pyplot import *
rcParams['figure.figsize'] = (10.0, 6.0)
# utility for displaying intermediate results
enable_display = True
def disp(*statement):
    """Forward the given objects to IPython's display() when output is enabled."""
    if not enable_display:
        return
    display(*statement)
# + [markdown] ein.tags="worksheet-0" slideshow={"slide_type": "-"}
# ## Step 1: Task Space Inverse Dynamics Controller
# + [markdown] ein.tags="worksheet-0" slideshow={"slide_type": "-"}
# The task space position ($x_t$) is given as a function of the generalized
# coordinates ($q$)
#
#
# \begin{equation}\label{equ:task-position}
# x_t = g(q), x_t \in \Re^{d}, q \in \Re^{n}, d \leq n
# \end{equation}
#
#
# The first and second derivatives with respect to time (the dot notation depicts
# a derivative with respect to time) are given by
#
#
# \begin{equation}\label{equ:task-joint-vel}
# \dot{x}_t = J_t(q) \dot{q}, \; J_t(q) =
# \begin{bmatrix}
# \frac{\partial g_1}{\partial q_1} & \cdots & \frac{\partial g_1}{\partial q_n} \\
# \vdots & \ddots & \vdots \\
# \frac{\partial g_d}{\partial q_1} & \cdots & \frac{\partial g_d}{\partial q_n}
# \end{bmatrix}
# \in \Re^{d\times n}
# \end{equation}
#
#
# \begin{equation}\label{equ:task-joint-acc}
# \ddot{x}_t = \dot{J}_t\dot{q} + J_t\ddot{q}
# \end{equation}
#
#
# The task Jacobian defines a dual relation between motion and force
# quantities. The virtual work principle can be used to establish the link between
# task and join space forces (augmented by the null space)
#
#
# \begin{equation}\label{equ:joint-task-forces-vw}
# \begin{aligned}
# \tau^T \delta q &= f_t^T \delta x_t \\
# \tau^T \delta q &= f_t^T J_t \delta q \\
# \tau &= J_t^T f_t + N_{J_t} \tau_0, \; N_{J_t} = (I - J_t^T \bar{J}_t^T)
# \end{aligned}
# \end{equation}
#
#
# where $N_{J_t} \in \Re^{n \times n}$ represents the right null space of $J_t$
# and $\bar{J}_t$ the generalized inverse. Let the joint space equations of motion
# (EoMs) have the following form
#
#
# \begin{equation}\label{equ:eom-joint-space}
# \begin{gathered}
# M(q) \ddot{q} + f(q, \dot{q}) = \tau \\
# f(q, \dot{q}) = \tau_g(q) + \tau_c(q, \dot{q}) + \tau_{o}(q, \dot{q})
# \end{gathered}
# \end{equation}
#
#
# where $M \in \Re^{n \times n}$ denotes the symmetric, positive definite joint
# space inertia mass matrix, $n$ the number of DoFs of the model and ${q, \dot{q},
# \ddot{q}} \in \Re^{n}$ the joint space generalized coordinates and their
# derivatives with respect to time. The term $f \in \Re^{n}$ is the sum of all
# joint space forces, $\tau_g \in \Re^{n}$ is the gravity, $\tau_c \in \Re^{n}$
# the Coriolis and centrifugal and $\tau_{o} \in \Re^{n}$ other generalized
# forces. Term $\tau \in \Re^{n}$ denotes a vector of applied generalized forces
# that actuate the model.
#
#
# We can project the joint space EoMs in the task space by multiplying both sides
# from the left with $J_t M^{-1}$
#
#
# \begin{equation}\label{equ:eom-task-space}
# \begin{gathered}
# J_t M^{-1}M \ddot{q} + J_t M^{-1}f = J_t M^{-1}\tau \\
# \ddot{x}_t - \dot{J}_t\dot{q} + J_t M^{-1}f = J_t M^{-1} (J^T_t f_t + N_{J_t} \tau_0) \\
# \Lambda_t(\ddot{x}_t + b_t) + \bar{J}_t^T f = f_t
# \end{gathered}
# \end{equation}
#
#
# where $\Lambda_t=(J_tM^{-1}J_t^T)^{-1} \in \Re^{d \times d}$ represents the task
# space inertia mass matrix, $b_t = - \dot{J}_t\dot{q}$ the task bias term and
# $\bar{J}_t^T = \Lambda_m RM^{-1} \in \Re^{d \times n}$ the generalized inverse
# transpose of $J_t$ that is used to project joint space quantities in the task
# space. Note that $\bar{J}_t^T N_{J_t} \tau_0 = 0$.
#
# The planning will be performed in task space in combination with a Proportional
# Derivative (PD) tracking scheme
#
# \begin{equation}\label{equ:pd-controller}
# \ddot{x}_t = \ddot{x}_d + k_p (x_d - x_t) + k_d (\dot{x}_d - x_t)
# \end{equation}
#
# where $x_d, \dot{x}_d, \ddot{x}_d$ are the desired position, velocity and
# acceleration of the task and $k_p = 50, k_d = 5$ the tracking gains.
#
# The desired task goal is derived from a smooth sigmoid function that produces
# bell-shaped velocity profiles in any direction around the initial position of
# the end effector
#
#
# \begin{equation}\label{equ:sigmoid}
# \begin{gathered}
# x_d(t) = [x_{t,0}(0) + a (tanh(b (t - t_0 - 1)) + 1) / 2, x_{t,1}(0)]^T, \;
# \dot{x}_d(t) = \frac{d x_d(t)}{dt}, \; \ddot{x}_d(t) = \frac{d \dot{x}_d(t)}{dt} \\
# x_d^{'} = H_z(\gamma) x_d, \; \dot{x}_d^{'} = H_z(\gamma) \dot{x}_d,
# \; \ddot{x}_d^{'} = H_z(\gamma) \ddot{x}_d
# \end{gathered}
# \end{equation}
#
# where $x_{t, 0}$, $x_{t, 1}$ represent the $2D$ components of $x_t$, $a = 0.3$,
# $b = 4$ and $t_0 = 0$. Different directions of movement are achieved by
# transforming the goals with $H_z(\gamma)$, which defines a rotation around
# the $z$-axis of an angle $\gamma$.
# + ein.hycell=false ein.tags="worksheet-0" slideshow={"slide_type": "-"}
# import necessary modules
from model import ArmModel
from projection import TaskSpace
from controller import TaskSpaceController
from simulation import Simulation
# + ein.hycell=false ein.tags="worksheet-0" slideshow={"slide_type": "-"}
# construct model; gravity disabled to improve execution time during numerical
# integration. Note that if enabled, different PD gains are required to track the
# movement accurately.
model = ArmModel(use_gravity=0, use_coordinate_limits=1, use_viscosity=1)
# Substitute numeric parameter values into the symbolic model up front.
model.pre_substitute_parameters()
# + ein.hycell=false ein.tags="worksheet-0" slideshow={"slide_type": "-"}
# simulation parameters
t_end = 2.0
angle = np.pi # direction of movement
fig_name = 'results/feasible_stiffness/feasible_forces_ts180'
# define the end effector position in terms of q's
end_effector = sp.Matrix(model.ee)
disp('x_t = ', end_effector)
# task space controller
task = TaskSpace(model, end_effector)
controller = TaskSpaceController(model, task, angle=angle)
# numerical integration
simulation = Simulation(model, controller)
simulation.integrate(t_end)
# plot simulation results
fig, ax = subplots(2, 3, figsize=(15, 10))
simulation.plot_simulation(ax[0])
controller.reporter.plot_task_space_data(ax[1])
fig.tight_layout()
fig.savefig(fig_name + '.pdf', format='pdf', dpi=300)
fig.savefig(fig_name + '.eps', format='eps', dpi=300)
# + [markdown] ein.tags="worksheet-0" slideshow={"slide_type": "-"}
# ## Step 2: Calculation of the Feasible Muscle Force Space
# + [markdown] ein.tags="worksheet-0" slideshow={"slide_type": "-"}
# The feasible muscle forces are calculated below. Initially, the moment arm and
# maximum muscle force quantities are computed for each instance of the
# movement. Then the following inequality is formed assuming a linear muscle model
#
# \begin{equation}\label{equ:linear-muscle-null-space-inequality}
# \begin{gathered}
# f_m = f_{max} \circ a_m = f_m^{\parallel} +
# N_{R} f_{m0},\; 0 \preceq a_m \preceq 1
# \rightarrow \\
# \begin{bmatrix}
# - N_{R} \\
# \hdashline
# N_{R}
# \end{bmatrix}
# f_{m0} \preceq
# \begin{bmatrix}
# f_m^{\parallel} \\
# \hdashline
# f_{max} - f_m^{\parallel}
# \end{bmatrix} \\
# Z f_{m0} \preceq \beta
# \end{gathered}
# \end{equation}
#
# where $a_m \in \Re^{m}$ represents a vector of muscle activations, $f_{max} \in
# \Re^{m}$ a vector specifying the maximum muscle forces, $\circ$ the Hadamard
# (elementwise) product, $f_m^{\parallel}$ the particular muscle force solution
# that satisfies the action, $N_{R}$ the moment arm null space and $f_{m0}$ the
# null space forces.
#
# The next step is to sample the inequality $Z f_{m0} \leq \beta$. This is the
# bottleneck of the analysis. The *convex_bounded_vertex_enumeration* uses the
# lsr method, which is a vertex enumeration algorithm for finding the vertices
# of a polytope in $O(v m^3)$.
# + ein.hycell=false ein.tags="worksheet-0" slideshow={"slide_type": "-"}
# import necessary modules
from analysis import FeasibleMuscleSetAnalysis
# initialize feasible muscle force analysis from the recorded movement data
feasible_muscle_set = FeasibleMuscleSetAnalysis(model, controller.reporter)
# + [markdown] ein.tags="worksheet-0" slideshow={"slide_type": "-"}
# ## Step 3: Calculate the Feasible Task Stiffness
# + [markdown] ein.tags="worksheet-0" slideshow={"slide_type": "-"}
# In the following section, we will introduce a method for calculating the
# feasible muscle forces that satisfy the motion and the physiological muscle
# constraints. As the muscles are the main actors of the system, it is important
# to examine the effect of muscle redundancy on the calculation of limbs'
# stiffness.
#
# The muscle stiffness is defined as
#
# \begin{equation}\label{equ:muscle-stiffness}
# K_m = \frac{\partial f_m}{\partial l_{m}},\; K_m \in \Re^{m \times m}
# \end{equation}
#
# where $f_m \in \Re^{m}$ represents the muscle forces, $l_{m} \in \Re^{m}$ the
# musculotendon lengths and $m$ the number of muscles. The joint stiffness is
# defined as
#
# \begin{equation}\label{equ:joint-stiffness}
# K_j = \frac{\partial \tau}{\partial q},\; K_j \in \Re^{n \times n}
# \end{equation}
#
# where $\tau \in \Re^{n}$, $q \in \Re^{n}$ are the generalized forces and
# coordinates, respectively and $n$ the DoFs of the system. Finally, the task
# stiffness is defined as
#
# \begin{equation}\label{equ:task-stiffness}
# K_t = \frac{\partial f_t}{\partial x_t},\; K_t \in \Re^{d \times d}
# \end{equation}
#
# where $f_t \in \Re^{d}$ denotes the forces, $x_t \in \Re^{d}$ the positions and
# $d$ the DoFs of the task.
#
# The derivation starts with a model for computing the muscle stiffness matrix
# $K_m$. The two most adopted approaches are to either use the force-length
# characteristics of the muscle model or to approximate it using the definition of
# the short range stiffness, where the latter is shown to explain most of the
# variance in the experimental measurements. The short range stiffness is
# proportional to the force developed by the muscle ($f_m$)
#
# \begin{equation}\label{equ:short-range-stiffness}
# k_{s} = \gamma \frac{f_m}{l_m^o}
# \end{equation}
#
# where $\gamma = 23.4$ is an experimentally determined constant and $l_m^o$ the
# optimal muscle length. This definition will be used to populate the diagonal
# elements of the muscle stiffness matrix, whereas inter-muscle coupling
# (non-diagonal elements) will be assumed zero since it is difficult to measure
# and model in practice.
#
# The joint stiffness is related to the muscle stiffness through the following
# relationship
#
# \begin{equation}\label{equ:joint-muscle-stiffness}
# K_j = -\frac{\partial R^T}{\partial q} \bullet_2 f_m - R^T K_m R
# \end{equation}
#
# where the first term captures the varying effect of the muscle moment arm ($R
# \in \Re^{m \times n}$), while the second term maps the muscle space stiffness to
# joint space. The notation $\bullet_2$ denotes a product of a rank-3 tensor
# ($\frac{\partial R^T}{\partial q} \in \Re^{n \times m \times n}$, a 3D matrix)
# and a rank-1 tensor ($f_m \in \Re^{m}$, a vector), where the index $2$ specifies
# that the tensor dimensional reduction (by summation) is performed across the
# second dimension, resulting in a reduced rank-2 tensor of dimensions $n \times
# n$.
#
# In a similar manner, the task stiffness is related to the muscle stiffness
# through the following relationship
#
# \begin{equation}\label{equ:task-muscle-stiffness}
# K_t = -J_t^{+T} \left(\frac{\partial J_t^T}{\partial q} \bullet_2
# f_t + \frac{\partial R^T}{\partial q} \bullet_2 f_m + R^T
# K_m R\right) J_t^{+}
# \end{equation}
#
# where the task Jacobian matrix ($J_t \in \Re^{d \times n}$) describes the
# mapping from joint to task space ($\Re^{n} \rightarrow \Re^{d}$), $+$ stands for
# the Moore-Penrose pseudoinverse and $+T$ the transposed pseudoinverse operator.
# + [markdown] ein.tags="worksheet-0" slideshow={"slide_type": "-"}
# Algorithm for calculating the feasible joint stiffness:
#
# **Step 1:** Calculate the feasible muscle forces $f_m^{\oplus}$ that satisfy the
# task and the physiological muscle constraints
#
#
# **Step 2:** Calculate the muscle stiffness matrix $K_m$ using the short range
# stiffness model
#
#
# \begin{equation*}\label{equ:short-range-stiffness-2}
# k_s = \gamma \frac{f_m}{l_m^o},\; \gamma = 23.4
# \end{equation*}
#
#
# **Step 3:** Calculate the task $K_t$ and joint $K_j$ stiffness
#
# \begin{equation*}
# \begin{gathered}
# K_j = -\frac{\partial R^T}{\partial q} \bullet_2 f_m - R^T K_m R \\
# K_t = -J_t^{+T} \left(\frac{\partial J_t^T}{\partial q} \bullet_2
# f_t + \frac{\partial R^T}{\partial q} \bullet_2 f_m + R^T
# K_m R\right) J_t^{+}
# \end{gathered}
# \end{equation*}
# + ein.hycell=false ein.tags="worksheet-0" slideshow={"slide_type": "-"}
# import necessary modules
from analysis import StiffnessAnalysis
from util import calculate_stiffness_properties
# Output file prefix for the generated figures/animation.
base_name = 'results/feasible_stiffness/feasible_stiffness_ts180_'
# initialize stiffness analysis
stiffness_analysis = StiffnessAnalysis(model, task, controller.reporter,
                                       feasible_muscle_set)
# Calculate feasible stiffness over [0, t_end]; the trailing arguments
# (0.2, 500) are presumably a time step and a sample count — see
# util.calculate_stiffness_properties for their exact meaning.
calculate_stiffness_properties(stiffness_analysis, base_name, 0, t_end, 0.2, 500)
# + ein.hycell=false ein.tags="worksheet-0" slideshow={"slide_type": "-"}
# Display the animation produced by the analysis above.
Image(url=base_name + 'anim.gif')
# + [markdown] ein.tags="worksheet-0" slideshow={"slide_type": "-"}
# The left diagram shows the feasible major and minor axes of the endpoint
# stiffness using scaled ($\text{scaling} = 0.0006$) ellipses (ellipses are
# omitted for visibility reasons). The ellipse is a common way to visualize the
# task stiffness, where the major axis (red) of the ellipse is oriented along the
# maximum stiffness and the area is proportional to the determinant of $K_t$,
# conveying the stiffness amplitude. The stiffness capacity (area) is increased in
# the last pose, since the arm has already reached its final position and muscle
# forces are not needed for it to execute any further motion. The second diagram
# (middle) depicts the distribution of ellipse parameters (area and orientation
# $\phi$). Finally, the rightmost box plot shows the feasible joint stiffness
# distribution at three distinct time instants. Experimental measurements have
# shown that the orientation of stiffness ellipses varies in a range of about
# $30^{\circ}$. While our simulation results confirm this, they also reveal a
# tendency of fixation towards specific directions for higher stiffness
# amplitudes. The large variation of feasible stiffness verifies that this type of
# analysis conveys important findings that complement experimental observations.
| arm_model/feasible_task_stiffness.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import datetime
import time

# Read the target date (year/month/day) and time (hour:minutes:second).
year, month, day = input("Enter the date = ").split("/")
hour, minutes, second = input("Enter the Time = ").split(":")
# datetime.date expects (year, month, day); the original code passed the
# values reversed as (day, month, year), which raises ValueError for most
# dates or silently builds the wrong date.
schedule_date = datetime.date(int(year), int(month), int(day))
# Poll until the scheduled moment arrives, then play the alarm sound.
while True:
    now = time.localtime()
    if (datetime.date.today() == schedule_date
            and now.tm_hour == int(hour)
            and now.tm_min == int(minutes)
            and now.tm_sec == int(second)):
        # Path of a song/ringtone on your system (os.startfile is Windows-only).
        os.startfile("C:\\Users\\Sagar\\Music\\Chogada.mp3")
        break
    # Sleep briefly instead of busy-waiting at 100% CPU; polling every
    # 0.2 s cannot skip a whole second, so the target second is not missed.
    time.sleep(0.2)
# -
| Alarm Clock/alarmclock.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: py37_pytorch
# language: python
# name: conda-env-py37_pytorch-py
# ---
# %matplotlib inline
# # Build a Neural Network
#
# Neural networks comprise of layers/modules that perform operations on data.
# The [torch.nn](https://pytorch.org/docs/stable/nn.html) namespace provides all the building blocks you need to
# build your own neural network. Every module in PyTorch subclasses the [nn.Module](https://pytorch.org/docs/stable/generated/torch.nn.Module.html).
# A neural network is a module itself that consists of other modules (layers). This nested structure allows for
# building and managing complex architectures easily.
#
# In the following sections, we'll build a neural network to classify images in the FashionMNIST dataset.
import os
import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
# ## Get Device for Training
#
# We want to be able to train our model on a hardware accelerator like the GPU,
# if it is available. Let's check to see if
# [torch.cuda](https://pytorch.org/docs/stable/notes/cuda.html) is available, else we
# continue to use the CPU.
# Select the GPU when available, otherwise fall back to the CPU.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print('Using {} device'.format(device))
# ## Define the Class
#
# We define our neural network by subclassing `nn.Module`, and
# initialize the neural network layers in `__init__`. Every `nn.Module` subclass implements
# the operations on input data in the `forward` method.
class NeuralNetwork(nn.Module):
    """Simple fully-connected classifier for 28x28 images (10 classes)."""

    def __init__(self):
        super().__init__()
        # Collapse each 28x28 image into a 784-element vector (batch dim kept).
        self.flatten = nn.Flatten()
        # Three linear layers with ReLU activations in between — and one after
        # the output layer, matching the original architecture exactly.
        layers = [
            nn.Linear(28 * 28, 512),
            nn.ReLU(),
            nn.Linear(512, 512),
            nn.ReLU(),
            nn.Linear(512, 10),
            nn.ReLU(),
        ]
        self.linear_relu_stack = nn.Sequential(*layers)

    def forward(self, x):
        # Flatten first, then run the stack to obtain per-class logits.
        return self.linear_relu_stack(self.flatten(x))
# We create an instance of `NeuralNetwork`, and move it to the `device`, and print
# its structure.
#
#
model = NeuralNetwork().to(device)
print(model)
# To use the model, we pass it the input data. This executes the model's `forward`,
# along with some [background operations](https://github.com/pytorch/pytorch/blob/270111b7b611d174967ed204776985cefca9c144/torch/nn/modules/module.py#L866).
# Do not call `model.forward()` directly!
#
# Calling the model on the input returns a 10-dimensional tensor with raw predicted values for each class.
# We get the prediction densities by passing it through an instance of the `nn.Softmax` module.
# Draw one random 28x28 "image" directly on the target device.
X = torch.rand(1, 28, 28, device=device)
logits = model(X)
pred_probab = nn.Softmax(dim=1)(logits)  # normalize logits over the class dim
y_pred = pred_probab.argmax(1)  # index of the most likely class
print(f"Predicted class: {y_pred}")
# ## Model Layers
#
# Let's break down the layers in the FashionMNIST model. To illustrate it, we
# will take a sample minibatch of 3 images of size 28x28 and see what happens to it as
# we pass it through the network.
#
#
input_image = torch.rand(3,28,28)
print(input_image.size())
# ### nn.Flatten
#
# We initialize the [nn.Flatten](https://pytorch.org/docs/stable/generated/torch.nn.Flatten.html)
# layer to convert each 2D 28x28 image into a contiguous array of 784 pixel values (the minibatch dimension (at dim=0) is maintained).
#
#
flatten = nn.Flatten()
flat_image = flatten(input_image)
print(flat_image.size())
# ### nn.Linear
#
# The [linear layer](https://pytorch.org/docs/stable/generated/torch.nn.Linear.html)
# is a module that applies a linear transformation on the input using its stored weights and biases.
layer1 = nn.Linear(in_features=28*28, out_features=20)
hidden1 = layer1(flat_image)
print(hidden1.size())
# ### nn.ReLU
#
# Non-linear activations are what create the complex mappings between the model's inputs and outputs.
# They are applied after linear transformations to introduce *nonlinearity*, helping neural networks
# learn a wide variety of phenomena.
#
# In this model, we use [nn.ReLU](https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html) between our
# linear layers, but there are other activations to introduce non-linearity in your model.
print(f"Before ReLU: {hidden1}\n\n")
hidden1 = nn.ReLU()(hidden1)
print(f"After ReLU: {hidden1}")
# ### nn.Sequential
#
# [nn.Sequential](https://pytorch.org/docs/stable/generated/torch.nn.Sequential.html) is an ordered
# container of modules. The data is passed through all the modules in the same order as defined. You can use
# sequential containers to put together a quick network like `seq_modules`.
#
#
seq_modules = nn.Sequential(
    flatten,
    layer1,
    nn.ReLU(),
    nn.Linear(20, 10)
)
input_image = torch.rand(3,28,28)
logits = seq_modules(input_image)
# ### nn.Softmax
#
# The last linear layer of the neural network returns `logits` - raw values in \[`-infty`, `infty`\] - which are passed to the
# [nn.Softmax](https://pytorch.org/docs/stable/generated/torch.nn.Softmax.html) module. The logits are scaled to values \[0, 1\] representing the model's predicted densities for each class. `dim` parameter indicates the dimension along which the values must sum to 1.
softmax = nn.Softmax(dim=1)
pred_probab = softmax(logits)
# ## Model Parameters
#
# Many layers inside a neural network are *parameterized*, i.e. have associated weights
# and biases that are optimized during training. Subclassing `nn.Module` automatically
# tracks all fields defined inside your model object, and makes all parameters
# accessible using your model's `parameters()` or `named_parameters()` methods.
#
# In this example, we iterate over each parameter, and print its size and a preview of its values.
#
# +
print("Model structure: ", model, "\n\n")
# `param[:2]` limits the printout to the first two rows/entries of each tensor.
for name, param in model.named_parameters():
    print(f"Layer: {name} | Size: {param.size()} | Values : {param[:2]} \n")
# -
# --------------
#
#
#
| intro-to-pytorch/5-model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Birdsong REST API
#
# The Birdsong database is accessible via a REST API. Accessing it with any language should be fairly simple; for these examples, we'll use Python.
#
# All we need to get started is to import the _requests_ package and set up our base URL.
import requests
# Base URL of the locally running Birdsong REST service.
URL = "http://127.0.0.1:5000/"
# The simplest requests just view the rows in the database tables. Let's look at _bird_ - the database's driving table:
# +
# Find all birds
req = requests.get(URL + "view/bird")
# Results are returned as JSON. There are two blocks - "data" contains the returned data
req.json()["data"]
# -
# "rest" contains statistics on the API call itself
req.json()["rest"]
# We can also filter query results to look at specific birds:
# Find all birds sired by a specific bird
req = requests.get(URL + "view/bird?sire=20171220_purple42white35")
req.json()
# That's a lot of data. What if we just wanted the nest and name for the birds?
# We can specify just the columns we want:
# Find specific columns for all birds sired by a specific bird
req = requests.get(URL + "view/bird?sire=20171220_purple42white35&_columns=nest,name")
req.json()
# If you ever want to know which columns you can use for filtering, use the _/columns/_ endpoint:
# Find columns in the bird table
req = requests.get(URL + "columns/bird")
req.json()
# Not sure which tables you can query?
# Find views/tables
req = requests.get(URL + "tables")
req.json()
# Let's try a different table:
# Find all events for a specific bird
req = requests.get(URL + "view/bird_event?name=20220405_purple12white23")
req.json()
# So far, everything has worked. What if there's an error, like a filter that specifies a non-existent bird?
# If there's an error, check rest->error
req = requests.get(URL + "view/bird_event?name=no_such_bird")
req.json()
# What about nests?
# Find nests with a specific bird as a sire
req = requests.get(URL + "view/nest?sire=20171220_purple42white35")
req.json()
# Wildcards are also supported. Let's go back to the bird table:
# Find all birds with a green leg band:
req = requests.get(URL + "view/bird?name=*green*")
req.json()
# "Not equals" (or "not like" in the case of wildcards) is also supported:
# Find the names of all birds without a green leg band:
req = requests.get(URL + "view/bird?name!=*green*&_columns=name")
req.json()
# <= and >= are also supported. Let's find every bird hatched before January 1st of 2020:
# Find the name and hatch dates of all birds hatched before 2020-01-01:
req = requests.get(URL + "view/bird?hatch_late<=2020-01-01&_columns=name,hatch_early,hatch_late")
req.json()
| api/jupyter/birdsong_rest_api.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/pg1992/IA025_2022S1/blob/main/ex02/pedro_moreira/solution.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="GqNC-aD3imli"
# # Notebook de referência
#
# Nome: <NAME>
# + [markdown] id="FEB34HNpuVtP"
# ## Instruções
# + [markdown] id="aKOp1WXHuVtR"
# Este exercício consiste em escrever um código para treinar um modelo linear usando SGD e vizualizar como a função de perda varia em função das pesos da rede.
#
# A implementação será considerada correta apenas se passar nos 3 asserts ao longo deste notebook.
# + [markdown] id="uVvsCpB_uVtS"
# ## Problema de Regressão Linear
# + [markdown] id="XpSx0ITuuVtU"
# O problema de ajuste de uma reta a um conjunto de pontos para verificar se existe uma previsão linear é um problema muito antigo, muito estudado e muito presente nos dias de hoje.
#
# Quando o ajuste é abordado como um problema de **otimização numérica**, ele é a base de boa parte dos **conceitos sobre redes neurais** e iremos explorá-lo aqui como uma forma de introdução às redes neurais. O modelo de regressão linear que iremos utilizar pode
# ser visto como uma rede neural de apenas uma camada e função de ativação linear.
# + [markdown] id="Vws1xdXyuVtV"
# ## Conjunto de dados: Flores Íris
# + [markdown] id="cG2UO9EwuVtW"
# Iremos utilizar duas propriedades do conjunto de dados das flores Íris [Wikipedia-Iris_flower_data_set](https://en.wikipedia.org/wiki/Iris_flower_data_set):
#
# * o comprimento das sépalas e
# * o comprimento da pétalas.
#
# A ideia será prever o comprimento da pétala, conhecendo-se o comprimento da sépala. Estaremos usando apenas uma propriedade, ou característica ou *feature* do objeto para que seja fácil visualizar o espaço de busca de parâmetros.
#
# Vamos utilizar as 50 amostras da variedade versicolor.
#
# 
# + [markdown] id="PBxvwp4RuVtX"
# ## Dados: leitura e visualização
#
# + id="e8o6DbARuVtY"
# %matplotlib inline
import matplotlib.pyplot as plt
import ipywidgets as widgets
from IPython import display
import numpy as np
import pandas as pd
from sklearn.datasets import load_iris
import time
# + id="J3U7mxxCuVtc" outputId="4f2967ff-706a-4769-cf31-f6d80e6e6ded" colab={"base_uri": "https://localhost:8080/", "height": 206}
# Load the iris dataset and keep only the versicolor samples (target == 1),
# taking every other feature column, i.e. sepal length and petal length.
iris = load_iris()
data = iris.data[iris.target==1,::2] # sepal and petal lengths, indices 0 and 2
x_in = data[:,0:1]
y_in = data[:,1:2]
iris_pd = pd.DataFrame(x_in, columns=['x_in'])
iris_pd['y_in'] = y_in
iris_pd.head()
# + [markdown] id="vbE9iw_nuVth"
# ## Visualização dos dados `x_in` e `y_in` e normalizados
# + id="Czv-YcXvuVti" outputId="7ab50e72-0c59-4905-a753-d2288c58738d" colab={"base_uri": "https://localhost:8080/", "height": 334}
# Min-max normalize both variables into the [0, 1] range.
x = x_in - x_in.min()
x /= x.max() # normalization
y = y_in - y_in.min()
y /= y.max()
# Left plot: raw measurements; right plot: normalized values.
fig = plt.figure(figsize=(16,5))
ax_in = fig.add_subplot(1,2,1)
ax_in.scatter(x_in, y_in)
ax_in.set_xlabel('Comprimento sepalas')
ax_in.set_ylabel('Comprimento petalas')
ax_n = fig.add_subplot(1,2,2)
ax_n.scatter(x, y)
ax_n.set_xlabel('Comprimento normalizado sepalas')
ax_n.set_ylabel('Comprimento normalizado petalas');
# + [markdown] id="h7IFlfHjuVtl"
# ## Reta de ajuste
# + [markdown] id="vJQZQKmhuVtm"
# A equação da reta no plano necessita de dois parâmetros, aqui denominados $w_0$ (*bias*) e inclinação $w_1$. Veja figura:
#
#
# <img src="https://raw.githubusercontent.com/robertoalotufo/files/master/figures/linhareta.png" width="300pt">
#
# A reta de ajuste será dada por:
#
# $$ \hat{y} = w_0 + w_1 x $$
#
# onde
# * $w_1$ é o coeficiente angular da reta e
# * $w_0$ é a interseção do eixo vertical quando x é igual a zero, também denominado de *bias*.
# * $x$ é a variável de entrada (comprimento das sépalas) e
# * $\hat{y}$ é a predição (comprimento estimado das pétalas).
# + [markdown] id="8Ajs4XteuVtn"
# ## Representação gráfica da equação linear via neurônio
# + [markdown] id="tGLD4vWkuVto"
# $ \hat{y} = 1 w_0 + x_0 w_1 $
#
# Temos:
# - 1 atributo de entrada: $x_0$
# - 2 parâmetros para serem ajustados (treinados) $w_0$ e $w_1$
# - 1 classe de saída $\hat{y}$
#
# <img src="https://raw.githubusercontent.com/robertoalotufo/files/master/figures/RegressaoLinearNeuronio.png" width="300pt">
# $$ \hat{y} = w_0 + w_1 x $$
# $$ \mathbf{\hat{y}} = \mathbf{w} \mathbf{x} $$
# + [markdown] id="8LQIj6bAuVtp"
# ### Função Custo ou de Perda (MSE - Mean Square Error)
# + [markdown] id="SjDhur3YuVtq"
# <img src="https://raw.githubusercontent.com/robertoalotufo/files/master//figures/Loss_MSE.png" width = "600pt">
#
# + [markdown] id="vSHV19BZuVtr"
# A função de custo depende do conjunto de treinamento ($y_i$) e dos valores de predição ($\hat{y_i}$):
#
# $$ J(\hat{y_i},y_i) = \frac{1}{M} \sum_{i=0}^{M-1} (\hat{y_i} - y_i)^2 $$ .
#
# + [markdown] id="0sCJNYhluVts"
# ## Laço de minimização via gradiente descendente
# + [markdown] id="zZEiH3teuVtt"
# O código da próxima célula é a parte principal deste notebook. É aqui que a minimização é feita. É aqui que dizemos que estamos fazendo o *fit*, ou o treinamento do sistema para encontrar o parâmetro $\mathbf{W}$ que minimiza a função de perda $J$. Acompanhamos a convergência da minimização pelo valor da perda a cada iteração, plotando o vetor `J_history`.
# + [markdown] id="kozomVYuuVtu"
# O esquema da otimização é representado pelo diagrama a seguir:
#
# <img src="https://raw.githubusercontent.com/robertoalotufo/files/master/figures/RegressaoLinear_Otimizacao.png" width = "600pt">
#
# e é implementado pela próxima célula de código:
# + [markdown] id="-OdOS-k7uVtu"
# ## Funções: Custo, Gradiente Descendente
# + id="Yi46ZI1fxPjP"
# It is important to fix the seeds so that the asserts below pass.
import random
import numpy as np
random.seed(123)
np.random.seed(123)
# + id="tp9GnDHxet70"
class Model():
    """Linear model y = W x with zero-initialized weights."""

    def __init__(self, n_in: int, n_out: int):
        # Weights start at zero, as the exercise requires; shape (n_out, n_in)
        # so a single matrix product maps the input to the output.
        self.w = np.zeros((n_out, n_in))

    def forward(self, x):
        """Apply the linear map to x (an array with n_in rows)."""
        return self.w.dot(x)
# + id="1Iwdb4A0lirO"
def loss(model, x, y):
    """Mean squared error between model.forward(x) and the targets y."""
    residuals = model.forward(x) - y.T
    return (residuals ** 2).sum() / y.size
# + id="xCaoS8nMuVty"
def train(model, x, y, learning_rate: float, n_epochs: int):
    """Train a linear model with SGD (full-batch gradient descent here).

    Returns:
        loss_history: a np.array of shape (n_epochs + 1,)
        w_history: a np.array of shape (n_epochs + 1, 2)
    """
    n_samples = x.size
    loss_history = np.zeros(n_epochs + 1)
    w_history = np.zeros((n_epochs + 1, 2))
    # Prepend a row of ones so that w[0] acts as the bias term.
    xb = np.vstack((np.ones(n_samples), x.T))
    for epoch in range(n_epochs):
        # Record the state *before* this update, then step along -gradient.
        w_history[epoch] = model.w
        loss_history[epoch] = loss(model, xb, y)
        prediction = model.forward(xb)
        grad = 2 * xb * (prediction - y.T) / n_samples
        model.w -= np.sum(learning_rate * grad, axis=1)
    # Final state after the last update.
    w_history[-1] = model.w
    loss_history[-1] = loss(model, xb, y)
    return loss_history, w_history
# + [markdown] id="6C-L2NTBvTGh"
# ### Testando as funções
# + id="E7TbIlWuvSLo"
# Train for 20 epochs with a fixed learning rate of 0.5.
model = Model(2, 1) # two inputs (1 + x0) and one output y_pred
loss_history, w_history = train(model=model, x=x, y=y, learning_rate=0.5, n_epochs=20)
# + id="WhZj9Qcjz3h3"
# Assert do histórico de losses
target_loss_history = np.array(
[0.40907029, 0.0559969 , 0.03208511, 0.02972902, 0.02885257,
0.02813922, 0.02749694, 0.02691416, 0.02638508, 0.02590473,
0.02546862, 0.02507267, 0.02471319, 0.02438681, 0.0240905 ,
0.02382147, 0.02357722, 0.02335547, 0.02315414, 0.02297135,
0.0228054])
assert np.allclose(loss_history, target_loss_history, atol=1e-6)
# + id="_BrLghe-zzow"
# Assert de histórico de pesos da rede
target_w_history = np.array(
[[0., 0. ],
[0.6, 0.336644 ],
[0.4339223, 0.27542454],
[0.4641239, 0.31466085],
[0.44476733, 0.3271254 ],
[0.43861815, 0.3453676 ],
[0.42961866, 0.3611236 ],
[0.4218457, 0.37655178],
[0.41423446, 0.3911463 ],
[0.40703452, 0.4050796 ],
[0.40016073, 0.41834888],
[0.39361456, 0.43099412],
[0.38737625, 0.44304258],
[0.38143232, 0.4545229 ],
[0.3757687, 0.4654618 ],
[0.37037218, 0.4758848 ],
[0.36523017, 0.48581624],
[0.36033067, 0.49527928],
[0.35566223, 0.50429606],
[0.35121396, 0.5128876 ],
[0.34697545, 0.52107394]])
assert np.allclose(w_history, target_w_history, atol=1e-6)
# + [markdown] id="MHiCDLugqR4R"
# # Função de cálculo do grid de custos
# + id="lm_uLYP-I6Bf"
def compute_loss_grid(x, y, w_0_grid, w_1_grid):
    """Evaluate the MSE of the line w0 + w1*x over a grid of (w0, w1) pairs.

    Returns:
        loss_grid: an array of shape (w_0_grid.shape[0], w_1_grid.shape[0]).
    """
    n_samples = x.size
    # Broadcast every (w0, w1) pair against every sample: the grid axes
    # come first, and the samples live on a trailing third axis.
    W0, W1 = np.meshgrid(w_0_grid, w_1_grid)
    W0 = W0[:, :, np.newaxis]
    W1 = W1[:, :, np.newaxis]
    xs = x.reshape((1, -1))[np.newaxis, :]
    ys = y.reshape((1, -1))[np.newaxis, :]
    predictions = W0 + W1 * xs
    # Reduce over the sample axis to get one MSE value per grid cell.
    return np.sum((ys - predictions) ** 2, axis=2) / n_samples
# + id="l9ygbpRxJPnV"
# Expand the range of weights visited during training by one full span on
# each side, then evaluate the loss on a 100x100 grid over that region.
wmin = w_history.min(axis=0)
wmax = w_history.max(axis=0)
D = wmax - wmin
wmin -= D
wmax += D
w_0_grid = np.linspace(wmin[0], wmax[0], 100)
w_1_grid = np.linspace(wmin[1], wmax[1], 100)
loss_grid = compute_loss_grid(x, y, w_0_grid, w_1_grid)
# + [markdown] id="zbnwHButrRfh"
# ### Testando a função
# + id="LIJrVbJ-V4zt" outputId="4c491f78-4bbc-43fe-a7b5-79196f9bf070" colab={"base_uri": "https://localhost:8080/"}
# Download the reference grid and check our computation against it.
# !gsutil cp gs://unicamp-dl/ia025a_2022s1/aula2/target_loss_grid.npy .
target_loss_grid = np.load('target_loss_grid.npy')
# + id="UNwn3RHcWKEI"
assert np.allclose(loss_grid, target_loss_grid, atol=1e-6)
# + [markdown] id="jkQqeZPjuVt2"
# ## Funcão de Plot
# + id="HhIiKg2IuVt3"
def show_plots(x, y, w_0_grid, w_1_grid, loss_grid, loss_history, w_history, sleep=0.3):
    """Animate the training run: weight trajectory over the loss grid plus
    the loss curve. Blocks for roughly len(loss_history) * sleep seconds."""
    n_samples = y.shape[0]
    # optimal value: analytic (ordinary least squares) solution
    # ------------------------------
    x_bias = np.hstack([np.ones((n_samples, 1)), x])
    w_opt = (np.linalg.inv((x_bias.T).dot(x_bias)).dot(x_bias.T)).dot(y)
    x_all = np.linspace(x.min(), x.max(), 100).reshape(100, 1)
    x_all_bias = np.hstack([np.ones((100, 1)), x_all])
    result_opt = x_all_bias.dot(w_opt) # prediction of the optimal fit (currently unused below)
    # Plots:
    # --------
    fig = plt.figure(figsize=(18, 6))
    ax_grid = fig.add_subplot(1, 3, 1) # grid of losses
    ax_loss = fig.add_subplot(1, 3, 2) # loss function
    ax_loss.plot(loss_history)
    ax_loss.set_title('Perda', fontsize=15)
    ax_loss.set_xlabel('epochs', fontsize=10)
    ax_loss.set_ylabel('MSE', fontsize=10)
    ax_grid.pcolormesh(w_0_grid, w_1_grid, loss_grid, cmap=plt.cm.coolwarm)
    ax_grid.contour(w_0_grid, w_1_grid, loss_grid, 20)
    ax_grid.scatter(w_opt[0], w_opt[1], marker='x', c='w') # analytic solution
    ax_grid.set_title('W', fontsize=15)
    ax_grid.set_xlabel('w0', fontsize=10)
    ax_grid.set_ylabel('w1', fontsize=10)
    # Dynamic plot: redraw the figure once per recorded epoch.
    # -------------
    for i, (loss, w) in enumerate(zip(loss_history, w_history)):
        ax_loss.scatter(i, loss)
        ax_grid.scatter(w[0], w[1], c='r', marker='o')
        display.display(fig)
        display.clear_output(wait=True)
        time.sleep(sleep)
# + [markdown] id="8ePrG4fQuVt6"
# ## Plotagem iterativa do gradiente descendente, reta ajuste, parâmetros, função perda
# + id="Zx73IXaIuVt7" outputId="28d83c7d-d3bc-48de-b7b8-9368dcb7c1d2" colab={"base_uri": "https://localhost:8080/", "height": 406}
# %matplotlib inline
# Run the animation; a keyboard interrupt just stops it without a traceback.
try:
    show_plots(x, y, w_0_grid, w_1_grid, loss_grid, loss_history, w_history, sleep=0.01)
except KeyboardInterrupt:
    pass
| ex02/pedro_moreira/solution.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.5 64-bit (conda)
# name: python385jvsc74a57bd0b3ba2566441a7c06988d0923437866b63cedc61552a5af99d1f4fb67d367b25f
# ---
# # IMDB
# ## MASTER DATA SCIENCE: NUCLIO
# ## PROFESOR: <NAME>
# ## CLASE 1: EDA + Data Cleaning
import pandas as pd
import numpy as np
# matplotlib.pyplot (not the bare matplotlib package) provides the plotting
# API; the original `import matplotlib as plt` would break any later plt.* call
# such as plt.plot or plt.show.
import matplotlib.pyplot as plt
# Load the dataset; the first CSV column is used as the index.
imdb = pd.read_csv("datasets/IMDB.csv", sep=";", index_col=[0])
imdb
imdb.shape
type(imdb.shape)
imdb.info()
# tells us, for each column, whether there are null values -> NaN
imdb.isnull().any()
# extract the columns that contain NaN values
columna_nulos = imdb.columns[imdb.isnull().any()]
columna_nulos
# for the "color" column, list the values and how many times each appears
imdb["color"].value_counts()
# for the "director_name" column, list the values and how many times each appears
imdb["director_name"].value_counts()
# show every row where director_name is NaN
# this is done so we can later replace the NaN values with "", because the MODEL does not understand NaN but does understand NULL
imdb[imdb["director_name"].isnull()]
# SLICING
# careful: always make copies of the df — otherwise the data still points to the same underlying df
director_nulo_mayorde140mins = imdb[ (imdb["director_name"].isnull()) & (imdb["duration"] >= 140) ]
# apply a formatter to the result
imdb["gross"].describe().apply("{0:.2f}".format)
imdb["gross"].min()
imdb["gross"].max()
imdb["gross"].mean()
imdb[(imdb["gross"] > 600000000)].head(10)
imdb.hist(column="gross")
imdb[ (imdb["gross"] > 500000000) & (imdb["gross"] < 5600000000) ]
# check duplicated values (careful: the whole row has to be duplicated, otherwise nothing shows up)
imdb.duplicated()
# check duplicated ROWS (careful: every value in the row has to be duplicated, otherwise nothing shows up)
# keep=False -> makes both the row and its duplicate appear in the result
imdb[imdb.duplicated(keep=False)]
# # When keep='first' (the default):
# ## only the extra occurrences are marked as duplicates — the first one is kept (this is the setting to use when you perform the drop())
# # When keep=False :
# ## every occurrence is marked (the original row as well as its duplicates)
# +
# example of using keep=False
df_test = pd.DataFrame(["a","b","c","d","a","b"])
print(df_test.T)
print("")
# WITHOUT keep=False -> we ask only for the duplicated rows (duplicates only)
df_test_only_duplicate = df_test[df_test.duplicated()]
print(df_test_only_duplicate.T)
print("")
# WITH keep=False -> we ask for every row involved in a duplication (original and duplicates)
df_test_only_duplicate_ALL = df_test[df_test.duplicated(keep=False)]
print(df_test_only_duplicate_ALL.T)
# -
# ### Deep copy creates new id's of every object it contains,
# ### while a normal copy only copies the elements from the parent and creates a new id for the variable it is copied to.
# we do not want to work on the original IMDB df, so we create a copy first
# this creates a new DataFrame, with a new id
imdb_sin_valores_duplicado = imdb.copy(deep=True)
# what follows is just to illustrate the COPIES that need to be made
# print the id of the new DataFrame
# N.B. an object's id changes on every execution
print("id del DF imdb: " + str(id(imdb)))
print("id del DF imdb_sin_valores_duplicado: " + str(id(imdb_sin_valores_duplicado)))
# +
# asi es como un preview
# imdb_sin_valores_duplicado.drop_duplicates()
# ---> BORRAMOS LAS FILAS DUPLICADA <---
# con inplace=True es 'real', el DF estara modificado
imdb_sin_valores_duplicado.drop_duplicates(inplace=True)
# -
# # When inplace = True :
# ## the data is modified in place, which means it will return nothing and the dataframe is now updated.
# # When inplace = False :
# ## (which is the default) then the operation is performed and it returns a copy of the object. You then need to save it to something.
imdb_sin_valores_duplicado.info()
# reseteamos LOS INDICES que se han visto modificado cuando hemos borrado filas
# drop=True -> borrar el indice antiguo y inplace=True -> hace que sea real
imdb_sin_valores_duplicado.reset_index(drop=True, inplace=True)
imdb_sin_valores_duplicado.info()
# BORRAMOS LA COLUMNA color (porque es irilevante)
imdb_sin_valores_duplicado.drop(columns=["color"], inplace=True)
# controlamos que se ha eliminada la columna color (si! ahora hay solo 12 columnas)
imdb_sin_valores_duplicado.info()
# # rellenar los nulos
# RELLENAMOS los NaN con valores "" (vacio), porque el MODELO no entiende el NaN o NULL pero si el "" (vacio)
imdb_sin_valores_duplicado["director_name"].fillna("", inplace=True)
# controlamos que no hay ahora valores NaN en la columna director_name
imdb_sin_valores_duplicado[imdb_sin_valores_duplicado["director_name"].isnull()]
# COMPROBAMOS que se han borrados, sacando el num() de los valores isnull() de la columna "director_name"
imdb_sin_valores_duplicado["director_name"].isnull().sum()
# los valores vacio de director_name (son 11)
imdb_sin_valores_duplicado[imdb_sin_valores_duplicado["director_name"] =="" ]
imdb_sin_valores_duplicado["director_name"].value_counts()
# en la primera fila, el 11 == a los valores vacio de director_name
imdb_sin_valores_duplicado["gross"].isnull().sum()
imdb_sin_valores_duplicado[ imdb_sin_valores_duplicado["gross"].isnull() ]
# ## decidir como manejar los valores NaN de "gross"
# ## 1) poner a ZERO
# ## 2) poner un valor medio o mediana
# calculo la media
imdb_sin_valores_duplicado["gross"].mean()
# asigno la media a los valores NaN de "gross"
imdb_sin_valores_duplicado["gross"].fillna( imdb_sin_valores_duplicado["gross"].mean(), inplace=True )
# compruebo si hay valores NaN en "gross"
imdb_sin_valores_duplicado["gross"].isnull().sum()
#
imdb_sin_valores_duplicado.isnull().sum()
# ## Look at the GENRES
# ### The genres field holds 4/5/6 genre strings — HOW DO WE HANDLE THAT?
# #### The plan is one column per genre, with 1 if the film belongs to that genre and 0 otherwise.
# +
# CREATE a DataFrame with all the genres using str.split() to separate them.
# .str turns the column into a string accessor that supports .split().
lista_de_generos = imdb_sin_valores_duplicado["genres"].str.split("|", expand=True)
# The expand ATTRIBUTE splits the strings into separate columns:
# If True, return a DataFrame/MultiIndex expanding dimensionality ---> pandas.core.frame.DataFrame
# If False, return a Series/Index containing lists of strings ---> pandas.core.series.Series
# -
lista_de_generos
# FILL the Nones with "" (empty), because the model does not understand NaN or NULL but does handle "" (empty).
lista_de_generos.fillna("", inplace=True)
# +
# CREATE one column per genre and assign its values (repeat for as many genre slots as exist).
#imdb_sin_valores_duplicado["genero_1"] = lista_de_generos[0]
#imdb_sin_valores_duplicado["genero_2"] = lista_de_generos[1]
#imdb_sin_valores_duplicado["genero_3"] = lista_de_generos[2]
#imdb_sin_valores_duplicado["genero_4"] = lista_de_generos[3]
#imdb_sin_valores_duplicado["genero_5"] = lista_de_generos[4]
# -
# Delete the old "genres" column.
del(imdb_sin_valores_duplicado["genres"])
imdb2 = imdb_sin_valores_duplicado
imdb2
# +
imdb2["duration"].hist()
# Can also be written as: imdb2.hist(column="duration")
# -
imdb2[imdb2["duration"]<=50]
# There are anomalous values in duration -> fixed with a WHERE.
# Use np.where to assign the mean to every duration row <= 50; otherwise keep the value.
# np.where( condition, value if condition is True, value if condition is False )
# NOTE(review): assumes `np` (numpy) was imported earlier in the notebook — confirm.
imdb2["duration"]=np.where( imdb2["duration"]<=50, imdb2["duration"].mean(), imdb2["duration"] )
# This is a DataFrame with all the genres.
lista_de_generos
| Clase1_IMDB_Limpieza_en_clase.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # African Airlines Sentiment Analysis (2015-Present)
# **Modeling & (Aspect Based) Sentiment Analysis V1**
# **Goal:** Conduct a basic aspect-based sentiment analysis on airlines with the [aspect-based-sentiment-analysis (ABSA) package](https://github.com/ScalaConsultants/Aspect-Based-Sentiment-Analysis)
#
# The (airline) aspects in focus are:
# - Seat
# - Cabin staff service
# - Food
# - Inflight entertainment
# - Ground service
# - WiFi
#
# ---
#
# **Skip to the code if you are not interested in a brief discussion of the eventual ABSA process. This can also easily be inferred from the code**
#
# ---
#
# #### ABSA process
# To conduct the ABSA, aspect classes (enumerated above) are manually indicated as well as terms that are likely linked to these e.g. food: meal, food.
#
#
# These aspect terms are then searched for in the review text and if they exist, the sentiment (positive, negative, or neutral) of the word is inferred from the context of the sentence.
#
#
# #### Model Evaluation
# To evaluate the efficacy/accuracy of the ABSA, we utilized one of the preprocessed datasets. In this dataset, the airline aspects (enumerated above) ratings (from 1-5) were mapped to categorical sentiment groups thus:
#
# $$ 1-2: Negative \ \ | \ \ \ 3: Neutral \ \ \ | \ \ \ 4-5: Positive$$
#
#
# Subsequently, aspect predictions where the model found no aspect term were replaced with NaNs. For example, if the mapping of ratings to sentiment groups in a particular review reported food as being 'Positive' and the model did not find food to be discussed in this same review, then the sentiment ('Positive') for that review is replaced with a NaN. This is done so that the model's predictions are all that are evaluated and not cases where the model does not predict. There are a few reasons/assumptions behind this action:
#
# 1. Users reviews do not always cover what they rated as these are two different sections on the review website (i.e. the review is not necessarily exhaustive). Hence, there is no way for the model to deduce what the reviewer thought of an aspect if it was not actually expressed in the review (hence why the rated sentiment is replaced with a NaN since that sentiment was not expressed in the review)
# 2. In some sense, we are putting too much faith in the model. It is possible that the reviewer did express sentiment on the aspect but the model did not catch this. In this case, we still go with the model and believe that the service aspect (e.g. food, cabin crew, etc.) was not discussed in the review text. *This point is especially important since the aspect terms were specified manually and certain terms could definitely have been missed out*.
# +
# #!pip install arch
# -
# ### 1. Library Importation
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from arch.bootstrap import IIDBootstrap
import aspect_based_sentiment_analysis as absa
# ### 2. Loading the data
# +
#Dataframe with review text cleaned and encoded sentiment values
airlines_df = pd.read_csv('../data/processed/airlines_processed.csv',index_col=0)
#Convert encoded rating sentiment values to int
# NOTE(review): the assignment below is a no-op (slice assigned to itself) and
# an `.astype(int)` looks missing — yet downstream code compares these columns
# against 'Positive'/'Negative'/'Neutral' strings, which an int cast would
# break. Confirm intent before changing it.
airlines_df.iloc[:,6:13] = airlines_df.iloc[:,6:13]
# -
airlines_df.head(3)
# ### 3. Quick ABSA
#Load the basic configuration of the ABSA package
nlp = absa.load()
#Note down the possible aspect classes – and their relevant (aspect) terms
aspect_classes = {'seat':['seat'],
                  'cabin_staff_service': ['crew', 'service', 'staff', 'attendant'],
                  'food': ['food','meal','breakfast','lunch','dinner'],
                  'inflight_entertainment': ['entertainment','ife','movies','shows', 'music', 'films'],
                  'ground_service': ['ground service', 'ground staff'], 'wifi': ['wifi', 'internet']}
def reviews_absa(df, sample_num=None):
    """
    Run ABSA on a random sample of reviews from a review-text dataframe.

    Aspect terms (declared in the module-level ``aspect_classes`` dict) are
    searched for in each preprocessed review text; when a term is present,
    the ABSA model (module-level ``nlp``) scores it in the context of the
    whole review.

    Inputs:
        - df (pandas DataFrame): dataframe of reviews
        - sample_num (int): number of reviews to select at random

    Outputs:
        - rand_idx (np.ndarray): randomly selected row indices (with replacement)
        - sentiment_df (pd.DataFrame): predicted aspect sentiments for the reviews
        - (pd.DataFrame): user-reported sentiments (columns 6..11) for the sample
    """
    # Dataframe collecting one column of predictions per aspect class.
    sentiment_df = pd.DataFrame()
    # Random review indices (sampled WITH replacement) — running the model on
    # every review takes too long.
    rand_idx = np.random.randint(0, len(df), sample_num)
    # Iterate through all the aspects (e.g. seat, ground staff service, etc.)
    for aspect_class in aspect_classes.keys():
        # One predicted sentiment per sampled review for this aspect.
        review_aspect_sentiment = []
        for review in df.review_text.iloc[rand_idx]:
            # Score vectors ([neutral, negative, positive]) for every matched term.
            term_sentiments = []
            for aspect_term in aspect_classes[aspect_class]:
                # Only score terms that actually occur in the review text.
                if aspect_term in review:
                    try:
                        sent = nlp(review, aspects=[aspect_term])
                        term_sentiments.append(np.array(sent.subtasks[aspect_term].examples[0].scores))
                    except Exception:
                        # Model errors on odd inputs: skip this term
                        # (best-effort behaviour kept from the original).
                        pass
            if term_sentiments:
                # Average the scores across all matched terms. With a single
                # term this is just that term's 1-D score vector (the old
                # single-term branch produced a (1, 3) array instead of (3,);
                # argmax gave the same answer, but the shapes now agree).
                avg_sentiments = np.array(term_sentiments).mean(axis=0)
                # Pick the sentiment class with the largest probability.
                max_idx = np.argmax(avg_sentiments)
                if max_idx == 2:
                    review_aspect_sentiment.append("Positive")
                elif max_idx == 1:
                    review_aspect_sentiment.append("Negative")
                else:
                    review_aspect_sentiment.append("Neutral")
            else:
                # No aspect term found in this review -> no prediction.
                review_aspect_sentiment.append(np.nan)
        sentiment_df[aspect_class] = review_aspect_sentiment
    # Return:
    # 1. the indices of the randomly selected reviews,
    # 2. the sentiment predictions from the ABSA model,
    # 3. the true user-reported sentiment columns for the same sample.
    return rand_idx, sentiment_df, df.iloc[rand_idx, 6:12]
def replace_nan_with_str(df, true_df):
    """
    Replace NaN predictions (and the matching cells of the true-label frame)
    with the string 'nan' so the two frames can be compared element-wise.

    Wherever ``df`` holds anything other than a valid sentiment label, both
    that cell and the corresponding cell of ``true_df`` become the string
    'nan' — ensuring only positions where the model actually predicted are
    scored. Both frames are modified in place and returned.
    """
    valid_labels = ('Positive', 'Negative', 'Neutral')
    n_rows = len(df)
    n_cols = len(df.columns)
    for r in range(n_rows):
        for c in range(n_cols):
            cell = df.iloc[r, c]
            if cell in valid_labels:
                continue
            # np.nan != np.nan, so stringify both sides for comparison.
            df.iloc[r, c] = str(cell)
            true_df.iloc[r, c] = str(np.nan)
    return df, true_df
def absa_evaluation(true_df, num_reviews):
    """
    Run and score the aspect-based sentiment analysis. Steps:
    1. Call reviews_absa to predict aspect sentiment on a random sample of reviews.
    2. Call replace_nan_with_str to turn np.nan values into strings, so that
       predictions and true labels can be compared. True sentiments where the
       model predicted nothing are also turned into string nans, so only the
       model's actual predictions are evaluated.
    3. Compute the accuracy of the model's predictions.

    Returns the number of (effective) predictions and the accuracy.
    """
    # Aspect-based sentiment analysis on num_reviews randomly sampled reviews.
    _, preds_df, ratings_df = reviews_absa(true_df, num_reviews)
    # Reformat so only aspects the model detected and predicted get evaluated.
    preds_df, ratings_df = replace_nan_with_str(preds_df, ratings_df)
    # Count how many cells hold a real prediction (not the string 'nan').
    flat_preds = preds_df.values.flatten()
    num_preds = np.array([1 for p in flat_preds if p != 'nan']).sum()
    # Element-wise agreement between predictions and reported sentiments.
    matches = (preds_df.values == ratings_df.values).flatten()
    return num_preds, matches.sum() / len(matches)
# +
#List to store accuracy values
accuracy_values = []
#Counter to track total number of (effective) model predictions
num_predictions = 0
#Repeat the absa evaluation 5 times (50 random reviews per run) and average.
for _ in range(5):
    num_pred, accuracy = absa_evaluation(airlines_df,50)
    num_predictions += num_pred
    accuracy_values.append(accuracy)
print(f"Total number of predictions: {num_predictions}")
print(f"Average accuracy: {np.array(accuracy_values).mean()}")
# -
# ### 4. ABSA - Version to compute bca confint
def memoize(func):
    """
    Decorator that caches results of a single-argument function for fast
    lookup, avoiding recomputation on repeated calls.

    The cache maps str(argument) -> result, so any argument with a stable
    string form (here, a review index) can serve as a key.
    """
    results = {}

    def inner(arg):
        key = str(arg)
        # First time we see this argument: compute and remember the result.
        if key not in results:
            results[key] = func(arg)
        return results[key]

    return inner
@memoize
def get_aspect_sentiments(review_idx):
    """
    Predict the sentiment of every aspect class for one review.

    Input:
        - review_idx (int): positional index of the review in ``airlines_df``

    Returns a list with one entry per aspect class (in ``aspect_classes``
    order): "Positive"/"Negative"/"Neutral", or np.nan when no aspect term
    occurs in the review. Results are memoized per review index so bootstrap
    resampling does not re-run the model on the same review.
    """
    # Fetch the review text.
    review = airlines_df.review_text.iloc[review_idx]
    # One predicted sentiment per aspect class for this review.
    review_aspect_sentiment = []
    # Iterate through all the aspects (e.g. seat, ground staff service, etc.)
    for aspect_class in aspect_classes.keys():
        # Score vectors ([neutral, negative, positive]) for every matched term.
        term_sentiments = []
        for aspect_term in aspect_classes[aspect_class]:
            # Only score terms that actually occur in the review text.
            if aspect_term in review:
                try:
                    sent = nlp(review, aspects=[aspect_term])
                    term_sentiments.append(np.array(sent.subtasks[aspect_term].examples[0].scores))
                except Exception:
                    # Model errors on odd inputs: skip this term
                    # (best-effort behaviour kept from the original).
                    pass
        if term_sentiments:
            # Average the scores across all matched terms. With a single term
            # this is just that term's 1-D score vector (the old single-term
            # branch produced a (1, 3) array instead of (3,); argmax gave the
            # same answer, but the shapes now agree).
            avg_sentiments = np.array(term_sentiments).mean(axis=0)
            # Pick the sentiment class with the largest probability.
            max_idx = np.argmax(avg_sentiments)
            if max_idx == 2:
                review_aspect_sentiment.append("Positive")
            elif max_idx == 1:
                review_aspect_sentiment.append("Negative")
            else:
                review_aspect_sentiment.append("Neutral")
        else:
            # No aspect term found in this review -> no prediction.
            review_aspect_sentiment.append(np.nan)
    return review_aspect_sentiment
def compute_accuracy(rand_idx=None):
    """
    Accuracy of the ABSA model on the reviews indexed by ``rand_idx``.

    Designed as the statistic function for ``IIDBootstrap.conf_int``: the
    bootstrap passes in a resampled array of review indices each call.

    Input:
        - rand_idx (array-like of int): positional indices into ``airlines_df``

    Returns the fraction of evaluated cells where the model's predicted
    sentiment matches the user-reported sentiment.
    """
    # True (user-reported) sentiments for the sampled reviews.
    true_df = airlines_df.iloc[rand_idx, 6:12]
    # Run the (memoized) ABSA model on each review and collect one row of
    # per-aspect predictions per review. Rows are gathered in a list and
    # concatenated once: DataFrame.append was deprecated and then removed in
    # pandas 2.0, so pd.concat is used instead.
    pred_rows = [pd.DataFrame(get_aspect_sentiments(review_idx)).T
                 for review_idx in rand_idx]
    sentiment_df = pd.concat(pred_rows) if pred_rows else pd.DataFrame()
    # Reformat so only aspects the model detected and predicted are evaluated.
    sentiment_df, true_df = replace_nan_with_str(sentiment_df, true_df)
    # Compare predictions to user-reported sentiment values.
    comparison_vals = (sentiment_df.values == true_df.values).flatten()
    # Compute and return the accuracy.
    return comparison_vals.sum() / len(comparison_vals)
# +
#Draw a bootstrap sample of size 100
# - Here, a sample corresponds to a random review index. We sample with replacement
bs = IIDBootstrap(np.random.choice(len(airlines_df),100))
#Compute the BCa bootstrap confidence interval of the accuracy (1000 replications)
ci = bs.conf_int(compute_accuracy, 1000, method='bca')
# -
# Lower/upper bounds of the confidence interval as a flat array.
ci.ravel()
# ### 4. How well does the current model detect referenced aspects in reviews?
# Unfortunately, this is currently evaluated manually
# +
#Load the original data so we can see the original review texts (with no text processing)
original_df = pd.read_csv('../data/interim/airlines_categorical.csv',index_col=0)
#Run the ABSA to get the aspects and the model prediction for the deduced aspects
rand_idx, sentiment_df, reviews_ratings = reviews_absa(airlines_df,5)
#Print each raw review text followed by the model's per-aspect predictions,
#so aspect detection can be checked by eye.
for idx,value in enumerate(rand_idx):
    print('Text', idx+1,':')
    print(original_df.iloc[value,2], '\n')
    print('\n','Model aspect predictions')
    for attr, pred in zip(sentiment_df.iloc[idx].index,sentiment_df.iloc[idx].values):
        print('- ',attr,': ',pred)
    print('\n')
# -
# -
# #### Notes from the reviews above (flaws):
# - In Text 1, customer service is mistaken for cabin service by the model.
# - For Text 2, since 'catering' was not specified under food, the model believes no discussion on food was present in the review.
# - For Text 5, we see the model likely ascribed 'Positive' due to the proximity to the word better
# ### Breaking down and visualizing the model's classifications
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay
# +
#Run aspect based sentiment analysis on num_reviews randomly selected reviews
_ , sentiment_df, reviews_ratings = reviews_absa(airlines_df,250)
#Reprocess in preparation for prediction comparison
#Here we reformat so only aspects that the model detected and predicted are evaluated
sentiment_df, reviews_ratings = replace_nan_with_str(sentiment_df, reviews_ratings)
#Calculate the number of predictions
num_preds = np.array([1 for pred in sentiment_df.values.flatten() if pred != 'nan']).sum()
#Compare predictions to user reported sentiment values
comparison_vals = (sentiment_df.values == reviews_ratings.values).flatten()
#Compute and print the accuracy
print(comparison_vals.sum()/len(comparison_vals))
# -
#Replace missing true ratings with 'None' to allow for confusion matrix plotting
reviews_replaced = reviews_ratings.replace(np.nan, 'None')
# Drop positions where the model made no prediction. The 'nan' strings were
# written into BOTH frames at the same positions by replace_nan_with_str, so
# the two filtered arrays stay aligned and equal in length.
attr_preds = np.delete(sentiment_df.values.flatten(),
                       np.where(sentiment_df.values.flatten() == 'nan'))
true_ratings = np.delete(reviews_replaced.values.flatten(),
                         np.where(reviews_replaced.values.flatten() == 'nan'))
# Confusion matrix of true sentiment vs model prediction; 'None' marks
# reviews where the user gave no rating but the model still predicted.
cm = confusion_matrix(true_ratings, attr_preds)
disp = ConfusionMatrixDisplay(confusion_matrix=cm, display_labels = ['Negative', 'Neutral', 'None', 'Positive'])
disp.plot()
plt.show()
# Does quite poorly predicting Neutral sentiment. Also detects sentiment in a number of reviews where the user gave no rating
# +
#List to store the indices of rows containing at least one 'Neutral' prediction
idx_with_neutral = []
#Iterate over prediction rows; record each row index at most once (break after
#the first 'Neutral' found in the row).
for index,row in sentiment_df.iterrows():
    for var in row:
        if var == 'Neutral':
            idx_with_neutral.append(index)
            break
| notebooks/.ipynb_checkpoints/4.0-modelling-absa-v1-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (Data Science)
# language: python
# name: python3__SAGEMAKER_INTERNAL__arn:aws:sagemaker:us-east-2:429704687514:image/datascience-1.0
# ---
# # Amazon SageMaker Workshop
# ## _**Deployment**_
#
# ---
#
# In this part of the workshop we will deploy our model created in the previous lab in an endpoint for real-time inferences to Predict Mobile Customer Departure.
#
# ---
#
# ## Contents
#
# 1. [Model hosting](https://docs.aws.amazon.com/sagemaker/latest/dg/how-it-works-hosting.html)
# * Set up a persistent endpoint to get predictions from your model
#
# 2. [Exercise - You turn to an endpoint and customize inference](#Exercise)
#
# ---
#
# ## Background
#
# In the previous labs [Modeling](../../2-Modeling/modeling.ipynb) and [Evaluation](../../3-Evaluation/evaluation.ipynb) we trained multiple models with multiple SageMaker training jobs and evaluated them .
#
# Let's import the libraries for this lab:
#
#Suppress default INFO logging
import logging
logger = logging.getLogger()
logger.setLevel(logging.ERROR)
# +
import time
import json
from time import strftime, gmtime
import boto3
import sagemaker
from sagemaker import get_execution_role
from sagemaker.predictor import csv_serializer
from sagemaker.model_monitor import DataCaptureConfig, DatasetFormat, DefaultModelMonitor
from sagemaker.s3 import S3Uploader, S3Downloader
# -
# Boto3/SageMaker session handles and the notebook's execution role.
sess = boto3.Session()
sm = sess.client('sagemaker')
role = sagemaker.get_execution_role()
# Restore variables persisted by the previous labs via the %store magic.
# %store -r bucket
# %store -r prefix
# %store -r region
# %store -r docker_image_name
# %store -r framework_version
bucket, prefix, region, docker_image_name, framework_version
# ---
# ### - if you _**skipped**_ the lab `2-Modeling/` follow instructions:
#
# - **run this:**
# +
# # Uncomment if you have not done Lab 2-Modeling
# from config.solution_lab2 import get_estimator_from_lab2
# xgb = get_estimator_from_lab2(docker_image_name, framework_version)
# -
# ---
# ### - if you _**have done**_ the lab `2-Modeling/` follow instructions:
#
# - **run this:**
# +
# # Uncomment if you've done Lab 2-Modeling
# # %store -r training_job_name
# xgb = sagemaker.estimator.Estimator.attach(training_job_name)
# -
# ---
# ## Host the model
# Now that we've trained the model, let's deploy it to a hosted endpoint. To monitor the model after it's hosted and serving requests, we'll also add configurations to capture data that is being sent to the endpoint.
# +
# S3 prefix where the endpoint's captured request/response data will land.
data_capture_prefix = '{}/datacapture'.format(prefix)
# Unique, timestamped endpoint name.
endpoint_name = "workshop-xgboost-customer-churn-" + strftime("%Y-%m-%d-%H-%M-%S", gmtime())
print("EndpointName = {}".format(endpoint_name))
# -
# Deploy the trained estimator to a real-time endpoint, capturing 100% of
# requests/responses to S3 for the later monitoring lab.
xgb_predictor = xgb.deploy(initial_instance_count=1,
                           instance_type='ml.m4.xlarge',
                           endpoint_name=endpoint_name,
                           data_capture_config=DataCaptureConfig(enable_capture=True,
                                                                 sampling_percentage=100,
                                                                 destination_s3_uri=f's3://{bucket}/{data_capture_prefix}'
                                                                )
                          )
# Ok, we just trained a model with SageMaker and then deployed it to a managed SageMaker endpoint.
# +
# Render a clickable console link to the SageMaker Endpoints page for this region.
from IPython.core.display import display, HTML
sm_ep_placeholder = "https://us-east-2.console.aws.amazon.com/sagemaker/home?region={}#/endpoints"
display(HTML(f"<a href={sm_ep_placeholder.format(region)}>Look at your endpoints here</a>"))
# -
# Or go to the left tab here, inside the Studio UI, and select "Endpoints":
#
# 
# #### Let's save the endpoint name for later (Monitoring lab)
# %store endpoint_name
# ### Invoke the deployed model
#
# Now that we have a hosted endpoint running, we can make real-time predictions from our model by making an http POST request. But first, we need to set up serializers and deserializers for passing our `test_data` NumPy arrays to the model behind the endpoint.
# +
# Attach CSV (de)serializers so we can send and receive plain CSV rows.
from sagemaker.serializers import CSVSerializer
from sagemaker.deserializers import CSVDeserializer
xgb_predictor.serializer = CSVSerializer()
xgb_predictor.deserializer = CSVDeserializer()
# -
# Now, we'll loop over our test dataset and collect predictions by invoking the XGBoost endpoint:
# +
print("Sending test traffic to the endpoint {}. \nPlease wait for a minute...".format(endpoint_name))
# Send each test row to the endpoint; the short sleep paces the traffic so
# data capture has time to record every invocation.
with open('config/test_sample.csv', 'r') as f:
    for row in f:
        payload = row.rstrip('\n')
        response = xgb_predictor.predict(data=payload)
        time.sleep(0.5)
# -
# Response of the last invocation only (each iteration overwrites `response`).
response
# ### Verify that data is captured in Amazon S3
#
# When we made some real-time predictions by sending data to our endpoint, we should have also captured that data for monitoring purposes.
#
# Let's list the data capture files stored in Amazon S3. Expect to see different files from different time periods organized based on the hour in which the invocation occurred. The format of the Amazon S3 path is:
#
# `s3://{destination-bucket-prefix}/{endpoint-name}/{variant-name}/yyyy/mm/dd/hh/filename.jsonl`
# +
from time import sleep
# S3 prefix where this specific endpoint's capture files are written.
current_endpoint_capture_prefix = '{}/{}'.format(data_capture_prefix, endpoint_name)
# Data capture delivers files to S3 asynchronously, so poll until they appear.
for _ in range(12): # wait up to a minute to see captures in S3
    capture_files = S3Downloader.list("s3://{}/{}".format(bucket, current_endpoint_capture_prefix))
    if capture_files:
        break
    sleep(5)
print("Found Data Capture Files:")
print(capture_files)
# -
# All the data captured is stored in a SageMaker specific json-line formatted file. Next, Let's take a quick peek at the contents of a single line in a pretty formatted json so that we can observe the format a little better.
# +
# Pretty-print the first JSON line of the newest capture file (truncated to 2000 chars).
capture_file = S3Downloader.read_file(capture_files[-1])
print("=====Single Data Capture====")
print(json.dumps(json.loads(capture_file.split('\n')[0]), indent=2)[:2000])
# -
# As you can see, each inference request is captured in one line of the jsonl file. The line contains both the input and output merged together. In our example, we provided the ContentType as `text/csv`, which is reflected in the `observedContentType` value. Also, the `encoding` value exposes the encoding that we used for the input and output payloads in the capture format.
#
# To recap, we have observed how you can enable capturing the input and/or output payloads to an Endpoint with a new parameter. We have also observed how the captured format looks like in S3. Let's continue to explore how SageMaker helps with monitoring the data collected in S3.
# ---
# ## _Alternative deployment_
#
# Ok, nice! We can train with SageMaker and then deploy in a managed endpoint with monitoring enabled.
#
# But:
#
# #### - What if I already have a model that was trained outside of SageMaker? How do I deploy it in SageMaker without training it previously?
#
# #### - What if I need to preprocess the request before performing inference and then post process what my model just predicted. How can I customize the inference logic with a custom inference script?
#
# # Exercise
# ### _[Challenge] Your turn!_
#
# Deploy another model in SageMaker. Remember that the output of each training job was an artifact (tar.gz file with the model and other configurations) that was saved in S3.
#
# 1. Pick one of this models in S3 or upload another one from your laptop to S3. Then deploy it.
# (If you haven't trained a model, pick the `model.tar.gz` in the `config` directory).
#
# 2. Add a custom inference script in your endpoint
#
# To make things easier, you can add a simple post-processing function that appends the value `"hello from post-processing"` to the response.
#
# So, if we send to our endpoint:
# ```
# 186,0.1,137.8,97,187.7,118,146.4,85,8.7,6,1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,0.10,0.11,0.12,0.13,0.14,0.15,0.16,0.17,1.1,0.18,0.19,0.20,0.21,0.22,0.23,0.24,0.25,0.26,0.27,0.28,0.29,0.30,0.31,0.32,0.33,0.34,0.35,0.36,0.37,0.38,0.39,0.40,0.41,0.42,0.43,0.44,0.45,0.46,0.47,0.48,0.49,0.50,0.51,0.52,0.53,1.2,1.3,0.54,1.4,0.55
# ```
#
# The output will be something like:
# ```
# 0.014719205908477306,"hello from post-processing"
# ```
#
# Want a hint? [Look here](./solutions/b-hint1.md)
# +
# YOUR SOLUTION HERE
# -
# ---
# # [You can now go to the lab 5-Monitoring](../../5-Monitoring/monitoring.ipynb)
| 4-Deployment/RealTime/deployment_hosting.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Fonction du son : Arranger
Idée : arranger du son consiste à lui donner de la couleur, des sonorités musicales.
| Son/Samir/Arranger.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Explore Pairwise Feature Correlations & Distributions
# ## Imports
# Wildcard import of the pygoose toolkit; provides the `kg` namespace used below.
from pygoose import *
# ## Config
# Discover the project layout (data/features directories) from the working directory.
project = kg.Project.discover()
# ## Read Data
# Names of the precomputed feature lists to load and join column-wise.
feature_lists = [
    'simple_summaries',
    'jaccard_ngrams',
    'fuzzy',
    'tfidf',
    'lda',
    'nlp_tags',
    'wordnet_similarity',
    'phrase_embedding',
    'wmd',
    'wm_intersect',
    'magic_pagerank',
    'magic_frequencies',
    'magic_cooccurrence_matrix',
    'oofp_nn_mlp_with_magic',
    'oofp_nn_cnn_with_magic',
    'oofp_nn_bi_lstm_with_magic',
    'oofp_nn_siamese_lstm_attention',
]
df_train, df_test, feature_ranges = project.load_feature_lists(feature_lists)
# Attach the training labels as the last column of the training frame.
df_train['target'] = kg.io.load(project.features_dir + 'y_train.pickle')
# ## Explore
# Transposed summary statistics: one row per feature.
df_train.describe().T
# Pairwise correlation heatmap over all feature columns (excludes the final
# 'target' column), saved next to the feature files.
kg.eda.plot_feature_correlation_heatmap(
    df_train,
    df_train.columns[:-1].tolist(),
    font_size=3,
    save_filename=project.features_dir + 'eda_heatmap.png'
)
| notebooks/eda-features.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="otIQhfjzONFU"
# ## Installation on Google Colab
#
# The transformers package is not installed by default on Google Colab. So let's install it with pip:
# + colab={"base_uri": "https://localhost:8080/"} id="n8Dtn_CaNDTH" outputId="2048c95a-cff1-45c4-8b71-88c6fb61d70a"
# !pip install transformers[sentencepiece]
# + [markdown] id="LHxNtFiROfp7"
# ## Sentiment analysis in English
#
# In this article, we will use the high-level pipeline interface, which makes it extremely easy to use pre-trained transformer models.
#
# Basically, we just need to tell the pipeline what we want to do, and possibly to tell it which model to use for this task.
#
# Here we're going to do sentiment analysis in English, so we select the `sentiment-analysis` task, and the default model:
# + id="e661b705" colab={"base_uri": "https://localhost:8080/", "height": 220, "referenced_widgets": ["8ba96795c7df4840a39e58eebd7d0897", "34d5be54a730475da69c885a4e0132b9", "f9239d82ad274a5ea71bfe29219fbe5f", "8b158504a7074e50b260306726b4a5e2", "a9b5aa651baf4860800d774df600277a", "<KEY>", "<KEY>", "13ab0cb37e3e4846af726f3fca95bc23", "<KEY>", "<KEY>", "ca7094d8b8454c08a0d84ecf783e664f", "e595b53cc49e44e8a19bb1ce99dfdf6d", "30c459baf6cc4d86bed379d5da56a954", "<KEY>", "<KEY>", "5f944399cc3344d4847b7154be3f7e14", "702db5cd15ca482fb5ca5dde2c38a8ca", "e424d9550e7544eab69446adfcd410ec", "<KEY>", "<KEY>", "<KEY>", "d14e0f6aaea34cd79a1096c5ac465463", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "02a2c829ce4743fda1eb3877a2adac5e", "0e5d2d0219da45ada7a083e232c51669", "<KEY>", "d0ee9ccb884d4d28a40fc9495e2a0452", "<KEY>", "<KEY>"]} outputId="e20f6234-c3fc-46c6-b4dd-80b6080c61ce"
# The high-level pipeline API: given only a task name, it downloads and
# wraps a default pre-trained model for that task from the Hugging Face hub.
from transformers import pipeline
# Default sentiment-analysis model (trained on English text).
classifier = pipeline("sentiment-analysis")
# + [markdown] id="rXkOkiuf-Y5O"
# The pipeline is ready, and we can now use it:
# + colab={"base_uri": "https://localhost:8080/"} id="8f7a4b84" outputId="5e480e0a-eaeb-4182-c1ff-0180fbfac537"
# Classify two English sentences; the pipeline returns one
# {label, score} dict per input sentence.
english_sentences = [
    "this is a great tutorial, thank you",
    "your content just sucks",
]
classifier(english_sentences)
# + [markdown] id="fDhBDzbn-jC8"
# We sent two sentences through the pipeline. The first one is predicted to be positive and the second one negative with very high confidence.
#
# Sounds good!
#
# Now let's see what happens if we send in french sentences:
# + colab={"base_uri": "https://localhost:8080/"} id="OSDcCBRJPemh" outputId="e4b2d69b-5355-4c76-a0e3-f96da36718aa"
# Same classifier, but fed French sentences — the English-trained default
# model is not expected to handle these correctly.
french_sentences = [
    "Ton tuto est vraiment bien",
    "il est complètement nul",
]
classifier(french_sentences)
# + [markdown] id="4USwsfyz-3Z_"
# This time, the classification does not work...
#
# Indeed, the second sentence, which means "this tutorial is complete crap", is classified as positive.
#
# That's not a surprise: the default model for the sentiment analysis task has been trained on English text, so it does not understand French.
# + [markdown] id="PYcOi1RKOE-U"
# ### Sentiment analysis in Dutch, German, French, Spanish and Italian
#
# So what can you do if you want to work with text in another language, say French?
#
# You just need to search the hub for a [french classification model](https://huggingface.co/models?filter=fr&pipeline_tag=text-classification&sort=downloads).
#
# Several models are available, and I decided to select [nlptown/bert-base-multilingual-uncased-sentiment](https://huggingface.co/nlptown/bert-base-multilingual-uncased-sentiment).
#
# We can specify this model as the one to be used when we create our `sentiment-analysis` pipeline:
# + id="YWk7fR2xOoqC" colab={"base_uri": "https://localhost:8080/", "height": 271, "referenced_widgets": ["663f68535b7e4aafa7f8bf8b7ba7f1b8", "9efbe589f6774ec295056186100ec28c", "<KEY>", "6f30571955ae4b869d55ca3d4ce8c183", "f073672775fd47dc81e7e03c9126d518", "b25b0b4db2e143c38e5c1988b0befc6e", "ddb45946de4d484889c0e8371602825b", "<KEY>", "<KEY>", "7435f3a44bd044eaa79b6d5f6a96db90", "b54b47e2c45e4a6ea2b6ab407bb4cdfd", "e1165434b68a420f94a4e36c7405fab8", "baa3e4e9d7344dada282cc39984fa49d", "ff4194fb5a984811bfb6b2a642da307d", "ac616af9beff4b52a0b8be792478dc63", "db4de8e163d646938eec3c0671ae5f39", "<KEY>", "4cc5abab72d947c78cefca9edbc57e8d", "e43eed725e70425daa2fae06f6ae4658", "<KEY>", "3192587336fd4a5baed241af3251fb36", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "e0ab293a1faf44f7b90767c5def9f8a0", "<KEY>", "1e8f1d118d464a7183d458d36863113c", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "79ddd407c0114f94856ebe185e3fbcef", "86ba863933eb4ead8803677b8f484019", "<KEY>", "e0b7b093eb7e4248be286380f7ab753b", "630b69238db04971a70ac8acded75821"]} outputId="ff1d13e3-32c6-482a-be19-99929dac05f9"
# Multilingual sentiment model from the hub (supports Dutch, German,
# English, French, Spanish and Italian; outputs a 1-5 star rating).
multilang_classifier = pipeline(
    "sentiment-analysis",
    model="nlptown/bert-base-multilingual-uncased-sentiment",
)
# + colab={"base_uri": "https://localhost:8080/"} id="h7-0_hM9N8_J" outputId="266c0939-ba79-41e3-95f2-a7d6e56fdcd8"
# Re-run the same French sentences through the multilingual model.
sentences_fr = [
    "Ton tuto est vraiment bien",
    "il est complètement nul",
]
multilang_classifier(sentences_fr)
# + [markdown] id="cP6yGH9pAlxc"
# And it worked! The second sentence is properly classified as very negative.
#
# You might be wondering why the confidence for the first sentence is lower. I'm pretty sure that it's because this sentence scores high on '4 stars' as well.
#
# Now let's try with an actual review for a restaurant near my place:
# + colab={"base_uri": "https://localhost:8080/"} id="nxR422j7BC-h" outputId="7463df44-648f-4c99-d772-fb98301157b1"
# pprint wraps the long string over several lines in the notebook output.
import pprint
# A real (negative) French restaurant review, used as test input below.
sentence="Contente de pouvoir retourner au restaurant... Quelle déception... L accueil peu chaleureux... Un plat du jour plus disponible à 12h45...rien à me proposer à la place... Une pizza pas assez cuite et pour finir une glace pleine de glaçons... Et au gout très fade... Je pensais que les serveuses seraient plus aimable à l idée de retrouver leur clientèle.. Dommage"
pprint.pprint(sentence)
# + colab={"base_uri": "https://localhost:8080/"} id="y9CMD1gDBO9w" outputId="83e76d4e-4d9f-404f-b1d2-1d8759ffff70"
multilang_classifier([sentence])
# + [markdown] id="NI4LzALfBUQf"
# 2 stars! On Google Reviews, this review has 1 star. Not a bad prediction.
# + [markdown] id="z2s3z9JMQTxU"
# ## Translation English-French
#
# Let's try and do a bit of translation, from English to French.
#
# Again, we search the hub, and we end up with this pipeline:
#
# + id="s8dFINPogXVG" colab={"base_uri": "https://localhost:8080/", "height": 322, "referenced_widgets": ["7abb4df3ee9640a294a8cb61afb50b81", "7d7042be064f49809fcda2ece29f579b", "4c339821c8624226a39a0a013226fd55", "6813dfb1b0b44a36aaf1ddcd4a069eaf", "adaa38d9cd8947e09d29a3bb11809af2", "66ceb5a5807b43f0bb534e47bb20381f", "484a6a2edb8e45dd838399ca8461eb0c", "b54a2c66a44a4375b812e752f72a3c71", "3d5383640527459db5e78c1c12febb52", "c84e7b443a834cad937f33ed5f4f8fba", "<KEY>", "d424568bfa34410f91d18d04032be06e", "<KEY>", "914f6de064cd41be9c8a1afeb8c299ac", "390397025d094ed9bbb3d15d31fc8a2a", "<KEY>", "<KEY>", "<KEY>", "b23fc56ed1074b9a91122bdb98c25a38", "<KEY>", "<KEY>", "3ab43448ced74903a088935d4552f2a9", "2d150d45ac434185b3ed93fd93215fd8", "76d7497c1f3e41e594463a215bb3bb66", "<KEY>", "a39f90a813784d749148db041636ed8d", "<KEY>", "<KEY>", "c15999afeb0d410b930e5a60ce1902e0", "<KEY>", "<KEY>", "68f97f92302143c88226b77e9f426f25", "965f4e7c534e497aa2ac2372bea0fbfe", "<KEY>", "<KEY>", "1e00da5894db4745879957825bebcdac", "<KEY>", "<KEY>", "f1d9faf0a82d423da212a4ae25e0897e", "990fad8de9a34edc81a75110bc80be99", "<KEY>", "<KEY>", "82e148a0ca8e425e9f6e7e30bf291165", "9091144081974a878d7a7996a48c4013", "<KEY>", "c7a6f1b904ac43dab0e29744a41fa8cc", "<KEY>", "<KEY>"]} outputId="b31b98f4-bc0d-4c82-a757-d7c55c74b4c6"
# English -> French translation pipeline (Helsinki-NLP OPUS-MT model).
en_to_fr = pipeline(
    "translation_en_to_fr",
    model="Helsinki-NLP/opus-mt-en-fr",
)
# + id="cw4RXdLxgobO" colab={"base_uri": "https://localhost:8080/"} outputId="d5f83062-0750-4afd-e335-66a5242c54ea"
en_to_fr("your tutorial is really good")
# + [markdown] id="R-Lh7BTeCFN5"
# This works well. Let's translate in the other direction. For this, we need to change the task and the model:
# + id="ts9ESQkHgtL_" colab={"base_uri": "https://localhost:8080/", "height": 322, "referenced_widgets": ["75c2e0d95c444ab7a560135e706ff23d", "c7eaede396774450a6afce3e9552d64c", "<KEY>", "321adbe08d254da097abda15350e08ec", "<KEY>", "<KEY>", "4c9ceb4626584268bbe6ca4588b900d2", "<KEY>", "<KEY>", "37fd09c7d1854166a0d45df598df4ac4", "d1259f8a7aba4cabb3df3f444b496288", "ba7a9dd21d234899bd3f750a4688fe67", "be2a4adc79444118a2c752f749937211", "afa534d2504d4a72b295742b95489812", "<KEY>", "e870e1d3fb0345969a626b3d1c8e4994", "0337e55f38694b4dbde7deec821ec918", "00016ebc992047dda597e18892241077", "a6b0c5bd2fdc4ee4abe893e6b9aad5fa", "665885f9fd4b4a98a29bc61efaf5787d", "<KEY>", "9b9a9285da294ff888159da064ddf2d0", "<KEY>", "85e9c56e10fc422db23e42430dd13701", "f4f5907731b84934ac2a5092c2fb1840", "3d33f0d2ea81499e8f6e8350128d5add", "<KEY>", "<KEY>", "6c5e36cfe20d46fe98276179804c94dc", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "77512bfe51694ce8989a316e4d238a20", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "bea810cde87e46a29e3d597c10741499", "5da6226371414c54916e62c196488c21", "<KEY>", "e41f40814e6748a6a94d0504ee5902f8", "7eec6f7b55744ed5896b94584a70742b", "<KEY>", "ab7c40aca7454a7bbe2e73862e1ee1bd", "<KEY>", "<KEY>"]} outputId="8737d434-8aa1-4751-d760-7659cc54ab2d"
# The reverse direction needs its own task name and its own model:
# French -> English translation pipeline.
fr_to_en = pipeline(
    "translation_fr_to_en",
    model="Helsinki-NLP/opus-mt-fr-en",
)
# + id="g9A7BqYlg6Sw" colab={"base_uri": "https://localhost:8080/"} outputId="06a392d3-8924-42ae-ec4a-29b775c4ab27"
fr_to_en("ton tutoriel est super")
# + [markdown] id="T1yBlLaBCQGu"
# Excellent translation!
# + [markdown] id="JqlFz7ZNhZ1I"
# ## Zero-shot classification in French
#
# Nowadays, very large deep-learning models are trained on very large datasets collected from the internet.
#
# These models know a lot already, so they don't need to learn much more.
#
# Typically, it's possible to fine-tune these models for a specific use case like text classification with a very small additional specific dataset. This is called few-shot learning.
#
# And sometimes, we can even do zero-shot learning: specific tasks can be performed without any specific training. This is what we're going to do now.
#
# We search the hub for a french zero-shot classification model, and we create this pipeline:
# + id="5-k5T-KUhYmb" colab={"base_uri": "https://localhost:8080/", "height": 271, "referenced_widgets": ["ad58d2d7f82544f6b20f7ef025f3ea16", "08d284c5a8b941fd84ba7d6089bf3da9", "941d3bf8b0d24bb096025e5e73758717", "a25664921de74b67be345ccf2e6f6811", "015fcc5d2565455d84c2fb6eb689c053", "dd92eb6a297b4494bd7821b0015e0fea", "5a649ac5b5fa4b0280beb3cbe7f9fa25", "90b36c8438ec4a29a7c78794ed205b25", "5124ac7a1ca2496bbbabe3aed0687ff9", "dfd96617bdb14701982a8bb76e7e3ca2", "6111d7096e27478c945463f0f0e0f4dc", "63d25d3e50db4415a661860a34cd1b27", "<KEY>", "<KEY>", "<KEY>", "1665ba31c104423c95e10ad9e273fcc2", "153c7f7660ac4d28915452a782dee3cf", "489bd247596646f6ad94faf64df87a3c", "d42e6dca1add4f7b89624c3d7112ad81", "b2397df3ac5a4301bea9e28c1669281b", "f40a43c6d5be41ceb365fb1f32841a8c", "221fa436953b400290582182485b1371", "<KEY>", "b62872e70b964b0a86a08558e4e9fb8e", "e5f3f49527f7448280eec6bb2da2b778", "9dd3aacf77fe4ae98da3a10d34b33307", "1e3572a36d1546628ff1e1687b0f56e5", "<KEY>", "e03384dd80d1497d95e94c6d8097554b", "<KEY>", "<KEY>", "<KEY>", "8f2a3120ae404536a0c8c049ed3ab6e3", "<KEY>", "f968c10e1c9044158f153a8cda5ece95", "e76ed760877e46c0b1abcf9417e6391d", "3e231f553b1842efae076c4812ec165a", "30ca814bb0614f64b5964ea2f903aad9", "a906e1d9311249e787914621de2b7a9d", "1cbf1a2236634df0a03baf5f4fef64b1"]} outputId="b726776d-7e04-45d0-f723-6c4fcf96fdf0"
# Zero-shot classification backed by a French CamemBERT NLI model.
# NOTE(review): the repo id spells "xlni" rather than "xnli" — the notebook
# output suggests it resolved at run time, but confirm the id still exists
# on the hub before re-running.
# This rebinds `classifier`, replacing the sentiment pipeline created above.
classifier = pipeline("zero-shot-classification",
                      model="BaptisteDoyen/camembert-base-xlni")
# + [markdown] id="gvkB7h2gEcXv"
# In the example below, I propose a sequence to classify into categories, and I also specify the categories.
#
# It's important to note that the model **has not been trained with these categories**, you could change them at will!
# + id="0Qg6iAoShhHY" colab={"base_uri": "https://localhost:8080/"} outputId="ebf70a45-5c0d-4778-db32-6b2a125f6cdf"
# A French sentence to classify into arbitrary, user-chosen categories —
# the model was never trained on these specific labels.
sequence = "Colin est en train d'écrire un article au sujet du traitement du langage naturel"
candidate_labels = ["science","politique","education", "news"]
# Returns the labels ranked by predicted probability for the sequence.
classifier(sequence, candidate_labels)
# + [markdown] id="Vxk403bhEwko"
# The predicted probabilities are actually quite sound! This sentence is indeed about science, news, and education. And not at all related to politics.
#
# But if we try this one:
# + id="E04_ip72FAOD" outputId="00e175ba-df7c-4f45-b414-917a28b94c8b" colab={"base_uri": "https://localhost:8080/"}
# Second example with different labels.
# NOTE(review): "<NAME>" looks like a redaction placeholder from dataset
# anonymization, not runnable original text — restore a real name if needed.
sequence = "<NAME> reconduit à la tête de la région Rhône-Alpes-Auvergne à la suite du deuxième tour des élections."
candidate_labels = ["politique", "musique"]
classifier(sequence, candidate_labels)
# + [markdown] id="3b0CMhuKFx1a"
# It works!
#
# Feel free to try other sentences and other categories. You can also change the model if you wish to do zero-shot classification in English or in another language.
# + [markdown] id="HSgHXhFfj6MC"
# ## Summarization in French
#
# Summarizing text is an interesting application of transformers.
#
# Here, we use a model trained on a dataset obtained by scraping [https://actu.orange.fr/](https://actu.orange.fr/), again found on the Hugging Face hub:
#
#
# + id="271WObZ2hzUy" colab={"base_uri": "https://localhost:8080/", "height": 220, "referenced_widgets": ["9a724bc533a94b1fa66a0a8045c220f9", "2ef0b405ad594136bc9a850ca22b4dd3", "e64b2e1f148a4f6c9bd81d79dc6aca67", "2561e533490049d9976f74182f6eeb35", "<KEY>", "23f656df7001497e9dc039b8b138a821", "a3f620e408ef418b95f314d09413c5ca", "<KEY>", "71405952fb5d4b278d251931e4597f06", "34089e8acc4f495285ffe68ff90838ef", "93066be29155429c8baed1350d01e85d", "f92811f9e54e497a932e861cbd36c0e0", "a2c7e75faa47489d9535e77f36b9a4bb", "96ef2a1310a9484ca45551a3dfb6d73c", "0bd6e1e483fd4c5eb4e5ff40c03db104", "529f0e962ae547118104bbee7eb5b4e2", "70aa62a9472e4cab84ce5a304d47999e", "<KEY>", "d44926bf1d4e43dd8e59285dd5bb6533", "<KEY>", "6153887c0acb44c48eab6e4dc6737456", "cba4beaa7b9f4993be17a624d04e6c7f", "e36fdb323f5548d4ab65e5661ac7aa81", "80f1a2b03e5644369b0d40f67996477e", "<KEY>", "<KEY>", "39c8e0847b964147b95e46d288774c80", "<KEY>", "1f9d19165e3341a480a16d57734ecf5a", "<KEY>", "94fe381e83a2455385e07334f8b5368d", "979c0fbb903642d2bbd2e4aec46e7e65"]} outputId="38a89ae5-17e0-449b-eb27-5c4900816982"
# French summarization model (BARThez fine-tuned on OrangeSum titles).
summarizer = pipeline(
    "summarization",
    model="moussaKam/barthez-orangesum-title",
)
# + [markdown] id="llaQhqscHcDQ"
# Let's use the first two paragraphs of an article about Covid-19 read in Le Monde:
# + id="JynCA1B0kINz" colab={"base_uri": "https://localhost:8080/"} outputId="d21d8cba-d70c-4263-b25d-a03ecdbde2a5"
# pprint was already imported above; repeated here so the cell is
# self-contained when run in isolation.
import pprint
# First two paragraphs of a Le Monde article about Covid-19, to summarize.
sentence = "La pandémie ne marque pas le pas. Le variant Delta poursuit son essor planétaire au grand dam de pays impatients de retrouver une vie normale. La pandémie a fait près de quatre millions de morts dans le monde depuis que le bureau de l’Organisation mondiale de la santé (OMS) en Chine a fait état de l’apparition de la maladie fin décembre 2019, selon un bilan établi par l’Agence France-Presse (AFP) à partir de sources officielles, lundi à 12 heures. Les Etats-Unis sont le pays le plus touché tant en nombre de morts (603 967) que de cas. Le Brésil, qui compte 513 474 morts, est suivi par l’Inde (396 730), le Mexique (232 564) et le Pérou (191 899), le pays qui déplore le plus de morts par rapport à sa population. Ces chiffres, qui reposent sur les bilans quotidiens des autorités nationales de santé, sont globalement sous-évalués. L’Organisation mondiale de la santé (OMS) estime que le bilan de la pandémie pourrait être deux à trois fois plus élevé que celui officiellement calculé."
pprint.pprint(sentence)
# + id="acAebixFkTGB" colab={"base_uri": "https://localhost:8080/"} outputId="abfa300c-d8a7-4d59-c72b-67fffd27dc59"
summarizer(sentence, max_length=80)
# + [markdown] id="o3gnRzeDH2bd"
# Terse, but not bad!
# + [markdown] id="T0Oo-N_SkpHd"
# ## Named entity recognition in French
#
# Named entity recognition can serve as the basis of many interesting apps!
#
# For example, one could analyse financial reports looking for dates, prices, company names.
#
# Let's see how to do this.
#
# Here, we use a french equivalent of BERT, called CamemBERT, fine-tuned for NER:
# + id="nZqoCACnkoTG" colab={"base_uri": "https://localhost:8080/", "height": 271, "referenced_widgets": ["db5fa558aeed46cdbcc5401fceff605d", "5922afd55f0a4b99805558f4dbcfe07e", "fcdeae793d8a449c8f5c1cd0de696549", "1c3f604117a8461a94a92091027c7066", "9aff46d718874353b30eae7cd0f94257", "9935299e25f44b1fb2065c2621c396f4", "8e56eda022fc4b8cafec80ad3c382577", "bc7ec226cea549f9a76827cdd16d772b", "63c062ebe51e4303a6b504ff3170f2d0", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "2368e6606d5a4a179ddab9ced494ddf9", "a012738762e6449b84a908f7eca99939", "<KEY>", "<KEY>", "9e344ce8f81d4227802b1aea37398d58", "<KEY>", "87f11b3186884e098fd65f3f83e3486a", "0f6a3b1fa3644eaeb6a5c1496f0cea5d", "<KEY>", "9c89e67e8f4541b582461a96da6ae069", "<KEY>", "<KEY>", "41d22898366948c79b686777ed2e93a6", "0ddc42c2f7ac4092b440a176c8ce2f2f", "089c40ff80014fb9be1d741dce425999", "f068c1ba85e3493d94860ee1e8350072", "4c113528f5ae48dc98a872bd2a4c8461", "<KEY>", "0a21c0e59cc44a11bd91d9051dbc3f52", "77031eefee0040da8561e2907801b230", "f84cf95174084a269c8f89f5a2d8269d", "<KEY>", "9479fdadae7348d3923d59bf7f5a2708", "025a154661d5479f8d2861a982b34870", "f332c1da0ada4dc7945704486b871d09", "56febb9c3eb543f0a1739ad34643be40"]} outputId="a4b7793c-221e-456b-dc41-d1d676412bd4"
ner = pipeline("token-classification", model="Jean-Baptiste/camembert-ner")
# + id="46CJFCMQkdFy" colab={"base_uri": "https://localhost:8080/"} outputId="d0ea14d6-6d1e-4b3a-f5a4-a97603fadc76"
# Run NER on a sample sentence; `nes` is a list of per-token dicts
# (with at least 'entity' and 'word' keys, used by the aggregation below).
nes = ner("Colin est parti à Saint-André acheter de la mozzarella")
pprint.pprint(nes)
# + [markdown] id="W7fCT5-9KXUp"
# We need to do a bit of post-processing to aggregate named entities with the same type.
#
# Here is a simple algorithm to do so (it can certainly be improved!)
#
# + id="ZBerEaOOlB7I" colab={"base_uri": "https://localhost:8080/"} outputId="d8186cbf-b2e7-4afe-9dd6-113300e3c3e5"
# Aggregate consecutive NER tokens that share the same entity label and
# print one line per named entity: "<LABEL> <detokenized text>".
#
# itertools.groupby groups *consecutive* items with equal keys, which is
# exactly the adjacency rule the original hand-rolled loop implemented —
# with two improvements: no state variables to track, and no spurious
# "None" line printed when `nes` is empty.
from itertools import groupby

for label, group in groupby(nes, key=lambda ne: ne['entity']):
    tokens = [ne['word'] for ne in group]
    # convert_tokens_to_string re-joins sub-word tokens into readable text.
    print(label, ner.tokenizer.convert_tokens_to_string(tokens))
# + [markdown] id="-yYhYftcLrGS"
# We found two named entities:
#
# * PERSON: Colin
# * LOCATION: Saint-André
| basic_huggingface.ipynb |