code
stringlengths 38
801k
| repo_path
stringlengths 6
263
|
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Predicting Numeric Outcomes with Linear Regression
#
# **Aim**: The aim of this notebook is to predict the amount of a mobile transaction (numeric outcome) given all the other features in the dataset.
# ## Table of contents
#
# 1. Linear Regression in 2-Dimensions
# 2. Linear Regression to predict transaction amount
# 3. Model Optimization
# ## Package requirements
import pandas as pd
from sklearn import linear_model
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.linear_model import Ridge
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import Lasso
import warnings
# +
# Load the mobile-transaction dataset from disk.
df = pd.read_csv('fraud_prediction.csv')
# +
# Single predictor (account holder's old balance) and numeric target (amount).
feature = df['oldbalanceOrg'].values
target = df['amount'].values
# -
# ## Linear Regression in 2-Dimensions
# +
# Visualise the raw relationship before fitting anything.
plt.scatter(feature, target)
plt.xlabel('Old Balance of Account Holder')
plt.ylabel('Amount of Transaction')
plt.title('Amount Vs. Old Balance')
plt.show()
# -
# **Building the linear regression model**
# +
# Ordinary least squares on the single feature.
linear_reg = linear_model.LinearRegression()
# scikit-learn expects 2-D inputs, so promote the 1-D arrays to column vectors.
feature = feature.reshape(-1, 1)
target = target.reshape(-1, 1)
linear_reg.fit(feature, target)
# +
# Evenly spaced x values spanning the observed range, again as a column vector.
x_lim = np.linspace(min(feature), max(feature)).reshape(-1, 1)
plt.scatter(feature, target)
plt.xlabel('Old Balance of Account Holder')
plt.ylabel('Amount of Transaction')
plt.title('Amount Vs. Old Balance')
# Overlay the fitted regression line in red.
plt.plot(x_lim, linear_reg.predict(x_lim), color='red')
plt.show()
# -
# ## Linear Regression to predict transaction amount
# +
# Reload the dataset so this section is self-contained.
df = pd.read_csv('fraud_prediction.csv')
# +
# Every column except the label is used as a predictor.
features = df.drop('isFraud', axis=1).values
target = df['isFraud'].values
# -
# 70/30 split, stratified on the label so both sets keep the class balance.
X_train, X_test, y_train, y_test = train_test_split(
    features, target, test_size=0.3, random_state=42, stratify=target)
# **Fitting and evaluating the accuracy of the linear regression model**
# +
linear_reg = linear_model.LinearRegression()
# +
linear_reg.fit(X_train, y_train)
# +
# R^2 of the fit on the held-out data.
linear_reg.score(X_test, y_test)
# -
# **Scaling your data**
# +
# Standardise the features, then fit the same model, as a single pipeline.
pipeline = Pipeline([
    ('scaler', StandardScaler()),
    ('linear_reg', linear_model.LinearRegression()),
])
linear_reg_scaled = pipeline.fit(X_train, y_train)
# R^2 of the scaled model on the held-out data.
linear_reg_scaled.score(X_test, y_test)
# -
# ## Model Optimization
# **Ridge Regression**
# +
# Reading in the dataset
df = pd.read_csv('fraud_prediction.csv')
# +
# Features: every column except the label.
features = df.drop('isFraud', axis=1).values
target = df['isFraud'].values
# -
X_train, X_test, y_train, y_test = train_test_split(features, target, test_size=0.3, random_state=42, stratify=target)
# +
# BUG FIX: Ridge(normalize=True) relied on the `normalize` parameter, which was
# deprecated in scikit-learn 1.0 and REMOVED in 1.2 (TypeError today). The
# documented migration path is to scale the features in a Pipeline instead.
# alpha=0 means no penalty, i.e. equivalent to ordinary least squares.
ridge_reg = Pipeline([
    ('scaler', StandardScaler()),
    ('ridge', Ridge(alpha=0)),
])
# +
# Fit the model to the training data
ridge_reg.fit(X_train, y_train)
# +
# Extract the R^2 score on the test data
ridge_reg.score(X_test, y_test)
# -
# **Optimizing alpha using GridSearchCV**
# +
# Building the model
ridge_regression = Ridge()
# Cross-validated search over a coarse grid of penalty strengths.
grid = GridSearchCV(ridge_regression, {'alpha': [0.0001, 0.001, 0.01, 0.1, 10]})
grid.fit(X_train, y_train)
# Print out the best parameter
print("The most optimal value of alpha is:", grid.best_params_)
# +
# Refit with the alpha selected above.
ridge_regression = Ridge(alpha=0.01)
ridge_regression.fit(X_train, y_train)
# +
# R^2 score of the ridge regression model on the test set
ridge_regression.score(X_test, y_test)
# +
train_errors = []
test_errors = []
alpha_list = [0.0001, 0.001, 0.01, 0.1, 10]
# Evaluate the training and test scores for each value of alpha
for value in alpha_list:
    ridge_regression = Ridge(alpha=value)
    ridge_regression.fit(X_train, y_train)
    train_errors.append(ridge_regression.score(X_train, y_train))
    test_errors.append(ridge_regression.score(X_test, y_test))
# Plot train/test scores against alpha on a log-scaled x axis
plt.semilogx(alpha_list, train_errors, alpha_list, test_errors)
plt.legend(("train", "test"))
plt.ylabel('Accuracy Score')
plt.xlabel('Alpha')
plt.show()
# -
# **Lasso Regression**
# +
# Reading in the dataset
df = pd.read_csv('fraud_prediction.csv')
# +
# Features: every column except the label.
features = df.drop('isFraud', axis=1).values
target = df['isFraud'].values
# -
X_train, X_test, y_train, y_test = train_test_split(features, target, test_size=0.3, random_state=42, stratify=target)
# +
# BUG FIX: Lasso(normalize=True) used the `normalize` parameter, removed in
# scikit-learn 1.2; scale the features in a Pipeline instead. alpha=0 means no
# penalty (plain least squares).
lasso_reg = Pipeline([
    ('scaler', StandardScaler()),
    ('lasso', Lasso(alpha=0)),
])
# +
# Fit the model to the training data
lasso_reg.fit(X_train, y_train)
warnings.filterwarnings('ignore')
# +
# Extract the R^2 score on the test data
lasso_reg.score(X_test, y_test)
# -
# **Optimizing alpha using GridSearchCV**
# +
# Building the model
lasso_regression = Lasso()
# Cross-validated search over the same grid of penalty strengths.
grid = GridSearchCV(lasso_regression, {'alpha': [0.0001, 0.001, 0.01, 0.1, 10]})
grid.fit(X_train, y_train)
# Print out the best parameter
print("The most optimal value of alpha is:", grid.best_params_)
# +
# Refit with the alpha selected above.
lasso_regression = Lasso(alpha=0.0001)
lasso_regression.fit(X_train, y_train)
# +
# R^2 score of the lasso regression model on the test set
lasso_regression.score(X_test, y_test)
# +
train_errors = []
test_errors = []
alpha_list = [0.0001, 0.001, 0.01, 0.1, 10]
# Evaluate the training and test scores for each value of alpha
for value in alpha_list:
    lasso_regression = Lasso(alpha=value)
    lasso_regression.fit(X_train, y_train)
    # BUG FIX: the original appended ridge_regression.score(...) here — it
    # scored the leftover ridge model from the previous section instead of
    # the lasso model fitted on this iteration, so the plot never varied
    # with alpha.
    train_errors.append(lasso_regression.score(X_train, y_train))
    test_errors.append(lasso_regression.score(X_test, y_test))
# Plot train/test scores against alpha on a log-scaled x axis
plt.semilogx(alpha_list, train_errors, alpha_list, test_errors)
plt.legend(("train", "test"))
plt.ylabel('Accuracy Score')
plt.xlabel('Alpha')
plt.show()
# -
|
Chapter_05.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (TensorFlow 2.3 Python 3.7 CPU Optimized)
# language: python
# name: python3__SAGEMAKER_INTERNAL__arn:aws:sagemaker:us-east-2:429704687514:image/tensorflow-2.3-cpu-py37-ubuntu18.04-v1
# ---
# + [markdown] colab_type="text" id="M27qF7CTrBqc"
# # TASK #1: UNDERSTAND THE PROBLEM STATEMENT
# + [markdown] colab_type="text" id="xNl52nl3qiyL"
# - The objective of this case study is to predict the employee salary based on the number of years of experience.
# - In simple linear regression, we predict the value of one variable Y based on another variable X.
# - X is called the independent variable and Y is called the dependent variable.
# - Why simple? Because it examines relationship between two variables only.
# - Why linear? when the independent variable increases (or decreases), the dependent variable increases (or decreases) in a linear fashion.
#
# + [markdown] colab_type="text" id="zKmFmyaGunc7"
# # TASK #2: IMPORT LIBRARIES AND DATASETS
# +
# #!pip install seaborn
# #!pip install tensorflow
# -
# install seaborn library
# # !pip install seaborn
# # !pip install tensorflow
import tensorflow as tf
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# +
# Load the salary dataset (YearsExperience vs. Salary).
salary_df = pd.read_csv('salary.csv')
# +
# Display the whole dataframe.
salary_df
# -
# MINI CHALLENGE
# - Use head and tail methods to print the first and last 7 rows of the dataframe
# - Try to find the maximum salary value in the dataframe
salary_df.head(7)
salary_df.tail(7)
salary_df['Salary'].max()
# # TASK #3: PERFORM EXPLORATORY DATA ANALYSIS AND VISUALIZATION
# Heatmap of missing values: an all-blank map means there are no nulls.
sns.heatmap(salary_df.isnull(), yticklabels=False, cbar=False, cmap="Blues")
# +
# Column dtypes and non-null counts.
salary_df.info()
# +
# Statistical summary of the numeric columns.
salary_df.describe()
# -
# MINI CHALLENGE
# - What are the number of years of experience corresponding to employees with minimum and maximum salaries?
salary_df[salary_df['Salary'] == salary_df['Salary'].min()]
salary_df[salary_df['Salary'] == salary_df['Salary'].max()]
# Histograms of every numeric column.
salary_df.hist(bins=30, figsize=(20, 10), color='r')
# +
# Pairwise scatter plots of all numeric columns.
sns.pairplot(salary_df)
# +
# Correlation matrix rendered as an annotated heatmap.
corr_matrix = salary_df.corr()
sns.heatmap(corr_matrix, annot=True)
plt.show()
# -
# MINI CHALLENGE
# - Use regplot in Seaborn to obtain a straight line fit between "salary" and "years of experience"
sns.regplot(x='YearsExperience', y='Salary', data=salary_df)
# # TASK #4: CREATE TRAINING AND TESTING DATASET
# +
# Predictor and target, each kept as a single-column dataframe.
X = salary_df[['YearsExperience']]
y = salary_df[['Salary']]
# -
X
y
# +
X.shape
# +
y.shape
# +
# float32 numpy arrays: the format the SageMaker Linear Learner needs later on.
X = np.array(X).astype('float32')
y = np.array(y).astype('float32')
# -
# Only take the numerical variables and scale them
X
# +
# Hold out 25% of the rows for testing.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)
# -
# MINI CHALLENGE
# - Try splitting the data into 75% for training and the rest for testing
# - Verify that the split was successful by obtaining the shape of both X_train and X_test
# - Did you notice any change in the order of the data? why?
X_train.shape
X_test.shape
# # TASK #5: TRAIN A LINEAR REGRESSION MODEL IN SK-LEARN (NOTE THAT SAGEMAKER BUILT-IN ALGORITHMS ARE NOT USED HERE)
# +
# Fit an ordinary least-squares model with an intercept term.
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, accuracy_score
regresssion_model_sklearn = LinearRegression(fit_intercept=True)
regresssion_model_sklearn.fit(X_train, y_train)
# +
# R^2 on the held-out data.
regresssion_model_sklearn_accuracy = regresssion_model_sklearn.score(X_test, y_test)
regresssion_model_sklearn_accuracy
# -
print('Linear Model Coefficient (m): ', regresssion_model_sklearn.coef_)
print('Linear Model Coefficient (b): ', regresssion_model_sklearn.intercept_)
# MINI CHALLENGE
# - Retrain the model while setting the fit_intercept = False, what do you notice?
# With no intercept the fitted line is forced through the origin.
regresssion_model_sklearn = LinearRegression(fit_intercept=False)
regresssion_model_sklearn.fit(X_train, y_train)
regresssion_model_sklearn_accuracy = regresssion_model_sklearn.score(X_test, y_test)
regresssion_model_sklearn_accuracy
print('Linear Model Coefficient (m): ', regresssion_model_sklearn.coef_)
print('Linear Model Coefficient (b): ', regresssion_model_sklearn.intercept_)
# # TASK #6: EVALUATE TRAINED MODEL PERFORMANCE (NOTE THAT SAGEMAKER BUILT-IN ALGORITHMS ARE NOT USED HERE)
y_predict = regresssion_model_sklearn.predict(X_test)
y_predict
# Training data with the fitted line overlaid.
plt.scatter(X_train, y_train, color='gray')
plt.plot(X_train, regresssion_model_sklearn.predict(X_train), color='red')
plt.ylabel('Salary')
plt.xlabel('Number of Years of Experience')
plt.title('Salary vs. Years of Experience')
# MINI CHALLENGE
# - Use the trained model, obtain the salary corresponding to employees who have years of experience = 5
num_years_exp = [[5]]
salary = regresssion_model_sklearn.predict(num_years_exp)
salary
# # TASK #7: TRAIN A LINEAR LEARNER MODEL USING SAGEMAKER
# +
# Boto3 is the Amazon Web Services (AWS) Software Development Kit (SDK) for Python
# Boto3 allows Python developers to write software that makes use of services like Amazon S3 and Amazon EC2
import sagemaker
import boto3
from sagemaker import Session
# Create a SageMaker session.
sagemaker_session = sagemaker.Session()
bucket = Session().default_bucket()
# BUG FIX: the comment states the pre-created bucket is named
# 'sagemaker-practical', but the original assigned the misspelled
# 'sagemaker-practic', which would make every upload below fail.
bucket = 'sagemaker-practical'  # bucket created beforehand
prefix = 'linear_learner'  # prefix is the subfolder within the bucket
# Get the execution role for the notebook instance: the IAM role that
# SageMaker assumes on your behalf (reading training data from S3, writing
# model artifacts back to S3, etc.).
role = sagemaker.get_execution_role()
print(role)
# -
X_train.shape
# Linear Learner expects the label as a 1-D vector, not a column matrix.
y_train = y_train[:, 0]
y_train.shape
# +
import io  # in-memory binary buffers
import numpy as np
import sagemaker.amazon.common as smac  # sagemaker common library
# Convert the numpy arrays to the RecordIO format required by the
# SageMaker Linear Learner.
buf = io.BytesIO()
smac.write_numpy_to_dense_tensor(buf, X_train, y_train)
# Writing advances the buffer position; rewind before uploading.
buf.seek(0)
# +
import os
# Key is the object name of the file in S3.
key = 'linear-train-data'
# Upload the RecordIO training data to s3://<bucket>/<prefix>/train/<key>.
boto3.resource('s3').Bucket(bucket).Object(os.path.join(prefix, 'train', key)).upload_fileobj(buf)
s3_train_data = 's3://{}/{}/train/{}'.format(bucket, prefix, key)
print('uploaded training data location: {}'.format(s3_train_data))
# -
X_test.shape
y_test.shape
# Make sure that the target label is a vector
y_test = y_test[:, 0]
# +
# Same RecordIO conversion for the test split.
buf = io.BytesIO()
smac.write_numpy_to_dense_tensor(buf, X_test, y_test)
buf.seek(0)
# +
key = 'linear-test-data'
# Upload the RecordIO test data to s3://<bucket>/<prefix>/test/<key>.
boto3.resource('s3').Bucket(bucket).Object(os.path.join(prefix, 'test', key)).upload_fileobj(buf)
s3_test_data = 's3://{}/{}/test/{}'.format(bucket, prefix, key)
# BUG FIX: the original message said "training data" for the test split.
print('uploaded testing data location: {}'.format(s3_test_data))
# +
# Output placeholder in S3 where Linear Learner will write model artifacts.
output_location = 's3://{}/{}/output'.format(bucket, prefix)
print('Training artifacts will be uploaded to: {}'.format(output_location))
# +
# Obtain the Linear Learner training container image for the current region
# (get_image_uri resolves the region via boto3.Session, no hardcoding needed).
# NOTE(review): get_image_uri and the train_* Estimator keyword arguments below
# are SageMaker Python SDK v1 names; SDK v2 renamed them (sagemaker.image_uris.retrieve,
# instance_count / instance_type / use_spot_instances / max_run / max_wait) —
# confirm which SDK version this notebook runs against.
from sagemaker.amazon.amazon_estimator import get_image_uri
container = get_image_uri(boto3.Session().region_name, 'linear-learner')
# +
# Configure the training job: container image, IAM role, instance fleet,
# S3 output path, and the session created earlier.
linear = sagemaker.estimator.Estimator(container,
                                       role,
                                       train_instance_count = 1,
                                       train_instance_type = 'ml.c4.xlarge',
                                       output_path = output_location,
                                       sagemaker_session = sagemaker_session)
# Hyperparameters: one input feature, regression objective, and 32 candidate
# models trained in parallel with the best one kept (built-in tuning).
linear.set_hyperparameters(feature_dim = 1,
                           predictor_type = 'regressor',
                           mini_batch_size = 5,
                           epochs = 5,
                           num_models = 32,
                           loss = 'absolute_loss')
# Launch training on the RecordIO data previously uploaded to S3.
linear.fit({'train': s3_train_data})
# Progress can be followed in the CloudWatch logs.
# -
# MINI CHALLENGE
# - Try to train the model with more epochs and additional number of models
#
# - Can you try to reduce the cost of billable seconds?
# +
# Same training job, but on managed spot instances to reduce billable seconds:
# train_max_run caps the training time and train_max_wait caps the time spent
# waiting for spot capacity.
linear = sagemaker.estimator.Estimator(container,
                                       role,
                                       train_instance_count = 1,
                                       train_instance_type = 'ml.c4.xlarge',
                                       output_path = output_location,
                                       sagemaker_session = sagemaker_session,
                                       train_use_spot_instances = True,
                                       train_max_run = 300,
                                       train_max_wait = 600)
# Identical hyperparameters to the first run.
linear.set_hyperparameters(feature_dim = 1,
                           predictor_type = 'regressor',
                           mini_batch_size = 5,
                           epochs = 5,
                           num_models = 32,
                           loss = 'absolute_loss')
linear.fit({'train': s3_train_data})
# Progress can be followed in the CloudWatch logs.
# -
# # TASK #8: DEPLOY AND TEST THE TRAINED LINEAR LEARNER MODEL
# +
# Deploy the trained model behind a real-time inference endpoint.
linear_regressor = linear.deploy(initial_instance_count = 1,
                                 instance_type = 'ml.m4.xlarge')
# +
# NOTE(review): csv_serializer / json_deserializer are SageMaker SDK v1 names;
# SDK v2 replaced them with sagemaker.serializers.CSVSerializer() and
# sagemaker.deserializers.JSONDeserializer() — confirm the installed SDK version.
from sagemaker.predictor import csv_serializer, json_deserializer
# The endpoint expects text/csv input and returns JSON predictions.
# Serializer turns the input into bytes of the declared content type;
# deserializer parses the response.
# Reference: https://sagemaker.readthedocs.io/en/stable/predictors.html
#linear_regressor.content_type = 'text/csv'
linear_regressor.serializer = csv_serializer
linear_regressor.deserializer = json_deserializer
# +
# making prediction on the test data
result = linear_regressor.predict(X_test)
# -
result # results are in Json format
# +
# Pull the numeric 'score' field out of each JSON prediction record.
predictions = np.array([r['score'] for r in result['predictions']])
# -
predictions
predictions.shape
# VISUALIZE TEST SET RESULTS
plt.scatter(X_test, y_test, color = 'gray')
plt.plot(X_test, predictions, color = 'red')
plt.xlabel('Years of Experience (Testing Dataset)')
plt.ylabel('salary')
plt.title('Salary vs. Years of Experience')
# +
# Delete the endpoint so it stops accruing charges.
linear_regressor.delete_endpoint()
# -
|
ML_projects/1. Employee Salary Prediction/employee_salary_prediction_notebook.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
from geopy.geocoders import Nominatim
from geopy.exc import GeocoderTimedOut
from geopy.exc import GeocoderServiceError
from time import sleep
# Load the cleaned crime data and drop rows with missing or zero coordinates.
crimes = pd.read_csv('../data/cleaned_crime_data.csv')
crimes.shape
crimes = crimes[crimes.latitude != 0]
crimes = crimes[crimes.longitude != 0]
crimes = crimes[np.isfinite(crimes['longitude'])]
crimes.shape
# Rows whose zip code is the literal string 'null'.
df = crimes.loc[crimes['zip'] == 'null']
df.shape
# BUG FIX: the original built `pd.Series(zipcodes)` here, before `zipcodes`
# is defined (it is only created by geocode() further down), which raises
# NameError. The same summary is produced after geocoding, so the premature
# lines were removed.
# showing zipcode problem
a = crimes.zip.value_counts().reset_index().rename(columns={'index': 'zip', 0: 'count'})
print (a.to_string())
# Address example: reverse-geocode one coordinate pair into a full address.
geolocator = Nominatim()
location = geolocator.reverse("47.500626, -122.234039")
address = location.address
address
# Worked example: pull the zip code out of one reverse-geocoded address.
# Nominatim returns "..., <city>, <zip>, <country>", so dropping the last
# comma-separated component (country) leaves the zip as the new last one.
geolocator = Nominatim()
location = geolocator.reverse("47.500626, -122.234039")
address = location.address
without_country = address.rsplit(', ', 1)[0]
zipcode = without_country.rsplit(', ', 1)[1]
zipcode
# +
# Every (latitude, longitude) pair remaining in the filtered data.
locations = list(zip(crimes.latitude, crimes.longitude))
zipcodes = []
geolocator = Nominatim()

def geocode():
    """Reverse-geocode every coordinate pair and append the parsed zip code.

    Appends None for failed lookups so `zipcodes` stays aligned with
    `locations`, and sleeps between requests to respect Nominatim's
    rate limit.
    """
    for item in locations:
        try:
            location = geolocator.reverse(item, timeout=10)
            x = location.address
            z = x.rsplit(', ', 1)
            r = z[0]
            s = r.rsplit(', ', 1)
            zipcode = s[1]
            zipcodes.append(zipcode)
        # BUG FIX: as written the `try` had no except clause (a syntax error),
        # even though GeocoderTimedOut and GeocoderServiceError are imported at
        # the top of the file. Handle those failures explicitly so one bad
        # lookup does not abort the whole (hours-long) run.
        except (GeocoderTimedOut, GeocoderServiceError):
            zipcodes.append(None)
        sleep(1)

geocode()
zipcodes
# -
# Distribution of the zip codes that were recovered.
zips = pd.Series(zipcodes)
zips.value_counts()
zipcodes
len(zipcodes)
|
Examples/Zipcodes.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from osbrain import run_agent
from osbrain import run_nameserver
import random
# +
#------------------------------------------------------------------
# Nuwan's Details
#
# The simulation is driven by module-level state: each agent's wealth and a
# set of progress flags that the main loop below reads and mutates.
nuwansWealth = random.randint(5000000,7000000)  # starting savings
nuwansSalary = 50000                            # monthly income
nuwansExpenses = 0                              # grows as assets are acquired
# Life-goal progress flags.
bought_a_land = False
built_a_house = False
carpentering_done = False
painting_done = False
bought_a_car = False
life_is_completed = False
# "In progress" flags so the found-a-worker messages are logged only once.
building_happening = False
carpentering_happening = False
painting_happening = False

def introduction(agent):
    # Log Nuwan's starting situation and his goals.
    agent.log_info("I have "+ str(nuwansWealth)+ " much of money")
    agent.log_info("I need to buy a land, build a house and buy a car")
    agent.log_info("I got a job with a monthly salary of " + str(nuwansSalary))
    agent.log_info("I have no monthly expenses yet")
    agent.log_info("--------------------------------------------------------")
    agent.log_info("I found a land but the land price increases every month")
    agent.log_info("--------------------------------------------------------")

def gotASalary(agent):
    # Credit one month's net income (salary minus recurring expenses).
    global nuwansWealth
    nuwansWealth += (nuwansSalary - nuwansExpenses)
    agent.log_info("I got my salary. Now I have " + str(nuwansWealth))

def buyTheLand(agent,amount,expenses):
    # Pay for the land and take on its recurring maintenance cost.
    global nuwansWealth
    global bought_a_land
    global nuwansExpenses
    nuwansWealth -= amount
    nuwansExpenses += expenses
    agent.log_info("Yes!. I bought the land. Now I have " + str(nuwansWealth))
    bought_a_land = True

def buyTheCar(agent,amount,expenses):
    # Pay for the car; this is the final goal, so mark life as completed.
    global nuwansWealth
    global bought_a_car
    global nuwansExpenses
    global life_is_completed
    nuwansWealth -= amount
    nuwansExpenses += expenses
    agent.log_info("Yes!. I bought the car. Now I have " + str(nuwansWealth))
    agent.log_info("Now. My life is completed !!!!!!!!!")
    bought_a_car = True
    life_is_completed = True

def houseBuilt(agent):
    # Builder finished the shell of the house.
    global built_a_house
    built_a_house = True
    agent.log_info("Yes!. I have half completed house. Need to start carpetering. Now I have " + str(nuwansWealth))

def houseCarpentered(agent):
    # Carpentry done; painting is next.
    global carpentering_done
    carpentering_done = True
    agent.log_info("Yes!. I have almost completed house. Need to start painting. Now I have " + str(nuwansWealth))

def housePainted(agent):
    # House fully finished; only the car remains.
    global painting_done
    painting_done = True
    agent.log_info("Yes!!!. I have a completed house. Need to buy a car. Now I have " + str(nuwansWealth))
#------------------------------------------------------------------
# Land Owner's (ls) Details
lsWealth = 0                                  # land owner's cash on hand
landPrice = random.randint(5000000,7500000)   # initial asking price
landMaintainanceCost = 10000                  # monthly cost passed to the buyer
land_sold = False

def landPriceIncrease(agent):
    # The asking price rises every month until the land is sold.
    global landPrice
    global land_sold
    if not land_sold :
        landPrice += 20000
        agent.log_info("Land price is now " + str(landPrice))

def sellLand(agent):
    # Complete the sale: the owner receives the current asking price.
    global lsWealth
    global land_sold
    lsWealth = landPrice
    land_sold = True
    agent.log_info("Sold the land to Nuwan. I have now "+ str(lsWealth))

def sendLandPrice(agent,_):
    # REP handler: reply with the current asking price.
    return landPrice

def sendLandMaintainanceCost(agent,_):
    # REP handler: reply with the monthly maintenance cost.
    return landMaintainanceCost
#------------------------------------------------------------------
# Builder's Details
buildersWealth = 0
buildersSalary = random.randint(40000,60000)    # monthly wage
buildersMonthsToBuild = random.randint(5,10)    # months of work remaining

def building_the_house(agent):
    # One month of building work: collect a salary, decrement the remaining
    # months, and log progress. Returns the number of months still to go.
    global buildersWealth
    global buildersMonthsToBuild
    buildersWealth += buildersSalary
    buildersMonthsToBuild -= 1
    if buildersMonthsToBuild == 0 :
        agent.log_info("I finished the house. From building Nuwan's house now I have "+ str(buildersWealth))
    else:
        agent.log_info("I did a months work. I got a salary. Now I have "+ str(buildersWealth))
        agent.log_info( str(buildersMonthsToBuild) + " months remaining to finish.")
    return buildersMonthsToBuild

def sendBuildersSalary(agent,_):
    # REP handler: reply with the builder's monthly salary.
    return buildersSalary
#------------------------------------------------------------------
# Carpenter's Details
carpentersWealth = 0
carpentersSalary = random.randint(40000,60000)   # monthly wage
carpentersMonthsToBuild = random.randint(2,5)    # months of work remaining

def carpentering_the_house(agent):
    # One month of carpentry: collect a salary, decrement the remaining
    # months, and log progress. Returns the number of months still to go.
    global carpentersWealth
    global carpentersMonthsToBuild
    carpentersWealth += carpentersSalary
    carpentersMonthsToBuild -= 1
    if carpentersMonthsToBuild == 0 :
        agent.log_info("I finished carpentering the house. From carpentering Nuwan's house now I have "+ str(carpentersWealth))
    else:
        agent.log_info("I did a months work. I got a salary. Now I have "+ str(carpentersWealth))
        agent.log_info(str(carpentersMonthsToBuild)+" months remaining to finish.")
    return carpentersMonthsToBuild

def sendCarpentersSalary(agent,_):
    # REP handler: reply with the carpenter's monthly salary.
    return carpentersSalary

#------------------------------------------------------------------
# Painter's Details
paintersWealth = 0
paintersSalary = random.randint(20000,30000)     # monthly wage
paintersMonthsToPaint = 1                        # painting takes one month

def painting_the_house(agent):
    # One month of painting; returns the number of months still to go.
    global paintersWealth
    global paintersMonthsToPaint
    paintersWealth += paintersSalary
    paintersMonthsToPaint -= 1
    if paintersMonthsToPaint == 0 :
        agent.log_info("I finished painting the house. From painting Nuwan's house now I have "+ str(paintersWealth))
    else:
        agent.log_info("I did a months work. I got a salary. Now I have "+ str(paintersWealth))
        agent.log_info(str(paintersMonthsToPaint) +" months remaining to finish.")
    return paintersMonthsToPaint

def sendPaintersSalary(agent,_):
    # REP handler: reply with the painter's monthly salary.
    return paintersSalary
#------------------------------------------------------------------
# CarSale's Details
carSalesWealth = 0
carPrice = random.randint(5000000,7500000)   # asking price for the car
carMaintainanceCost = 15000                  # monthly cost passed to the buyer

def sellCar(agent):
    # Complete the sale: the dealer receives the car price.
    global carSalesWealth
    carSalesWealth = carPrice
    agent.log_info("Sold the car to Nuwan. I have now "+ str(carSalesWealth))

def sendCarPrice(agent,_):
    # REP handler: reply with the car's asking price.
    return carPrice

def sendCarMaintainanceCost(agent,_):
    # REP handler: reply with the monthly maintenance cost.
    return carMaintainanceCost
#------------------------------------------------------------------
# Month-by-month wealth history for each of the six agents, in order:
# Nuwan, land owner, builder, carpenter, painter, car dealer.
wealth = [[],[],[],[],[],[]]
# -
if __name__ == '__main__':
    # NOTE(review): the source text lost its indentation; the block structure
    # below (placement of the `continue` statements in particular) was
    # reconstructed from the flattened statement order — confirm against the
    # original notebook.
    # System deployment: start the osBrain name server.
    ns = run_nameserver()
    # Agents
    nuwan = run_agent('Nuwan')
    landOwner = run_agent('LandOwner')
    builder = run_agent('Builder')
    carpenter = run_agent('Carpenter')
    painter = run_agent('Painter')
    carSale = run_agent('CarSale')
    # Wire one REP/REQ channel per piece of information Nuwan can request.
    addr = landOwner.bind('REP', alias='getLandMaintainanceCost', handler=sendLandMaintainanceCost)
    nuwan.connect(addr, alias='getLandMaintainanceCost')
    addr = landOwner.bind('REP', alias='getLandPrice', handler=sendLandPrice)
    nuwan.connect(addr, alias='getLandPrice')
    addr = builder.bind('REP', alias='getBuildersSalary', handler=sendBuildersSalary)
    nuwan.connect(addr, alias='getBuildersSalary')
    addr = carpenter.bind('REP', alias='getCarpentersSalary', handler=sendCarpentersSalary)
    nuwan.connect(addr, alias='getCarpentersSalary')
    addr = painter.bind('REP', alias='getPaintersSalary', handler=sendPaintersSalary)
    nuwan.connect(addr, alias='getPaintersSalary')
    addr = carSale.bind('REP', alias='getCarMaintainanceCost', handler=sendCarMaintainanceCost)
    nuwan.connect(addr, alias='getCarMaintainanceCost')
    addr = carSale.bind('REP', alias='getCarPrice', handler=sendCarPrice)
    nuwan.connect(addr, alias='getCarPrice')
    introduction(nuwan)
    # NOTE(review): these `global` statements sit at module scope, where they
    # have no effect (the names are already module globals).
    global wealth
    global nuwansWealth
    # One loop iteration = one month: Nuwan collects a salary, every agent's
    # wealth is recorded, and one life-goal stage is attempted.
    while(not life_is_completed):
        gotASalary(nuwan)
        wealth[0].append(nuwansWealth)
        wealth[1].append(lsWealth)
        wealth[2].append(buildersWealth)
        wealth[3].append(carpentersWealth)
        wealth[4].append(paintersWealth)
        wealth[5].append(carSalesWealth)
        # Stage 1: buy the land (its price rises each month until sold).
        if not bought_a_land:
            landPriceIncrease(landOwner)
            nuwan.send('getLandPrice',"_")
            price = int(nuwan.recv('getLandPrice'))
            if price < nuwansWealth:
                nuwan.log_info("I have enough money to buy the land")
                nuwan.send('getLandMaintainanceCost',"_")
                cost = int(nuwan.recv('getLandMaintainanceCost'))
                buyTheLand(nuwan,price,cost)
                sellLand(landOwner)
                nuwan.log_info("Now I have a land need to build a house")
            else:
                nuwan.log_info("I don't have enough money to buy the land")
                nuwan.log_info("I need to wait")
            continue
        # Stage 2: pay the builder month by month until the shell is done.
        if not built_a_house:
            global building_happening
            nuwan.send('getBuildersSalary',"_")
            builderSalary = int(nuwan.recv('getBuildersSalary'))
            if builderSalary < nuwansWealth:
                if not building_happening:
                    nuwan.log_info("I found a builder to build the house")
                    building_happening = True
                days = building_the_house(builder)
                nuwansWealth -= builderSalary
                if days == 0:
                    houseBuilt(nuwan)
            else:
                nuwan.log_info("Builder is asking "+str(builderSalary))
                nuwan.log_info("I don't have enough money to pay the builder")
                nuwan.log_info("I need to wait")
            continue
        # Stage 3: carpentry, same month-by-month pattern.
        if not carpentering_done:
            global carpentering_happening
            nuwan.send('getCarpentersSalary',"_")
            carpentersSalary = int(nuwan.recv('getCarpentersSalary'))
            if carpentersSalary < nuwansWealth:
                if not carpentering_happening:
                    nuwan.log_info("I found a carpenter to build the house")
                    carpentering_happening = True
                days = carpentering_the_house(carpenter)
                nuwansWealth -= carpentersSalary
                if days == 0:
                    houseCarpentered(nuwan)
            else:
                nuwan.log_info("Builder is asking "+str(carpentersSalary))
                nuwan.log_info("I don't have enough money to pay the carpenter")
                nuwan.log_info("I need to wait")
            continue
        # Stage 4: painting.
        if not painting_done:
            global painting_happening
            nuwan.send('getPaintersSalary',"_")
            paintersSalary = int(nuwan.recv('getPaintersSalary'))
            if paintersSalary < nuwansWealth:
                if not painting_happening:
                    nuwan.log_info("I found a painter to build the house")
                    painting_happening = True
                days = painting_the_house(painter)
                nuwansWealth -= paintersSalary
                if days == 0:
                    housePainted(nuwan)
            else:
                nuwan.log_info("Builder is asking "+str(paintersSalary))
                nuwan.log_info("I don't have enough money to pay the painter")
                nuwan.log_info("I need to wait")
            continue
        # Stage 5: buy the car — the final goal; buying it ends the loop.
        if not bought_a_car:
            nuwan.send('getCarPrice',"_")
            carPrice = int(nuwan.recv('getCarPrice'))
            if carPrice < nuwansWealth:
                nuwan.log_info("I have enough money to buy the car")
                nuwan.send('getCarMaintainanceCost',"_")
                cost = int(nuwan.recv('getCarMaintainanceCost'))
                buyTheCar(nuwan,carPrice,cost)
                sellCar(carSale)
                nuwan.log_info("Now I have a car")
            else:
                nuwan.log_info("Car price is "+str(carPrice))
                nuwan.log_info("I don't have enough money to buy the car")
                nuwan.log_info("I need to wait")
                # Wealth is recorded a second time in waiting months here.
                wealth[0].append(nuwansWealth)
                wealth[1].append(lsWealth)
                wealth[2].append(buildersWealth)
                wealth[3].append(carpentersWealth)
                wealth[4].append(paintersWealth)
                wealth[5].append(carSalesWealth)
            continue
    # Tear down the multi-agent system once the simulation ends.
    ns.shutdown()
# +
import matplotlib.pyplot as plt
# %matplotlib inline
# Plot the wealth history of every participant in the simulation.
participant_labels = ('Nuwan', 'Land Owner', 'Builder', 'Carpenter', 'Painter', 'Car Sale')
for history, label in zip(wealth, participant_labels):
    plt.plot(history, label=label)
plt.legend(loc=7)
|
osBrain/LifeGoals_osBrain.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="H08esTFOYO99"
# # Main imports and code
# + colab={"base_uri": "https://localhost:8080/"} id="EnHQoayhBYlm" outputId="974de5ec-fc49-4a8a-d54d-260e01dea10a"
# check which gpu we're using
# !nvidia-smi
# + colab={"base_uri": "https://localhost:8080/"} id="hYhFR7nSYOjG" outputId="bb536efd-b9e2-4b18-e04c-d1a65ef48545"
# !pip install transformers
# !pip install pytorch-ignite
# + id="RJC8wj73Zd_p"
# Any results you write to the current directory are saved as output.
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
from transformers import BertTokenizer,BertModel
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader,Dataset
from torch.nn.utils.rnn import pack_padded_sequence
from torch.optim import AdamW
from tqdm import tqdm
from argparse import ArgumentParser
from ignite.engine import Events, create_supervised_trainer, create_supervised_evaluator
from ignite.metrics import Accuracy, Loss
from ignite.engine.engine import Engine, State, Events
from ignite.handlers import EarlyStopping
from ignite.contrib.handlers import TensorboardLogger, ProgressBar
from ignite.utils import convert_tensor
from torch.optim.lr_scheduler import ExponentialLR
import warnings
warnings.filterwarnings('ignore')
# + id="DvKeryw3eQPw"
import os
import gc
import copy
import time
import random
import string
# For data manipulation
import numpy as np
import pandas as pd
# Pytorch Imports
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.utils.data import Dataset, DataLoader
# Utils
from tqdm import tqdm
from collections import defaultdict
# Sklearn Imports
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import StratifiedKFold, KFold
# + id="fezs8xASSS28"
from transformers import AutoTokenizer, AutoModel, AdamW
# + colab={"base_uri": "https://localhost:8080/"} id="t3FDM6BjTLwW" outputId="0c8264b7-cdeb-4ebd-87ea-681793448f62"
# !pip install sentencepiece
# + id="rSzWEX54ScQi"
import random
import os
from urllib import request
# + colab={"base_uri": "https://localhost:8080/"} id="i2J-Qx3ekn_N" outputId="7d983a7c-79cb-4da0-fc2e-816a24b8cc38"
# Mount Google Drive so the dataset and model checkpoints are reachable.
from google.colab import drive
drive.mount('/content/drive')
# + id="VjoRy3-22MLm"
# Load the iSarcasm English training data, keeping only the text and label columns.
df=pd.read_csv('/content/drive/MyDrive/ISarcasm/DataSet/train.En.csv')
df=df[['tweet','sarcastic']]
# + id="y6Fd8UBBdTQ1"
# Shuffle once with a fixed seed, then split 60/20/20 into train/validate/test.
train, validate, test = \
    np.split(df.sample(frac=1, random_state=42),
             [int(.6*len(df)), int(.8*len(df))])
# + id="PJrh2l2qdXf_"
# Cross-validation below builds its own folds, so fold `validate` back into
# `train`; the 20% `test` split stays as the final held-out set.
train=pd.concat([train, validate], ignore_index=True)
# + id="X5DMNjrTGT8T"
# tedf1.to_csv('/content/drive/MyDrive/PCL/test_task_1',index=False)
# + id="l18cECm4GhQv"
# trdf1.to_csv('/content/drive/MyDrive/PCL/train_task_1',index=False)
# + [markdown] id="xK6FY70KZ6TY"
# # RoBERTa Baseline for Task 1
# + id="EA9QzHTCl5F6"
import numpy as np
from sklearn.metrics import classification_report, accuracy_score, f1_score, confusion_matrix, precision_score , recall_score
from transformers import AutoConfig, AutoModelForSequenceClassification, AutoTokenizer, BertTokenizer
from transformers.data.processors import SingleSentenceClassificationProcessor
from transformers import Trainer , TrainingArguments
from transformers.trainer_utils import EvaluationStrategy
from transformers.data.processors.utils import InputFeatures
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
# + colab={"base_uri": "https://localhost:8080/"} id="YlT2IDHumPNi" outputId="d909fb19-d635-4de1-8c5f-f3190b2d18a7"
# !pip install datasets
# + id="o-E_jNQbV_NL"
class PCLTrainDataset(Dataset):
    """Torch dataset that tokenizes tweets for sarcasm classification.

    Each item is a dict of tensors: padded token ids (``text_ids``), the
    matching attention mask (``text_mask``) and the binary ``sarcastic``
    label as a float (``target``). The ``displacemnt`` argument is accepted
    for caller compatibility but is not used here.
    """
    def __init__(self, df, tokenizer, max_length, displacemnt):
        self.df = df
        self.max_len = max_length
        self.tokenizer = tokenizer
        self.text = df['tweet'].values
        self.label = df['sarcastic'].values

    def __len__(self):
        return len(self.df)

    def __getitem__(self, index):
        # Tokenize a single tweet, padding/truncating to the fixed length.
        encoded = self.tokenizer.encode_plus(
            self.text[index],
            truncation=True,
            add_special_tokens=True,
            max_length=self.max_len,
            padding='max_length'
        )
        return {
            'text_ids': torch.tensor(encoded['input_ids'], dtype=torch.long),
            'text_mask': torch.tensor(encoded['attention_mask'], dtype=torch.long),
            'target': torch.tensor(self.label[index], dtype=torch.float)
        }
# + id="WLXkQK5Bawae"
import math
def sigmoid(x):
    """Numerically stable logistic function: 1 / (1 + e**-x).

    The naive form ``1/(1+math.exp(-x))`` raises OverflowError for large
    negative x; branching on the sign keeps the exponent non-positive so
    ``math.exp`` never overflows.
    """
    if x >= 0:
        return 1 / (1 + math.exp(-x))
    z = math.exp(x)  # exponent is negative here, so this underflows at worst
    return z / (1 + z)
# + id="skGp5I4wM_Zn"
class F1_Loss(nn.Module):
    '''Soft-F1-weighted cross-entropy loss for binary classification.

    Works with GPU tensors. A differentiable F1 score is computed per class
    from the predicted probabilities, then detached and used as a per-class
    weight ``1 - f1`` for CrossEntropyLoss, so the class the model currently
    scores worse on contributes more to the loss.

    Originally adapted from a Kaggle implementation.

    Reference
    ---------
    - https://www.kaggle.com/rejpalcz/best-loss-function-for-f1-score-metric
    - https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html#sklearn.metrics.f1_score
    - https://discuss.pytorch.org/t/calculating-precision-recall-and-f1-score-in-case-of-multi-label-classification/28265/6
    - http://www.ryanzhang.info/python/writing-your-own-loss-function-module-for-pytorch/
    '''
    def __init__(self, epsilon=1e-7):
        super().__init__()
        # Small constant guarding against division by zero in precision/recall.
        self.epsilon = epsilon
    def forward(self, y_pred, y_true,):
        # Assumes y_pred is (batch, 2) class probabilities and y_true is
        # (batch,) integer labels -- TODO confirm against the model/dataloader.
        y_true_one_hot = F.one_hot(y_true.to(torch.int64), 2).to(torch.float32)
        # Soft confusion-matrix counts per class (summed over the batch).
        tp = (y_true_one_hot * y_pred).sum(dim=0).to(torch.float32)
        tn = ((1 - y_true_one_hot) * (1 - y_pred)).sum(dim=0).to(torch.float32)
        fp = ((1 - y_true_one_hot) * y_pred).sum(dim=0).to(torch.float32)
        fn = (y_true_one_hot * (1 - y_pred)).sum(dim=0).to(torch.float32)
        precision = tp / (tp + fp + self.epsilon)
        recall = tp / (tp + fn + self.epsilon)
        f1 = 2* (precision*recall) / (precision + recall + self.epsilon)
        # Clamp away from exact 0/1 so the CE weights stay strictly positive.
        f1 = f1.clamp(min=self.epsilon, max=1-self.epsilon)
        # Detach: f1 acts as a constant class weight, not a gradient path.
        f1=f1.detach()
        # NOTE(review): y_pred has already been through Softmax in the model,
        # while CrossEntropyLoss expects raw logits (and here gets a one-hot
        # "soft label" target) -- confirm this combination is intentional.
        CE =torch.nn.CrossEntropyLoss(weight=( 1 - f1))(y_pred, y_true_one_hot)
        return CE.mean()
# + id="AZa-chAxXf5r"
class PCL_Model_Arch(nn.Module):
    """RoBERTa encoder + Kim-style CNN text classifier (2 classes).

    The last four hidden layers of RoBERTa are stacked as 4 input channels
    and passed through three parallel Conv2d "n-gram" filters (window
    heights 3/4/5), max-pooled over time, concatenated and projected to the
    two output classes, followed by a Softmax.
    """
    def __init__(self):
        super(PCL_Model_Arch, self).__init__()
        # output_hidden_states=True so forward() can read all hidden layers.
        self.bert = AutoModel.from_pretrained('roberta-base', output_hidden_states=True)
        output_channel = 16 # number of kernels per conv net
        num_classes = 2 # number of targets to predict
        dropout = 0.2 # dropout value
        embedding_dim = 768 # width of each RoBERTa hidden state
        ks = 3 # three conv nets here
        # One input channel per hidden layer used (the last 4).
        input_channel = 4 # for single embedding, input_channel = 1
        # Window heights 3/4/5; padding keeps the full sequence in view.
        self.conv1 = nn.Conv2d(input_channel, output_channel, (3, embedding_dim), padding=(2, 0), groups=4)
        self.conv2 = nn.Conv2d(input_channel, output_channel, (4, embedding_dim), padding=(3, 0), groups=4)
        self.conv3 = nn.Conv2d(input_channel, output_channel, (5, embedding_dim), padding=(4, 0), groups=4)
        # apply dropout
        self.dropout = nn.Dropout(dropout)
        # fully connected layer for classification: 3 conv nets * output channels
        self.fc1 = nn.Linear(ks * output_channel, num_classes)
        # NOTE(review): Softmax output is later fed to CrossEntropyLoss in
        # F1_Loss, which expects logits -- confirm this is intentional.
        self.softmax = nn.Softmax()
    def forward(self, text_id, text_mask):
        outputs= self.bert(text_id, attention_mask=text_mask)
        hidden_layers = outputs[2] # get all hidden layers
        # Stack hidden layers as channels, then keep only the last 4.
        hidden_layers = torch.stack(hidden_layers, dim=1)
        x = hidden_layers[:, -4:]
        torch.cuda.empty_cache()
        # Three parallel convolutions over the sequence dimension.
        x = [F.relu(self.conv1(x)).squeeze(3), F.relu(self.conv2(x)).squeeze(3), F.relu(self.conv3(x)).squeeze(3)]
        # max-over-time pooling; # (batch, channel_output) * ks
        x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x]
        # concat results; (batch, channel_output * ks)
        x = torch.cat(x, 1)
        # add dropout
        x = self.dropout(x)
        # generate logits (batch, target_size)
        logit = self.fc1(x)
        torch.cuda.empty_cache()
        return self.softmax(logit)
# + id="IfQwNPiRx_6y"
# class PCL_Model_Arch(nn.Module):
# def __init__(self):
# super(PCL_Model_Arch, self).__init__()
# self.bert = AutoModel.from_pretrained('google/canine-c', output_hidden_states=False)
# self.drop = nn.Dropout(p=0.2)
# self.fc = nn.Linear(768, 2)
# self.softmax = nn.Softmax()
# def forward(self, text_id, text_mask):
# # get the last 4 layers
# outputs= self.bert(text_id, attention_mask=text_mask)
# # all_layers = [4, 16, 256, 768]
# hidden_layers = outputs[1] # get hidden layers
# torch.cuda.empty_cache()
# x = self.drop(hidden_layers)
# torch.cuda.empty_cache()
# # generate logits (batch, target_size)
# logit = self.fc(x)
# torch.cuda.empty_cache()
# return self.softmax(logit)
# + id="tuODW43NYhTz" colab={"base_uri": "https://localhost:8080/", "height": 145, "referenced_widgets": ["f1c278360faa44838ba2eb074be7f9b0", "2bd26cc181fc42cf8bded4274fc4d956", "8a70b0c68a40453a86e93affceb7b89e", "2e50c081d4564f768c02e4e2cd2c5671", "<KEY>", "cece92a16e6d49a583b43717c00c4fbe", "<KEY>", "<KEY>", "c963aedf1ba74599be5ae4ba5438f6b6", "8f52feffe22c454598403c06652c9579", "13f374217a1e4a3ba0b8a801636227ba", "9c19c54caa404f73bf86a2bdaeebed4a", "d3a3fab1ff3944d181fe2cc720b5b05f", "<KEY>", "26a9f74720cc4d66aea348d884f705a4", "d903efe284984773ad1bb58985677f1c", "41ab46b927a54cbdb8a7fad52c306780", "<KEY>", "<KEY>", "f68e842a5cee4a9fa1b0c935b1dbde0a", "05241b96c3ea4dedbc67af0fd669d3cd", "077d7a4c1be147d5a3d74c1f09a1bf50", "1f5e4d49d35c45b2a8dba5d533dbc7df", "b27ae649e8c0474eba4b1d102695b91a", "4adecd38f420415e9f4c2235c0e01e3b", "<KEY>", "<KEY>", "846a23b1b19a4b369b44ab1ef72c33d4", "<KEY>", "f60ef3a50a0444918e981d3d0f47fd98", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "5851911575074258ad547187120506f1", "<KEY>", "<KEY>", "5ad5c7935c8c442892472ced8f9e3bbf", "4ab9a1f8a4b84ded8c3041326f840af6", "<KEY>", "<KEY>", "80b5b17e307540a7a49739799487f524", "<KEY>", "<KEY>"]} outputId="4d6d78a4-9556-42fb-ec9a-db29a5473e55"
# Shared RoBERTa tokenizer used by both the training and inference datasets.
tokenizer= AutoTokenizer.from_pretrained('roberta-base')
# + id="b0XUHerFY2jd"
def criterion(outputs1, targets):
    """Compute the F1-weighted cross-entropy loss for one batch."""
    # Use a distinct local name: the original shadowed `criterion` itself.
    loss_fn = F1_Loss()
    return loss_fn(outputs1, targets)
# + id="Zeh7f_UdC6dh"
class ImbalancedDatasetSampler(torch.utils.data.sampler.Sampler):
    """
    Samples elements randomly from a given list of indices for an imbalanced
    dataset, weighting each sample by the inverse frequency of its class so
    every class is drawn roughly equally often (with replacement).

    Arguments:
        dataset: object exposing a ``sarcastic`` column/attribute of labels
            (e.g. a pandas DataFrame)
        indices (list, optional): a list of indices
        num_samples (int, optional): number of samples to draw
    """
    def __init__(self, dataset, indices=None, num_samples=None):
        from collections import Counter  # stdlib; local to avoid touching module imports
        # if indices is not provided, all elements in the dataset are considered
        self.indices = list(range(len(dataset.sarcastic))) \
            if indices is None else indices
        # if num_samples is not provided, draw `len(indices)` samples per iteration
        self.num_samples = len(self.indices) \
            if num_samples is None else num_samples
        # distribution of classes in the dataset
        label_to_count = Counter(self._get_label(dataset, idx) for idx in self.indices)
        # weight for each sample: inverse of its class frequency
        weights = [1.0 / label_to_count[self._get_label(dataset, idx)]
                   for idx in self.indices]
        self.weights = torch.DoubleTensor(weights)
    def _get_label(self, dataset, id_):
        # Label of the sample at position ``id_``.
        return dataset.sarcastic[id_]
    def __iter__(self):
        # Weighted sampling with replacement over the candidate indices.
        return (self.indices[i] for i in torch.multinomial(self.weights, self.num_samples, replacement=True))
    def __len__(self):
        return self.num_samples
# + id="kRX0b55VaxmS"
# Training configuration shared by the fold loop, schedulers and loaders.
# NOTE(review): "model_name" says xlnet-base-cased but the tokenizer and the
# model actually load 'roberta-base' -- this entry looks stale; confirm.
CONFIG = {"seed": 2021,
          "epochs": 3,
          "model_name": "xlnet-base-cased",
          "train_batch_size": 16,
          "valid_batch_size": 64,
          "max_length": 256,  # max token length per tweet
          "learning_rate": 1e-4,
          "scheduler": 'CosineAnnealingLR',
          "min_lr": 1e-6,  # floor LR for cosine annealing
          "T_max": 500,  # cosine annealing period (in scheduler steps)
          "weight_decay": 1e-6,
          "n_fold": 5,
          "n_accumulate": 1,  # gradient-accumulation steps
          "num_classes": 1,
          "margin": 0.5,
          "device": torch.device("cuda:0" if torch.cuda.is_available() else "cpu"),
          }
# + id="4afl1P8LaD07"
def train_one_epoch(model, optimizer, scheduler, dataloader, device, epoch):
    """Train ``model`` for one epoch and return the running mean loss.

    Uses the module-level ``criterion`` and CONFIG['n_accumulate'] for
    gradient accumulation (the optimizer steps every n_accumulate batches).
    """
    model.train()
    dataset_size = 0
    running_loss = 0.0
    bar = tqdm(enumerate(dataloader), total=len(dataloader))
    for step, data in bar:
        text_ids = data['text_ids'].to(device, dtype = torch.long)
        text_mask = data['text_mask'].to(device, dtype = torch.long)
        targets = data['target'].to(device, dtype=torch.long)
        batch_size = text_ids.size(0)
        outputs = model(text_ids, text_mask)
        # Scale the loss so accumulated gradients average over the virtual batch.
        loss = criterion(outputs, targets)
        loss = loss / CONFIG['n_accumulate']
        loss.backward()
        if (step + 1) % CONFIG['n_accumulate'] == 0:
            optimizer.step()
            # zero the parameter gradients
            optimizer.zero_grad()
            if scheduler is not None:
                scheduler.step()
        running_loss += (loss.item() * batch_size)
        dataset_size += batch_size
        epoch_loss = running_loss / dataset_size
        bar.set_postfix(Epoch=epoch, Train_Loss=epoch_loss,
                        LR=optimizer.param_groups[0]['lr'])
    gc.collect()
    return epoch_loss
# + id="JdWI_KWRafLZ"
@torch.no_grad()
def valid_one_epoch(model, dataloader, device, epoch):
    """Evaluate ``model`` on ``dataloader`` and return the mean validation loss."""
    model.eval()
    dataset_size = 0
    running_loss = 0.0
    bar = tqdm(enumerate(dataloader), total=len(dataloader))
    for step, data in bar:
        text_ids = data['text_ids'].to(device, dtype = torch.long)
        text_mask = data['text_mask'].to(device, dtype = torch.long)
        targets = data['target'].to(device, dtype=torch.long)
        batch_size = text_ids.size(0)
        outputs = model(text_ids, text_mask)
        loss = criterion(outputs, targets)
        running_loss += (loss.item() * batch_size)
        dataset_size += batch_size
        epoch_loss = running_loss / dataset_size
        # NOTE(review): ``optimizer`` is not a parameter of this function --
        # this reads the module-level optimizer created in the fold loop;
        # it will NameError if called before one exists.
        bar.set_postfix(Epoch=epoch, Valid_Loss=epoch_loss,
                        LR=optimizer.param_groups[0]['lr'])
    gc.collect()
    return epoch_loss
# + id="q1wtBAJJbEou"
def run_training(model, optimizer, scheduler, device, num_epochs, fold):
    """Train for ``num_epochs`` epochs, checkpointing on best validation loss.

    Returns the model reloaded with its best weights plus the loss history.
    NOTE(review): uses the module-level ``train_loader``/``valid_loader``
    rather than parameters, and the ``device`` argument is ignored in favor
    of CONFIG['device'] -- confirm this coupling is intended.
    """
    if torch.cuda.is_available():
        print("[INFO] Using GPU: {}\n".format(torch.cuda.get_device_name()))
    start = time.time()
    best_model_wts = copy.deepcopy(model.state_dict())
    best_epoch_loss = np.inf
    history = defaultdict(list)
    for epoch in range(1, num_epochs + 1):
        gc.collect()
        train_epoch_loss = train_one_epoch(model, optimizer, scheduler,
                                           dataloader=train_loader,
                                           device=CONFIG['device'], epoch=epoch)
        val_epoch_loss = valid_one_epoch(model, valid_loader, device=CONFIG['device'],
                                         epoch=epoch)
        history['Train Loss'].append(train_epoch_loss)
        history['Valid Loss'].append(val_epoch_loss)
        # deep copy the model whenever validation improves (<= keeps the latest tie)
        if val_epoch_loss <= best_epoch_loss:
            print(f"Validation Loss Improved ({best_epoch_loss} ---> {val_epoch_loss})")
            best_epoch_loss = val_epoch_loss
            best_model_wts = copy.deepcopy(model.state_dict())
            PATH = f"/content/drive/MyDrive/ISarcasm/Models/roberta_base/Loss-Fold-{fold}.bin"
            torch.save(model.state_dict(), PATH)
            # Save a model file from the current directory
            print("Model Saved")
        print()
    end = time.time()
    time_elapsed = end - start
    print('Training complete in {:.0f}h {:.0f}m {:.0f}s'.format(
        time_elapsed // 3600, (time_elapsed % 3600) // 60, (time_elapsed % 3600) % 60))
    print("Best Loss: {:.4f}".format(best_epoch_loss))
    # load best model weights
    model.load_state_dict(best_model_wts)
    return model, history
# + id="WSXiJa2-bRiX"
def fetch_scheduler(optimizer):
    """Build the learning-rate scheduler named by CONFIG['scheduler'].

    Returns None when no scheduler is configured. Raises ValueError for an
    unrecognized name (the original fell through and crashed with
    UnboundLocalError in that case). Also uses ``is None`` instead of the
    original ``== None`` comparison.
    """
    name = CONFIG['scheduler']
    if name == 'CosineAnnealingLR':
        return lr_scheduler.CosineAnnealingLR(optimizer, T_max=CONFIG['T_max'],
                                              eta_min=CONFIG['min_lr'])
    if name == 'CosineAnnealingWarmRestarts':
        return lr_scheduler.CosineAnnealingWarmRestarts(optimizer, T_0=CONFIG['T_0'],
                                                        eta_min=CONFIG['min_lr'])
    if name is None:
        return None
    raise ValueError(f"Unknown scheduler: {name!r}")
# + id="B6nXhK_ObVNe"
def prepare_loaders(fold):
    """Build the train/valid DataLoaders for one cross-validation fold.

    Reads the module-level ``train`` frame (with its ``kfold`` column),
    ``tokenizer`` and CONFIG. The train split uses the class-balancing
    ImbalancedDatasetSampler.
    """
    # NOTE(review): displacemnt is threaded through to the dataset but the
    # dataset never uses it -- presumably dead configuration; confirm.
    displacemnt_list=[0,512,1024,1536,2048,2560,3072,3584,4096,4608,4950]
    df_train = train[train.kfold != fold].reset_index(drop=True)
    df_valid = train[train.kfold == fold].reset_index(drop=True)
    sampler = ImbalancedDatasetSampler(df_train)
    train_dataset = PCLTrainDataset(df_train, tokenizer=tokenizer, max_length=CONFIG['max_length'],displacemnt=displacemnt_list[fold])
    valid_dataset = PCLTrainDataset(df_valid, tokenizer=tokenizer, max_length=CONFIG['max_length'],displacemnt=displacemnt_list[fold])
    # shuffle=False because the sampler controls ordering for the train split.
    train_loader = DataLoader(train_dataset, batch_size=CONFIG['train_batch_size'],
                              num_workers=2, shuffle=False, pin_memory=True, drop_last=True,sampler=sampler)
    valid_loader = DataLoader(valid_dataset, batch_size=CONFIG['valid_batch_size'],
                              num_workers=2, shuffle=False, pin_memory=True)
    return train_loader, valid_loader
# + id="rCN4vwBOeaLb"
# Assign a stratified fold id (column ``kfold``) to every training row so
# prepare_loaders() can split by fold.
skf = StratifiedKFold(n_splits=CONFIG['n_fold'], shuffle=True, random_state=CONFIG['seed'])
for fold, ( _, val_) in enumerate(skf.split(X=train, y=train.sarcastic)):
    train.loc[val_ , "kfold"] = int(fold)
train["kfold"] = train["kfold"].astype(int)
# + id="vOtw3nW-2X58"
# del model,train_loader, valid_loader
# + colab={"base_uri": "https://localhost:8080/"} id="HoQghNKN2Cy2" outputId="42f881d7-e47b-4173-d75a-b4caf1a648f2"
import gc
gc.collect()
# + [markdown] id="QCwmBcvPlNaO"
# http://seekinginference.com/applied_nlp/bert-cnn.html
# + colab={"base_uri": "https://localhost:8080/"} id="cXC4SsiDbbGR" outputId="12c64ede-353d-41fe-a515-88d313fd9bd3"
# NOTE(review): range(1, n_fold) trains folds 1..4 only, yet inference below
# also loads Loss-Fold-0.bin -- presumably fold 0 was trained in an earlier
# run; confirm that checkpoint exists.
for fold in range(1, CONFIG['n_fold']):
    print(f"====== Fold: {fold} ======")
    # Create Dataloaders
    train_loader, valid_loader = prepare_loaders(fold=fold)
    model = PCL_Model_Arch()
    model.to(CONFIG['device'])
    torch.cuda.empty_cache()
    # Define Optimizer and Scheduler
    optimizer = AdamW(model.parameters(), lr=CONFIG['learning_rate'], weight_decay=CONFIG['weight_decay'])
    scheduler = fetch_scheduler(optimizer)
    model, history = run_training(model, optimizer, scheduler,
                                  device=CONFIG['device'],
                                  num_epochs=CONFIG['epochs'],
                                  fold=fold)
    # Free GPU/host memory between folds.
    del model, history, train_loader, valid_loader
    _ = gc.collect()
    print()
# + id="XSvm2lLPAf0i"
# Drop rows with missing values, then wrap the held-out test split in a loader.
test.dropna(inplace=True)
# + id="ujNNMM9HAGtl"
valid_dataset = PCLTrainDataset(test, tokenizer=tokenizer, max_length=CONFIG['max_length'],displacemnt=0)
valid_loader = DataLoader(valid_dataset, batch_size=CONFIG['valid_batch_size'],
                          num_workers=2, shuffle=False, pin_memory=True)
# + id="piuXPwW433gP"
@torch.no_grad()
def valid_fn(model, dataloader, device):
    """Run ``model`` over ``dataloader`` and return stacked predictions.

    Returns an (N, num_classes) numpy array of model outputs (softmax
    probabilities, given PCL_Model_Arch). The unused running-loss
    bookkeeping left over from the training loop was removed.
    """
    model.eval()
    PREDS = []
    bar = tqdm(enumerate(dataloader), total=len(dataloader))
    for step, data in bar:
        ids = data['text_ids'].to(device, dtype = torch.long)
        mask = data['text_mask'].to(device, dtype = torch.long)
        outputs = model(ids, mask)
        PREDS.append(outputs.detach().cpu().numpy())
    PREDS = np.concatenate(PREDS)
    gc.collect()
    return PREDS
# + id="zaxyuvUP3Md0"
def inference(model_paths, dataloader, device):
    """Ensemble inference: average predictions over the fold checkpoints.

    Loads each checkpoint into a fresh PCL_Model_Arch, collects its
    predictions via valid_fn, averages across models and takes the argmax,
    returning an array of 0/1 class labels.
    """
    final_preds = []
    for i, path in enumerate(model_paths):
        model = PCL_Model_Arch()
        model.to(CONFIG['device'])
        model.load_state_dict(torch.load(path))
        print(f"Getting predictions for model {i+1}")
        preds = valid_fn(model, dataloader, device)
        final_preds.append(preds)
    final_preds = np.array(final_preds)
    # Mean over models, then pick the higher-probability class per sample.
    final_preds = np.mean(final_preds, axis=0)
    final_preds= np.argmax(final_preds,axis=1)
    return final_preds
# + id="-mbRltZW4Dvf" colab={"base_uri": "https://localhost:8080/", "height": 502, "referenced_widgets": ["1298aaa7c43d4e3bb072ce548e6b4e77", "1f99b286b1ad4397adaeb195d355de94", "482450cb5dd0485390b4b4ac24f0458f", "53dc6ed886334d41a8494300cc532e79", "8e9a0082d4174dc1b90a7bec66cf15d4", "9a718816ae8446aeb39c7cadeeceebea", "05d79d87e30e413781ffa4ae13b46248", "f16e453d244f43739509a723ad9aef2d", "31e5f15b9b0c49a0a6fb6c02bccbb87a", "333c409a971b4948a9e77a623ae4eefc", "d6f2fe3a555e45c3a7d2efe441c12029"]} outputId="153df15d-2d97-49c7-ecee-0df1aa352e2c"
# The five fold checkpoints written by run_training, in fold order; built
# with a comprehension instead of five hard-coded copies of the same path.
_MODEL_DIR = '/content/drive/MyDrive/ISarcasm/Models/roberta_base'
MODEL_PATH_2 = [f'{_MODEL_DIR}/Loss-Fold-{fold}.bin' for fold in range(5)]
preds = inference(MODEL_PATH_2, valid_loader, CONFIG['device'])
# + id="sKQV51HyommQ"
from sklearn.metrics import jaccard_score,f1_score,accuracy_score,recall_score,precision_score,classification_report
def print_statistics(y, y_pred):
    """Print weighted accuracy/precision/recall/F1 and a classification
    report, then return the four scores as a tuple."""
    scores = (
        accuracy_score(y, y_pred),
        precision_score(y, y_pred, average='weighted'),
        recall_score(y, y_pred, average='weighted'),
        f1_score(y, y_pred, average='weighted'),
    )
    print('Accuracy: %.3f\nPrecision: %.3f\nRecall: %.3f\nF_score: %.3f\n'
          % scores)
    print(classification_report(y, y_pred))
    return scores
# + id="3965a5XAooJx" colab={"base_uri": "https://localhost:8080/"} outputId="7429c1f7-79db-4bce-bfc4-5b7a3b9945db"
# Final held-out evaluation of the ensembled fold predictions.
print(print_statistics(test['sarcastic'],preds))
# + [markdown] id="RT8hjnxbbfJq"
# ## Prepare submission
# + id="U7HICl8MJQf0"
# !cat task1.txt | head -n 10
# + id="qCjziGtxJRif"
# !cat task2.txt | head -n 10
# + id="GZDLUcYZbhYg"
# !zip submission.zip task1.txt task2.txt
|
Task A/En/Training/Bert-Base-Approaches/roberta+Kimcnn+_f1_CE_loss+_Imbalance_datasampler.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # RIVET Python API (pyrivet)
from pyrivet import rivet
import numpy as np
# Let's start with some warmups. We can run RIVET on an existing data file:
# +
# The RIVET distribution comes with some test data files
import os
rivet_location = '../../rivet' #Might be in a different location for you
os.listdir(rivet_location + '/data/Test_Point_Clouds')
# -
# Precompute the two-parameter persistence module (homology degree 1, on a
# 20x20 grid) for a sample point cloud; RIVET writes its output to a file
# and compute_file returns that file's path.
computed_file_path = rivet.compute_file(
    rivet_location + '/data/Test_Point_Clouds/circle_data_60pts_codensity.txt',
    homology=1,
    x=20,
    y=20)
computed_file_path
# The output when used this way is a path to a file generated by RIVET. We can load the contents of that computed file into memory and run other RIVET commands on it, like computing barcodes.
# +
# Load the precomputed arrangement back into memory.
with open(computed_file_path, 'rb') as f:
    computed_data = f.read()
# Barcodes along two slices, each given as (angle in degrees, offset).
codes = rivet.barcodes(computed_data, [
    (45, 0), # 45 degrees, offset 0
    (45, 1), # 45 degrees, offset 1
])
codes
# -
# Let's look at one barcode in the list. The format for each bar is Bar(birth, death, multiplicity)
barcode1 = codes[0][1]
barcode1
# A barcode can also be easily converted to a Numpy array:
barcode1.to_array()
# We can also load data directly from memory.
# +
#Equivalent to the 'points1.txt' sample data file in the RIVET distribution
# All points appear at grade 0 (scalar ``appearance``).
points = rivet.PointCloud(
    points = [
        (0.3, 1.5),
        (4.2, 3.8),
        (5.6, 2.3),
        (2.9, 5.1),
        (3.3, 2.0)
    ],
    second_param_name='birth',
    appearance=0,
    max_dist=4.1
)
# Now let's take the Betti information
betti = rivet.betti(points)
betti
# -
# We can also provide a function that gives the grade of appearance of each point:
# +
#Equivalent to the 'points2.txt' sample data file in the RIVET distribution
points = rivet.PointCloud(
    points = [
        (0.3, 1.5, 2.1), # will appear at 1
        (4.2, 3.8, 4.9), # will appear at 3.2
        (5.6, 2.3, 6), # will appear at 2
        (2.9, 5.1, 3.3), # will appear at 4
        (3.3, 2.0, 2.5), # will appear at 2.5
        (4.1, 1.1, 2.3), # will appear at 2.4
        (1.1, 1.3, 1.7), # will appear at 2
    ],
    second_param_name='time',
    # Explicit grade of appearance for each point above, in the same order.
    appearance=[1, 3.2, 2, 4, 2.5, 2.4, 2],
    max_dist=3.1 # pyrivet will calculate max_dist if you leave it out
)
# Now let's take the Betti information
betti = rivet.betti(points)
betti
# -
# Now let's look at two persistence modules for molecules.
#
# First, let's see how to compute L2 distance on Hilbert functions of the persistence modules.
# +
# Let's look at H0, binned into 10x10.
aspirin_h0_betti = rivet.betti_file('aspirin-ZINC000000000053.sdf.txt', homology=0, x=10, y=10)
tylenol_h0_betti = rivet.betti_file('tylenol-ZINC000013550868.sdf.txt', homology=0, x=10, y=10)
# We can compute distance between these using Hilbert functions
from pyrivet import hilbert_distance
print("Distance with Hilbert functions:",
      hilbert_distance.distance(
          aspirin_h0_betti,
          tylenol_h0_betti
      )
)
# -
# And now let's look at the matching distance.
# +
# We can also compute the matching distance, if we have the full precomputed arrangement
aspirin_h0 = open(rivet.compute_file('aspirin-ZINC000000000053.sdf.txt', homology=0, x=10, y=10), 'rb').read()
tylenol_h0 = open(rivet.compute_file('tylenol-ZINC000013550868.sdf.txt', homology=0, x=10, y=10), 'rb').read()
from pyrivet import matching_distance
print("Distance with matching distance:",
      matching_distance.matching_distance(aspirin_h0, tylenol_h0, grid_size=10, normalize=True)
)
# -
# While the RIVET GUI does many things that would be tricky to duplicate in a notebook, we can at least visualize the Hilbert
# functions!
# +
# %matplotlib inline
from matplotlib import pyplot as plt
# Side-by-side heatmaps of the two graded ranks (Hilbert functions).
fig = plt.figure(figsize=(12,8))
plots = fig.subplots(1, 2, sharex=True, sharey=True)
plots[0].imshow(aspirin_h0_betti.graded_rank, origin='lower', aspect='auto', cmap='coolwarm')
plots[1].imshow(tylenol_h0_betti.graded_rank, origin='lower', aspect='auto', cmap='coolwarm')
# -
# Here's an example with a bifiltration:
# +
# The ``imp`` module was deprecated since Python 3.4 and removed in 3.12;
# importlib.reload is the supported replacement.
import importlib
importlib.reload(rivet)
bifi = rivet.Bifiltration(x_label='time of appearance',
y_label='network distance',
simplices = [
[0],
[3],
[4],
[1],
[2],
[0, 3],
[0, 4],
[3, 4],
[0, 1],
[0, 2],
[1, 2],
[0, 1, 2],
[0, 3, 4]
],
appearances = [
[(0, 0)],
[(1, 0)],
[(1, 0)],
[(0, 1)],
[(0, 1)],
[(1, 0)],
[(1, 0)],
[(1, 0)],
[(0, 1)],
[(0, 1)],
[(0, 1)],
[(1, 2)],
[(2, 1)]
]
)
bifi_betti = rivet.betti(bifi, homology=1)
fig = plt.figure(figsize=(12,8))
plots = fig.subplots(1, 2, sharex=True, sharey=True)
plots[0].imshow(bifi_betti.graded_rank, origin='lower', aspect='auto', cmap='coolwarm')
# -
# **Examples with `metric` and `FIRep` input types coming soon!**
|
example/RIVET Python API Tour.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + tags=["parameters"]
# Parameters cell (papermill-style): these defaults can be overridden when
# the notebook is executed programmatically.
epochs = 10
# We don't use the whole dataset for efficiency purpose, but feel free to increase these numbers
n_train_items = 640
n_test_items = 640
# -
# # Part X - Secure Training and Evaluation on MNIST
#
# When building Machine Learning as a Service solutions (MLaaS), a company might need to request access to data from other partners to train its model. In health or in finance, both the model and the data are extremely critical: the model parameters are a business asset while data is personal data which is tightly regulated.
#
# In this context, one possible solution is to encrypt both the model and the data and to train the machine learning model over the encrypted values. This guarantees that the company won't access patients medical records for example and that health facilities won't be able to observe the model to which they contribute. Several encryption schemes exist that allow for computation over encrypted data, among which Secure Multi-Party Computation (SMPC), Homomorphic Encryption (FHE/SHE) and Functional Encryption (FE). We will focus here on Multi-Party Computation (which have been introduced in Tutorial 5) which consists of private additive sharing and relies on the crypto protocols SecureNN and SPDZ.
#
# The exact setting of this tutorial is the following: consider that you are the server and you would like to train your model on some data held by $n$ workers. The server secret shares its model and sends each share to a worker. The workers also secret share their data and exchange it between them. In the configuration that we will study, there are 2 workers: alice and bob. After exchanging shares, each of them now has one of their own shares, one share of the other worker, and one share of the model. Computation can now start to privately train the model using the appropriate crypto protocols. Once the model is trained, all the shares can be sent back to the server to decrypt it. This is illustrated with the following figure:
# 
# To give an example of this process, let's assume alice and bob both hold a part of the MNIST dataset and let's train a model to perform digit classification!
#
# Author:
# - <NAME> - Twitter: [@theoryffel](https://twitter.com/theoryffel) · GitHub: [@LaRiffle](https://github.com/LaRiffle)
# # 1. Encrypted Training demo on MNIST
# ## Imports and training configuration
# +
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
import time
# -
# This class describes all the hyper-parameters for the training. Note that they are all public here.
# +
class Arguments():
    """Hyper-parameters for the encrypted-training demo (all public)."""
    def __init__(self):
        self.batch_size = 128  # training batch size
        self.test_batch_size = 128  # evaluation batch size
        self.epochs = epochs  # taken from the parameters cell above
        self.lr = 0.02  # optimizer learning rate
        self.seed = 1
        self.log_interval = 1 # Log info at each batch
        self.precision_fractional = 3  # fractional digits kept in fixed precision
args = Arguments()
# Seed PyTorch for reproducibility; `_` discards the returned generator.
_ = torch.manual_seed(args.seed)
# -
# Here are PySyft imports. We connect to two remote workers that we call `alice` and `bob` and request another worker called the `crypto_provider` who gives all the crypto primitives we may need.
# +
import syft as sy # import the Pysyft library
hook = sy.TorchHook(torch) # hook PyTorch to add extra functionalities like Federated and Encrypted Learning
# simulation functions
def connect_to_workers(n_workers):
    # In-process stand-ins for the remote data-holding workers.
    return [
        sy.VirtualWorker(hook, id=f"worker{i+1}")
        for i in range(n_workers)
    ]
def connect_to_crypto_provider():
    # Third party that supplies the crypto primitives needed by the protocol.
    return sy.VirtualWorker(hook, id="crypto_provider")
workers = connect_to_workers(n_workers=2)
crypto_provider = connect_to_crypto_provider()
# -
# ## Getting access and secret share data
#
# Here we're using a utility function which simulates the following behaviour: we assume the MNIST dataset is distributed in parts each of which is held by one of our workers. The workers then split their data in batches and secret share their data between each others. The final object returned is an iterable on these secret shared batches, that we call the **private data loader**. Note that during the process the local worker (so us) never had access to the data.
#
# We obtain as usual a training and testing private dataset, and both the inputs and labels are secret shared.
# +
def get_private_data_loaders(precision_fractional, workers, crypto_provider):
    """Build train/test "loaders" whose batches are secret-shared across workers.

    Each batch is converted to fixed precision and additively shared via
    PySyft; training labels are one-hot encoded first. Only the first
    ``n_train_items``/``n_test_items`` items (module-level globals) are
    kept, so the returned objects are plain lists of shared batches.
    """
    def one_hot_of(index_tensor):
        """
        Transform to one hot tensor
        Example:
            [0, 3, 9]
            =>
            [[1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
             [0., 0., 0., 1., 0., 0., 0., 0., 0., 0.],
             [0., 0., 0., 0., 0., 0., 0., 0., 0., 1.]]
        """
        onehot_tensor = torch.zeros(*index_tensor.shape, 10) # 10 classes for MNIST
        onehot_tensor = onehot_tensor.scatter(1, index_tensor.view(-1, 1), 1)
        return onehot_tensor
    def secret_share(tensor):
        """
        Transform to fixed precision and secret share a tensor
        """
        return (
            tensor
            .fix_precision(precision_fractional=precision_fractional)
            .share(*workers, crypto_provider=crypto_provider, protocol="fss", requires_grad=True)
        )
    # Standard MNIST normalization.
    transformation = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])
    train_loader = torch.utils.data.DataLoader(
        datasets.MNIST('../data', train=True, download=True, transform=transformation),
        batch_size=args.batch_size
    )
    # Keep only enough batches to cover n_train_items, sharing each one.
    private_train_loader = [
        (secret_share(data), secret_share(one_hot_of(target)))
        for i, (data, target) in enumerate(train_loader)
        if i < n_train_items / args.batch_size
    ]
    test_loader = torch.utils.data.DataLoader(
        datasets.MNIST('../data', train=False, download=True, transform=transformation),
        batch_size=args.test_batch_size
    )
    # Test labels stay as plain (float) class indices, not one-hot.
    private_test_loader = [
        (secret_share(data), secret_share(target.float()))
        for i, (data, target) in enumerate(test_loader)
        if i < n_test_items / args.test_batch_size
    ]
    return private_train_loader, private_test_loader
private_train_loader, private_test_loader = get_private_data_loaders(
    precision_fractional=args.precision_fractional,
    workers=workers,
    crypto_provider=crypto_provider
)
# -
# ## Model specification
#
# Here is the model that we will use, it's a rather simple one but [it has proved to perform reasonably well on MNIST](https://towardsdatascience.com/handwritten-digit-mnist-pytorch-977b5338e627)
class Net(nn.Module):
    """Simple fully-connected MNIST classifier: 784 -> 128 -> 64 -> 10."""

    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(28 * 28, 128)
        self.fc2 = nn.Linear(128, 64)
        self.fc3 = nn.Linear(64, 10)

    def forward(self, x):
        # flatten any leading dimensions down to (batch, 784)
        flat = x.view(-1, 28 * 28)
        hidden = F.relu(self.fc1(flat))
        hidden = F.relu(self.fc2(hidden))
        return self.fc3(hidden)
# ## Training and testing functions
#
# The training is done almost as usual, the real difference is that we can't use losses like negative log-likelihood (`F.nll_loss` in PyTorch) because it's quite complicated to reproduce these functions with SMPC. Instead, we use a simpler Mean Square Error loss.
def train(args, model, private_train_loader, optimizer, epoch):
    """Run one epoch of encrypted training over the secret-shared batches.

    Uses an MSE loss (not NLL) because it is cheap to evaluate under SMPC.
    """
    model.train()
    for batch_idx, (data, target) in enumerate(private_train_loader):  # <-- now it is a private dataset
        start_time = time.time()

        optimizer.zero_grad()
        output = model(data)

        # loss = F.nll_loss(output, target)  <-- not possible here
        batch_size = output.shape[0]
        # .refresh() re-randomises the shares before backprop (syft-specific)
        loss = ((output - target)**2).sum().refresh()/batch_size

        loss.backward()
        optimizer.step()

        if batch_idx % args.log_interval == 0:
            # decrypt the scalar loss only for logging
            loss = loss.get().float_precision()
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tTime: {:.3f}s'.format(
                epoch, batch_idx * args.batch_size, len(private_train_loader) * args.batch_size,
                100. * batch_idx / len(private_train_loader), loss.item(), time.time() - start_time))
# The test function does not change!
def test(args, model, private_test_loader):
    """Evaluate accuracy of the secret-shared model on the private test set.

    The per-batch correct counts are accumulated while still encrypted and
    decrypted only once at the end.
    """
    model.eval()
    correct = 0
    # removed unused locals `test_loss` and `start_time` (assigned, never read)
    with torch.no_grad():
        for data, target in private_test_loader:
            output = model(data)
            pred = output.argmax(dim=1)
            correct += pred.eq(target.view_as(pred)).sum()

    correct = correct.get().float_precision()
    print('\nTest set: Accuracy: {}/{} ({:.0f}%)\n'.format(
        correct.item(), len(private_test_loader)* args.test_batch_size,
        100. * correct.item() / (len(private_test_loader) * args.test_batch_size)))
# ### Let's launch the training !
#
# A few notes about what's happening here. First, we secret share all the model parameters across our workers. Second, we convert optimizer's hyperparameters to fixed precision. Note that we don't need to secret share them because they are public in our context, but as secret shared values live in finite fields we still need to move them in finite fields using `.fix_precision`, in order to perform consistently operations like the weight update $W \leftarrow W - \alpha * \Delta W$.
# +
model = Net()
# secret-share the parameters across the workers; hyperparameters stay public
model = model.fix_precision().share(*workers, crypto_provider=crypto_provider, protocol="fss", requires_grad=True)

optimizer = optim.SGD(model.parameters(), lr=args.lr)
# optimizer state must live in the same fixed-precision field as the weights
optimizer = optimizer.fix_precision()

for epoch in range(1, args.epochs + 1):
    train(args, model, private_train_loader, optimizer, epoch)
    test(args, model, private_test_loader)
# -
# There you are! You just got 75% of accuracy using a tiny fraction of the MNIST dataset, using 100% encrypted training!
# # 2. Discussion
#
# Let's have a closer look to the power of encrypted training by analyzing what we just did.
# ## 2.1 Computation time
#
# The first thing is obviously the running time! As you have surely noticed, it is far slower than plain text training. In particular, an iteration over 1 batch of 128 items takes 4.3s while only 4ms in pure PyTorch on my machine. Whereas this might seem like a blocker, just recall that here everything happened remotely and in the encrypted world: no single data item has been disclosed. More specifically, the time to process one item is 33ms, which is not that bad. The real question is to analyze when encrypted training is needed and when only encrypted prediction is sufficient. 33ms to perform a prediction is completely acceptable in a production-ready scenario, for example!
#
# One main bottleneck is the use of costly activation functions: relu activation with SMPC are very costly because it uses private comparison, which we implement here using [Function Secret Sharing](https://arxiv.org/abs/2006.04593). As an illustration, if we replace relu with a quadratic activation as it is done in several papers on encrypted computation like CryptoNets, we drop from 4.3s to 2.9s.
#
# As a general rule, the key idea to take away is to encrypt only what's necessary, and this tutorial shows you how simple it can be.
# ## 2.2 Backpropagation with SMPC
#
# You might wonder how we perform backpropagation and gradient updates although we're working with integers in finite fields. To do so, we have developed a new syft tensor called AutogradTensor. This tutorial used it intensively although you might have not seen it! Let's check this by printing a model's weight:
model.fc3.bias
# And a data item:
first_batch, input_data = 0, 0
private_train_loader[first_batch][input_data]
# As you observe, the AutogradTensor is there! It lives between the torch wrapper and the FixedPrecisionTensor which indicate that the values are now in finite fields. The goal of this AutogradTensor is to store the computation graph when operations are made on encrypted values. This is useful because when calling backward for the backpropagation, this AutogradTensor overrides all the backward functions that are not compatible with encrypted computation and indicates how to compute these gradients. For example, regarding multiplication which is done using the Beaver triples trick, we don't want to differentiate the trick all the more that differentiating a multiplication should be very easy: $\partial_b (a \cdot b) = a \cdot \partial b$. Here is how we describe how to compute these gradients for example:
#
# ```python
# class MulBackward(GradFunc):
# def __init__(self, self_, other):
# super().__init__(self, self_, other)
# self.self_ = self_
# self.other = other
#
# def gradient(self, grad):
# grad_self_ = grad * self.other
# grad_other = grad * self.self_ if type(self.self_) == type(self.other) else None
# return (grad_self_, grad_other)
# ```
#
# You can have a look at `tensors/interpreters/gradients.py` if you're curious to see how we implemented more gradients.
#
# In terms of computation graph, it means that a copy of the graph remains local and that the server which coordinates the forward pass also provide instructions on how to do the backward pass. This is a completely valid hypothesis in our setting.
# ## 2.3 Security guarantees
#
#
# Last, let's give a few hints about the security we're achieving here: adversaries that we are considering here are **honest but curious**: this means that an adversary can't learn anything about the data by running this protocol, but a malicious adversary could still deviate from the protocol and for example try to corrupt the shares to sabotage the computation. Security against malicious adversaries in such SMPC computations including private comparison is still an open problem.
#
# In addition, even if Secure Multi-Party Computation ensures that training data wasn't accessed, many threats from the plain text world are still present here. For example, as you can make request to the model (in the context of MLaaS), you can get predictions which might disclose information about the training dataset. In particular you don't have any protection against membership attacks, a common attack on machine learning services where the adversary wants to determine if a specific item was used in the dataset. Besides this, other attacks such as unintended memorization processes (models learning specific feature about a data item), model inversion or extraction are still possible.
#
# One general solution which is effective for many of the threats mentioned above is to add Differential Privacy. It can be nicely combined with Secure Multi-Party Computation and can provide very interesting security guarantees. We're currently working on several implementations and hope to propose an example that combines both shortly!
# # Conclusion
#
# As you have seen, training a model using SMPC is not complicated from a code point of view, even we use rather complex objects under the hood. With this in mind, you should now analyse your use-cases to see when encrypted computation is needed either for training or for evaluation. If encrypted computation is much slower in general, it can also be used carefully so as to reduce the overall computation overhead.
#
# If you enjoyed this and would like to join the movement toward privacy preserving, decentralized ownership of AI and the AI supply chain (data), you can do so in the following ways!
#
# ### Star PySyft on GitHub
#
# The easiest way to help our community is just by starring the repositories! This helps raise awareness of the cool tools we're building.
#
# - [Star PySyft](https://github.com/OpenMined/PySyft)
#
# ### Pick our tutorials on GitHub!
#
# We made really nice tutorials to get a better understanding of what Federated and Privacy-Preserving Learning should look like and how we are building the bricks for this to happen.
#
# - [Checkout the PySyft tutorials](https://github.com/OpenMined/PySyft/tree/master/examples/tutorials)
#
#
# ### Join our Slack!
#
# The best way to keep up to date on the latest advancements is to join our community!
#
# - [Join slack.openmined.org](http://slack.openmined.org)
#
# ### Join a Code Project!
#
# The best way to contribute to our community is to become a code contributor! If you want to start "one off" mini-projects, you can go to PySyft GitHub Issues page and search for issues marked `Good First Issue`.
#
# - [Good First Issue Tickets](https://github.com/OpenMined/PySyft/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22)
#
# ### Donate
#
# If you don't have time to contribute to our codebase, but would still like to lend support, you can also become a Backer on our Open Collective. All donations go toward our web hosting and other community expenses such as hackathons and meetups!
#
# - [Donate through OpenMined's Open Collective Page](https://opencollective.com/openmined)
|
examples/tutorials/Part 12 bis - Encrypted Training on MNIST.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.11 64-bit (''ml_env'': conda)'
# language: python
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/"} id="chpCmd7enwY1" outputId="bf6f8ead-4e5f-417b-b269-8a7e250712b6"
from google.colab import drive
drive.mount('/content/drive')
# + id="X2uwWqiGumDE" outputId="4d8a55ef-1e27-41e3-8635-4bedbd7fc247" colab={"base_uri": "https://localhost:8080/"}
# !pip install --upgrade tables
# + [markdown] id="ujJny8FGirKP"
# # Loads in the dataframes
# + id="2gvoHwiOirKT"
import pandas as pd
import numpy as np
import warnings
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
warnings.filterwarnings("ignore")
# %matplotlib inline
# loads the dataframes
higgs_df = pd.read_hdf('/content/drive/MyDrive/Colab Notebooks/ttH.hd5')
semi_leptonic_df = pd.read_hdf('/content/drive/MyDrive/Colab Notebooks/ttsemileptonic.hd5')

# labels signal vs background
semi_leptonic_df["signal"] = 0
higgs_df["signal"] = 1

# combines the dataframes and randomly shuffles the rows.
# DataFrame.append is deprecated (removed in pandas 2.0) — use pd.concat.
full_df = pd.concat([semi_leptonic_df, higgs_df], ignore_index=True)
full_df = shuffle(full_df)
# + id="mLUQ7_0kirKV"
# event-level (one scalar per event) features
event_cols = [
    "BiasedDPhi",
    "DiJet_mass",
    "HT",
    "InputMet_InputJet_mindPhi",
    "InputMet_pt",
    "MHT_pt",
    "MinChi",
    "MinOmegaHat",
    "MinOmegaTilde",
    "ncleanedBJet",
    "ncleanedJet",
]

# object-level (one list per event, one entry per jet) features
object_cols = [
    "cleanedJet_pt",
    "cleanedJet_area",
    "cleanedJet_btagDeepB",
    "cleanedJet_chHEF",
    "cleanedJet_eta",
    "cleanedJet_mass",
    "cleanedJet_neHEF",
    "cleanedJet_phi",
]

# keep only the modelling columns plus the label
df = full_df[event_cols + object_cols + ["signal"]]
# + [markdown] id="5zAC_PEfirKW"
# # Centers the Jets
# + colab={"base_uri": "https://localhost:8080/", "height": 116, "referenced_widgets": ["60dd2a12260148cf966957ed0742d7e8", "43889b89918e483ba503b7d83fc78024", "51f677f65683450aa5be5bb37758a4f7", "207bd682b0cd482fac70e06e45276cc4", "884ca7c3c110440596e46902a46fbdbe", "2a068ab339004c81bdbcf928f55f0aa0", "c0c0802b6bb34f87a08ec6fdeacac05e", "1649ad46e34a4505ac75072f2859157a", "492b4c4ee0de44909ba91f5bdfe4cc07", "<KEY>", "dc8c024a068347999cbf2e85032da047", "4f8b6fcd2e024c6980e20381fa789cc6", "e62007d39a6c4a3f9e625e778a20932b", "<KEY>", "<KEY>", "<KEY>", "0d3d4183f0b74c92a04d5d59a4c19d27", "9ab9d8b4d3634d9091a9fef4a6cb45e2", "620806b79f984a0288958100c29aeec9", "20df20b4d08a41f48b0ba049edc80e7c", "<KEY>", "122eaffea8c64351ae90c166d8d61cc3"]} id="ssPG6Y20irKW" outputId="2b621ad0-9db8-4e2c-aad8-685a8feef964"
from tqdm.auto import tqdm # for notebooks
tqdm.pandas()
def deltaphi(phi1, phi2):
    """Return phi1 - phi2 wrapped into the equivalent value closest to zero."""
    raw = phi1 - phi2
    candidates = [raw, raw + np.pi * 2.0, raw - np.pi * 2.0]
    closest = np.argsort(np.abs(candidates))[0]
    return candidates[closest]
def center_phi(pts, phis):
    """Shift *phis* so the pT-weighted centre is at zero (relative to leading jet)."""
    leading_jet_phi = phis[0]
    total_jet_pt = sum(pts)
    # pT weighted average of phi
    # NOTE(review): np.average over terms already divided by total_jet_pt
    # additionally divides by the number of jets, i.e. this is
    # sum(pt*dphi)/(n*total_pt) rather than sum(pt*dphi)/total_pt.
    # Confirm whether this extra 1/n factor is intended.
    phi_c = np.average(
        [
            (i * deltaphi(j, leading_jet_phi)) / total_jet_pt
            for i, j in zip(pts, phis)
        ]
    )
    return phis - phi_c
def center_eta(pts, etas):
    """Shift *etas* so the pT-weighted centre is at zero."""
    total_jet_pt = sum(pts)
    # pT weighted average of eta
    # NOTE(review): same as center_phi — np.average introduces an extra 1/n
    # factor on top of the total_jet_pt normalisation; confirm intent.
    eta_c = np.average([(i * j) / total_jet_pt for i, j in zip(pts, etas)])
    return etas - eta_c
# centers the jets (progress_apply shows a tqdm progress bar per row)
print('Centering jets in eta axis')
df['cleanedJet_eta'] = df.progress_apply(lambda x: center_eta(x['cleanedJet_pt'], x['cleanedJet_eta']), axis=1)
print('Centering jets in phi axis')
df['cleanedJet_phi'] = df.progress_apply(lambda x: center_phi(x['cleanedJet_pt'], x['cleanedJet_phi']), axis=1)
# + [markdown] id="oNV92aBJirKX"
# # Rotates the Jets
# + colab={"base_uri": "https://localhost:8080/", "height": 116, "referenced_widgets": ["d9c849710dbf4c45af0f6d63513add1f", "4c24a6dfeec54890b35c3da7b385506a", "c38dbe6a46924becb017fcd48c63a615", "b5525c3f1ebc4b8199b6f56c94f1c480", "6705861edf384f19b194922b0424d5b0", "<KEY>", "c428390c14cc4dd5bd9a3758d4eb4a61", "<KEY>", "63ed0acde5b344b29d7ebdf8a4c18ef3", "<KEY>", "6566ee52a703468098b383e494f2b759", "570278fa6c524063a2d83714d1f4e559", "60751fb39f3f431ebdf383146fee1c55", "b879ce93a45c472995cd36f17cab5865", "2a0ac28eacb340d5882d57790f2069af", "7e8c2f134e484611a9c4036e90a7daf5", "0ee8ebf30c6149aead2439ad5cac78a9", "d71475ac690e4d218b659b03ac099718", "e5869f709f644fa8a9e7999338689e69", "5fceb6b0e0ff422aa40e999d8f372f63", "<KEY>", "14d8604f27224512aa3826c7c2078862"]} id="weYbuWEhirKX" outputId="0b0df984-bb8d-4907-dfc7-803dbabf2642"
def principal_axis(etas, phis, pts):
M11 = np.sum([pt * eta * phi for pt, eta, phi in zip(pts, etas, phis)])
M20 = np.sum([pt * eta ** 2 for pt, eta in zip(pts, etas)])
M02 = np.sum([pt * phi ** 2 for pt, phi in zip(pts, etas)])
# tan(theta) angle to rotate to the principal axis in the image
tan_theta = (
2 * M11 / (M20 - M02 + np.sqrt(4 * M11 * M11 + (M20 - M02) * (M20 - M02)))
)
return tan_theta
def rotate_eta(pts, etas, phis):
    """Return the eta coordinates rotated onto the jet image's principal axis."""
    # hoist the angle: np.arctan was previously evaluated twice per call
    theta = np.arctan(principal_axis(etas, phis, pts))
    rot_eta = etas * np.cos(theta) + phis * np.sin(theta)
    return rot_eta
def rotate_phi(pts, etas, phis):
    """Return the phi coordinates rotated onto the jet image's principal axis."""
    # hoist the angle: np.arctan was previously evaluated twice per call
    theta = np.arctan(principal_axis(etas, phis, pts))
    rot_phi = -etas * np.sin(theta) + phis * np.cos(theta)
    rot_phi = np.unwrap(rot_phi)  # fixes the angle phi to be between (-Pi,Pi]
    return rot_phi
# rotates the jets
# NOTE(review): the eta column is overwritten first, so the phi rotation on the
# next line receives the ALREADY-rotated eta values rather than the originals.
# Confirm this sequential update is intended (a simultaneous rotation would
# compute both from the pre-rotation coordinates).
print('Rotating jets in eta axis')
df['cleanedJet_eta'] = df.progress_apply(lambda x: rotate_eta(x['cleanedJet_pt'], x['cleanedJet_eta'], x['cleanedJet_phi']), axis=1)
print('Rotating jets in phi axis')
df['cleanedJet_phi'] = df.progress_apply(lambda x: rotate_phi(x['cleanedJet_pt'], x['cleanedJet_eta'], x['cleanedJet_phi']), axis=1)
# + [markdown] id="06x_BS2firKY"
# # Splits data into event / object dataframes and train / test dataframes
# + id="bmhXDyJjirKZ" colab={"base_uri": "https://localhost:8080/", "height": 452} outputId="e75425cb-7823-483a-a5f0-ddfcdb988c37"
from sklearn.preprocessing import StandardScaler
from keras.preprocessing import sequence

scaler = StandardScaler()

# columns that should not be transformed
untransformed_cols = ["ncleanedBJet", "ncleanedJet", "BiasedDPhi", "signal"]
transformed_cols = list(set(event_cols) - set(untransformed_cols))

# takes the log of each column to remove skewness
for col_name in event_cols:
    if col_name in transformed_cols:
        df[col_name] = np.log(df[col_name])

# splits data into training and validation (stratified to keep class balance)
X, y = df.drop("signal", axis=1), df["signal"]
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=1)

# divides training data into object level and event level features
event_X_train, event_X_test = X_train[event_cols], X_test[event_cols]
object_X_train, object_X_test = X_train[object_cols], X_test[object_cols]

# scales features so they all have the same mean and variance
# (scaler is fit on train only and applied to test — no leakage here)
event_X_train[event_cols] = scaler.fit_transform(event_X_train[event_cols].values)
event_X_test[event_cols] = scaler.transform(event_X_test[event_cols].values)

max_jets = df["ncleanedJet"].max()

# pads input sequences with zeroes so they're all the same length
for col in object_cols:
    object_X_train[col] = sequence.pad_sequences(
        object_X_train[col].values, padding="post", dtype="float32"
    ).tolist()
    object_X_test[col] = sequence.pad_sequences(
        object_X_test[col].values, padding="post", dtype="float32"
    ).tolist()

print(
    "Removed Columns:",
    [col for col in full_df.columns if col not in set(event_cols + object_cols)],
)
X_train.head()
# + [markdown] id="WFznb-h_irKa"
# # Expands the lists in the df so they can be used as input
# + id="SH8pax3lirKa"
# Build dense (events, max_jets, n_features) arrays from the per-column lists.
# BUG FIX: np.ndarray(shape=...) returns UNINITIALIZED memory, so any slot not
# written below (columns padded shorter than max_jets) contained garbage.
# np.zeros makes the padding deterministic.
temp_X_train = np.zeros(
    shape=(object_X_train.shape[0], max_jets, len(object_X_train.columns))
)
temp_X_test = np.zeros(
    shape=(object_X_test.shape[0], max_jets, len(object_X_test.columns))
)

# iterate through each column in the df and iterate through each value in the
# column and then iterate through each item in the list and then put that item
# in the nparray
for i, col in enumerate(object_cols):
    train_col = object_X_train[col]
    test_col = object_X_test[col]
    for j, event in enumerate(train_col):
        for k, item in enumerate(event):
            temp_X_train[j][k][i] = item
    for j, event in enumerate(test_col):
        for k, item in enumerate(event):
            temp_X_test[j][k][i] = item

object_X_train = temp_X_train
object_X_test = temp_X_test
# + [markdown] id="rDfCA6DjN9Be"
# # Scales the data
# + id="IihZg5ahN9QR"
from sklearn.preprocessing import RobustScaler

# scale only the non-zero (i.e. real, non-padding) jet rows
nz = np.any(object_X_train, -1)
object_X_train[nz] = RobustScaler().fit_transform(object_X_train[nz])

# NOTE(review): the test set is fitted with its OWN RobustScaler rather than
# reusing the one fitted on the training set — this is a train/test
# inconsistency (and mild leakage); confirm whether intended.
nz = np.any(object_X_test, -1)
object_X_test[nz] = RobustScaler().fit_transform(object_X_test[nz])
# + [markdown] id="c-_sVf42L9u6"
# # Saves numpy arrays so I don't have to preprocess each time
# + id="yiu_DI4ML9OP"
# persist the preprocessed tensors so preprocessing can be skipped next run
np.save('/content/drive/MyDrive/RNN_classifier/object_X_train2.npy', object_X_train)
np.save('/content/drive/MyDrive/RNN_classifier/object_X_test2.npy', object_X_test)
# + [markdown] id="ZZH_t5B-MKE3"
# # Loads numpy arrays
# + id="j_6xk5LXLrri"
# NOTE(review): saves to "...2.npy" but loads "...npy" (no suffix) — these are
# different files; confirm which version the load is meant to pick up.
object_X_train = np.load('/content/drive/MyDrive/RNN_classifier/object_X_train.npy')
object_X_test = np.load('/content/drive/MyDrive/RNN_classifier/object_X_test.npy')
# + [markdown] id="JmkyoF7qirKb"
# # Plots the jet positions as a scatter plot to visualize the transformations
# + id="DW5q4b-XirKb" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="56eef5a5-9d18-4736-e73b-bcca34db01d4"
import matplotlib.pyplot as plt
from numpy.random import randint

# NOTE(review): `i` is drawn but never used — the scatter below plots ALL
# events, not the randomly chosen one.
i = randint(0, object_X_train.shape[0])
# eta, phi
plt.scatter(object_X_train[:, :, 7], object_X_train[:, :, 4], s=0.1)
# + [markdown] id="c-r3qlxjirKc"
# # Hyperparameters
# + id="LF6k4uMKirKc" colab={"base_uri": "https://localhost:8080/"} outputId="8f22b49a-80ec-46e8-ea29-ed46b490ab12"
# binary classifier neural network
import tensorflow as tf
from tensorflow import keras
import keras.backend as K
from sklearn.utils import class_weight

# hyperparameters
lr = 0.01
activation = "relu"
batch_size = 64
epochs = 10
patience = 10                 # early-stopping patience (epochs)
loss = "binary_crossentropy"
lstm_l2 = 0#1e-6              # L2 regularisation, currently disabled
mlp_l2 = 0#1e-4               # L2 regularisation, currently disabled
optimizer = keras.optimizers.Adam(
    learning_rate=lr,
)

# metrics
def f1_score(y_true, y_pred):  # taken from old keras source code
    """Batch-wise F1 score from thresholded predictions, in Keras backend ops."""
    tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    actual_pos = K.sum(K.round(K.clip(y_true, 0, 1)))
    predicted_pos = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = tp / (predicted_pos + K.epsilon())
    recall = tp / (actual_pos + K.epsilon())
    return 2 * (precision * recall) / (precision + recall + K.epsilon())
# metrics tracked during training/validation
METRICS = [
    keras.metrics.BinaryAccuracy(name="accuracy"),
    keras.metrics.Precision(name="precision"),
    keras.metrics.Recall(name="recall"),
    keras.metrics.AUC(name="AUC"),
    f1_score,
]

# weighting the classes lowers accuracy and precision but increases recall and f1 score
# class_weights = [1/float(full_df['xs_weight'].loc[full_df['signal'] == _class].mode()) for _class in np.unique(y_train)]
# class_weights = [i / min(class_weights) for i in class_weights]
class_weights = class_weight.compute_class_weight(
    class_weight="balanced", classes=np.unique(y_train), y=y_train
)
# map class label -> weight, as expected by model.fit(class_weight=...)
class_weights = {l: c for l, c in zip(np.unique(y_train), class_weights)}
print(class_weights)
# + [markdown] id="zowVOAG4IqhW"
# # Callbacks
# + id="Y0YJf6YIIsTk"
# stop when validation loss stalls, restoring the best weights seen
early_stopping = tf.keras.callbacks.EarlyStopping(
    monitor="val_loss",
    verbose=1,
    patience=patience,
    mode="auto",
    restore_best_weights=True,
)

# saves the network at regular intervals so you can pick the best version
checkpoint = tf.keras.callbacks.ModelCheckpoint(
    filepath="/content/drive/MyDrive/RNN_classifier/best_model.h5",
    monitor="val_loss",
    verbose=1,
    save_best_only=True,
    save_weights_only=False,
    mode="auto",
    save_freq="epoch",
)

# shrink the learning rate by 10x after 3 stagnant epochs
reduce_lr = keras.callbacks.ReduceLROnPlateau(
    monitor='val_loss',
    factor=0.1,
    patience=3,
    mode='auto'
)

# append per-epoch metrics to a CSV for offline inspection
trainlog_filepath = "/content/drive/MyDrive/RNN_classifier/trainlog.csv"
csv_logger = keras.callbacks.CSVLogger(
    trainlog_filepath,
    separator=',',
    append = False,
)
# + [markdown] id="Iz7N3I9kirKc"
# # Defines and compiles the model
# + id="RAzSpG9airKc"
from keras.models import Sequential
from keras.layers import Dense, LSTM, Concatenate, BatchNormalization
from keras.regularizers import l2
from keras import Model
DNN_model = Sequential(
[
Dense(200, input_shape=(len(event_cols),), activation=activation, kernel_regularizer=l2(mlp_l2)),
BatchNormalization(),
]
)
RNN_model = Sequential(
[
LSTM(
100,
input_shape=(max_jets, len(object_cols)),
activation="tanh",
unroll=False,
recurrent_dropout=0.2,
kernel_regularizer=l2(lstm_l2),
return_sequences=True,
),
LSTM(
100,
activation='tanh',
unroll=False,
kernel_regularizer=l2(lstm_l2),
recurrent_dropout=0.2),
]
)
merged = Concatenate()([DNN_model.output, RNN_model.output])
z = Dense(200, activation=activation, kernel_regularizer=l2(mlp_l2))(merged)
z = BatchNormalization()(z)
z = Dense(100, activation=activation, kernel_regularizer=l2(mlp_l2))(z)
z = BatchNormalization()(z)
z = Dense(100, activation=activation, kernel_regularizer=l2(mlp_l2))(z)
z = BatchNormalization()(z)
z = Dense(100, activation=activation, kernel_regularizer=l2(mlp_l2))(z)
z = BatchNormalization()(z)
z = Dense(100, activation=activation, kernel_regularizer=l2(mlp_l2))(z)
z = BatchNormalization()(z)
z = Dense(100, activation=activation, kernel_regularizer=l2(mlp_l2))(z)
z = BatchNormalization()(z)
z = Dense(1, activation="sigmoid")(z)
model = Model(inputs=[DNN_model.input, RNN_model.input], outputs=z)
model.compile(optimizer=optimizer, loss="binary_crossentropy", metrics=METRICS)
# + [markdown] id="mCXQcBsrMYKU"
# # Loads pre-trained model
# + id="nFaiJG_4M739"
model = keras.models.load_model(
'/content/drive/MyDrive/RNN_classifier/best_model.h5',
custom_objects={'f1_score': f1_score})
# + [markdown] id="wdXMt--3irKd"
# # Trains the model
# + colab={"base_uri": "https://localhost:8080/"} id="0sC1eUjC2L1H" outputId="8151bdfc-1ea9-4e95-940a-a61d11e4585e"
# NOTE(review): this fit call is duplicated verbatim in the next cell below —
# running both trains for (up to) another 100 epochs and overwrites `history`.
# Confirm whether the second call is an intentional "resume training" cell.
history = model.fit(
    [event_X_train, object_X_train],
    y_train,
    batch_size=batch_size,
    class_weight=class_weights,
    epochs=100,
    callbacks=[early_stopping, checkpoint, reduce_lr, csv_logger],
    validation_data=([event_X_test, object_X_test], y_test),
    verbose=1,
)
# + id="xyHSm6ReniLd" outputId="bf1679c1-742d-4b51-eb47-e274450d1489" colab={"base_uri": "https://localhost:8080/", "height": 398}
history = model.fit(
    [event_X_train, object_X_train],
    y_train,
    batch_size=batch_size,
    class_weight=class_weights,
    epochs=100,
    callbacks=[early_stopping, checkpoint, reduce_lr, csv_logger],
    validation_data=([event_X_test, object_X_test], y_test),
    verbose=1,
)
# + [markdown] id="dYsZoohm73aT"
# # Evaluates the model
# + id="7avvPpySKxjM"
y_pred = model.predict([event_X_test, object_X_test])
# + id="kHp6wfDaKlpp"
import matplotlib.pyplot as plt
def plot_metrics(history):
    """Plot train vs validation curves for each tracked metric in *history*."""
    metrics = ['loss', 'accuracy', 'precision', 'recall', 'AUC']
    plt.figure(figsize=(20, 20))  # removed unused `fig` binding
    for n, metric in enumerate(metrics):
        name = metric.replace("_", " ")
        plt.subplot(3, 2, n + 1)
        plt.plot(history.epoch, history.history[metric], label='Train')
        plt.plot(history.epoch, history.history['val_' + metric],
                 linestyle="--", label='Val')
        plt.xlabel('Epoch')
        plt.ylabel(name)
        plt.legend()
plot_metrics(history)
# + colab={"base_uri": "https://localhost:8080/", "height": 621} id="qDARK0csKouy" outputId="3b565210-ee3e-4d46-9585-d5ddbf05b062"
from sklearn.metrics import confusion_matrix
import seaborn as sns
def plot_cm(labels, predictions, p=0.5):
    """Plot a row-normalised confusion matrix at decision threshold *p*.

    sklearn's confusion_matrix orders classes by sorted label value, so with
    signal encoded as 1 (background as 0), index 0 is background and index 1
    is signal.
    """
    cm = confusion_matrix(labels, predictions > p, normalize='true')
    plt.figure(figsize=(10, 10))
    # BUG FIX: tick labels were ['signal', 'background'] — the reverse of the
    # order confusion_matrix actually uses (0=background, 1=signal).
    sns.heatmap(cm, annot=True, xticklabels=['background', 'signal'], yticklabels=['background', 'signal'])
    plt.title('Confusion matrix')
    plt.ylabel('Actual label')
    plt.xlabel('Predicted label')
plot_cm(y_test, y_pred)
|
notebooks/RNN_models/RNN_12_preprocessing.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/rhapis97/Practice_AI/blob/main/210707_NLP_Encoding%2BDecoding.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/"} id="U0vxVV-7Soyk" outputId="3f6f28ed-62f9-42a3-f6b4-0f5afcf8670e"
# !git clone https://github.com/SOMJANG/Mecab-ko-for-Google-Colab.git
# + colab={"base_uri": "https://localhost:8080/"} id="GOnMu5TJX6bp" outputId="cb42334a-81f5-4600-f824-181d5828a0f4"
# ls # 현재 폴더의 리스트 보여주세요
# + colab={"base_uri": "https://localhost:8080/"} id="ZNWU_s4rYFIS" outputId="2f3fcc65-46da-4704-e036-7ad0cc834fe0"
# cd Mecab-ko-for-Google-Colab/ # cd: change directory
# + colab={"base_uri": "https://localhost:8080/"} id="c5xMpSzGYRAd" outputId="064b5c06-ca22-4968-fa9f-60cb620b6f4d"
# !bash install_mecab-ko_on_colab190912.sh
# + colab={"base_uri": "https://localhost:8080/"} id="wKhiFK-6YXqe" outputId="272f2327-1f91-4073-d0ae-3add095946dc"
# !pip install konlpy
# + id="jfUxLywCaWT8"
from konlpy.tag import Mecab
# + id="jK5HSXQmabbY"
mecab = Mecab()
# + colab={"base_uri": "https://localhost:8080/"} id="ujVYk5_haepv" outputId="1925c627-68fe-47ab-ba30-05ffbc2f9d3a"
print(mecab.morphs('자연어처리가너무재밌어서밥먹는것도가끔까먹어요'))
# + id="GqQOlET_akor"
import os
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="Q-iEA2UUb5SN" outputId="c83f7067-d0b1-47a1-f8e8-d50ab9b68cf0"
pwd # 현재 위치
# + colab={"base_uri": "https://localhost:8080/"} id="xn2RVGr2b81g" outputId="2355551c-f3e9-428b-c15d-68076dd31725"
# cd ../ # 상위폴더로 이동
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="dr5DkcQGb-ox" outputId="637616a8-f42e-4e03-c5b6-77ccf5fdf30e"
pwd
# + id="W_A2m5NLa-7I"
path_to_file = '/content/drive/MyDrive/백_data/korean-english-park.train.ko'
# + colab={"base_uri": "https://localhost:8080/"} id="x0aQkGXEcqlz" outputId="5c0629c6-70cd-4cb7-f1b5-f42daafa1aa9"
with open(path_to_file, 'r', encoding='utf-8') as f:
raw = f.read().splitlines() # 라인별로 split
print('Data Size', len(raw))
print('Example')
for sen in raw[0:100][::20]: # 인덱스 0, 20, 40, 60, 80에 해당하는 데이터 - raw[0:100:20]와 같다
print('>>', sen)
# + colab={"base_uri": "https://localhost:8080/"} id="re9nQqcSeMmi" outputId="4f4b0156-2e89-4bde-a2e8-19fb7dfda42d"
print(len(raw[3]))
print(type(raw))
print(type(raw[3]))
# + colab={"base_uri": "https://localhost:8080/"} id="hUBBfgyviCVH" outputId="5ea265ad-f520-4fa7-9b6e-9f005e21fd6e"
# Sentence-length statistics: collect lengths once and use the builtins
# instead of manually tracked min/max/sum (this also removes the 999 sentinel,
# which would have been wrong if every sentence were 999+ characters long).
lengths = [len(sen) for sen in raw]
min_len = min(lengths)
max_len = max(lengths)
sum_len = sum(lengths)

print('문장의 최단 길이:', min_len)
print('문장의 최장 길이:', max_len)
print('문장의 평균 길이:', sum_len//len(raw))
# + colab={"base_uri": "https://localhost:8080/", "height": 298} id="cAFXCLpyiopb" outputId="5a964fff-8f78-420e-f642-7613c8ab8d07"
# Histogram of sentence lengths.
# BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24 — use the
# builtin int as the dtype instead.
sentence_length = np.zeros((max_len), dtype=int)
for sen in raw:
    sentence_length[len(sen)-1] += 1  # count sentences of each length

plt.bar(range(max_len), sentence_length, width=1.0)
plt.title('Sentence Length Distribution')  # number of sentences (y) per length (x)
# + id="FaZ-3egxi7eT"
def check_sentence_with_length(raw, length):
    """Print sentences from *raw* whose length equals *length* (at most 101)."""
    printed = 0
    for sentence in raw:
        if len(sentence) != length:
            continue
        print(sentence)
        printed += 1
        if printed > 100:
            return
# + colab={"base_uri": "https://localhost:8080/"} id="6LvHzROtjyJI" outputId="f6086a14-1fd8-4177-e2a3-8119d86023fe"
check_sentence_with_length(raw, 3)
# + colab={"base_uri": "https://localhost:8080/"} id="741-ZqP4j2LR" outputId="075b15e7-6773-4627-e326-45cbbe0a14e0"
for idx, _sum in enumerate(sentence_length):
if _sum > 1500:
print('Outlier index:', idx+1)
# + colab={"base_uri": "https://localhost:8080/"} id="3LShcsNSkQQS" outputId="22331d5c-9bd9-4a76-b3c3-d9da31873f53"
# 문장길이가 11
check_sentence_with_length(raw, 11) # 중복 문장이 많다.
# + colab={"base_uri": "https://localhost:8080/"} id="yoOjqnjGkhu1" outputId="6e8b36a8-3b2e-4da4-c38f-8480dc7cc790"
# Deduplicate sentences via a set, then recompute the length statistics with
# the builtin min/max/sum instead of a manual tracking loop.
cleaned_corpus = list(set(raw))  # set() removes duplicate sentences
print('Data Size:', len(cleaned_corpus))

lengths = [len(sen) for sen in cleaned_corpus]
min_len = min(lengths)
max_len = max(lengths)
sum_len = sum(lengths)

print('문장의 최단 길이:', min_len)
print('문장의 최장 길이:', max_len)
print('문장의 평균 길이:', sum_len//len(cleaned_corpus))
# + colab={"base_uri": "https://localhost:8080/"} id="HN3ncUguk4vF" outputId="0ec0c650-3b87-4de2-d2d0-30b1db52c833"
check_sentence_with_length(cleaned_corpus, 11) # 문장 중복이 사라진 것을 확인할 수 있다.
# + colab={"base_uri": "https://localhost:8080/", "height": 281} id="uKEm9EBBlvQM" outputId="5b6b909a-4452-4c32-9db6-ae00ef353ff6"
max_len = 150
min_len = 10

# keep only sentences with length in [min_len, max_len)
filtered_corpus = [s for s in cleaned_corpus if (len(s) < max_len) & (len(s) >= min_len)]

# BUG FIX: np.int was removed in NumPy 1.24 — use the builtin int dtype.
sentence_length = np.zeros((max_len), dtype=int)
for sen in filtered_corpus:
    sentence_length[len(sen)-1] += 1

plt.bar(range(max_len), sentence_length, width=1.0)
plt.title('Sentence Length Distribution')
plt.show()
# + id="kGk97k3imm2R"
# 삐쭉삐쭉한 부분을 다 정제하고 범위도 정해주었더니 이전보다 안정된 그래프를 그린다.
# + [markdown] id="eceuaIZJnif4"
# ## 공백 기반 토큰화
# + id="Ayb2Cs2um7c3"
def tokenize(corpus):
    """Fit a Keras tokenizer on *corpus* and return (padded_tensor, tokenizer).

    Sentences become integer-id sequences, zero-padded at the end
    (``padding='post'``; pass ``'pre'`` to pad at the front instead).
    """
    tok = tf.keras.preprocessing.text.Tokenizer(filters='')
    tok.fit_on_texts(corpus)
    sequences = tok.texts_to_sequences(corpus)
    padded = tf.keras.preprocessing.sequence.pad_sequences(sequences, padding='post')
    return padded, tok
# + colab={"base_uri": "https://localhost:8080/"} id="YaeoCfZ_oXch" outputId="98ae4596-f82f-4051-d7ea-35cd60aedb3d"
# Tokenize the cleaned corpus on whitespace.
split_corpus = [kor.split() for kor in filtered_corpus]
print(split_corpus[0])  # sentences are split cleanly on spaces
# + id="-mCi_FvfonRy"
split_tensor, split_tokenizer = tokenize(split_corpus)
# + colab={"base_uri": "https://localhost:8080/"} id="ygI_Gqv2pMZk" outputId="d1fc0956-ec64-413d-8bb9-489d0271faeb"
print('Split Vocab Size:', len(split_tokenizer.index_word))
# + colab={"base_uri": "https://localhost:8080/"} id="h6RJTlD0wBfu" outputId="0a257748-f777-4080-d45a-9c6e7cf738b4"
split_tensor, split_tokenizer = tokenize(filtered_corpus)
print('Split Vocab Size:', len(split_tokenizer.index_word))
# + colab={"base_uri": "https://localhost:8080/"} id="SAc_cH7cwOd0" outputId="056d714b-047b-4306-d276-260ad27a8d95"
for idx, word in enumerate(split_tokenizer.word_index):
print(idx, ':', word)
if idx > 10: break
# + id="e6qnSRw4wj-U"
# 밝혔다 → 밝 + 혔다 / 원형이 밝히다 or 밝다 로 다양하게 해석될 수 있다.(중의성)
# 따라서 의미로 토큰화하는 mecab의 형태소 분석 이용해보자
# + [markdown] id="ge7X20Zs3Zkq"
# ## 형태소 토큰화
# + id="fADAjcPrxNt9"
# Build a MeCab-based corpus: each sentence becomes a list of morphemes.
def mecab_split(sentence):
    """Return the list of morphemes in *sentence* via the global MeCab instance."""
    return mecab.morphs(sentence)

mecab_corpus = [mecab_split(kor) for kor in filtered_corpus]
# + colab={"base_uri": "https://localhost:8080/"} id="8D7pxCt0x-ze" outputId="f79863d5-bbf4-4b20-dd1e-d492714a5000"
print(mecab_corpus[0]) # 형태소 단위로 분리하고 있다.
# + colab={"base_uri": "https://localhost:8080/"} id="feruK9eBxqWw" outputId="8e8fd45c-74ed-45c3-8a9e-2fab1d9a4511"
mecab_tensor, mecab_tokenizer = tokenize(mecab_corpus)
print('MeCab Vocab Size:', len(mecab_tokenizer.index_word))
# + colab={"base_uri": "https://localhost:8080/"} id="F13f9eBiyHWo" outputId="627370d8-27cc-42b0-9012-d28d2af90d32"
print(mecab_tokenizer.index_word) # 형태소와 인덱스가 딕셔너리 형태로 짝지어져 있다
# + id="qdAghtHAy3Y1"
# 위의 이러한 과정을 인코딩이라고 부른다.
# 이제 다시 복구하는 디코딩
# + [markdown] id="-GRq6kI83S-z"
# ## Decoding(Detokenizer)
# + [markdown] id="rj8kwu-v3gQm"
# 1. tokenizer.sequences_to_texts() 함수를 사용하여 Decoding
# 2. tokenizer.index_word를 사용하여 Decoding
#
# 두 가지 방법을 사용하여 mecab_tensor[100]을 원문으로 되돌려 봅시다.
# (여기서는 띄어쓰기는 고려하지 않습니다!)
# + colab={"base_uri": "https://localhost:8080/"} id="9IxXteN63UF4" outputId="7fddea63-916c-40e3-bc05-f136c20188c0"
# Case 1: decode the whole id sequence at once with sequences_to_texts().
texts = mecab_tokenizer.sequences_to_texts([mecab_tensor[100]])[0]
print(texts)
# + colab={"base_uri": "https://localhost:8080/"} id="DJ05fS7a33h0" outputId="8d4c52b0-e731-4c88-b63a-d087a3b2b251"
# Case 2: look each id up in index_word, skipping the padding id 0.
sentence = ''.join(mecab_tokenizer.index_word[w] + ' '
                   for w in mecab_tensor[100] if w != 0)
print(sentence)
# + id="YedUfboY4t3v"
|
210707_NLP_Encoding+Decoding.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from cast_to_xarray import *
import cartopy.crs as ccrs
import cartopy.feature as cf
# +
# Load the processed CTD casts (.cnv files) into xarray datasets, one per
# station; the second argument is the station label.
data_dir = 'data_cruise1'
cast4 = cast_to_xarray(data_dir+'/Station4_TSwift_loop_filter_teos10_bin.cnv', '4')
cast5 = cast_to_xarray(data_dir+'/Station5_loop_filter_teos10_bin.cnv', '5')
castWF = cast_to_xarray(data_dir+'/StationWF_loop_filter_teos10_bin.cnv', 'WF')
castRace = cast_to_xarray(data_dir+'/TheRace_loop_filter_teos10_bin.cnv', 'Race')
# +
# combine all the casts into one xarray dataset along a new "cast" dimension,
# in the order the stations were visited
ctdsection = xr.concat([castWF, cast5, cast4, castRace],"cast")
#ctdsection
# -
# Map extent as [lon_min, lon_max, lat_min, lat_max].
extent = [-74,-71,40.3,41.7]
# Download ETOPO1 topography for the domain (this may take a minute)
url = 'http://geoport.whoi.edu/thredds/dodsC/bathy/etopo1_bed_g2'
bat = xr.open_dataset(url)
bat = bat.sel(lon=slice(extent[0],extent[1]),lat=slice(extent[2],extent[3]))
# +
# Plot the cruise map: bathymetry background, land mask, cruise track,
# CTD station markers, and a label for each station.
projection = ccrs.PlateCarree()
fig, ax = plt.subplots(figsize=(14, 17),
                       subplot_kw=dict(projection=projection))
ax.set_extent(extent)
# Plot topography
bat['topo'].plot.contourf(
    ax=ax,levels=np.linspace(-500,100,10),add_colorbar=False,extend='neither',cmap='Blues_r',transform=projection)
# Dark land mask drawn on top of the bathymetry
_ = ax.add_feature(cf.NaturalEarthFeature('physical', 'land', '10m',
                                          edgecolor='face', facecolor='0.2'),
                   zorder=3)
# Cruise track connecting the stations (yellow line)
plt.plot(ctdsection.longitude.values,ctdsection.latitude.values, color=([1, 1, 0]))
# plot CTD stations
ctdsection.plot.scatter('longitude', 'latitude', color='tab:red',transform=projection, s=60, zorder=3)
# Label every station just south of its marker (replaces four copy-pasted
# plt.text calls with a single loop).
for lon, lat, name in zip(ctdsection.longitude.values,
                          ctdsection.latitude.values,
                          ctdsection.station.values):
    plt.text(lon, lat - .09, name, color='k', transform=projection, zorder=1)
gl = ax.gridlines(draw_labels=True,linewidth=1, color='gray', alpha=0.5, linestyle='--')
plt.rcParams.update({'font.size': 18})
plt.savefig("Cruise1_map.png")
# -
ctdsection
|
Cruise1_map.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# <img src="img/turtlebot.png" alt="TurtleBot" style="width: 200px;" align="right"/>
#
# # Try a TurtleBot
#
# Play and learn with a TurtleBot robot and ROS.
#
# ## Code Examples
# * [Motion and odometry](Motion and Odometry.ipynb)
# * [Camera](Camera.ipynb)
# * [Color Segmentation](Color Segmentation.ipynb)
# * [ArUco Markers](ArUco Markers.ipynb)
# ---
#
# ### Try-a-Bot: an open source guide for robot programming
#
# Developed by:
# [](http://robinlab.uji.es)
#
# Sponsored by:
# <table>
# <tr>
# <td style="border:1px solid #ffffff ;">[](http://www.ieee-ras.org)</td>
# <td style="border:1px solid #ffffff ;">[](http://www.cyberbotics.com)</td>
# <td style="border:1px solid #ffffff ;">[](http://www.theconstructsim.com)</td>
# </tr>
# </table>
#
# Follow us:
# <table>
# <tr>
# <td style="border:1px solid #ffffff ;">[](https://www.facebook.com/RobotProgrammingNetwork)</td>
# <td style="border:1px solid #ffffff ;">[](https://www.youtube.com/user/robotprogrammingnet)</td>
# </tr>
# </table>
#
|
default.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Introduction
#
# Underpinning all of the prediction techniques are models which assume "repeat" or "near-repeat" victimisation: the tendency for one crime event at a location to give rise to an increased risk of other events at the same, or similar, locations in the future.
#
# Missing from most of these models is an analysis of time of day effects. A crime event in the evening might imply increased risk only on subsequent evenings, with perhaps no increased risk in the morning?
#
# We are not aware of any _detailed_ prediction algorithms incorporating such analysis, and so we offer no implementations. However, this document contains some discussion of the literature.
# # Sources
#
# 1. <NAME>, "'Disrupting the optimal forager': predictive risk mapping and domestic burglary reduction in Trafford, Greater Manchester", International Journal of Police Science & Management 2012, doi:10.1350/ijps.2012.14.1.260
#
# 2. <NAME> al., "Prospective crime mapping in operational context", Home Office Online Report 19/07 [Police online library](http://library.college.police.uk/docs/hordsolr/rdsolr1907.pdf)
#
# 3. Ratcliffe, "Aoristic analysis: the spatial interpretation of unspecific temporal events" [doi:10.1080/136588100424963](http://dx.doi.org/10.1080/136588100424963)
# # Discussion
#
# (1) discusses a practical trial in Trafford. However, I have not been able to extract an exact algorithm from the description in the paper. Furthermore, it is not clear how we might compare such predictions against those given by the other algorithms. However, revisiting this is surely of interest for future work.
#
# (2) See pages 39--41. This does indeed find evidence that repeat victimisation is linked to a time of day effect. The analysis looks at the 3 shift patterns employed by the police in the study. It ends with the following note:
#
# > Thus, as a final change to
# the system, the predictive model was developed so that it produced different maps for the
# different shifts, weighting more heavily those events that occurred during the same shift as
# that for which the prediction was made.
#
# However, no details as to these "weighting"(s) is given.
#
# (3) Discusses "aoristic analysis": how to handle the fact that e.g. most burglary crime does not occur at a known time point, but rather in a time range (e.g. at some point during the day while the property in question was unoccupied). There is some brief discussion of how to adapt the standard hot-spot algorithm, but no real discussion of "prediction".
|
examples/Time of day considerations.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import re
import requests
from bs4 import BeautifulSoup
import webscraping_functions as wf
import time
import concurrent.futures
# +
url_domain = "https://en.wikipedia.org"
website_url = "https://en.wikipedia.org/wiki/World_Weightlifting_Championships"
header_name = "Combined"
# Scrape the competition urls from the World_Weightlifting_Championships page on Wikipedia
competition_urls = [
url_domain + elem for elem in wf.WikiParser.iwf_links(
website_url, header_name, wf.years_list
)]
# -
competition_urls
# Use threading to grab the results url from every weightclass and append it to a list
with concurrent.futures.ThreadPoolExecutor() as executor:
weightclass_urls = list(executor.map(wf.WikiParser.process_urls, competition_urls))
comp_urls = []
for index in range(len(weightclass_urls)):
for elem in weightclass_urls[index]:
comp_urls.append(url_domain + elem)
comp_urls
# Use threading to write the results tables for every weightclass from 1996+ and write to .csv files
with concurrent.futures.ThreadPoolExecutor() as executor:
futures = []
file_names = executor.map(wf.datatable_cleanup.results_table, comp_urls)
for names in file_names:
futures.append(names)
# A function to concate all of the above .csv files into one file
file_name = "IWF-championships-total-results-1996-2019"
wf.datatable_cleanup.concat_csv(file_name)
# +
website_url = "https://en.wikipedia.org/wiki/2019_World_Weightlifting_Championships_%E2%80%93_Women%27s_59_kg"
header_name = "Results"
df = wf.WikiParser.results_to_dataframe(website_url, header_name)
df
# -
# Extract metadata columns from the competition URL, clean up the scraped
# results table, compute per-lift rankings, and write it out as a CSV file.
year = wf.datatable_cleanup.insert_year(website_url)
gender = wf.datatable_cleanup.insert_gender(website_url)
# NOTE(review): the event value is produced by insert_gender as well — this
# looks like a copy-paste bug (insert_event?); confirm against
# webscraping_functions before relying on the "Event" column.
event = wf.datatable_cleanup.insert_gender(website_url)
url_header = wf.WikiParser.get_h1_text(website_url)
header_name = "Results"
# Column names for the three snatch and three clean & jerk attempts.
snatch_cols = ["Snatch 1 (kg)", "Snatch 2 (kg)", "Snatch 3 (kg)"]
clean_cols = ["C/J 1 (kg)", "C/J 2 (kg)", "C/J 3 (kg)"]
df = wf.WikiParser.results_to_dataframe(website_url, header_name)
# Cleanup helpers are called for their side effects on df (returns unused);
# lift_rankings adds a best-lift column and a rank column per lift type.
wf.ResultsCleanup.column_row_cleanup(df)
wf.ResultsCleanup.data_cleanup(df)
wf.ResultsCleanup.lift_rankings(df, snatch_cols, "Max Snatch", "Snatch Rank")
wf.ResultsCleanup.lift_rankings(df, clean_cols, "Max C/J", "C/J Rank")
# Prepend the metadata columns, then save under the page's H1 title.
df.insert(0,"Year", year)
df.insert(1, "Event", event)
df.insert(2, "Gender", gender)
file_name = url_header + ".csv"
df.to_csv(file_name)
# +
website_url = "https://en.wikipedia.org/wiki/2019_World_Weightlifting_Championships_%E2%80%93_Women%27s_59_kg"
header_name = "Results"
df = wf.WikiParser.results_to_dataframe(website_url, header_name)
# -
results_dataframe = df
results_dataframe
if not "Rank\n" in results_dataframe.iloc[1].values:
results_dataframe.insert(9, "Snatch Rank", 0)
results_dataframe.insert(14, "C/J Rank", 0)
results_dataframe
else:
results_dataframe.rename({9: "Snatch Rank", 14: "C/J Rank"})
# # results_dataframe.iloc[columns=9].column = "Snatch Rank"
# # results_dataframe.iloc[columns=14].column = "C/J Rank"
# results_dataframe.drop(columns=[8, 14], inplace = True)
results_dataframe
wf.CheckFunctions.check_group(results_dataframe)
results_dataframe
wf.CheckFunctions.check_bodyweight(results_dataframe)
results_dataframe
wf.CheckFunctions.check_nation(results_dataframe)
results_dataframe
df = wf.WikiParser.results_to_dataframe(website_url, header_name)
wf.CheckFunctions.check_max_lift(results_dataframe)
results_dataframe
wf.CheckFunctions.check_rank(results_dataframe)
results_dataframe
results_dataframe
column_names = (
"Comp Rank, Athlete Name, Nationality, Group, Body Weight (kg), "
"Snatch 1 (kg), Snatch 2 (kg), Snatch 3 (kg), Max Snatch, Snatch Rank, "
"C/J 1 (kg), C/J 2 (kg), C/J 3 (kg), Max C/J, C/J Rank, Total"
).split(", ")
results_dataframe.columns = column_names
results_dataframe
results_dataframe.drop([0,1], inplace=True)
results_dataframe.reset_index(inplace=True)
results_dataframe.drop("index", axis=1, inplace=True)
# Change country name to country code for consistency
# NOTE(review): ``index`` is the row position within wf.country_codes, but it
# is then used to index results_dataframe — so the code may be written into
# the wrong athlete's row. It also uses chained assignment
# (df[col][i] = ...), which pandas may apply to a copy; .loc would be safer.
# Confirm intent before relying on this cell.
for country in results_dataframe["Nationality"].values.tolist():
    if country in wf.country_codes["Country"].values.tolist():
        # Look up the Alpha-3 code for this country name.
        index = wf.country_codes["Country"].values.tolist().index(country)
        code = wf.country_codes["Alpha-3 code"][index]
        results_dataframe["Nationality"][index] = code
    else:
        pass
results_dataframe
wf.ResultsCleanup.column_row_cleanup(df)
wf.ResultsCleanup.data_cleanup(df)
wf.ResultsCleanup.lift_rankings(df, snatch_cols, "Max Snatch", "Snatch Rank")
wf.ResultsCleanup.lift_rankings(df, clean_cols, "Max C/J", "C/J Rank")
df.insert(0,"Year", year)
df.insert(1, "Gender", gender)
file_name = url_header + ".csv"
df.to_csv(file_name)
# !jupyter nbconvert --to script iwf-championships-results-scraping.ipynb
|
.ipynb_checkpoints/iwf-championships-results-scraping-Copy1-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Deep Learning Models -- A collection of various deep learning architectures, models, and tips for TensorFlow and PyTorch in Jupyter Notebooks.
# - Author: <NAME>
# - GitHub Repository: https://github.com/rasbt/deeplearning-models
# %load_ext watermark
# %watermark -a '<NAME>' -v -p tensorflow,numpy
# # Chunking an Image Dataset for Minibatch Training using NumPy NPZ Archives
# This notebook provides an example for how to organize a large dataset of images into chunks for quick access during minibatch learning. This approach uses NumPy .npz archive files and only requires having NumPy as a dependency so that this approach should be compatible with different Python-based machine learning and deep learning libraries and packages for further image (pre)processing and augmentation.
#
# While this approach performs reasonably well (sufficiently well for my applications), you may also be interested in TensorFlow's "[Reading Data](https://www.tensorflow.org/programmers_guide/reading_data)" guide to work with `TfRecords` and file queues.
#
# ## 0. The Dataset
# Let's pretend we have a directory of images containing two subdirectories with images for training, validation, and testing. The following function will create such a dataset of images in JPEG format locally for demonstration purposes.
# +
# Note that executing the following code
# cell will download the MNIST dataset
# and save all the 60,000 images as separate JPEG
# files. This might take a few minutes depending
# on your machine.
import numpy as np
# load utilities from ../helper.py
import sys
sys.path.insert(0, '..')
from helper import mnist_export_to_jpg
np.random.seed(123)
mnist_export_to_jpg(path='./')
# +
import os
for i in ('train', 'valid', 'test'):
print('mnist_%s subdirectories' % i, os.listdir('mnist_%s' % i))
# -
# Note that the names of the subdirectories correspond directly to the class label of the images that are stored under it.
# To make sure that the images look okay, the snippet below plots an example image from the subdirectory `mnist_train/9/`:
# +
# %matplotlib inline
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import os
some_img = os.path.join('./mnist_train/9/', os.listdir('./mnist_train/9/')[0])
img = mpimg.imread(some_img)
print(img.shape)
plt.imshow(img, cmap='binary');
# -
# Note: The JPEG format introduces a few artifacts that we can see in the image above. In this case, we use JPEG instead of PNG. Here, JPEG is used for demonstration purposes since that's still format many image datasets are stored in.
# ## 1. Chunking Images into NumPy NPZ Archive Files
# The following wrapper function creates .npz archive files training, testing, and validation. It will group images together into integer arrays that are then saved as .npz archive files. The number of rows (images) in each .npz archive will be equal to the `archive_size` argument.
# +
import numpy as np
import glob
def images_to_pickles(data_stempath='./mnist_', which_set='train',
                      archive_size=5000, width=28, height=28, channels=1,
                      shuffle=False, seed=None):
    """Group the JPEGs under ``<data_stempath><which_set>/`` into .npz archives.

    Each archive holds ``archive_size`` images as a uint8 array under 'data'
    (shape ``(archive_size, height, width, channels)``) and their class
    labels — taken from each file's parent directory name — under 'labels'.

    Note: a trailing partial chunk (fewer than ``archive_size`` images) is
    not written, matching the original behaviour.
    """
    if not os.path.exists('%snpz' % data_stempath):
        os.mkdir('%snpz' % data_stempath)
    img_paths = [p for p in glob.iglob('%s%s/**/*.jpg' %
                                       (data_stempath, which_set), recursive=True)]
    if shuffle:
        rgen = np.random.RandomState(seed)
        # Fix: RandomState.shuffle works in place and returns None; the old
        # ``paths = rgen.shuffle(img_paths)`` bound None to a dead variable
        # and only worked by accident.
        rgen.shuffle(img_paths)
    idx, file_idx = 0, 1
    data = np.zeros((archive_size, height, width, channels), dtype=np.uint8)
    labels = np.zeros(archive_size, dtype=np.uint8)
    for path in img_paths:
        # Fix: flush when the buffer is actually full (== archive_size).
        # The old ``idx >= archive_size - 1`` check saved one image early,
        # leaving the last row of every archive stale.
        if idx >= archive_size:
            idx = 0
            savepath = os.path.join('%snpz' % data_stempath, '%s_%d.npz' %
                                    (which_set, file_idx))
            file_idx += 1
            np.savez(savepath, data=data, labels=labels)
        label = int(os.path.basename(os.path.dirname(path)))
        image = mpimg.imread(path)
        # Grayscale JPEGs come back 2-D; add the trailing channel axis.
        # NOTE(review): already-3-D images are silently skipped (data[idx]
        # keeps its previous content) — confirm inputs are always grayscale.
        if len(image.shape) == 2:
            data[idx] = image[:, :, np.newaxis]
        labels[idx] = label
        idx += 1
# -
images_to_pickles(which_set='train', shuffle=True, seed=1)
images_to_pickles(which_set='valid', shuffle=True, seed=1)
images_to_pickles(which_set='test', shuffle=True, seed=1)
# The .npz files we created are stored under a new directory, `mnist_npz`:
os.listdir('mnist_npz')
# To check that the archiving worked correctly, we will now load one of those .npz archives. Note that we can now access each archive just like a python dictionary. Here the `'data'` key contains the image data and the `'labels'` key stores an array containing the corresponding class labels:
data = np.load('mnist_npz/test_1.npz')
print(data['data'].shape)
print(data['labels'].shape)
plt.imshow(data['data'][0][:, :, -1], cmap='binary');
print('Class label:', data['labels'][0])
# ## 2. Loading Minibatches
# The following cell implements a class for iterating over the MNIST images, based on the .npz archives, conveniently.
# Via the `normalize` parameter we additionally scale the image pixels to [0, 1] range, which typically helps with gradient-based optimization in practice.
#
# The key functions (here: generators) are
#
# - load_train_epoch
# - load_valid_epoch
# - load_test_epoch
#
# These let us iterate over small chunks (determined via `minibatch_size`). Each of these functions will load the images from a particular .npz archive into memory (here: 5000 images) and yield minibatches of smaller or equal size (for example, 50 images at a time). Via the two shuffle parameters, we can further control if the images within each .npz archive should be shuffled, and if the order the .npz files are loaded should shuffled after each epoch. By setting `onehot=True`, the labels are converted into a onehot representation for convenience.
class BatchLoader():
    """Minibatch iterator over the .npz image archives written by
    ``images_to_pickles``.

    Parameters
    ----------
    minibatches_path : str
        Directory holding ``train_*.npz``, ``valid_*.npz`` and
        ``test_*.npz`` archives.
    normalize : bool
        If True, scale uint8 pixel values to float32 in [0, 1].
    """
    def __init__(self, minibatches_path,
                 normalize=True):
        self.normalize = normalize
        self.train_batchpaths = [os.path.join(minibatches_path, f)
                                 for f in os.listdir(minibatches_path)
                                 if 'train' in f]
        self.valid_batchpaths = [os.path.join(minibatches_path, f)
                                 for f in os.listdir(minibatches_path)
                                 if 'valid' in f]
        # Fix: the original filtered on 'train' here, so "test" epochs
        # silently iterated the training archives.
        self.test_batchpaths = [os.path.join(minibatches_path, f)
                                for f in os.listdir(minibatches_path)
                                if 'test' in f]
        # Hard-coded sizes for the 45k/5k/10k MNIST split used in this
        # notebook.
        self.num_train = 45000
        self.num_valid = 5000
        self.num_test = 10000
        self.n_classes = 10
    def load_train_epoch(self, batch_size=50, onehot=False,
                         shuffle_within=False, shuffle_paths=False,
                         seed=None):
        """Yield (batch_x, batch_y) minibatches for one training epoch."""
        for batch_x, batch_y in self._load_epoch(which='train',
                                                 batch_size=batch_size,
                                                 onehot=onehot,
                                                 shuffle_within=shuffle_within,
                                                 shuffle_paths=shuffle_paths,
                                                 seed=seed):
            yield batch_x, batch_y
    def load_test_epoch(self, batch_size=50, onehot=False,
                        shuffle_within=False, shuffle_paths=False,
                        seed=None):
        """Yield (batch_x, batch_y) minibatches for one test epoch."""
        for batch_x, batch_y in self._load_epoch(which='test',
                                                 batch_size=batch_size,
                                                 onehot=onehot,
                                                 shuffle_within=shuffle_within,
                                                 shuffle_paths=shuffle_paths,
                                                 seed=seed):
            yield batch_x, batch_y
    def load_validation_epoch(self, batch_size=50, onehot=False,
                              shuffle_within=False, shuffle_paths=False,
                              seed=None):
        """Yield (batch_x, batch_y) minibatches for one validation epoch."""
        for batch_x, batch_y in self._load_epoch(which='valid',
                                                 batch_size=batch_size,
                                                 onehot=onehot,
                                                 shuffle_within=shuffle_within,
                                                 shuffle_paths=shuffle_paths,
                                                 seed=seed):
            yield batch_x, batch_y
    def _load_epoch(self, which='train', batch_size=50, onehot=False,
                    shuffle_within=True, shuffle_paths=True, seed=None):
        """Load each archive for *which* in turn and yield minibatches.

        Each yielded batch is a generator over (data, labels); a trailing
        batch smaller than *batch_size* within an archive is dropped.
        """
        if which == 'train':
            paths = self.train_batchpaths
        elif which == 'valid':
            paths = self.valid_batchpaths
        elif which == 'test':
            paths = self.test_batchpaths
        else:
            # Fix: the message now mentions "valid", which is accepted above.
            raise ValueError('`which` must be "train", "valid", or "test".'
                             ' Got %s.' % which)
        rgen = np.random.RandomState(seed)
        if shuffle_paths:
            # Fix: RandomState.shuffle shuffles in place and returns None;
            # the original assigned that None to ``paths`` and then crashed
            # iterating it. Shuffle a copy so the instance's path lists
            # keep their order.
            paths = list(paths)
            rgen.shuffle(paths)
        for batch in paths:
            dct = np.load(batch)
            if onehot:
                # Expand integer labels to a (n, n_classes) onehot matrix.
                labels = (np.arange(self.n_classes) ==
                          dct['labels'][:, None]).astype(np.uint8)
            else:
                labels = dct['labels']
            if self.normalize:
                # normalize to [0, 1] range
                data = dct['data'].astype(np.float32) / 255.
            else:
                data = dct['data']
            arrays = [data, labels]
            del dct
            indices = np.arange(arrays[0].shape[0])
            if shuffle_within:
                rgen.shuffle(indices)
            for start_idx in range(0, indices.shape[0] - batch_size + 1,
                                   batch_size):
                index_slice = indices[start_idx:start_idx + batch_size]
                yield (ary[index_slice] for ary in arrays)
# The following for loop will iterate over the 45,000 training examples in our MNIST training set, yielding 50 images and labels at a time (note that we previously set aside 5000 training example as our validation datast).
# +
batch_loader = BatchLoader(minibatches_path='./mnist_npz/',
normalize=True)
for batch_x, batch_y in batch_loader.load_train_epoch(batch_size=50, onehot=True):
print(batch_x.shape)
print(batch_y.shape)
break
# +
cnt = 0
for batch_x, batch_y in batch_loader.load_train_epoch(batch_size=50, onehot=True):
cnt += batch_x.shape[0]
print('One training epoch contains %d images' % cnt)
# +
def one_epoch():
for batch_x, batch_y in batch_loader.load_train_epoch(batch_size=50, onehot=True):
pass
% timeit one_epoch()
# -
# As we can see from the benchmark above, an iteration over one training epoch (45k images) is relatively fast.
# Similarly, we could iterate over validation and test data via
#
# - batch_loader.load_validation_epoch
# - batch_loader.load_test_epoch
# ## 3. Training a Model using TensorFlow's `feed_dict`
# The following code demonstrate how we can feed our minibatches into a TensorFlow graph using a TensorFlow session's `feed_dict`.
# ### Multilayer Perceptron Graph
# +
import tensorflow as tf
##########################
### SETTINGS
##########################
# Hyperparameters
learning_rate = 0.1
training_epochs = 15
batch_size = 100
# Architecture
n_hidden_1 = 128
n_hidden_2 = 256
height, width = 28, 28
n_classes = 10
##########################
### GRAPH DEFINITION
##########################
g = tf.Graph()
with g.as_default():
tf.set_random_seed(123)
# Input data
tf_x = tf.placeholder(tf.float32, [None, height, width, 1], name='features')
tf_x_flat = tf.reshape(tf_x, shape=[-1, height*width])
tf_y = tf.placeholder(tf.int32, [None, n_classes], name='targets')
# Model parameters
weights = {
'h1': tf.Variable(tf.truncated_normal([width*height, n_hidden_1], stddev=0.1)),
'h2': tf.Variable(tf.truncated_normal([n_hidden_1, n_hidden_2], stddev=0.1)),
'out': tf.Variable(tf.truncated_normal([n_hidden_2, n_classes], stddev=0.1))
}
biases = {
'b1': tf.Variable(tf.zeros([n_hidden_1])),
'b2': tf.Variable(tf.zeros([n_hidden_2])),
'out': tf.Variable(tf.zeros([n_classes]))
}
# Multilayer perceptron
layer_1 = tf.add(tf.matmul(tf_x_flat, weights['h1']), biases['b1'])
layer_1 = tf.nn.relu(layer_1)
layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
layer_2 = tf.nn.relu(layer_2)
out_layer = tf.matmul(layer_2, weights['out']) + biases['out']
# Loss and optimizer
loss = tf.nn.softmax_cross_entropy_with_logits(logits=out_layer, labels=tf_y)
cost = tf.reduce_mean(loss, name='cost')
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
train = optimizer.minimize(cost, name='train')
# Prediction
correct_prediction = tf.equal(tf.argmax(tf_y, 1), tf.argmax(out_layer, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name='accuracy')
# -
# ### Training the Neural Network with Minibatches
# +
##########################
### TRAINING & EVALUATION
##########################
batch_loader = BatchLoader(minibatches_path='./mnist_npz/',
                           normalize=True)
# preload small validation set (one 5000-image batch)
# by unpacking the generator
[valid_data] = batch_loader.load_validation_epoch(batch_size=5000,
                                                  onehot=True)
# Fix: each batch yielded by the loader is itself a generator, so it cannot
# be indexed with [0]/[1]; unpack it instead.
valid_x, valid_y = valid_data
del valid_data
with tf.Session(graph=g) as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(training_epochs):
        avg_cost = 0.
        n_batches = 0
        for batch_x, batch_y in batch_loader.load_train_epoch(batch_size=batch_size,
                                                              onehot=True,
                                                              seed=epoch):
            n_batches += 1
            # Fix: ``np.int`` was removed in NumPy 1.24; the builtin int
            # gives the same platform-default integer dtype.
            _, c = sess.run(['train', 'cost:0'], feed_dict={'features:0': batch_x,
                                                            'targets:0': batch_y.astype(int)})
            avg_cost += c
        # Accuracy on the last minibatch of the epoch and on the full
        # validation set.
        train_acc = sess.run('accuracy:0', feed_dict={'features:0': batch_x,
                                                      'targets:0': batch_y})
        valid_acc = sess.run('accuracy:0', feed_dict={'features:0': valid_x,
                                                      'targets:0': valid_y})
        print("Epoch: %03d | AvgCost: %.3f" % (epoch + 1, avg_cost / n_batches), end="")
        print(" | MbTrain/Valid ACC: %.3f/%.3f" % (train_acc, valid_acc))
    # imagine test set is too large to fit into memory:
    test_acc, cnt = 0., 0
    for test_x, test_y in batch_loader.load_test_epoch(batch_size=100,
                                                       onehot=True):
        cnt += 1
        acc = sess.run(accuracy, feed_dict={'features:0': test_x,
                                            'targets:0': test_y})
        test_acc += acc
    print('Test ACC: %.3f' % (test_acc / cnt))
|
tensorflow1_ipynb/mechanics/image-data-chunking-npz.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="A-fObiRS6xuW"
# # Machine learning in Earth Engine
# + [markdown] id="OQnuuo-36439"
# Machine Learning (ML) in Earth Engine is supported with Earth Engine API methods in the `ee.Classifier`, `ee.Clusterer`, or `ee.Reducer` packages for training and inference within Earth Engine.
#
# These are useful for approx. less than 400 images.
#
# If more, TensorFlow is the way to go.
#
# TensorFlow is developed and trained outside of Earth Engine, but Earth Engine provides the option to import and export data in [TFRecord](https://www.tensorflow.org/tutorials/load_data/tfrecord#tfrecords_format_details) format. This way, you can generate training datasets in Earth Engine.
#
# Due to time limitations, we are going to focus on Classifiers, but you should explore further in your own time!
# + [markdown] id="xPZHaqbJ8Gul"
# ## Supervised classification algorithms
# + [markdown] id="cBZWDSsK8NfX"
# The `Classifier` package handles supervised classification by traditional Machine Learning (ML) algorithms running in Earth Engine.
#
# These classifiers include Classification and Regression Trees ([CART](https://towardsdatascience.com/https-medium-com-lorrli-classification-and-regression-analysis-with-decision-trees-c43cdbc58054)), [RandomForest](https://towardsdatascience.com/understanding-random-forest-58381e0602d2), [NaiveBayes](https://towardsdatascience.com/all-about-naive-bayes-8e13cef044cf) and Support Vector Machine ([SVM](https://towardsdatascience.com/support-vector-machines-svm-c9ef22815589)).
# + [markdown] id="1KXnM8tu93fj"
# The way classification works is:
#
#
#
# * Collect training data. Assemble features which have a property that stores the known class label and properties storing numeric values for the predictors.
# * Instantiate a classifier. Set its parameters if necessary.
# * Train the classifier using the training data.
# * Classify an image or feature collection.
# * Estimate classification error with independent validation data.
#
#
# + id="XmD3dQUvD4nv" colab={"base_uri": "https://localhost:8080/"} outputId="fee1109f-406d-426b-d0b4-31e94605b761"
# Import earthengine API
import ee
# Authenticate and initialise
ee.Authenticate()
ee.Initialize()
# + id="AXEZqxl4-E6a"
# Make a cloud-free Landsat 8 TOA composite (from raw imagery)
l8 = ee.ImageCollection('LANDSAT/LC08/C01/T1')
image = ee.Algorithms.Landsat.simpleComposite(l8.filterDate('2018-01-01', '2018-12-31'))#,asFloat='true')
# Use these bands for prediction.
bands = ['B2', 'B3', 'B4', 'B5', 'B6', 'B7', 'B10', 'B11']
# Load training points. The numeric property 'class' stores known labels.
points = ee.FeatureCollection('GOOGLE/EE/DEMOS/demo_landcover_labels')
# This property stores the land cover labels as consecutive
# integers starting from zero.
label = 'landcover'
# Overlay the points on the imagery to get training.
training = image.select(bands).sampleRegions(points,properties=[label],scale=30)
# Train a CART classifier with default parameters.
trained = ee.Classifier.smileCart().train(training, label, bands)
# Classify the image with the same bands used for training.
classified = image.select(bands).classify(trained)
# + id="16af-yxDFx-r" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="f677ec02-7662-46d2-8258-fe731cfc345b"
# Plot the result
import folium
# !pip install geehydro # Life saver for plotting GEE stuff with Python!
import geehydro
#print(points.getInfo())
# Use folium to visualize the imagery.
map = folium.Map(location=[37.820452055421086,-122.27096557617189],zoom_start=11)
map.addLayer(image, {'min':0, 'max':100, 'bands': ['B4', 'B3', 'B2']}, 'image')
map.addLayer(classified, {'min':0, 'max':2, 'palette': ['red', 'green', 'blue']}, 'classification')
folium.LayerControl().add_to(map)
map
# + [markdown] id="waAv_U9FI9k0"
# Note that the training property (`'landcover'`) stores consecutive integers starting at 0 (Use `remap()` on your table to turn your class labels into consecutive integers starting at zero if necessary).
#
# If the training data are polygons representing homogenous regions, every pixel in each polygon is a training point. You can use polygons to train as illustrated in the following example:
# + id="YbGbywJXJDsP"
# Make a cloud-free Landsat 8 TOA composite (from raw imagery).
l8 = ee.ImageCollection('LANDSAT/LC08/C01/T1')
image = ee.Algorithms.Landsat.simpleComposite(l8.filterDate('2018-01-01', '2018-12-31'))
# Use these bands for prediction.
bands = ['B2', 'B3', 'B4', 'B5', 'B6', 'B7', 'B10', 'B11']
# Manually created polygons.
forest1 = ee.Geometry.Rectangle(-63.0187, -9.3958, -62.9793, -9.3443)
forest2 = ee.Geometry.Rectangle(-62.8145, -9.206, -62.7688, -9.1735)
nonForest1 = ee.Geometry.Rectangle(-62.8161, -9.5001, -62.7921, -9.4486)
nonForest2 = ee.Geometry.Rectangle(-62.6788, -9.044, -62.6459, -8.9986)
# Make a FeatureCollection from the hand-made geometries.
polygons = ee.FeatureCollection([
ee.Feature(nonForest1, {'class': 0}),
ee.Feature(nonForest2, {'class': 0}),
ee.Feature(forest1, {'class': 1}),
ee.Feature(forest2, {'class': 1}),
])
# Get the values for all pixels in each polygon in the training.
# Get the sample from the polygons FeatureCollection.
# Keep this list of properties from the polygons.
# Set the scale to get Landsat pixels in the polygons.
training = image.sampleRegions(polygons, properties= ['class'], scale= 30)
# Create an SVM classifier with custom parameters.
# RBF = Radial Basis Function kernel
classifier = ee.Classifier.libsvm(kernelType='RBF',gamma= 0.5,cost= 10)
# Train the classifier.
trained = classifier.train(training, 'class', bands);
# Classify one image.
classified = image.classify(trained)
# Redude the region to plot it without issues
roi = ee.Geometry.Rectangle([-62.836, -9.2399, -8, -61]);
classified_reduced = classified.clip(roi)
# + id="Q9UbYEN8mlgR" colab={"base_uri": "https://localhost:8080/", "height": 433} outputId="cd722c2b-afe8-4c84-d9a7-4027b67a0e8f"
# Plot the result
map = folium.Map(location=[-9.2399,-62.836],zoom_start=9)
map.addLayer(image, {'bands': ['B4', 'B3', 'B2']}, 'image')
map.addLayer(polygons, {}, 'training polygons')
map.addLayer(classified_reduced, {'min': 0, 'max': 1, 'palette': ['red', 'green']}, 'deforestation') # Probably wont be able to plot it!
folium.LayerControl().add_to(map)
map
# + [markdown] id="t8mms6Bd7CaE"
# ## Unsupervised classification
# + [markdown] id="JfD47CRl7Fa6"
# The `ee.Clusterer` package handles unsupervised classification (or clustering) in Earth Engine. More details about each Clusterer are available in the [reference docs in the Code Editor](https://code.earthengine.google.com/#workspace).
#
# Clusterers are used in the same manner as classifiers in Earth Engine. The general workflow for clustering is:
#
# * Assemble features with numeric properties in which to find clusters.
# * Instantiate a clusterer. Set its parameters if necessary.
# * Train the clusterer using the training data.
# * Apply the clusterer to an image or feature collection.
# * Label the clusters.
#
#
# The training data is a `FeatureCollection` with properties that will be input to the clusterer.
#
# Unlike classifiers, there is no input class value for a Clusterer.
#
# Like classifiers, the data for the train and apply steps are expected to have the same number of values. When a trained clusterer is applied to an image or table, it assigns an integer cluster ID to each pixel or feature.
#
# These algorithms are currently based on the algorithms with the same name in [Weka](https://www.cs.waikato.ac.nz/ml/weka/).
#
# Here is a simple example of building and using an `ee.Clusterer`:
# + id="Nd2E-BStv8dF" outputId="900d390f-4d2e-4690-e549-e0ac1e68bc17" colab={"base_uri": "https://localhost:8080/", "height": 712}
# testing classification on a Borneo image
inputB = ee.ImageCollection('COPERNICUS/S2_SR').filterBounds(ee.Geometry.Point(117.5, 0)).filterDate('2020-01-01', '2020-12-31').sort('CLOUDY_PIXEL_PERCENTAGE').first()
inputC = ee.ImageCollection('projects/planet-nicfi/assets/basemaps/asia').filterBounds(ee.Geometry.Point(117.5, 0)).filterDate('2020-01-01', '2020-12-31').first()
regionB = ee.Geometry.Rectangle(117.5, 0.6, 117.8, 0.9)
mapB = folium.Map(location = [0.6, 117.5], zoom_start=10)
mapB.addLayer(inputB, {'min': 0, 'max': 2000, 'bands':['B4','B3','B2'],}, 'Borneo')
# mapB.addLayer(inputC, {'min': 0, 'max': 2000, 'bands':['R','G','B'],}, 'Borneo')
mapB.addLayer(ee.Image().paint(regionB, 0, 2), {}, 'region')
mapB
# + id="XSCjkJqgySMK" outputId="00f714ad-0efe-4d30-f1fb-1fd647590262" colab={"base_uri": "https://localhost:8080/", "height": 712}
# Make the training dataset.
trainingB = inputB.sample(region = regionB,scale= 4.77, numPixels= 5000)
# Instantiate the clusterer and train it.
clustererB = ee.Clusterer.wekaKMeans(10).train(trainingB)
# Cluster the input using the trained clusterer.
resultB = inputB.cluster(clustererB)
# Display the clusters with random colors.
mapB.addLayer(resultB.randomVisualizer(), {}, 'clusters')
folium.LayerControl().add_to(mapB)
mapB
# + id="r09eieJ27E6P" colab={"base_uri": "https://localhost:8080/", "height": 712} outputId="8e626a5f-3bd4-48d1-d476-84f1862042e7"
# Load a pre-computed Landsat composite for input.
input = ee.Image('LANDSAT/LE7_TOA_1YEAR/2001')
# Define a region in which to generate a sample of the input.
region = ee.Geometry.Rectangle(29.7, 30, 32.5, 31.7)
# Display the sample region.
map = folium.Map(location=[31,31.5],zoom_start=8)
map.addLayer(ee.Image().paint(region, 0, 2), {}, 'region')
map
# + id="-fzRJyrc90qx" colab={"base_uri": "https://localhost:8080/", "height": 712} outputId="db7f6719-1f26-41c1-b3d5-4162cf98b5d0"
# Make the training dataset.
training = input.sample(region = region,scale= 30, numPixels= 5000)
# Instantiate the clusterer and train it.
clusterer = ee.Clusterer.wekaKMeans(15).train(training)
# Cluster the input using the trained clusterer.
result = input.cluster(clusterer)
# Display the clusters with random colors.
map.addLayer(result.randomVisualizer(), {}, 'clusters')
folium.LayerControl().add_to(map)
map
|
3_Classification.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import freetype
from PIL import Image, ImageDraw
# Glyph cell geometry: full-width (CJK) glyphs are rendered in 18x18 px cells.
HEIGHT = 18
CJK_WIDTH = 18
# Half-width cell for ASCII.  NOTE(review): true division yields 9.0 (a float),
# and this constant is unused in the visible code -- confirm intent.
ASCII_WIDTH = CJK_WIDTH / 2
face = freetype.Face('NotoSansCJKsc-Light.otf')
face.set_pixel_sizes(CJK_WIDTH , HEIGHT )
# Packed 1-bit-per-pixel layout: bytes per bitmap row (= ceil(CJK_WIDTH / 8))...
FONT_ROW_SIZE = (CJK_WIDTH - 1) // 8 + 1
# ...and total bytes per glyph bitmap.
FONT_SIZE = FONT_ROW_SIZE * HEIGHT
def drawGlyph(writeFunc, g, x, y):
    """Rasterise the 1-bpp FreeType glyph *g* at offset (x, y).

    Walks the glyph's monochrome bitmap (MSB-first within each byte, rows
    padded to *pitch* bytes) and calls writeFunc(px, py, bit) once per pixel,
    with bit == 1 for set pixels and 0 otherwise.
    """
    bmp = g.bitmap
    rows, cols = bmp.rows, bmp.width
    pitch, data = bmp.pitch, bmp.buffer
    for row in range(rows):
        for col in range(cols):
            # Byte holding this pixel; bit 7 is the leftmost pixel of the byte.
            packed = data[pitch * row + col // 8]
            bit = (packed >> (7 - col % 8)) & 1
            writeFunc(x + col, y + row, bit)
def pilWriteFunc(x, y, v):
    """Pixel sink for drawGlyph: write into the global PIL pixel-access
    object `pix` -- white (0xFFFFFF) for a set bit, black (0) otherwise."""
    global pix
    pix[x,y] = 0xFFFFFF if v > 0 else 0
def loadGlyph(char):
    """Rasterise *char* into the shared face's glyph slot as 1-bit mono
    and return the glyph object."""
    face.load_char(char, flags=freetype.FT_LOAD_RENDER | freetype.FT_LOAD_TARGET_MONO)
    return face.glyph
def calcDrawOffset(g, targetWidth, targetHeight):
    """Position glyph *g* inside a targetWidth x targetHeight cell.

    Starts from the glyph's own bearings (bitmap_left, and a baseline placed
    2 px above the cell bottom), then clamps so the bitmap never overflows the
    cell on the right/bottom nor starts before the top-left corner.
    Returns an (offx, offy) tuple.
    """
    # Right/bottom clamp: shift left/up just enough for the bitmap to fit.
    offx = min(g.bitmap_left, targetWidth - g.bitmap.width)
    offy = min(targetHeight - g.bitmap_top - 2, targetHeight - g.bitmap.rows)
    # Top/left clamp: never draw at negative offsets.
    return (max(offx, 0), max(offy, 0))
def drawTestImage(str):
    """Render each character of *str* into a magenta-ruled proof-sheet image.

    Builds a 10-column x 20-row grid of CJK_WIDTH x HEIGHT cells (1 px grid
    lines between and around cells), draws every character's glyph into
    successive cells -- wrapping at the right edge -- and returns the PIL
    image.  Side effect: rebinds the module globals `im` and `pix`
    (pilWriteFunc draws through `pix`).

    NOTE(review): the parameter shadows the builtin `str` inside this function.
    """
    global im, pix
    imgWidth = CJK_WIDTH * 10 + 11
    imgHeight = HEIGHT * 20 + 21
    im = Image.new('RGB', (imgWidth, imgHeight))
    draw = ImageDraw.Draw(im)
    pix = im.load()
    # Magenta grid lines marking the cell boundaries.
    for x in range(0, imgWidth, CJK_WIDTH + 1):
        draw.line([x, 0, x, imgHeight], 0xFF00FF)
    for y in range(0, imgHeight, HEIGHT + 1):
        draw.line([0, y, imgWidth, y], 0xFF00FF)
    # Start just inside the first grid line.
    x = 1
    y = 1
    for ch in str:
        g = loadGlyph(ch)
        b = g.bitmap  # unused; kept as-is
        offx, offy = calcDrawOffset(g, CJK_WIDTH, HEIGHT)
        drawGlyph(pilWriteFunc, g, x + offx, y + offy )
        # Advance one cell; wrap to the next row at the right edge.
        x += CJK_WIDTH + 1
        if ((x + CJK_WIDTH) >= imgWidth):
            x = 1
            y += HEIGHT + 1
    return im
def bufWriteFunc(x, y, v):
    """Pixel sink for drawGlyph: OR bit *v* into the packed 1-bpp font
    buffer `buf`, silently dropping pixels outside the CJK_WIDTH x HEIGHT cell."""
    if x >= CJK_WIDTH or y >= HEIGHT:
        return
    # Row-major packed layout, MSB-first within each byte.
    byte_index = y * FONT_ROW_SIZE + x // 8
    buf[byte_index] |= v << (7 - x % 8)
def genCharFont(ch):
    """Rasterise character *ch* into a fresh FONT_SIZE-byte packed 1-bpp
    bitmap (rebinding the global `buf`) and return the buffer."""
    global buf
    buf = bytearray(FONT_SIZE)
    glyph = loadGlyph(ch)
    dx, dy = calcDrawOffset(glyph, CJK_WIDTH, HEIGHT)
    drawGlyph(bufWriteFunc, glyph, dx, dy)
    return buf
def genGB2312Font():
    """Rasterise the whole GB2312 character table into one packed 1-bpp blob.

    Iterates rows (qu) 1-87 x cells (wei) 1-94; each code point is the two-byte
    sequence (0xA0+qu, 0xA0+wei).  Returns the concatenation of one
    FONT_SIZE-byte bitmap per code point, in table order.
    """
    # Collect per-glyph buffers and join once at the end: the original
    # `ret += buf` on a bytes object re-copied the whole blob for every glyph
    # (quadratic in total output size).
    chunks = []
    for qu in range(1, 88):
        # Rows 10-15 are unassigned in GB2312; skip them entirely.
        if (qu >= 10) and (qu <= 15):
            continue
        for wei in range(1, 95):
            try:
                ch = bytes([0xA0 + qu, 0xA0 + wei]).decode('gb2312')
            except UnicodeDecodeError:
                # Unassigned cell inside an assigned row: render a placeholder.
                # (Was a bare `except:`, which would also have hidden real bugs.)
                ch = '?'
            print(qu, wei, ch)
            chunks.append(genCharFont(ch))
    return b''.join(chunks)
# -
# Generate the full GB2312 bitmap font and dump it to disk.
ret = genGB2312Font()
with open('out.bin', 'wb') as f:
    f.write(ret)
# Smoke tests: one glyph buffer, then a visual proof sheet of mixed text.
genCharFont('你')
drawTestImage('$一二三四口朴槿惠在申请书中称,自己被羁押已有2年多,期间颈椎病和腰椎间盘突出等病情恶化,需要外出治疗。今年4月17日,朴槿惠曾委托律师,向检方递交一份停止执行监禁的申请,理由是腰疼难忍,但遭到驳回。检方称,朴槿惠身体状况没有严重到无法服刑的程度。')
# Scratch cells: expected font size in bytes and bit-twiddling sanity checks.
6000*18*18/8
bytes()
1 << 7
|
Untitled.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
from sklearn.model_selection import GridSearchCV,KFold
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import accuracy_score,confusion_matrix
import warnings
warnings.filterwarnings("ignore")
# Load subject 1's mHealth recording: tab-separated sensor log, no header row.
df1 = pd.read_csv("mHealth_subject1.log", header=None, delimiter='\t')
# concat of a single frame is a no-op; kept so further subjects can be appended.
df = pd.concat([df1], axis=0)
# Separate the sensor features (columns 0-22)...
X = df.iloc[0:, 0:23].values
# ...from the activity label (column 23).
y = df.iloc[0:, 23].values
# Train/test split of the data.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
# Standardise features; fit on the training set only to avoid test-set leakage.
ss = StandardScaler()
X_trained_scaled = ss.fit_transform(X_train)
X_test_scaled = ss.transform(X_test)
# Project onto the first 12 principal components (fitted on training data only).
pca = PCA(n_components=12)
X_trained_scaled_pca = pca.fit_transform(X_trained_scaled)
X_test_scaled_pca = pca.transform(X_test_scaled)
# Fix: KFold with random_state requires shuffle=True -- recent scikit-learn
# raises a ValueError for random_state without shuffling, and without
# shuffling the seed had no effect anyway.
kfold = KFold(n_splits=5, shuffle=True, random_state=42)
# # Model 1 Random Forest Classification
rfc = RandomForestClassifier()
#without scaling and without pca
rfc.fit(X_train,y_train)
y_pred = rfc.predict(X_test)
# NOTE(review): accuracy_score's signature is (y_true, y_pred); the arguments
# are swapped throughout this notebook, which is harmless for accuracy only
# because the metric is symmetric.
accuracy_score(y_pred,y_test)
#with scaling and without pca
rfc.fit(X_trained_scaled,y_train)
y_pred = rfc.predict(X_test_scaled)
accuracy_score(y_pred,y_test)
#with scaling and with pca
rfc.fit(X_trained_scaled_pca,y_train)
y_pred = rfc.predict(X_test_scaled_pca)
accuracy_score(y_pred,y_test)
#with hypertuning: grid-search max_depth using 5-fold CV on the scaled data
param_grid = {'max_depth' : [10,20,30,40,50]}
gs_rf = GridSearchCV(rfc,param_grid = param_grid,cv = kfold,scoring = 'accuracy')
gs_rf.fit(X_trained_scaled,y_train)
y_pred = gs_rf.predict(X_test_scaled)
accuracy_score(y_pred,y_test)
# # Model 2 K-Nearest Neighbour
knn = KNeighborsClassifier()
#without scaling and without pca
knn.fit(X_train,y_train)
y_pred = knn.predict(X_test)
accuracy_score(y_test,y_pred)
#with scaling and without pca
knn.fit(X_trained_scaled,y_train)
y_pred = knn.predict(X_test_scaled)
accuracy_score(y_test,y_pred)
#with scaling and with pca
knn.fit(X_trained_scaled_pca,y_train)
y_pred = knn.predict(X_test_scaled_pca)
accuracy_score(y_test,y_pred)
# # Model 3 Stochastic Gradient Descent
sgd = SGDClassifier()
#without scaling and without pca
sgd.fit(X_train,y_train)
y_pred = sgd.predict(X_test)
accuracy_score(y_pred,y_test)
#with scaling and without pca
sgd.fit(X_trained_scaled,y_train)
y_pred = sgd.predict(X_test_scaled)
accuracy_score(y_pred,y_test)
#with scaling and with pca
sgd.fit(X_trained_scaled_pca,y_train)
y_pred = sgd.predict(X_test_scaled_pca)
accuracy_score(y_pred,y_test)
# NOTE(review): SGD loss 'log' was renamed to 'log_loss' in scikit-learn >= 1.1;
# confirm the installed version still accepts this value.
param_grid = {'loss': ['log'],'alpha': [10 ** x for x in range(-2, 1)],'l1_ratio': [0, 0.05, 0.1, 0.2, 0.5]}
gs_sgd = GridSearchCV(sgd,param_grid = param_grid,cv = kfold,scoring = 'accuracy')
gs_sgd.fit(X_trained_scaled,y_train)
y_pred = gs_sgd.predict(X_test_scaled)
accuracy_score(y_pred,y_test)
# # Confusion Matrix
#Confusion matrix of random forest as we are getting maximum accuracy for random forest.
# NOTE(review): at this point y_pred holds the *SGD grid-search* predictions
# (the last model run), not the random forest's; also confusion_matrix(y_pred,
# y_test) swaps the conventional (y_true, y_pred) order, transposing the matrix.
print(pd.DataFrame(confusion_matrix(y_pred,y_test)))
|
MHEALTHDATASET/Mhealth Dataset Project.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
# Load the article dump (semicolon-separated) and lower-case titles and bodies.
df = pd.read_csv('./data/csv/textos_limpios.csv', sep=";")
df['titulo'] = df['titulo'].str.lower()
df['texto'] = df['texto'].str.lower()
df.reset_index(inplace=True)
# Strip each article's own title out of its body text.
texto_limpio = [row['texto'].replace(str(row['titulo']), '')
                for _, row in df.iterrows()]
# Write the cleaned corpus, one article per line.
with open('./data/txt/texto_limpio.txt', 'w') as f:
    for item in texto_limpio:
        f.write("%s\n" % item)
|
limpieza_texto.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.2.0
# language: julia
# name: julia-1.2
# ---
# ## Load TSML filters
using TSML
# ## Create artificial data function
# +
using DataFrames
using Dates
using Random
ENV["COLUMNS"]=1000 # for dataframe column size
# Build a 4-day, 15-minute-resolution series with 30% of the values knocked
# out at random positions; returns (X, Y) where X is a Date/Value DataFrame
# (Value allows missing) and Y is an unrelated random target vector.
function generateXY()
    Random.seed!(123)  # fixed seed so the "random" gaps are reproducible
    gdate = DateTime(2014,1,1):Dates.Minute(15):DateTime(2014,1,5)
    gval = Array{Union{Missing,Float64}}(rand(length(gdate)))
    gmissing = floor(0.30*length(gdate)) |> Integer
    gndxmissing = Random.shuffle(1:length(gdate))[1:gmissing]
    X = DataFrame(Date=gdate,Value=gval)
    X.Value[gndxmissing] .= missing
    Y = rand(length(gdate))
    (X,Y)
end;
# -
# ## Generate artificial data with missing
(df,outY)=generateXY()
first(df,10)
# ## User Pipeline and Plotter to plot artificial data
# +
# Plot the raw artificial series (non-interactive/static backend).
pltr=Plotter(Dict(:interactive => false))
mypipeline = Pipeline(Dict(
        :transformers => [pltr]
    )
)
fit!(mypipeline, df)
transform!(mypipeline, df)
# -
# ## Get statistics including blocks of missing data
# +
statfier = Statifier(Dict(:processmissing=>true))
mypipeline = Pipeline(Dict(
        :transformers => [statfier]
    )
)
fit!(mypipeline, df)
res = transform!(mypipeline, df)
# -
# ## Use Pipeline: aggregate, impute, and plot
# +
# Hourly aggregation (DateValgator) and nearest-neighbour imputation (DateValNNer).
valgator = DateValgator(Dict(:dateinterval=>Dates.Hour(1)))
valnner = DateValNNer(Dict(:dateinterval=>Dates.Hour(1)))
mypipeline = Pipeline(Dict(
        :transformers => [valgator,pltr]
    )
)
fit!(mypipeline, df)
transform!(mypipeline, df)
# -
# ## Try real data
# +
# Readers/writers and filters for the CSV sample that ships with TSML.
fname = joinpath(dirname(pathof(TSML)),"../data/testdata.csv")
csvreader = CSVDateValReader(Dict(:filename=>fname,:dateformat=>"dd/mm/yyyy HH:MM"))
outputname = joinpath(dirname(pathof(TSML)),"/tmp/testdata_output.csv")
csvwriter = CSVDateValWriter(Dict(:filename=>outputname))
valgator = DateValgator(Dict(:dateinterval=>Dates.Hour(1)))
valnner = DateValNNer(Dict(:dateinterval=>Dates.Hour(1)))
stfier = Statifier(Dict(:processmissing=>true))
outliernicer = Outliernicer(Dict(:dateinterval=>Dates.Hour(1)));
# -
# ## Plot real data with missing values
# +
# Aggregate the real CSV series hourly, then plot it with the gaps visible.
mpipeline1 = Pipeline(Dict(
        :transformers => [csvreader,valgator,pltr]
    )
)
fit!(mpipeline1)
transform!(mpipeline1)
# -
# ## Get statistics including blocks of missing data
# +
mpipeline1 = Pipeline(Dict(
        :transformers => [csvreader,valgator,stfier]
    )
)
fit!(mpipeline1)
respipe1 = transform!(mpipeline1)
# -
# ## Try imputing and get statistics
# +
mpipeline2 = Pipeline(Dict(
        :transformers => [csvreader,valgator,valnner,stfier]
    )
)
fit!(mpipeline2)
respipe2 = transform!(mpipeline2)
# -
# ## Plot imputted data
# +
mpipeline2 = Pipeline(Dict(
        :transformers => [csvreader,valgator,valnner,pltr]
    )
)
fit!(mpipeline2)
transform!(mpipeline2)
# -
# ### Monotonicer
# +
# Readers for the three sample series (regular, monotonic, daily-monotonic)
# plus the shared filters used by the pipelines below.
regularfile = joinpath(dirname(pathof(TSML)),"../data/typedetection/regular.csv")
monofile = joinpath(dirname(pathof(TSML)),"../data/typedetection/monotonic.csv")
dailymonofile = joinpath(dirname(pathof(TSML)),"../data/typedetection/dailymonotonic.csv")
regularfilecsv = CSVDateValReader(Dict(:filename=>regularfile,:dateformat=>"dd/mm/yyyy HH:MM"))
monofilecsv = CSVDateValReader(Dict(:filename=>monofile,:dateformat=>"dd/mm/yyyy HH:MM"))
dailymonofilecsv = CSVDateValReader(Dict(:filename=>dailymonofile,:dateformat=>"dd/mm/yyyy HH:MM"))
valgator = DateValgator(Dict(:dateinterval=>Dates.Hour(1)))
valnner = DateValNNer(Dict(:dateinterval=>Dates.Hour(1)))
stfier = Statifier(Dict(:processmissing=>true))
mono = Monotonicer(Dict())
stfier = Statifier(Dict(:processmissing=>true))
outliernicer = Outliernicer(Dict(:dateinterval=>Dates.Hour(1)));
# -
# ## Plot of monotonic data
# +
# Each pipeline below is the same read -> aggregate -> impute chain with
# Monotonicer and/or Outliernicer inserted before the final Plotter.
monopipeline = Pipeline(Dict(
        :transformers => [monofilecsv,valgator,valnner,pltr]
    )
)
fit!(monopipeline)
transform!(monopipeline)
# -
# ## Plot after normalization of monotonic data
# +
monopipeline = Pipeline(Dict(
        :transformers => [monofilecsv,valgator,valnner,mono,pltr]
    )
)
fit!(monopipeline)
transform!(monopipeline)
# -
# ## Plot with Monotonicer and Outliernicer
# +
monopipeline = Pipeline(Dict(
        :transformers => [monofilecsv,valgator,valnner,mono,outliernicer,pltr]
    )
)
fit!(monopipeline)
transform!(monopipeline)
# -
# ## Plot of daily monotonic
# +
dailymonopipeline = Pipeline(Dict(
        :transformers => [dailymonofilecsv,valgator,valnner,pltr]
    )
)
fit!(dailymonopipeline)
transform!(dailymonopipeline)
# -
# ## Plot of daily monotonic data with Monotonicer
dailymonopipeline = Pipeline(Dict(
        :transformers => [dailymonofilecsv,valgator,valnner,mono,pltr]
    )
)
fit!(dailymonopipeline)
transform!(dailymonopipeline)
# ## Plot of daily monotonic with Monotonicer and Outliernicer
dailymonopipeline = Pipeline(Dict(
        :transformers => [dailymonofilecsv,valgator,valnner,mono,outliernicer,pltr]
    )
)
fit!(dailymonopipeline)
transform!(dailymonopipeline)
# ## Plot regular TS after monotonic normalization
# +
regpipeline = Pipeline(Dict(
        :transformers => [regularfilecsv,valgator,valnner,mono,pltr]
    )
)
fit!(regpipeline)
transform!(regpipeline)
# -
# ## Plot of regular TS with outlier normalization
regpipeline = Pipeline(Dict(
        :transformers => [regularfilecsv,valgator,valnner,mono,outliernicer,pltr]
    )
)
fit!(regpipeline)
transform!(regpipeline)
# ## TS Discovery by automatic data type classification
# +
using TSML: TSClassifier
Random.seed!(12)
# Train a random-forest data-type classifier on the bundled labelled series,
# then predict the type of each file in the testing directory.
trdirname = joinpath(dirname(pathof(TSML)),"../data/realdatatsclassification/training")
tstdirname = joinpath(dirname(pathof(TSML)),"../data/realdatatsclassification/testing")
modeldirname = joinpath(dirname(pathof(TSML)),"../data/realdatatsclassification/model")
tscl = TSClassifier(Dict(:trdirectory=>trdirname,
        :tstdirectory=>tstdirname,
        :modeldirectory=>modeldirname,
        :feature_range => 6:20,
        :num_trees=>10)
)
fit!(tscl)
dfresults = transform!(tscl)
# +
# The ground-truth type is encoded in each filename; strip the trailing digits
# and extension with a regex and score the predictions as percent accuracy.
apredict = dfresults.predtype
fnames = dfresults.fname
myregex = r"(?<dtype>[A-Z _ - a-z]+)(?<number>\d*).(?<ext>\w+)"
mtypes=map(fnames) do fname
    mymatch=match(myregex,fname)
    mymatch[:dtype]
end
sum(mtypes .== apredict)/length(mtypes) * 100
# -
|
docs/notebooks/StaticPlotting.jl.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Multibin NormSys
# %pylab inline
import pyhf
import logging
from pyhf import Model
def prep_data(sourcedata):
    """Build a two-channel pyhf Model from *sourcedata* and its observed data.

    The 'signal' channel carries a mu-scaled signal sample plus a background
    with an uncorrelated shape systematic; the 'control' channel is
    background-only with its own shape systematic.  Returns (data, pdf) where
    data is the observed bin counts in the model's channel order followed by
    the auxiliary data.
    """
    def background_sample(channel, uncrt_name):
        # Background counts plus a per-bin uncorrelated shape uncertainty.
        return {
            'data': sourcedata[channel]['bindata']['bkg'],
            'mods': [
                {
                    'name': uncrt_name,
                    'type': 'shapesys',
                    'data': sourcedata[channel]['bindata']['bkgerr'],
                },
            ],
        }

    spec = {
        'signal': {
            'signal': {
                'data': sourcedata['signal']['bindata']['sig'],
                'mods': [
                    {'name': 'mu', 'type': 'normfactor', 'data': None},
                ],
            },
            'background': background_sample('signal', 'uncorr_bkguncrt_signal'),
        },
        'control': {
            'background': background_sample('control', 'uncorr_bkguncrt_control'),
        },
    }
    pdf = Model(spec)

    # Observed counts must follow the model's channel ordering.
    observed = []
    for channel in pdf.config.channel_order:
        observed += sourcedata[channel]['bindata']['data']
    return observed + pdf.config.auxdata, pdf
# +
# Toy measurement: a 2-bin signal region and a 2-bin control region.
source = {
  "channels": {
    "signal": {
      "binning": [2,-0.5,1.5],
      "bindata": {
        "data": [110.0, 155.0],
        "bkgerr": [10.0, 10.0],
        "bkg": [100.0, 150.0],
        "sig": [10.0, 35.0]
      }
    },
    "control": {
      "binning": [2,-0.5,1.5],
      "bindata": {
        "data": [205.0, 345.0],
        "bkg": [200.0, 350.0],
        "bkgerr": [5.0, 10.0]
      }
    }
  }
}
d,pdf = prep_data(source['channels'])
print (d)
# Parameter start values and bounds suggested by the model.
init_pars = pdf.config.suggested_init()
par_bounds = pdf.config.suggested_bounds()
print (pdf.pdf(init_pars, d))
# Unconstrained (free-mu) maximum-likelihood fit...
unconpars = pyhf.unconstrained_bestfit(d,pdf,init_pars,par_bounds)
print ('UNCON',unconpars)
# print d
# print pdf.expected_data(unconpars)
# ...and the background-only fit with mu fixed to 0.
conpars = pyhf.constrained_bestfit(0.0,d,pdf,init_pars,par_bounds)
print ('CONS', conpars)
# print pdf.expected_data(conpars)
# # print '????',aux
# aux = pdf.expected_auxdata(conpars)
# # print '????',aux
# print 'ASIMOV',pyhf.generate_asimov_data(0.0,d,pdf,init_pars,par_bounds)
# +
def plot_results(testmus, cls_obs, cls_exp, test_size = 0.05):
    """Plot the CLs scan: observed curve (black), the five expected bands
    (grey) and a horizontal line at *test_size* (red).

    Bug fix: the original plotted every curve against the *global* ``mutests``
    instead of the ``testmus`` parameter, so calling it with any other scan
    silently drew the wrong x-axis (or crashed on a length mismatch).
    """
    plt.plot(testmus, cls_obs, c='k')
    # One grey curve per expected band (-2sigma ... +2sigma).
    for i in range(5):
        plt.plot(testmus, cls_exp[i], c='grey')
    plt.plot(testmus, [test_size] * len(testmus), c='r')
    plt.ylim(0, 1)
def invert_interval(testmus, cls_obs, cls_exp, test_size = 0.05):
    """Invert the CLs scan: interpolate the mu at which each curve crosses
    *test_size* (the upper limit).  Returns {'exp': [...5 bands...], 'obs': mu}.

    The curves fall with mu, so both axes are reversed to give np.interp the
    ascending x-values it requires.
    """
    mus_reversed = list(reversed(testmus))
    expected = [
        np.interp(test_size, list(reversed(list(band))), mus_reversed)
        for band in cls_exp
    ]
    observed = np.interp(test_size, list(reversed(cls_obs)), mus_reversed)
    return {'exp': expected, 'obs': observed}
# Single hypothesis test at mu=1: (observed CLs, five expected CLs bands).
pyhf.runOnePoint(1.0, d, pdf, init_pars, par_bounds)[-2:]
# Scan mu over [0, 5] and collect observed/expected CLs at each point.
mutests = np.linspace(0,5,61)
tests = [pyhf.runOnePoint(muTest, d,pdf,init_pars, par_bounds)[-2:] for muTest in mutests]
cls_obs = [test[0] for test in tests]
cls_exp = [[test[1][i] for test in tests] for i in range(5)]
plot_results(mutests, cls_obs, cls_exp)
# Read off the 95% CL upper limits on mu from the scan.
invert_interval(mutests, cls_obs, cls_exp)
# -
pdf.config.par_map
|
docs/examples/notebooks/multichannel-normsys.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# https://scipython.com/blog/visualizing-a-vector-field-with-matplotlib/
import numpy as np
import k3d
# +
def E(q, r0, x, y, z):
    """Return the electric field vector E=(Ex,Ey,Ez) due to charge q at r0.

    Uses the point-charge law E = q * r / |r|^3 (Gaussian-style units,
    constant prefactor dropped).  Accepts scalars or numpy arrays for x, y, z.

    Bug fix: the original computed the norm with np.hypot(dx, dy, dz), but
    hypot is a *binary* ufunc -- the third positional argument is its `out`
    array, so the z component was silently dropped from the distance.
    """
    dx, dy, dz = x - r0[0], y - r0[1], z - r0[2]
    den = np.sqrt(dx**2 + dy**2 + dz**2)**3
    return q * dx / den, q * dy / den, q * dz / den
# Grid of x, y points
nx, ny, nz = 20, 20, 1
x = np.linspace(-2, 2, nx, dtype=np.float32)
y = np.linspace(-2, 2, ny, dtype=np.float32)
# With nz=1, linspace returns only the start point (-0.), i.e. the z=0 plane.
z = np.linspace(-0, 2, nz, dtype=np.float32)
X, Y, Z = np.meshgrid(x, y, z)
# A +1/-1 charge pair (dipole) on the x-axis: (charge, (x, y, z)).
charges = [(-1, (1.0, 0.0, 0)), (1, (-1.0, 0.0, 0))]
# Electric field vector, E=(Ex, Ey, Ez), as separate components
Ex, Ey, Ez = np.zeros((ny, nx, nz)), np.zeros((ny, nx, nz)), np.zeros((ny, nx, nz))
# Superpose the contribution of each charge at every grid point.
for charge in charges:
    ex, ey, ez = E(*charge, x=X, y=Y, z=Z)
    Ex += ex
    Ey += ey
    Ez += ez
Ex, Ey, Ez = np.ravel(Ex), np.ravel(Ey), np.ravel(Ez)
# Normalise arrows to unit length, then shrink by 6 for display.
efield = np.stack([Ex, Ey, Ez]).T
efield /= np.stack([np.linalg.norm(efield, axis=1)]).T * 6
efield = np.float32(efield)
X, Y, Z = np.ravel(X), np.ravel(Y), np.ravel(Z)
# -
# k3d markers for the two charges plus the vector field of arrows.
positive_charge = k3d.points([charges[1][1]], color=0xff0000 , point_size=0.1)
negative_charge = k3d.points([charges[0][1]], color=0x0000ff , point_size=0.1)
electric_field = k3d.vectors(np.stack([X, Y, Z]).T, efield, head_size=0.5)
negative_charge + positive_charge + electric_field
# +
# Seed a ring of ntc test charges around each pole of the dipole.
ntc = 70
test_charges = []
for i in range(ntc):
    test_charges.append([np.cos(2*np.pi*i/ntc), np.sin(2*np.pi*i/ntc),0])
test_charges = np.array(test_charges)
#test_charges = np.random.uniform(-1,1, size=(ntc,3))
#test_charges[:,2] *= 0
# Shrink the unit circle to radius 0.1 (offsets around each charge).
test_charges /= np.stack([np.linalg.norm(test_charges, axis=1)]).T * 10
test_charges = np.float32(test_charges)
plot = k3d.plot(camera_auto_fit=False, grid_auto_fit=False)
pcharges = k3d.points(test_charges+positive_charge.positions, color=0x00ff00, point_size=0.05)
ncharges = k3d.points(-1*test_charges+negative_charge.positions, color=0x00ffff, point_size=0.05)
plot += negative_charge + positive_charge + electric_field #+ pcharges + ncharges
plot.display()
# +
# Euler-integrate the test charges along the field to trace the field lines.
pX, pY, pZ = pcharges.positions[:,0], pcharges.positions[:,1], pcharges.positions[:,2]
nX, nY, nZ = ncharges.positions[:,0], ncharges.positions[:,1], ncharges.positions[:,2]
ptrajectories, ntrajectories = [], []
for i in range(ntc):
    ptrajectories.append([])
    ntrajectories.append([])
for t in range(600):
    pEx, pEy, pEz = np.zeros(ntc), np.zeros(ntc), np.zeros(ntc)
    nEx, nEy, nEz = np.zeros(ntc), np.zeros(ntc), np.zeros(ntc)
    # Superpose the dipole field at each particle's current position;
    # negative test charges accumulate the opposite sign.
    for charge in charges:
        pex, pey, pez = E(*charge, x=pX, y=pY, z=pZ)
        nex, ney, nez = E(*charge, x=nX, y=nY, z=nZ)
        pEx += pex
        pEy += pey
        pEz += pez
        nEx -= nex
        nEy -= ney
        nEz -= nez
    # NOTE(review): the step size grows linearly with t -- presumably to speed
    # through the slow far-field region; confirm this is intentional vs a fixed dt.
    dt = np.float32(t/10000)
    pE = np.float32(np.vstack([pEx, pEy, pEz]).T)
    nE = np.float32(np.vstack([nEx, nEy, nEz]).T)
    pcharges.positions = pcharges.positions + pE*dt
    pX, pY, pZ = pcharges.positions[:,0], pcharges.positions[:,1], pcharges.positions[:,2]
    ncharges.positions = ncharges.positions + nE*dt
    nX, nY, nZ = ncharges.positions[:,0], ncharges.positions[:,1], ncharges.positions[:,2]
    p = np.vstack([pX,pY,pZ]).T
    n = np.vstack([nX,nY,nZ]).T
    # Record every particle's position for this time step.
    for i in range(ntc):
        ptrajectories[i].append(p[i])
        ntrajectories[i].append(n[i])
# +
# Convert to a list of (T, 3) arrays: one trajectory per test charge.
ptrajectories = np.array(ptrajectories)
ptrajectories = list(ptrajectories)
ntrajectories = np.array(ntrajectories)
ntrajectories = list(ntrajectories)
# -
def _trim(traj, keep):
    """Truncate *traj* at the first step where boolean mask *keep* is False."""
    stops = np.where(~keep)[0]
    return traj[:stops[0]] if stops.size else traj


def _inside_box(traj):
    """Per-step mask: True while the particle stays strictly inside [-2, 2]^3.

    Replaces the original (np.sum(traj < [2,2,2], axis=1)//3).astype(np.bool)
    trick -- the np.bool alias was deprecated and removed in NumPy 1.24, so the
    original raises AttributeError on current NumPy.
    """
    return np.all(traj < 2.0, axis=1) & np.all(traj > -2.0, axis=1)


for i in range(ntc):
    # Clip positive-charge trajectories to the plotting box, then stop them
    # once they enter the capture radius (0.25) around the negative charge.
    ptrajectories[i] = _trim(ptrajectories[i], _inside_box(ptrajectories[i]))
    ptrajectories[i] = _trim(
        ptrajectories[i],
        np.sqrt(np.sum((ptrajectories[i] - negative_charge.positions)**2, axis=1)) > 0.25)
    # Same for negative test charges, which are captured by the positive charge.
    ntrajectories[i] = _trim(ntrajectories[i], _inside_box(ntrajectories[i]))
    ntrajectories[i] = _trim(
        ntrajectories[i],
        np.sqrt(np.sum((ntrajectories[i] - positive_charge.positions)**2, axis=1)) > 0.25)
# Draw every trimmed trajectory as a black field line.
for i in range(ntc):
    plot += k3d.line(ptrajectories[i], shader='simple', color=0x000000)
    # NOTE: 0x00000 has five digits but still equals 0 (black), same as above.
    plot += k3d.line(ntrajectories[i], shader='simple', color=0x00000)
|
Processed_examples/Dipole.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# ### Advanced in silico drug design workshop. Olomouc, 21-25 January, 2019.
# ### Deep Learning Tutorial: Multi-Layer Perceptron with Keras
#
# Dr <NAME>
#
# Research Scientist
#
# IOCB - Institute of Organic Chemistry and Biochemistry of the Czech Academy of Sciences
# Prague, Czech Republic
#
# &
# CEITEC - Central European Institute of Technology
# Brno, Czech Republic
#
# email: <EMAIL>
#
# website: https://sites.google.com/site/thomasevangelidishomepage/
#
# ##Objectives:
# In this tutorial you will learn how to construct a simple Multi-Layer Perceptron model with Keras. Specifically you will learn to:
# * Create and add layers including weight initialization and activation.
# * Compile models including optimization method, loss function and metrics.
# * Fit models, including epochs and batch size.
# * Model predictions.
# * Summarize the model.
# * Train an initial model on large relevant data and transfer the hidden layers of that model to a new one, which will be trained with fewer, more focused data (Transfer Learning).
from keras.wrappers.scikit_learn import KerasRegressor, KerasClassifier
from keras.callbacks import EarlyStopping
from rdkit.Chem import AllChem, SDMolSupplier
from rdkit import DataStructs
import numpy as np
from keras.models import Sequential, Input, Model
from keras.layers import Dense
from scipy.stats import kendalltau, pearsonr
from sklearn.metrics import mean_squared_error, roc_auc_score
from sklearn.model_selection import cross_validate, KFold
# #### Reading molecules and activity from SDF
# +
# Parse the SD file, keeping valid molecules and their pIC50 activity values.
fname = "data/cdk2.sdf"
mols = []
y = []
for mol in SDMolSupplier(fname):
    # RDKit yields None for records it cannot parse; skip those.
    if mol is not None:
        mols.append(mol)
        y.append(float(mol.GetProp("pIC50")))
# -
# #### Calculate descriptors (fingerprints) and convert them into numpy array
# generate binary Morgan fingerprint with radius 2 (one bit vector per molecule)
fp = [AllChem.GetMorganFingerprintAsBitVect(m, 2) for m in mols]
def rdkit_numpy_convert(fp):
    """Convert a list of RDKit bit vectors into a 2-D numpy array
    (one row per fingerprint)."""
    rows = []
    for bitvect in fp:
        # ConvertToNumpyArray fills (and resizes) the target array in place.
        row = np.zeros((1,))
        DataStructs.ConvertToNumpyArray(bitvect, row)
        rows.append(row)
    return np.asarray(rows)
# Stack the per-molecule fingerprints into the feature matrix.
x = rdkit_numpy_convert(fp)
# fix random seed for reproducibility
seed = 2019
np.random.seed(seed)
mol_num, feat_num = x.shape
print("# molecules for training = %i, # of features = %i\n" % (mol_num, feat_num))
# #### Define a function to create a simple feed forward network with one fully connected hidden layer of 300 neurons. The network uses the rectifier activation function for the hidden layer. No activation function is used for the output layer because it is a regression problem and we are interested in predicting numerical values directly without transform. The ADAM algorithm is employed to optimize the loss function.
# create model
def MLP1(feat_num, loss):
    """Single-hidden-layer regression MLP.

    Architecture: feat_num inputs -> 300 ReLU units -> 1 linear output,
    normal weight initialisation, compiled with *loss* and the Adam optimizer.
    """
    model = Sequential()
    model.add(Dense(300, input_dim=feat_num, kernel_initializer='normal', activation='relu'))
    # Linear output for direct regression of the target value.
    model.add(Dense(1, kernel_initializer='normal'))
    model.compile(loss=loss, optimizer='adam')
    return model
# #### Print summary of layers and trainable parameters.
# Show layer shapes and trainable-parameter counts.
MLP1(feat_num, 'mean_squared_error').summary()
# #### Evaluate model with Keras wrapper of scikit-learn (faster and easier), using Mean Squared Error as the loss function, 300 training epochs and batch size 1/8 of the training set.
# Wrap the model builder in a scikit-learn regressor interface:
# MSE loss, 300 epochs, batch size = 1/8 of the training set.
estimator = KerasRegressor(build_fn=MLP1,
                           feat_num=feat_num,
                           loss='mean_squared_error',
                           epochs=300,
                           batch_size=int(x.shape[0]/8),
                           verbose=0)
# #### Define our own evaluation metrics for the model: 1) Kendall's tau (ranking correlation), 2) Pearson's R (correlation), 3) Mean Squared Error. The evaluation will be done with 5-fold cross-validation.
# +
def kendalls_tau(estimator, X, y):
    """Scorer: Kendall's tau rank correlation between the estimator's
    predictions on X and the true values y."""
    return kendalltau(estimator.predict(X), y)[0]
def pearsons_r(estimator, X, y):
    """Scorer: Pearson correlation coefficient between the estimator's
    predictions on X and the true values y."""
    return pearsonr(estimator.predict(X), y)[0]
def MSE(estimator, X, y):
    """Scorer: mean squared error of the estimator's predictions on (X, y)."""
    return mean_squared_error(estimator.predict(X), y)
scoring = {'tau': kendalls_tau, 'R':pearsons_r, 'MSE':MSE}
kfold = KFold(n_splits=5, random_state=seed)
scores = cross_validate(estimator, x, y, scoring=scoring, cv=kfold, return_train_score=False)
print(scores)
print("\nResults: average tau=%f+-%f, average R=%f+-%f, average MSE=%f+-%f\n" %
(scores['test_tau'].mean(), scores['test_tau'].std(),
scores['test_R'].mean(), scores['test_R'].std(),
scores['test_MSE'].mean(), scores['test_MSE'].std()))
# -
# #### Running this code gives us an estimate of the model’s performance on the problem for unseen data. The result reports the average and standard deviation of each metric across all 5 folds of the cross validation evaluation.
#
# #### Now let's try Mean Absolute Error as a loss function.
estimator = KerasRegressor(build_fn=MLP1,
feat_num=feat_num,
loss='mean_absolute_error',
epochs=300,
batch_size=int(x.shape[0]/8),
verbose=0)
scores = cross_validate(estimator, x, y, scoring=scoring, cv=kfold, return_train_score=False)
print (scores)
print("\nResults: average tau=%f+-%f, average R=%f+-%f, average MSE=%f+-%f\n" %
(scores['test_tau'].mean(), scores['test_tau'].std(),
scores['test_R'].mean(), scores['test_R'].std(),
scores['test_MSE'].mean(), scores['test_MSE'].std()))
# #### We see a subtle performance increase.
#
# #### Now let's add an extra hidden layer to the network with 200 neurons, to see if the performance improves further.
# create model
def MLP2(feat_num, loss):
    """Build a two-hidden-layer feed-forward regressor.

    Like MLP1 but with a second fully connected hidden layer of 200 ReLU
    units between the 300-unit layer and the single linear output neuron.
    Compiled with the given loss and the ADAM optimizer.
    """
    model = Sequential()
    model.add(Dense(300, input_dim=feat_num, kernel_initializer='normal', activation='relu'))
    model.add(Dense(200, input_dim=feat_num, kernel_initializer='normal', activation='relu'))
    model.add(Dense(1, kernel_initializer='normal'))
    model.compile(loss=loss, optimizer='adam')
    return model
# #### Print summary of layers and trainable parameters.
MLP2(feat_num, 'mean_absolute_error').summary()
# #### We increase the training epochs to 500 because the addition of an extra layer increased the trainable variables.
estimator = KerasRegressor(build_fn=MLP2,
feat_num=feat_num,
loss='mean_absolute_error',
epochs=500,
batch_size=int(x.shape[0]/8),
verbose=0)
# Evaluate MLP2 with 5-fold cross-validation and report the custom metrics.
scores = cross_validate(estimator, x, y, scoring=scoring, cv=kfold, return_train_score=False)
# BUG FIX: `print scores` is Python 2 statement syntax and raises a
# SyntaxError under the Python 3 kernel declared by this notebook.
print(scores)
print("\nResults: average tau=%f+-%f, average R=%f+-%f, average MSE=%f+-%f\n" %
      (scores['test_tau'].mean(), scores['test_tau'].std(),
       scores['test_R'].mean(), scores['test_R'].std(),
       scores['test_MSE'].mean(), scores['test_MSE'].std()))
# #### We don't see statistically significant improvement because our training set is small (436 molecules).
# # Transfer Learning
# #### We will use all binding assays for CDK2 from CHEMBL (1519 molecules) to train a network and then we will transfer its hidden layer to a new network which we shall train with the smaller training set that we have been using so far.
#
# #### The following block of code is just for data preparation, you can go through it very fast.
# +
# LOAD TRAINING DATA FOR TRANSFER LEARNING
fname = "data/cdk2_large.sdf"
mols = []
molnames = []
for mol in SDMolSupplier(fname):
if mol is not None:
molname = mol.GetProp("_Name")
if not molname in molnames:
molnames.append(molname)
mols.append(mol)
# -
molname2pK_dict = {}
with open("data/cdk2_pK.dat", 'r') as f:
for line in f:
molname, pK = line.split()
if not molname in molname2pK_dict.keys():
molname2pK_dict[molname] = float(pK)
molnames1 = set(molnames)
molnames2 = set(molname2pK_dict.keys())
common_molnames = molnames1.intersection(molnames2)
y_transf = [molname2pK_dict[molname] for molname in molnames if molname in common_molnames]
# #### Generate binary Morgan fingerprint with radius 2 as feature vectors for training.
fp = [AllChem.GetMorganFingerprintAsBitVect(m, 2) for m in mols if m.GetProp("_Name") in common_molnames]
x_transf = rdkit_numpy_convert(fp)
mol_num, feat_num = x_transf.shape
print("# molecules for transfer training = %i, # of features = %i\n" % (mol_num, feat_num))
# #### We train a network with one fully connected hidden layer of 300 neurons, like in our first example.
net = MLP1(feat_num=feat_num,
loss='mean_absolute_error')
net.fit(x_transf,
y_transf,
epochs=300,
batch_size=int(x_transf.shape[0]/8),
verbose=0)
# #### Below we create a function that transfers hidden layers (up to index 'idx', starting from 0) to a new network. 'lhl_sizes' is a tuple defining the number of neurons in each hidden layer, e.g. (200,100) means two hidden layers with 200 and 100 neurons respectively.
def transf_MLP(feat_num, idx, lhl_sizes, loss='mean_absolute_error'):
    """Build a transfer-learning network re-using frozen layers of the global `net`.

    Layers 0..idx of the pre-trained `net` are stacked (with training
    disabled) on a fresh Input of width `feat_num`, followed by new
    trainable ReLU Dense layers of the sizes given in `lhl_sizes`
    (e.g. (200, 100) means two hidden layers of 200 and 100 neurons),
    and a final single linear output neuron.

    Returns the compiled functional `Model` (ADAM optimizer, given loss).
    """
    global net # net is a networks and cannot be pickled! Therefore it cannot be an input argument for cross_validate() to work!
    inp = Input(shape=(feat_num,))
    shared_layer = net.layers[0]
    shared_layer.trainable = False # deactivate training in all re-used layers of MLP1
    out_tensor = shared_layer(inp)
    # idx = 1 # index of desired layer
    # Transfer (and freeze) the remaining pre-trained layers up to index idx.
    for i in range(1,idx+1):
        shared_layer = net.layers[i] # deactivate training in all re-used layers of MLP1
        shared_layer.trainable = False # deactivate training in all re-used layers of MLP1
        out_tensor = shared_layer(out_tensor)
    # Here add all the new (trainable) layers
    for l_size in lhl_sizes:
        out_tensor = Dense(l_size, kernel_initializer='normal', activation='relu')(out_tensor)
    # Close the network with a single linear output neuron (regression)
    out_tensor = Dense(1, kernel_initializer='normal')(out_tensor)
    # Create the model
    transf_model = Model(inp, out_tensor)
    transf_model.compile(loss=loss, optimizer='adam')
    return transf_model
estimator = KerasRegressor(build_fn=transf_MLP,
feat_num=feat_num,
idx=0,
lhl_sizes=(300, 200),
loss='mean_absolute_error',
epochs=300,
batch_size=int(x.shape[0]/8),
verbose=0)
# #### Measure the performance of the new hybrid network on our initial small dataset with 5-fold cross-validation.
scores = cross_validate(estimator, x, y, scoring=scoring, cv=kfold, return_train_score=False)
print (scores)
print("\nResults: average tau=%f+-%f, average R=%f+-%f, average MSE=%f+-%f\n" %
(scores['test_tau'].mean(), scores['test_tau'].std(),
scores['test_R'].mean(), scores['test_R'].std(),
scores['test_MSE'].mean(), scores['test_MSE'].std()))
# #### We see an impressive performance gain!
# Re-run the 5-fold cross-validation of the transfer-learning estimator.
scores = cross_validate(estimator, x, y, scoring=scoring, cv=kfold, return_train_score=False)
# BUG FIX: `print scores` is Python 2 statement syntax and raises a
# SyntaxError under the Python 3 kernel declared by this notebook.
print(scores)
print("\nResults: average tau=%f+-%f, average R=%f+-%f, average MSE=%f+-%f\n" %
      (scores['test_tau'].mean(), scores['test_tau'].std(),
       scores['test_R'].mean(), scores['test_R'].std(),
       scores['test_MSE'].mean(), scores['test_MSE'].std()))
# #### NOTE:
# Transfer learning does not always improve the performance. In this case we used a larger set of compounds binding to the same receptor (CDK2) to pretrain a network and transfer its hidden layer to another one. If we had done the same but with compounds binding to CDK1 (59% sequence identity with CDK2) then the performance would have been worse. So be cautious where you apply transfer learning and which training data you use!
# As a home work you can apply the same procedure by instead of "data/cdk2_large.sdf" and "data/cdk2_pK.dat", to use "data/cdk1_large.sdf" and "data/cdk1_pK.dat".
# ## Train a Classifier network instead of a Regressor.
# #### The difference with our Regressor MLP1 is that the output layer contains a single neuron and uses the sigmoid activation function in order to produce a probability output in the range of 0 to 1 that can easily and automatically be converted to class values.
# create a Classifier
def MLP3(feat_num, loss='binary_crossentropy'):
    """Build a one-hidden-layer feed-forward binary classifier.

    One fully connected hidden layer of 300 ReLU units and a single
    sigmoid output neuron, so the network emits a probability in [0, 1]
    that can be thresholded into class values.
    """
    net = Sequential()
    net.add(Dense(300, input_dim=feat_num, kernel_initializer='normal', activation='relu'))
    net.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
    # BUG FIX: the original called compile() twice; the second call
    # (optimizer='adam', no metrics) silently replaced the first
    # (optimizer='rmsprop', metrics=['accuracy']), discarding the accuracy
    # metric that was clearly intended for a classifier. Compile once with
    # ADAM (consistent with MLP1/MLP2) and keep the accuracy metric.
    net.compile(loss=loss, optimizer='adam', metrics=['accuracy'])
    return net
# #### Load and prepare the Blood Brain Barrier permeability data for classification.
# +
fname = "data/logBB.sdf"
mols = []
y = []
for mol in SDMolSupplier(fname):
if mol is not None:
mols.append(mol)
y.append(float(mol.GetProp("logBB_class")))
# -
# #### Generate binary Morgan fingerprint with radius 2 for training.
fp = [AllChem.GetMorganFingerprintAsBitVect(m, 2) for m in mols]
x = rdkit_numpy_convert(fp)
mol_num, feat_num = x.shape
print("# molecules for training = %i, # of features = %i\n" % (mol_num, feat_num))
# #### Print summary of layers and trainable parameters.
MLP3(feat_num, 'binary_crossentropy').summary()
estimator = KerasClassifier(build_fn=MLP3,
feat_num=feat_num,
loss='binary_crossentropy',
epochs=100, # ~300 is the optimum value, but we use 1000 to see the effect of overfitting
batch_size=int(x.shape[0]/8),
verbose=0)
# #### Use this time a classification metric to score the predictions, the area under the Receiver Operating Characteristic Curve (AUC-ROC).
# +
def AUC_ROC(estimator, X, y):
    """Scorer: area under the ROC curve for the estimator's predictions on X.

    BUG FIX: sklearn's signature is roc_auc_score(y_true, y_score); the
    original passed the predictions as y_true and the true labels as the
    scores. AUC is not symmetric in its arguments, so the original scored
    the wrong curve whenever predictions and labels disagreed.
    """
    preds = estimator.predict(X)
    return roc_auc_score(y, preds)
scoring = {'roc': AUC_ROC}
kfold = KFold(n_splits=5, random_state=seed)
scores = cross_validate(estimator, x, y, scoring=scoring, cv=kfold, return_train_score=False)
print (scores)
print("\nResults: average AUC-ROC=%f+-%f\n" %
(scores['test_roc'].mean(), scores['test_roc'].std()))
# -
# #### The MLP classifier had a relatively good performance. Compare it with the ML model performance from the QSAR tutorial.
# #### Let's use early stopping to see if the performance improves even further.
scores = cross_validate(estimator, x, y, scoring=scoring, cv=kfold, return_train_score=False,
fit_params={'callbacks': [EarlyStopping(patience=3)]})
print (scores)
print("\nResults: average AUC-ROC=%f+-%f\n" %
(scores['test_roc'].mean(), scores['test_roc'].std()))
# ## Try to recreate each of the network architectures that you created with Keras using the following online tool:
# #### http://playground.tensorflow.org
# #### We don't see statistically significant change, probably because the training set is small (321 molecules). In real case problems you have thousands of training samples and there the effects of overfitting are more evident.
|
Multilayer_Perceptron_Keras/MLP_Keras.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import sys
import numpy as np
import math
import matplotlib.pyplot as plt
import pandas as pd
import glob
import os
from IPython.core.pylabtools import figsize
figsize(12, 8)
# Enter no. of re-runs
runs=5
# Combine data files for multiple runs
for f in glob.glob("3D/PCF_run*.dat"):
os.system("cat "+f+" >> all_PCF_data_3D.dat")
### Add same again for 2D
for f in glob.glob("2D/PCF_run*.dat"):
os.system("cat "+f+" >> all_PCF_data_2D.dat")
# Plot multiple systems together
file1= np.genfromtxt('3D/PCF_run1.dat', delimiter = ' ') # Single run to compare to
all_PCF_data_3D = np.genfromtxt('all_PCF_data_3D.dat', delimiter = ' ') # Combined data from all runs
### Add same again for 2D
file2= np.genfromtxt('2D/PCF_run1.dat', delimiter = ' ') # Single run to compare to
all_PCF_data_2D = np.genfromtxt('all_PCF_data_2D.dat', delimiter = ' ') # Combined data from all runs
plt.scatter(all_PCF_data_3D[:,0], abs(all_PCF_data_3D[:,1]), marker="o", color="orange", s=20, label='3D Cu/ Zn disorder')
plt.scatter(all_PCF_data_2D[:,0], abs(all_PCF_data_2D[:,1]), marker="x", color="green", s=20, label='2D Cu/ Zn disorder')
# Add polynomial line of best fit to all_data
x_3D = all_PCF_data_3D[:,0]
y_3D = all_PCF_data_3D[:,1]
### Add y2 for 2D? + edit below
x_2D = all_PCF_data_2D[:,0]
y_2D = all_PCF_data_2D[:,1]
# Polynomial fit for 3D Cu/ Zn disorder
z_3D = np.polyfit(x_3D, y_3D, 13) # deg of polynomial just chosen to give best shape to curve
f_3D = np.poly1d(z_3D)
x_new_3D = np.linspace(min(x_3D), max(x_3D), 50)
y_new_3D = f_3D(x_new_3D)
plt.plot(x_3D,y_3D,'o', x_new_3D, y_new_3D, color="orange")
# Polynomial fit for 2D Cu/ Zn disorder
z_2D = np.polyfit(x_2D, y_2D, 13) # deg of polynomial just chosen to give best shape to curve
f_2D = np.poly1d(z_2D)
x_new_2D = np.linspace(min(x_2D), max(x_2D), 50)
y_new_2D = f_2D(x_new_2D)
plt.plot(x_2D,y_2D,'x', x_new_2D, y_new_2D, color="green")
plt.legend(loc='upper right', frameon=False)
plt.rcParams.update({'font.size': 20})
plt.xlabel('Simulation temperature (K)')
plt.ylabel('Nearest-neighbour Zn-Zn PCF peak intensity')
plt.legend(frameon=False)
#plt.xlim([150,850])
plt.rcParams.update({'font.size': 16})
plt.savefig("PCF_3D+2D.png")
plt.show()
# +
import numpy as np
import matplotlib.pyplot as plt
import glob
import os
from scipy.optimize import curve_fit
from IPython.core.pylabtools import figsize
figsize(12, 8)
# Combine data files for multiple runs
for f in glob.glob("3D/PCF_run*.dat"):
os.system("cat "+f+" >> all_PCF_data_3D.dat")
### Add same again for 2D
for f in glob.glob("2D/PCF_run*.dat"):
os.system("cat "+f+" >> all_PCF_data_2D.dat")
# Plot multiple systems together
all_PCF_data_3D = np.genfromtxt('all_PCF_data_3D.dat', delimiter = ' ') # Combined data from all runs
all_PCF_data_2D = np.genfromtxt('all_PCF_data_2D.dat', delimiter = ' ') # Combined data from all runs
plt.scatter(all_PCF_data_3D[:,0], all_PCF_data_3D[:,1], marker="o", color="orange", label='3D Cu/ Zn disorder')
plt.scatter(all_PCF_data_2D[:,0], all_PCF_data_2D[:,1], marker="x", color="green", label='2D Cu/ Zn disorder')
x_3D = all_PCF_data_3D[:,0]
x_2D = all_PCF_data_2D[:,0]
x_plot_3D = np.linspace(min(x_3D), max(x_3D), num=1200)
x_plot_2D = np.linspace(min(x_2D), max(x_2D), num=1200)
print(x_plot_3D)
def fsigmoid_lin(x, a, b, c, d, e):
    """Sigmoid (slope a, centre b, denominator offset c) on a quadratic baseline d*x**2 + e."""
    baseline = d * x**2 + e
    sigmoid = 1.0 / (c + np.exp(-a * (x - b)))
    return sigmoid + baseline
def fsin(x, a, b, c):
    """Sine wave of amplitude a and angular frequency b on a constant offset c."""
    return c + a * np.sin(b * x)
def farctan(x, a, b, c, d):
    """Arctangent step of amplitude a, horizontal shift b, scale d, on offset c."""
    return c + a * np.arctan(d * x - b)
def ftanh(x, a, b, c, d):
    """Mirrored tanh step of amplitude a, shift b, scale d, on offset c."""
    return c + a * np.tanh(-d * x - b)
#popt, pcov = curve_fit(fsigmoid_lin, all_PCF_data_3D[:,0], abs(all_PCF_data_3D[:,1]), p0=[0.1, 700.0, 10.0, 1,2.0])
#print(popt)
#plt.plot(fsigmoid_lin(x_plot_3D, *popt))
#popt, pcov = curve_fit(fsin, all_PCF_data_3D[:,0], abs(all_PCF_data_3D[:,1]), p0=[0.15, 1, 700])
#plt.plot(fsin(x_plot_3D, *popt))
#popt, pcov = curve_fit(farctan, all_PCF_data_3D[:,0], abs(all_PCF_data_3D[:,1]), p0=[5., 650., 3.5,2.5])
#plt.plot(farctan(x_plot_3D-90, *popt), color='orange')
#popt, pcov = curve_fit(ftanh, all_PCF_data_3D[:,0], abs(all_PCF_data_3D[:,1]), p0=[0.5, 650., 0.05,2.5])
#plt.plot(ftanh(x_plot_3D, *popt), color='orange')
#popt, pcov = curve_fit(farctan, all_PCF_data_2D[:,0], abs(all_PCF_data_2D[:,1]), p0=[5., 650., 3.5,2.5])
#plt.plot(farctan(x_plot_2D-90, *popt), color='green')
plt.legend(loc='upper right', frameon=False)
plt.rcParams.update({'font.size': 24})
plt.xlabel('Simulation temperature (K)')
plt.ylabel('n.n. Zn-Zn PCF peak intensity')
plt.legend(frameon=False)
plt.xlim([200,1200])
plt.savefig("fig7_PCF_3D+2D_no_line.png")
plt.show()
# -
|
AnalysisNotebooks/DisorderAnalysis/PCF_2D+3D.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# tgb - 2/8/2019 - This script aims at predicting convective heating, longwave cooling and shortwave heating separately. It follows the notebook 002 that predicts:
# ***
# [PHQ, PHCLDLIQ, PHCLDICE, TPHYSTND, DTVKE, FSNT, FSNS, FLNT, FLNS, PRECT, PRECTEND, PRECST, PRECSTEN] as a function of:
# [QBP, QCBP, QIBP, TBP, VBP, Qdt_adiabatic, QCdt_adiabatic, QIdt_adiabatic, Tdt_adiabatic, Vdt_adiabatic, PS, SOLIN, SHFLX, LHFLX]
# ***
# to now predict:
# ***
# [PHQ, PHCLDLIQ, PHCLDICE, TPHYSTND, QRL, QRS, DTVKE, FSNT, FSNS, FLNT, FLNS, PRECT, PRECTEND, PRECST, PRECSTEN] as a function of:
# [QBP, QCBP, QIBP, TBP, VBP, Qdt_adiabatic, QCdt_adiabatic, QIdt_adiabatic, Tdt_adiabatic, Vdt_adiabatic, PS, SOLIN, SHFLX, LHFLX]
# ***
# Two remarks:
# 1) The energy conservation constraint will now involve a mass-weighted integral of QRL and QRS, knowing that:
# $$
# \int_{0}^{p_{s}}\frac{dp}{g}c_{p}QRL\left(p\right)=FLNS-FLNT
# $$
# $$
# \int_{0}^{p_{s}}\frac{dp}{g}c_{p}QRS\left(p\right)=FSNT-FSNS
# $$
# 2) For consistency, we should calculate FLNS, FSNS as residuals from (QRL,FLNT) and (QRS,FSNS) respectively
# ## 1) Preprocess all the necessary variables
# ### 1.1) Build feature, target and initial normalization files
# #!ln -s /filer/z-sv-pool12c/t/Tom.Beucler/SPCAM/CBRAIN-CAM/cbrain \
# #/filer/z-sv-pool12c/t/Tom.Beucler/SPCAM/CBRAIN-CAM/notebooks/tbeucler_devlog/cbrain
from cbrain.imports import *
from cbrain.data_generator import *
from cbrain.models import *
from cbrain.losses import *
from cbrain.utils import limit_mem
import tensorflow as tf
import tensorflow.math as tfm
from tensorflow.keras.layers import *
from tensorflow.keras.models import *
import xarray as xr
import numpy as np
# Otherwise tensorflow will use ALL your GPU RAM for no reason
limit_mem()
TRAINDIR = '/local/Tom.Beucler/SPCAM_PHYS/'
DATADIR = '/project/meteo/w2w/A6/S.Rasp/SP-CAM/sp32fbp_andkua/'
PREFIX = '32_col_rad_12m_'
# %cd /filer/z-sv-pool12c/t/Tom.Beucler/SPCAM/CBRAIN-CAM
# tgb - 2/8/2019 - We need to modify the pp_config file to incorporate the 3D radiative heating profiles, as well as the layers of the network to conserve mass, energy, and infer the surface radiative balance.
# The new pp file will be called "32col_rad_tbeucler_local.yml", while the prefix will be "32_col_rad_12m_"
#
# !python cbrain/Test01_preprocess_aqua.py \
# --config pp_config/32col_rad_tbeucler_local.yml \
# --aqua_names '*.h1.0000-*-0[1-12]-*' \
# --out_pref 32_col_rad_12m_train
# ### 1.2) Create validation dataset
#
# !python cbrain/Test01_preprocess_aqua.py \
# --config pp_config/32col_rad_tbeucler_local.yml \
# --aqua_names '*.h1.0001-*-0[1-3]-*' \
# --out_pref 32_col_rad_12m_valid --ext_norm Nope
# ### 1.3) Shuffle the training dataset
# tgb - 1/16/2019 - Adapted from Stephan's entire worlflow for 32 column run
# tgb - 2/8/2019 - Make sure to reserve enough RAM for the shuffling, especially for shuffling the training dataset
# %cd /filer/z-sv-pool12c/t/Tom.Beucler/SPCAM/CBRAIN-CAM
# !python cbrain/shuffle_ds.py --pref $TRAINDIR/32_col_rad_12m_train
# !python cbrain/shuffle_ds.py --pref $TRAINDIR/32_col_rad_12m_valid
# ### 1.4) Change the output's normalization using pressure levels
# tgb - 2/6/2019 - See notebook 001 for careful test of each of the steps below
# tgb - 2/8/2019 - Now y=[PHQ, PHCLDLIQ, PHCLDICE, TPHYSTND, QRL, QRS, DTVKE, FSNT, FSNS, FLNT, FLNS, PRECT, PRECTEND, PRECST, PRECSTEN], so we have to multiply the first 6 * 30 elements by dP and the next 30 elements by dP / dt.
# +
ds = xr.open_dataset(TRAINDIR + PREFIX + 'train_norm.nc')
# Open the pickle files containing the pressure converters
with open(os.path.join('/filer/z-sv-pool12c/t/Tom.Beucler/SPCAM/CBRAIN-CAM/cbrain', 'hyai_hybi.pkl'), 'rb') as f:
hyai, hybi = pickle.load(f)
# Takes representative value for PS since purpose is normalization
PS = 1e5; P0 = 1e5;
P = P0*hyai+PS*hybi; # Total pressure [Pa]
dP = P[1:]-P[:-1]; # Differential pressure [Pa]
dt = 30*60; # timestep
ds.target_conv[:210] = np.multiply(ds.target_conv[:210],np.concatenate((dP,dP,dP,dP,dP,dP,np.divide(dP,dt))))
print(ds.target_conv)
# Copy old normalization file
path1 = os.path.join(TRAINDIR,PREFIX+'train_norm.nc')
path2 = os.path.join(TRAINDIR,PREFIX+'train_oldnorm.nc')
# !cp $path1 $path2
# Create new dataset with characteristics of modified ds
new_ds = xr.Dataset({
'feature_means': ds.feature_means,
'feature_stds': ds.feature_stds,
'feature_mins': ds.feature_mins,
'feature_maxs': ds.feature_maxs,
'target_means': ds.target_means,
'target_stds': ds.target_stds,
'target_mins': ds.target_mins,
'target_maxs': ds.target_maxs,
'feature_names': ds.feature_names,
'target_names': ds.target_names,
'feature_stds_by_var': ds.feature_stds_by_var,
'target_conv': ds.target_conv
})
# 4.3 Write new data set to initial target_conv file
# !rm $path1 # Remove normalization file
new_ds.to_netcdf(path1) # Save the new dataset as the new normalization file
# 4.4 Close the xarray handler
xr.open_dataset(path1).close() # Don't forget to close xarray handler!!
# -
# ## 2) Create data generator and produce data sample
#
# ### 2.1) Create data generator from training dataset
train_gen_obj = DataGenerator(
data_dir=TRAINDIR,
feature_fn=PREFIX+'train_shuffle_features.nc',
target_fn=PREFIX+'train_shuffle_targets.nc',
batch_size=512,
norm_fn=PREFIX+'train_norm.nc',
fsub='feature_means', # Subtracct the mean
fdiv='feature_stds_by_var', # Then divide by Std
tmult='target_conv', # For targets/output: use values from preprocess_aqua.
shuffle=True,
)
gen = train_gen_obj.return_generator()
# Produce data sample
x, y = next(gen)
# and check its shape
x.shape, y.shape
# ### 2.2) Create data generator from validation dataset and produce sample
valid_gen_obj = DataGenerator(
data_dir=TRAINDIR,
feature_fn=PREFIX+'valid_shuffle_features.nc',
target_fn=PREFIX+'valid_shuffle_targets.nc',
batch_size=512,
norm_fn=PREFIX+'train_norm.nc',
fsub='feature_means', # Subtracct the mean
fdiv='feature_stds_by_var', # Then divide by Std
tmult='target_conv', # For targets/output: use values from preprocess_aqua.
shuffle=True,
)
# +
validgen = valid_gen_obj.return_generator()
xval, yval = next(validgen)
xval.shape, yval.shape
# -
# ## 3) Building the custom layers for mass/energy conserving neural networks
# ## 3.1) Load all the normalization variables
# +
# 1) Open the file containing the normalization of the targets
ds = xr.open_dataset(TRAINDIR + PREFIX + 'train_norm.nc')
# 2) Open the pickle files containing the pressure converters
with open(os.path.join('/filer/z-sv-pool12c/t/Tom.Beucler/SPCAM/CBRAIN-CAM/cbrain', 'hyai_hybi.pkl'), 'rb') as f:
hyai, hybi = pickle.load(f)
# 3) Define fsub, fdiv, normq
fsub = ds.feature_means.values
fdiv = ds.feature_stds_by_var.values
normq = ds.target_conv.values
print('fsub.shape=',fsub.shape)
print('fdiv.shape=',fdiv.shape)
print('normq.shape=',normq.shape)
print('hyai.shape=',hyai.shape)
print('hybi.shape=',hybi.shape)
ds.close()
# -
# ## 3.2) Check radiative integrals in numpy
# tgb - 2/8/2019 - We are checking that:
# $$
# \int_{0}^{p_{s}}\frac{dp}{g}c_{p}QRL\left(p\right)=FLNS-FLNT
# $$
# $$
# \int_{0}^{p_{s}}\frac{dp}{g}c_{p}QRS\left(p\right)=FSNT-FSNS
# $$
# Using the normalization to units W/m2, we are checking that:
# $$
# \int_{0}^{\widetilde{p_{s}}}d\widetilde{p}\cdot\delta QRL=FLNS-FLNT
# $$
# $$
# \int_{0}^{\widetilde{p_{s}}}d\widetilde{p}\cdot\delta QRS=FSNT-FSNS
# $$
# x = [QBP, QCBP, QIBP, TBP, VBP, Qdt_adiabatic, QCdt_adiabatic, QIdt_adiabatic, Tdt_adiabatic, Vdt_adiabatic, PS, SOLIN, SHFLX, LHFLX]
# y = [PHQ, PHCLDLIQ, PHCLDICE, TPHYSTND, QRL, QRS, DTVKE, FSNT, FSNS, FLNT, FLNS, PRECT, PRECTEND, PRECST, PRECSTEN]
# In this sub-section we have several objectives:
# 1) Making sure that for y, the integral relation holds
# 2) Coding the surface radiation layer in numpy
# 3) Convert the surface radiation layer to tensorflow
# ### 3.2.1) Checking consistency of radiative-integrals
# +
x,y = next(gen) # x.shape = 304 & y.shape = 218
import copy
inp = copy.copy(x)
# 0) Constants
C_P = 1.00464e3 # Specific heat capacity of air at constant pressure
G = 9.80616; # Reference gravity constant [m.s-2]
P0 = 1e5; # Reference surface pressure [Pa]
# 1) Get non-dimensional pressure differences (p_tilde above)
PS = np.add( np.multiply( inp[:,300], fdiv[300]), fsub[300])
# Reference for calculation of d_pressure is cbrain/models.py (e.g. QLayer)
P = np.add( np.multiply( P0, hyai), \
np.multiply( PS[:,None], hybi))
dP = np.subtract( P[:, 1:], P[:, :-1])
# norm_output = dp_norm * L_V/G so dp_norm = norm_output * G/L_V
dP_NORM = np.divide( \
np.multiply(normq[:30], \
G), L_V)
# dp_tilde = dp/dp_norm
# Wondering about broadcasting here...
# tf.div or simply \ would support broadcasting
dP_TILD = np.divide( dP, dP_NORM)
# 2) Radiative integrals
SWVEC = np.multiply( dP_TILD, y[:, 150:180])
SWINT = np.sum( SWVEC, axis=1)
SWNET = y[:,210]-y[:,211] # FSNT-FSNS
LWVEC = np.multiply( dP_TILD, y[:, 120:150])
LWINT = np.sum( LWVEC, axis=1) # LW integral
LWNET = y[:,213]-y[:,212] # FLNS-FLNT
plt.hist(SWINT-SWNET)
# -
# ### 3.2.2) Coding surface radiation layer in numpy
# tgb - 2/8/2019 - I plan on using the surface radiation layer before the mass and enthalpy conservation layers in the conserving network architecture (C), so let's work backwards from y to find what the input/output would be:
# y = [PHQ, PHCLDLIQ, PHCLDICE, TPHYSTND, QRL, QRS, DTVKE, FSNT, FSNS, FLNT, FLNS, PRECT, PRECTEND, PRECST, PRECSTEN]
# y_before_ent = [PHQ, PHCLDLIQ, PHCLDICE, TPHYSTND \ {TPHYSTND30}, QRL, QRS, DTVKE, FSNT, FSNS, FLNT, FLNS, PRECT, PRECTEND, PRECST, PRECSTEN]
# y_before_mass = [PHQ \ {PHQ30}, PHCLDLIQ, PHCLDICE, TPHYSTND \ {TPHYSTND30}, QRL, QRS, DTVKE, FSNT, FSNS, FLNT, FLNS, PRECT, PRECTEND, PRECST, PRECSTEN]
# y_before_surfrad = [PHQ \ {PHQ30}, PHCLDLIQ, PHCLDICE, TPHYSTND \ {TPHYSTND30}, QRL, QRS, DTVKE, FSNT, FLNT, PRECT, PRECTEND, PRECST, PRECSTEN]
# We would like to code the surface radiation layer so that:
# (Surface radiation layer): y_before_surfrad [214] |---> y_before_mass [216]
# +
x,y = next(gen) # x.shape = 304 & y.shape = 218
import copy
inp = copy.copy(x)
# Mimic the vector that comes in
ybef = np.concatenate([y[:,:29],y[:,30:119],y[:,120:211],y[:,212:213],y[:,214:]],1)
ytar = np.concatenate([y[:,:29],y[:,30:119],y[:,120:]],1)
print('y before surf rad',ybef.shape)
print('Target y after surf rad',ytar.shape)
# 0) Constants
C_P = 1.00464e3 # Specific heat capacity of air at constant pressure
G = 9.80616; # Reference gravity constant [m.s-2]
L_V = 2.501e6; # Latent heat of vaporization of water [W.kg-1]
P0 = 1e5; # Reference surface pressure [Pa]
# 1) Get non-dimensional pressure differences (p_tilde above)
PS = np.add( np.multiply( inp[:,300], fdiv[300]), fsub[300])
# Reference for calculation of d_pressure is cbrain/models.py (e.g. QLayer)
P = np.add( np.multiply( P0, hyai), \
np.multiply( PS[:,None], hybi))
dP = np.subtract( P[:, 1:], P[:, :-1])
# norm_output = dp_norm * L_V/G so dp_norm = norm_output * G/L_V
dP_NORM = np.divide( \
np.multiply(normq[:30], \
G), L_V)
# dp_tilde = dp/dp_norm
# Wondering about broadcasting here...
# tf.div or simply \ would support broadcasting
dP_TILD = np.divide( dP, dP_NORM)
# 2) Radiative integrals
SWVEC = np.multiply( dP_TILD, ybef[:, 148:178])
SWINT = np.sum( SWVEC, axis=1)
#SWNET = y[:,210]-y[:,211] # FSNT-FSNS
LWVEC = np.multiply( dP_TILD, ybef[:, 118:148])
LWINT = np.sum( LWVEC, axis=1) # LW integral
#LWNET = y[:,213]-y[:,212] # FLNS-FLNT
# 3) Infer surface radiative fluxes from radiative integrals and TOA radiative fluxes
FSNS = np.subtract(ybef[:,208], SWINT) # FSNS = FSNT-SWINT
FLNS = np.add(ybef[:,209], LWINT) # FLNS = FLNT+LWINT
# 4) Concatenate the input of the dense layer with
# the net surface radiative fluxes to form
# the output of the surface radiation layer
yaft = np.concatenate([ybef[:, :209], np.expand_dims(FSNS, axis=1),\
ybef[:, 209:210], np.expand_dims(FLNS, axis=1),\
ybef[:, 210:]], 1)
print('y after surf rad', yaft.shape)
plt.hist(yaft[:,209]-ytar[:,209])
plt.hist(yaft[:,211]-ytar[:,211])
# -
# ### 3.2.3) Port the surface radiation layer to tensorflow
class SurRadLay(Layer):
    """Keras layer inferring surface radiative fluxes from the dense output.

    Takes the (normalized) network input and the dense layer's output and
    appends FSNS and FLNS computed from the vertical integrals of the
    shortwave (QRS) and longwave (QRL) heating channels plus the TOA
    fluxes, per the notebook's constraints:
    SW integral = FSNT - FSNS  and  LW integral = FLNS - FLNT.
    Output channel layout assumes the ordering documented in section 3.2.2
    (SW heating at densout[:, 148:178], LW at [:, 118:148], FSNT at
    [:, 208], FLNT at [:, 209]) -- confirm if the target vector changes.
    """
    def __init__(self, fsub, fdiv, normq, hyai, hybi, output_dim, **kwargs):
        self.fsub = fsub # Subtraction for normalization of inputs
        self.fdiv = fdiv # Division for normalization of inputs
        self.normq = normq # Normalization of output's water concentration
        self.hyai = hyai # CAM constants to calculate d_pressure
        self.hybi = hybi # CAM constants to calculate d_pressure
        self.output_dim = output_dim # Dimension of output
        super().__init__(**kwargs)
    def build(self, input_shape):
        # No trainable weights; this layer is a fixed physical constraint.
        super().build(input_shape) # Be sure to call this somewhere!
    # tgb - 2/6/2019 - following https://github.com/keras-team/keras/issues/4871
    def get_config(self):
        # Serialize the normalization arrays as lists so the layer can be
        # saved/re-loaded with the model.
        config = {'fsub': list(self.fsub), 'fdiv': list(self.fdiv),
                  'normq': list(self.normq), 'hyai': list(self.hyai),
                  'hybi': list(self.hybi), 'output_dim': self.output_dim}
        base_config = super(SurRadLay, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
    def call(self, arrs):
        """Append FSNS and FLNS to the dense output.

        arrs: (inp, densout) -- the normalized network input and the
        output tensor of the preceding dense layer.
        """
        # Split between the inputs inp & the output of the densely connected
        # neural network, massout
        inp, densout = arrs
        # 0) Constants
        C_P = 1.00464e3 # Specific heat capacity of air at constant pressure
        G = 9.80616; # Reference gravity constant [m.s-2]
        L_V = 2.501e6; # Latent heat of vaporization of water [W.kg-1]
        P0 = 1e5; # Reference surface pressure [Pa]
        # 1) Get non-dimensional pressure differences (p_tilde above)
        # Un-normalize surface pressure PS taken from input channel 300.
        PS = tfm.add( tfm.multiply( inp[:,300], self.fdiv[300]), self.fsub[300])
        # Reference for calculation of d_pressure is cbrain/models.py (e.g. QLayer)
        P = tfm.add( tfm.multiply( P0, self.hyai), \
                    tfm.multiply( PS[:,None], self.hybi))
        dP = tfm.subtract( P[:, 1:], P[:, :-1])
        # norm_output = dp_norm * L_V/G so dp_norm = norm_output * G/L_V
        dP_NORM = tfm.divide( \
                             tfm.multiply(self.normq[:30], \
                                          G),\
                             L_V)
        # dp_tilde = dp/dp_norm
        dP_TILD = tfm.divide( dP, dP_NORM)
        # 2) Radiative integrals (mass-weighted column sums of QRS and QRL)
        SWVEC = tfm.multiply( dP_TILD, densout[:, 148:178])
        SWINT = tfm.reduce_sum( SWVEC, axis=1)
        LWVEC = tfm.multiply( dP_TILD, densout[:, 118:148])
        LWINT = tfm.reduce_sum( LWVEC, axis=1) # LW integral
        # 3) Infer surface radiative fluxes from radiative integrals and TOA radiative fluxes
        FSNS = tfm.subtract( densout[:,208], SWINT) # FSNS = FSNT-SWINT
        FLNS = tfm.add( densout[:,209], LWINT) # FLNS = FLNT+LWINT
        # 4) Concatenate the input of the dense layer with
        # the net surface radiative fluxes to form
        # the output of the surface radiation layer
        out = tf.concat([densout[:, :209], tf.expand_dims(FSNS, axis=1),\
                         densout[:, 209:210], tf.expand_dims(FLNS, axis=1),\
                         densout[:, 210:]], 1)
        return out
    def compute_output_shape(self, input_shape):
        # tgb - 2/7/2019 - Wrap the returned output shape in Tensorshape
        # to avoid problems with custom layers & eager execution
        # https://github.com/tensorflow/tensorflow/issues/20805
        return tf.TensorShape((input_shape[0][0], self.output_dim))
# The layer takes inputs from the previous layer that have shape 124
# and outputs y of shape 126 to be fed to the mass cons. layers
# ## 3.3) Adapt mass conservation layer to new output format
# tgb - 2/8/2019 - For now, change indices of MasConsLay in cbrain/models
class MasConsLay(Layer):
    """Water-mass conservation layer.

    Receives the (normalized) network input `inp` and the 216-element output
    `sradout` of the surface-radiation layer, diagnoses the water-vapor
    tendency at the lowest model level (level 30) as the residual of the
    column water budget (E - P = column-integrated moistening), and inserts
    it into the vector, producing a 217-element output for the
    enthalpy-conservation layer.
    """
    def __init__(self, fsub, fdiv, normq, hyai, hybi, output_dim, **kwargs):
        self.fsub = fsub # Subtraction for normalization of inputs
        self.fdiv = fdiv # Division for normalization of inputs
        self.normq = normq # Normalization of output's water concentration
        self.hyai = hyai # CAM constants to calculate d_pressure
        self.hybi = hybi # CAM constants to calculate d_pressure
        self.output_dim = output_dim # Dimension of output
        super().__init__(**kwargs)
    def build(self, input_shape):
        super().build(input_shape) # Be sure to call this somewhere!
    # tgb - 2/6/2019 - following https://github.com/keras-team/keras/issues/4871
    def get_config(self):
        # Serialize constructor arguments (as plain lists) so the layer
        # survives model save/load round-trips.
        config = {'fsub': list(self.fsub), 'fdiv': list(self.fdiv),
                  'normq': list(self.normq), 'hyai': list(self.hyai),
                  'hybi': list(self.hybi), 'output_dim': self.output_dim}
        base_config = super(MasConsLay, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
    def call(self, arrs):
        # Split between the inputs inp & the output of the surface-radiation
        # layer, sradout
        inp, sradout = arrs
        # 0) Constants
        G = 9.80616; # Reference gravity constant [m.s-2]
        L_V = 2.501e6; # Latent heat of vaporization of water [J.kg-1]
        P0 = 1e5; # Reference surface pressure [Pa]
        # 1) Get non-dimensional pressure differences (p_tilde above)
        # PS is read from index 300 of the input vector (input shape is 304).
        # NOTE(review): an older comment here said "151st element after
        # 30*5=150 profile elements"; the indices below imply the larger
        # input layout — verify against the data generator.
        PS = tfm.add( tfm.multiply( inp[:,300], self.fdiv[300]), self.fsub[300])
        # Reference for calculation of d_pressure is cbrain/models.py (e.g. QLayer)
        P = tfm.add( tfm.multiply( P0, self.hyai), \
                    tfm.multiply( PS[:,None], self.hybi))
        dP = tfm.subtract( P[:, 1:], P[:, :-1])
        # norm_output = dp_norm * L_V/G so dp_norm = norm_output * G/L_V
        dP_NORM = tfm.divide( \
                             tfm.multiply(self.normq[:30], \
                                          G), L_V)
        # dp_tilde = dp/dp_norm
        # Wondering about broadcasting here...
        # tf.div or simply \ would support broadcasting
        dP_TILD = tfm.divide( dP, dP_NORM)
        # 2) Calculate cloud water vertical integral from level 1 to level 30
        # The indices are tricky here because we are missing del(q_v)@(level 30)
        # so e.g. q_liq@(level 1) is the 30th element of the output of the
        # previous dense layer
        CLDVEC = tfm.multiply( dP_TILD, \
                              tfm.add( sradout[:, 29:59], sradout[:, 59:89]))
        CLDINT = tfm.reduce_sum( CLDVEC, axis=1)
        # 3) Calculate water vapor vertical integral from level 1 to level 29
        VAPVEC = tfm.multiply( dP_TILD[:, :29], \
                              sradout[:, :29])
        VAPINT = tfm.reduce_sum( VAPVEC, axis=1)
        # 4) Calculate forcing on the right-hand side (Net Evaporation-Precipitation)
        # E-P is already normalized to units W.m-2 in the output vector
        # so all we need to do is input-unnormalize LHF that is taken from the input vector
        LHF = tfm.add( tfm.multiply( inp[:,303], self.fdiv[303]), self.fsub[303])
        # Note that total precipitation = PRECT + 1e-3*PRECTEND in the CAM model
        # PRECTEND already multiplied by 1e-3 in output vector so no need to redo it
        # tgb - 2/8/2019 - This is the only line modified from the large-scale version 002
        PREC = tfm.add( sradout[:, 212], sradout[:, 213])
        # 5) Infer water vapor tendency at level 30 as a residual
        # Composing tfm.add 3 times because not sure how to use tfm.add_n
        DELQV30 = tfm.divide( \
                             tfm.add( tfm.add( tfm.add (\
                                 LHF, tfm.negative(PREC)), \
                                 tfm.negative(CLDINT)), \
                                 tfm.negative(VAPINT)), \
                             dP_TILD[:, 29])
        # 6) Concatenate the water tendencies with the newly inferred tendency
        # to get the final vector out of shape (#samples,217) with
        # [DELQ, DELCLDLIQ, DELCLDICE,
        # TPHYSTND\{TPHYSTND AT SURFACE}, FSNT, FSNS, FLNT, FLNS, PRECT PRECTEND]
        # Uses https://www.tensorflow.org/api_docs/python/tf/concat
        DELQV30 = tf.expand_dims(DELQV30,1) # Adds dimension=1 to axis=1
        out = tf.concat([sradout[:, :29], DELQV30, sradout[:, 29:]], 1)
        return out
    def compute_output_shape(self, input_shape):
        # Wrap the returned output shape in TensorShape to avoid problems
        # with custom layers & eager execution
        # https://github.com/tensorflow/tensorflow/issues/20805
        return tf.TensorShape((input_shape[0][0], self.output_dim))
# The output has size 125=30*4+6-1
# and is ready to be fed to the energy conservation layer
# before we reach the total number of outputs = 126
# ## 3.4) Adapt enthalpy conservation layer to new output format
# tgb - 2/8/2019 - For now, change indices of EntConsLay in cbrain/models
class EntConsLay(Layer):
    """Enthalpy (energy) conservation layer.

    Receives the (normalized) network input `inp` and the 217-element output
    `massout` of the mass-conservation layer, diagnoses the temperature
    tendency at the lowest model level (level 30) as the residual of the
    column enthalpy budget, and inserts it into the vector, producing the
    final 218-element output.
    """
    def __init__(self, fsub, fdiv, normq, hyai, hybi, output_dim, **kwargs):
        self.fsub = fsub # Subtraction for normalization of inputs
        self.fdiv = fdiv # Division for normalization of inputs
        self.normq = normq # Normalization of output's water concentration
        self.hyai = hyai # CAM constants to calculate d_pressure
        self.hybi = hybi # CAM constants to calculate d_pressure
        self.output_dim = output_dim # Dimension of output
        super().__init__(**kwargs)
    def build(self, input_shape):
        super().build(input_shape) # Be sure to call this somewhere!
    # tgb - 2/6/2019 - following https://github.com/keras-team/keras/issues/4871
    def get_config(self):
        # Serialize constructor arguments (as plain lists) so the layer
        # survives model save/load round-trips.
        config = {'fsub': list(self.fsub), 'fdiv': list(self.fdiv),
                  'normq': list(self.normq), 'hyai': list(self.hyai),
                  'hybi': list(self.hybi), 'output_dim': self.output_dim}
        base_config = super(EntConsLay, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
    def call(self, arrs):
        # Split between the inputs inp & the output of the mass-conservation
        # layer, massout
        inp, massout = arrs
        # 0) Constants
        G = 9.80616; # Reference gravity constant [m.s-2]
        L_F = 3.337e5; # Latent heat of fusion of water [J.kg-1]
        L_V = 2.501e6; # Latent heat of vaporization of water [J.kg-1]
        # BUGFIX: L_S is used below (SPDQINT) but was never defined in this
        # method, unlike in massent_res/massent_check; define it locally.
        L_S = L_F + L_V; # Latent heat of sublimation of water [J.kg-1]
        P0 = 1e5; # Reference surface pressure [Pa]
        # 1) Get non-dimensional pressure differences (p_tilde above)
        # PS is read from index 300 of the input vector (input shape is 304)
        PS = tfm.add( tfm.multiply( inp[:,300], self.fdiv[300]), self.fsub[300])
        # Reference for calculation of d_pressure is cbrain/models.py (e.g. QLayer)
        P = tfm.add( tfm.multiply( P0, self.hyai), \
                    tfm.multiply( PS[:,None], self.hybi))
        dP = tfm.subtract( P[:, 1:], P[:, :-1])
        # norm_output = dp_norm * L_V/G so dp_norm = norm_output * G/L_V
        dP_NORM = tfm.divide( \
                             tfm.multiply(self.normq[:30], \
                                          G),\
                             L_V)
        # dp_tilde = dp/dp_norm
        dP_TILD = tfm.divide( dP, dP_NORM)
        # 2) Calculate net energy input from phase change and precipitation
        # PHAS = Lf/Lv*((PRECST+PRECSTEN)-(PRECT+PRECTEND))
        PHAS = tfm.divide( tfm.multiply( tfm.subtract(\
                                                      tfm.add( massout[:,215], massout[:,216]),\
                                                      tfm.add( massout[:,213], massout[:,214])),\
                                        L_F),\
                          L_V)
        # 3) Calculate net energy input from radiation, sensible heat flux and turbulent KE
        # 3.1) RAD = FSNT-FSNS-FLNT+FLNS
        RAD = tfm.add(\
                      tfm.subtract( massout[:,209], massout[:,210]),\
                      tfm.subtract( massout[:,212], massout[:,211]))
        # 3.2) Unnormalize sensible heat flux (index 302 of the input vector)
        SHF = tfm.add( tfm.multiply( inp[:,302], self.fdiv[302]), self.fsub[302])
        # 3.3) Net turbulent kinetic energy dissipative heating is the
        # column integral of the KE dissipative-heating profile
        # (massout[:, 179:209] in the 217-element layout)
        KEDVEC = tfm.multiply( dP_TILD, massout[:, 179:209])
        KEDINT = tfm.reduce_sum( KEDVEC, axis=1)
        # 4) Calculate tendency of normalized column water vapor due to phase change
        # 4.1) Unnormalize latent heat flux (index 303 of the input vector)
        LHF = tfm.add( tfm.multiply( inp[:,303], self.fdiv[303]), self.fsub[303])
        # 4.2) Column water vapor is the column integral of specific humidity
        PHQVEC = tfm.multiply( dP_TILD, massout[:, :30])
        PHQINT = tfm.reduce_sum( PHQVEC, axis=1)
        # 4.3) Multiply by L_S/L_V to normalize (explanation above)
        SPDQINT = tfm.divide( tfm.multiply( tfm.subtract(\
                                                         PHQINT, LHF),\
                                           L_S),\
                             L_V)
        # 5) Same operation for liquid water tendency but multiplied by L_F/L_V
        SPDQCINT = tfm.divide( tfm.multiply(\
                                            tfm.reduce_sum(\
                                                           tfm.multiply( dP_TILD, massout[:, 30:60]),\
                                                           axis=1),\
                                            L_F),\
                              L_V)
        # 6) Same operation for temperature but only integrate from level 1 to level 29
        # (the level-30 temperature tendency is what we are solving for)
        DTINT = tfm.reduce_sum( tfm.multiply( dP_TILD[:, :29], massout[:, 90:119]), axis=1)
        # 7) Now calculate dT30 as a residual of the enthalpy budget
        dT30 = tfm.divide(tfm.add(tfm.add(tfm.add(tfm.add(tfm.add(tfm.add(\
                                                                          PHAS,RAD),\
                                                                  SHF),\
                                                          KEDINT),\
                                                  tfm.negative( SPDQINT)),\
                                          tfm.negative( SPDQCINT)),\
                                  tfm.negative( DTINT)),\
                          dP_TILD[:, 29])
        # Insert dT30 at index 119 so the temperature-tendency profile is complete
        dT30 = tf.expand_dims(dT30,1)
        out = tf.concat([massout[:, :119], dT30, massout[:, 119:]], 1)
        return out
    def compute_output_shape(self, input_shape):
        # Wrap the returned output shape in TensorShape to avoid problems
        # with custom layers & eager execution
        # https://github.com/tensorflow/tensorflow/issues/20805
        return tf.TensorShape((input_shape[0][0], self.output_dim))
# and is ready to be used in the cost function
# ## 3.5) Keep custom loss function
# $$
# \mathrm{Loss}\left[\mathrm{W^{2}.m^{-4}}\right]=\alpha\cdot\mathrm{MSE}+\left(1-\alpha\right)\left(\mathrm{Enthalpy\ residual}^{2}+\mathrm{Mass\ residual}^{2}\right)\ \ |\ \ \alpha\in[0,1]
# $$
# tgb - 2/5/2019- Inspired from
# 1) https://stackoverflow.com/questions/46858016/keras-custom-loss-function-to-pass-arguments-other-than-y-true-and-y-pred for the custom loss function
# 2) https://stackoverflow.com/questions/46464549/keras-custom-loss-function-accessing-current-input-pattern for using the inputs in the custom loss function
# Uses the function massent_check as reference for the square Energy and mass residuals
# tgb - 2/9/2019 - Don't forget the additional entries in the output vector (LW and SW heating profiles)!
def customLoss(input_tensor,fsub,fdiv,normq,hyai,hybi,alpha = 0.5):
    """Build a weighted loss: alpha*MSE + (1-alpha)*(water_res^2 + enthalpy_res^2).

    input_tensor: the Keras Input tensor of the model being compiled (the
    loss needs access to the un-normalized forcings PS/SHF/LHF).
    fsub, fdiv: input normalization offsets/scales; normq: output water
    normalization; hyai, hybi: CAM hybrid-coordinate coefficients.
    alpha in [0,1] weights MSE against the squared budget residuals.
    NOTE: relies on a module-level `mse` metric being in scope.
    """
    # tgb - 2/5/2019 - Loss function written above
    def lossFunction(y_true,y_pred):
        # Weighted sum of plain MSE and the squared conservation residuals.
        loss = tfm.multiply(alpha, mse(y_true, y_pred))
        loss += tfm.multiply(tfm.subtract(1.0,alpha), \
                             massent_res(input_tensor,y_pred,fsub,fdiv,normq,hyai,hybi))
        return loss
    # tgb - 2/5/2019 - Mass and enthalpy residual function
    # Adapted from massent_check by converting numpy to tensorflow
    def massent_res(x,y,fsub,fdiv,normq,hyai,hybi):
        """Return per-sample (water residual^2 + enthalpy residual^2) in TF ops."""
        # 0) Constants
        G = 9.80616; # Reference gravity constant [m.s-2]
        L_F = 3.337e5; # Latent heat of fusion of water [J.kg-1]
        L_V = 2.501e6; # Latent heat of vaporization of water [J.kg-1]
        L_S = L_F+L_V; # Latent heat of sublimation of water [J.kg-1]
        P0 = 1e5; # Reference surface pressure [Pa]
        # WATER&ENTHALPY) Get non-dimensional pressure differences (p_tilde above)
        # PS is read from index 300 of the input vector (input shape is 304)
        PS = tfm.add( tfm.multiply( x[:,300], fdiv[300]), fsub[300])
        # Reference for calculation of d_pressure is cbrain/models.py (e.g. QLayer)
        P = tfm.add( tfm.multiply( P0, hyai), \
                    tfm.multiply( PS[:,None], hybi))
        dP = tfm.subtract( P[:, 1:], P[:, :-1])
        # norm_output = dp_norm * L_V/G so dp_norm = norm_output * G/L_V
        dP_NORM = tfm.divide( \
                             tfm.multiply(normq[:30], \
                                          G),\
                             L_V)
        # dp_tilde = dp/dp_norm
        dP_TILD = tfm.divide( dP, dP_NORM)
        # WATER.1) Calculate water vertical integral from level 1 to level 30
        # (sum of vapor + liquid + ice tendencies, pressure-weighted)
        WATVEC = tfm.multiply( dP_TILD, tfm.add(tfm.add(y[:, :30],\
                                                        y[:, 30:60]),\
                                                y[:, 60:90]))
        WATINT = tfm.reduce_sum( WATVEC, axis=1)
        # WATER.2) Calculate forcing on the right-hand side (Net Evaporation-Precipitation)
        # E-P is already normalized to units W.m-2 in the output vector
        # so all we need to do is input-unnormalize LHF that is taken from the input vector
        LHF = tfm.add( tfm.multiply( x[:,303], fdiv[303]), fsub[303])
        # Note that total precipitation = PRECT + 1e-3*PRECTEND in the CAM model
        # PRECTEND already multiplied by 1e-3 in output vector so no need to redo it
        PREC = tfm.add( y[:, 214], y[:, 215])
        # WATER.FINAL) Residual = E-P-DWATER/DT
        WATRES = tfm.add(tfm.add(LHF,\
                                 tfm.negative(PREC)),\
                         tfm.negative(WATINT))
        # ENTHALPY.1) Calculate net energy input from phase change and precipitation
        # PHAS = Lf/Lv*((PRECST+PRECSTEN)-(PRECT+PRECTEND))
        PHAS = tfm.divide( tfm.multiply( tfm.subtract(\
                                                      tfm.add( y[:,216], y[:,217]),\
                                                      tfm.add( y[:,214], y[:,215])),\
                                        L_F),\
                          L_V)
        # ENTHALPY.2) Calculate net energy input from radiation, sensible heat flux and turbulent KE
        # 2.1) RAD = FSNT-FSNS-FLNT+FLNS
        RAD = tfm.add(\
                      tfm.subtract( y[:,210], y[:,211]),\
                      tfm.subtract( y[:,213], y[:,212]))
        # 2.2) Unnormalize sensible heat flux (index 302 of the input vector)
        SHF = tfm.add( tfm.multiply( x[:,302], fdiv[302]), fsub[302])
        # 2.3) Net turbulent kinetic energy dissipative heating is the column-integrated
        # turbulent kinetic energy energy dissipative heating
        KEDVEC = tfm.multiply( dP_TILD, y[:, 180:210])
        KEDINT = tfm.reduce_sum( KEDVEC, axis=1)
        # ENTHALPY.3) Calculate tendency of normalized column water vapor due to phase change
        # 3.1) Column water vapor is the column integral of specific humidity
        PHQVEC = tfm.multiply( dP_TILD, y[:, :30])
        PHQINT = tfm.reduce_sum( PHQVEC, axis=1)
        # 3.2) Multiply by L_S/L_V to normalize (explanation above)
        SPDQINT = tfm.divide( tfm.multiply( tfm.subtract(\
                                                         PHQINT, LHF),\
                                           L_S),\
                             L_V)
        # ENTHALPY.4) Same operation for liquid water tendency but multiplied by L_F/L_V
        SPDQCINT = tfm.divide( tfm.multiply(\
                                            tfm.reduce_sum(\
                                                           tfm.multiply( dP_TILD, y[:, 30:60]),\
                                                           axis=1),\
                                            L_F),\
                              L_V)
        # ENTHALPY.5) Same operation for temperature tendency (full 30 levels,
        # since y here is the complete 218-element output vector)
        DTINT = tfm.reduce_sum( tfm.multiply( dP_TILD[:, :30], y[:, 90:120]), axis=1)
        # ENTHALPY.FINAL) Residual = SPDQ+SPDQC+DTINT-RAD-SHF-PHAS
        ENTRES = tfm.add(tfm.add(tfm.add(tfm.add(tfm.add(tfm.add(SPDQINT,\
                                                                 SPDQCINT),\
                                                         DTINT),\
                                                 tfm.negative(RAD)),\
                                         tfm.negative(SHF)),\
                                 tfm.negative(PHAS)),\
                         tfm.negative(KEDINT))
        # Return sum of water and enthalpy square residuals
        return tfm.add( tfm.square(WATRES), tfm.square(ENTRES))
    return lossFunction
# # 4) Build neural networks
# ## 4.1) Formulate different models
# ### 4.1.1) Mass/energy conserving model (C)
# Conserving model with 5 dense layers
# Architecture C: 5 hidden dense layers (512 units, LeakyReLU), a linear
# 214-unit output layer, then three constraint layers that successively
# append diagnosed quantities: surface radiation (->216), water-mass
# conservation (->217) and enthalpy conservation (->218).
inp = Input(shape=(304,))
densout = Dense(512, activation='linear')(inp)
densout = LeakyReLU(alpha=0.3)(densout)
for i in range (4):
    densout = Dense(512, activation='linear')(densout)
    densout = LeakyReLU(alpha=0.3)(densout)
densout = Dense(214, activation='linear')(densout)
densout = LeakyReLU(alpha=0.3)(densout)
# Each constraint layer also receives the normalized model input `inp`
# so it can un-normalize PS/SHF/LHF internally.
sradout = SurRadLay(
    input_shape=(214,), fsub=fsub, fdiv=fdiv, normq=normq,\
    hyai=hyai, hybi=hybi, output_dim = 216
)([inp, densout])
massout = MasConsLay(
    input_shape=(216,), fsub=fsub, fdiv=fdiv, normq=normq,\
    hyai=hyai, hybi=hybi, output_dim = 217
)([inp, sradout])
out = EntConsLay(
    input_shape=(217,), fsub=fsub, fdiv=fdiv, normq=normq,\
    hyai=hyai, hybi=hybi, output_dim = 218
)([inp, massout])
C_rad_5dens = Model(inputs=inp, outputs=out)
# ### 4.1.2) Unconstrained model (U)
# Unconstrained model with 5 dense layers
# Architecture U: same 5-layer trunk as C but predicting all 218 outputs
# directly, with no conservation layers.
inp = Input(shape=(304,))
densout = Dense(512, activation='linear')(inp)
densout = LeakyReLU(alpha=0.3)(densout)
for i in range (4):
    densout = Dense(512, activation='linear')(densout)
    densout = LeakyReLU(alpha=0.3)(densout)
densout = Dense(218, activation='linear')(densout)
out = LeakyReLU(alpha=0.3)(densout)
U_rad_5dens = Model(inputs=inp, outputs=out)
# ### 4.1.3) Weakly-constrained models (W)
# Weakly-constrained models with 5 dense layers
# alpha=0.01
# Architecture W: identical to U, but each copy will be compiled with the
# weighted custom loss (alpha = 0.01 / 0.5 / 0.99 below). Each model keeps
# its own Input tensor (inp001/inp05/inp099) because customLoss closes over it.
# alpha=0.01
inp001 = Input(shape=(304,))
densout = Dense(512, activation='linear')(inp001)
densout = LeakyReLU(alpha=0.3)(densout)
for i in range (4):
    densout = Dense(512, activation='linear')(densout)
    densout = LeakyReLU(alpha=0.3)(densout)
densout = Dense(218, activation='linear')(densout)
out = LeakyReLU(alpha=0.3)(densout)
W001_rad_5dens = Model(inputs=inp001, outputs=out)
# alpha=0.5
inp05 = Input(shape=(304,))
densout = Dense(512, activation='linear')(inp05)
densout = LeakyReLU(alpha=0.3)(densout)
for i in range (4):
    densout = Dense(512, activation='linear')(densout)
    densout = LeakyReLU(alpha=0.3)(densout)
densout = Dense(218, activation='linear')(densout)
out = LeakyReLU(alpha=0.3)(densout)
W05_rad_5dens = Model(inputs=inp05, outputs=out)
# alpha=0.99
inp099 = Input(shape=(304,))
densout = Dense(512, activation='linear')(inp099)
densout = LeakyReLU(alpha=0.3)(densout)
for i in range (4):
    densout = Dense(512, activation='linear')(densout)
    densout = LeakyReLU(alpha=0.3)(densout)
densout = Dense(218, activation='linear')(densout)
out = LeakyReLU(alpha=0.3)(densout)
W099_rad_5dens = Model(inputs=inp099, outputs=out)
# ## 4.2) Compile the models
# ### 4.2.1) Use mean square error as loss function
# C and U train on plain MSE (C's conservation is enforced architecturally).
C_rad_5dens.compile('rmsprop','mse')
U_rad_5dens.compile('rmsprop','mse')
# ### 4.2.2) Use custom loss function
# The W models trade MSE against the squared mass/enthalpy residuals:
# loss = alpha*MSE + (1-alpha)*residuals. Each compile passes the model's
# own Input tensor so the loss can un-normalize the forcings.
W001_rad_5dens.compile(loss=customLoss(inp001,fsub,fdiv,normq,hyai,hybi,alpha = 0.01),\
                      optimizer='rmsprop')
W05_rad_5dens.compile(loss=customLoss(inp05,fsub,fdiv,normq,hyai,hybi,alpha = 0.5),\
                      optimizer='rmsprop')
W099_rad_5dens.compile(loss=customLoss(inp099,fsub,fdiv,normq,hyai,hybi,alpha = 0.99),\
                      optimizer='rmsprop')
# ### 4.2.3) Check model summaries
C_rad_5dens.summary()
# ## 4.3) Train the models
# ### 4.3.1) Train compiled models
# Number of training epochs; only the alpha=0.99 model is trained in this
# run — the other fits are commented out (presumably already trained and
# loaded below).
Nep = 40
#hC_rad_5dens = C_rad_5dens.fit_generator(gen, train_gen_obj.n_batches, epochs=Nep, \
#                           validation_data=validgen, validation_steps= valid_gen_obj.n_batches)
#hU_rad_5dens = U_rad_5dens.fit_generator(gen, train_gen_obj.n_batches, epochs=Nep, \
#                           validation_data=validgen, validation_steps= valid_gen_obj.n_batches)
#hW001_rad_5dens = W001_rad_5dens.fit_generator(gen, train_gen_obj.n_batches, epochs=Nep, \
#                           validation_data=validgen, validation_steps= valid_gen_obj.n_batches)
#hW05_rad_5dens = W05_rad_5dens.fit_generator(gen, train_gen_obj.n_batches, epochs=Nep, \
#                           validation_data=validgen, validation_steps= valid_gen_obj.n_batches)
hW099_rad_5dens = W099_rad_5dens.fit_generator(gen, train_gen_obj.n_batches, epochs=Nep, \
                           validation_data=validgen, validation_steps= valid_gen_obj.n_batches)
# ### 4.3.2) Save models in .h5 format
# %cd $TRAINDIR/HDF5_DATA
# !pwd
# Persist the trained models as HDF5 files in the training directory.
C_rad_5dens.save('C_rad_5dens.h5')
U_rad_5dens.save('U_rad_5dens.h5')
W001_rad_5dens.save('W001_rad_5dens.h5')
W05_rad_5dens.save('W05_rad_5dens.h5')
W099_rad_5dens.save('W099_rad_5dens.h5')
# ### 4.3.3) Load models if already trained
# tgb - 2/11/2019 - OPTIONAL: Load the models if already trained.
# The W models reuse the architectures built above and only restore weights;
# C and U are rebuilt entirely from file via load_model.
# %cd $TRAINDIR/HDF5_DATA
# !pwd
# C_rad_5dens.load_weights('C_rad_5dens.h5')
# U_rad_5dens.load_weights('U_rad_5dens.h5')
W001_rad_5dens.load_weights('W001_rad_5dens.h5')
W05_rad_5dens.load_weights('W05_rad_5dens.h5')
W099_rad_5dens.load_weights('W099_rad_5dens.h5')
# %cd $TRAINDIR/HDF5_DATA
# !pwd
# NOTE(review): load_model on C (which contains the custom SurRadLay/
# MasConsLay/EntConsLay layers) normally needs custom_objects={...} —
# verify this call succeeds as written.
C_rad_5dens = load_model('C_rad_5dens.h5')
U_rad_5dens = load_model('U_rad_5dens.h5')
# W001_rad_5dens = load_model('W001_rad_5dens.h5')
# W05_rad_5dens = load_model('W05_rad_5dens.h5')
# W099_rad_5dens = load_model('W099_rad_5dens.h5')
# ### 4.3.4) Loss and validation curves
# +
import matplotlib.pyplot as plt
# Plot training (dotted) and validation (solid) loss curves for the five
# models; requires the corresponding h*_rad_5dens History objects from 4.3.1.
fig = plt.figure()
ax = plt.subplot(111)
for index in range (5):
    if index==0: hdict = hC_rad_5dens.history; colo = 'bo'; col = 'b'; lab = 'C';
    elif index==1: hdict = hU_rad_5dens.history; colo = 'ro'; col = 'r'; lab = 'U';
    elif index==2: hdict = hW001_rad_5dens.history; colo = 'go'; col = 'g'; lab = 'W001';
    elif index==3: hdict = hW05_rad_5dens.history; colo = 'co'; col = 'c'; lab = 'W05';
    elif index==4: hdict = hW099_rad_5dens.history; colo = 'mo'; col = 'm'; lab = 'W099';
    train_loss_values = hdict['loss']
    valid_loss_values = hdict['val_loss']
    epochs = range(1, len(train_loss_values) + 1)
    ax.plot(epochs, train_loss_values, colo, label=lab+' Train')
    ax.plot(epochs, valid_loss_values, col, label=lab+' Valid')
#plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.ylim((0, 125))
# https://stackoverflow.com/questions/4700614/how-to-put-the-legend-out-of-the-plot
# for legend at the right place
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.05),
          ncol=5, fancybox=True, shadow=True);
plt.show()
# -
# ### 4.3.5) Test predictions on one batch
# Compare predictions of the unconstrained and conserving models against the
# truth for one sample of the current batch (x, y come from the generator).
pred_uncons = U_rad_5dens.predict_on_batch(x)
pred_cons = C_rad_5dens.predict_on_batch(x)
ind_test = 150;  # index of the sample to visualize
plt.plot(pred_uncons[ind_test,:], label='unconstrained')
plt.plot(pred_cons[ind_test,:], label='conserving')
plt.plot(y[ind_test,:], label='truth')
plt.legend();
# ## 4.4) Check performances regarding mass/energy conservation and mean square error
# tgb - 2/9/2019 - Adapted mass/energy conservation check to new output vector containing 60 more variables (longwave and shortwave heating profiles)
# ### 4.4.1) Mass/enthalpy conservation
def massent_check(x,y,fsub=fsub,fdiv=fdiv,normq=normq,hyai=hyai,hybi=hybi,outtype="graph"):
    """Diagnose column water and enthalpy budget residuals [W.m-2] in numpy.

    x: normalized input batch (batch, 304); y: output batch (batch, 218).
    outtype="graph" plots residual histograms; outtype="list" returns the
    tuple (WATRES, ENTRES) of per-sample residuals.
    NOTE: the default arguments capture module-level fsub/fdiv/normq/hyai/
    hybi at definition time, so those globals must exist when this cell runs.
    """
    import numpy as np
    # 0) Constants
    G = 9.80616; # Reference gravity constant [m.s-2]
    L_F = 3.337e5; # Latent heat of fusion of water [J.kg-1]
    L_V = 2.501e6; # Latent heat of vaporization of water [J.kg-1]
    L_S = L_F+L_V; # Latent heat of sublimation of water [J.kg-1]
    P0 = 1e5; # Reference surface pressure [Pa]
    # WATER&ENTHALPY) Get non-dimensional pressure differences (p_tilde above)
    # PS is read from index 300 of the input vector (input shape is 304)
    PS = np.add( np.multiply( x[:,300], fdiv[300]), fsub[300])
    # Reference for calculation of d_pressure is cbrain/models.py (e.g. QLayer)
    P = np.add( np.multiply( P0, hyai), \
               np.multiply( PS[:,None], hybi))
    dP = np.subtract( P[:, 1:], P[:, :-1])
    # norm_output = dp_norm * L_V/G so dp_norm = norm_output * G/L_V
    dP_NORM = np.divide( \
                        np.multiply(normq[:30], \
                                    G),\
                        L_V)
    # dp_tilde = dp/dp_norm
    dP_TILD = np.divide( dP, dP_NORM)
    # WATER.1) Calculate water vertical integral from level 1 to level 30
    # (sum of vapor + liquid + ice tendencies, pressure-weighted)
    WATVEC = np.multiply( dP_TILD, y[:, :30] + y[:, 30:60] + y[:, 60:90])
    WATINT = np.sum( WATVEC, axis=1)
    # WATER.2) Calculate forcing on the right-hand side (Net Evaporation-Precipitation)
    # E-P is already normalized to units W.m-2 in the output vector
    # so all we need to do is input-unnormalize LHF that is taken from the input vector
    LHF = np.add( np.multiply( x[:,303], fdiv[303]), fsub[303])
    # Note that total precipitation = PRECT + 1e-3*PRECTEND in the CAM model
    # PRECTEND already multiplied by 1e-3 in output vector so no need to redo it
    PREC = np.add( y[:, 214], y[:, 215])
    # WATER.FINAL) Residual = E-P-DWATER/DT
    WATRES = LHF-PREC-WATINT
    # ENTHALPY.1) Calculate net energy input from phase change and precipitation
    # PHAS = Lf/Lv*((PRECST+PRECSTEN)-(PRECT+PRECTEND))
    PHAS = np.divide( np.multiply( np.subtract(\
                                               np.add( y[:,216], y[:,217]),\
                                               np.add( y[:,214], y[:,215])),\
                                  L_F),\
                     L_V)
    # ENTHALPY.2) Calculate net energy input from radiation, sensible heat flux and turbulent KE
    # 2.1) RAD = FSNT-FSNS-FLNT+FLNS
    RAD = np.add(\
                 np.subtract( y[:,210], y[:,211]),\
                 np.subtract( y[:,213], y[:,212]))
    # 2.2) Unnormalize sensible heat flux (index 302 of the input vector)
    SHF = np.add( np.multiply( x[:,302], fdiv[302]), fsub[302])
    # 2.3) Net turbulent kinetic energy dissipative heating is the column-integrated
    # turbulent kinetic energy energy dissipative heating
    KEDVEC = np.multiply( dP_TILD, y[:, 180:210])
    KEDINT = np.sum( KEDVEC, axis=1)
    # ENTHALPY.3) Calculate tendency of normalized column water vapor due to phase change
    # 3.1) Column water vapor is the column integral of specific humidity
    PHQVEC = np.multiply( dP_TILD, y[:, :30])
    PHQINT = np.sum( PHQVEC, axis=1)
    # 3.2) Multiply by L_S/L_V to normalize (explanation above)
    SPDQINT = np.divide( np.multiply( np.subtract(\
                                                  PHQINT, LHF),\
                                     L_S),\
                        L_V)
    # ENTHALPY.4) Same operation for liquid water tendency but multiplied by L_F/L_V
    SPDQCINT = np.divide( np.multiply(\
                                      np.sum(\
                                             np.multiply( dP_TILD, y[:, 30:60]),\
                                             axis=1),\
                                      L_F),\
                         L_V)
    # ENTHALPY.5) Same operation for temperature tendency
    DTINT = np.sum( np.multiply( dP_TILD[:, :30], y[:, 90:120]), axis=1)
    # ENTHALPY.FINAL) Residual = SPDQ+SPDQC+DTINT-RAD-SHF-PHAS
    ENTRES = SPDQINT+SPDQCINT+DTINT-RAD-SHF-PHAS-KEDINT
    if outtype=="graph":
        import matplotlib.pyplot as plt
        from matplotlib import rc
        plt.rc('text', usetex=True)
        plt.rc('font', family='serif')
        plt.figure(num=None, figsize=(12, 3), dpi=80, facecolor='w', edgecolor='k')
        plt.subplot(121)
        plt.hist(WATRES)
        plt.xlabel(r"$\mathrm{Water\ Residual\ \left[W.m^{-2}\right]}$", fontsize=16)
        plt.ylabel(r'Number of samples', fontsize=16)
        plt.xticks(fontsize=14); plt.yticks(fontsize=14)
        plt.subplot(122)
        plt.hist(ENTRES)
        plt.xlabel(r"$\mathrm{Enthalpy\ Residual\ \left[W.m^{-2}\right]}$", fontsize=16)
        plt.ylabel(r'Number of samples', fontsize=16)
        plt.xticks(fontsize=14); plt.yticks(fontsize=14)
    elif outtype=="list":
        return WATRES,ENTRES
# +
import matplotlib.pyplot as plt
from matplotlib import rc
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.figure(num=None, figsize=(20, 13.5), dpi=80, facecolor='w', edgecolor='k')
XMAX = 100; bins = np.linspace(-XMAX, XMAX, 100)
xval, yval = next(gen)
# Histograms of water (left column) and enthalpy (right column) residuals
# for each model on one validation batch.
# BUGFIX: was `range (4)`, which left the index==4 (W099) branch unreachable
# even though the 5x2 subplot grid reserves a row for it and the analogous
# MSE / radiation cells loop over range (5).
for index in range (5):
    if index==1: pred = U_rad_5dens.predict_on_batch(xval); lab = 'U';
    elif index==0: pred = C_rad_5dens.predict_on_batch(xval); lab = 'C';
    elif index==2: pred = W001_rad_5dens.predict_on_batch(xval); lab = 'W001';
    elif index==3: pred = W05_rad_5dens.predict_on_batch(xval); lab = 'W05';
    elif index==4: pred = W099_rad_5dens.predict_on_batch(xval); lab = 'W099';
    watres,entres = massent_check(xval,pred,fsub=fsub,fdiv=fdiv,normq=normq,hyai=hyai,hybi=hybi,outtype="list");
    ax = plt.subplot(5,2,2*index+1)
    ax.hist(watres, bins, alpha=0.5, edgecolor='k', label = lab+' mSQ%i' %np.mean(watres**2)+' stdSQ%i' %np.std(watres**2))
    plt.ylabel(r'Nb samples', fontsize=16)
    plt.xticks(fontsize=14); plt.yticks(fontsize=14)
    plt.ylim((0, 50)); plt.xlim((-XMAX, XMAX));
    ax.legend(loc='upper left', bbox_to_anchor=(0.5, 1.05),
              ncol=5, fancybox=True, shadow=True, fontsize = 20);
    ax = plt.subplot(5,2,2*index+2)
    ax.hist(entres, bins, alpha=0.5, edgecolor='k', label = lab+' mSQ%i' %np.mean(entres**2)+' stdSQ%i' %np.std(entres**2))
    plt.ylabel(r'Nb samples', fontsize=16)
    plt.xticks(fontsize=14); plt.yticks(fontsize=14)
    plt.ylim((0, 50)); plt.xlim((-XMAX, XMAX));
    ax.legend(loc='upper left', bbox_to_anchor=(0.5, 1.05),
              ncol=5, fancybox=True, shadow=True, fontsize = 20);
# -
# ### 4.4.2) Mean square error
# +
import matplotlib.pyplot as plt
from matplotlib import rc
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.figure(num=None, figsize=(20, 13.5), dpi=80, facecolor='w', edgecolor='k')
XMAX = 180; bins = np.linspace(0, XMAX, 100)
xval, yval = next(gen)
# Per-sample MSE histogram for each of the five models on one batch.
for index in range (5):
    if index==0: pred = U_rad_5dens.predict_on_batch(xval); lab = 'U';
    elif index==1: pred = C_rad_5dens.predict_on_batch(xval); lab = 'C';
    elif index==2: pred = W001_rad_5dens.predict_on_batch(xval); lab = 'W001';
    elif index==3: pred = W05_rad_5dens.predict_on_batch(xval); lab = 'W05';
    elif index==4: pred = W099_rad_5dens.predict_on_batch(xval); lab = 'W099';
    res = np.mean((pred-yval)**2, axis=1);  # per-sample MSE over outputs
    ax = plt.subplot(5,1,index+1)
    ax.hist(res, bins, alpha=0.5, edgecolor='k', label = lab+' m%i' %np.mean(res)+' std%i' %np.std(res))
    plt.ylabel(r'Nb samples', fontsize=16)
    plt.xticks(fontsize=14); plt.yticks(fontsize=14)
    plt.ylim((0, 60)); plt.xlim((0, XMAX));
    ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.05),
              ncol=5, fancybox=True, shadow=True, fontsize = 20);
# ### 4.4.3) Check radiative integrals
def radint_check(x,y,fsub=fsub,fdiv=fdiv,normq=normq,hyai=hyai,hybi=hybi):
    """Check consistency of radiative heating profiles against net fluxes.

    For each sample, returns the pair (SWINT-SWNET, LWINT-LWNET): the
    difference between the column-integrated shortwave/longwave heating-rate
    profiles of y (shape (batch, 218)) and the net radiative fluxes
    diagnosed from its TOA/surface flux entries.
    NOTE: default arguments capture the module-level normalization arrays at
    definition time.
    """
    import copy
    inp = copy.copy(x)
    # 0) Constants
    C_P = 1.00464e3 # Specific heat capacity of air at constant pressure
    G = 9.80616; # Reference gravity constant [m.s-2]
    # BUGFIX: L_V was used below but never defined in this function (unlike
    # in massent_check), which raises NameError unless a global L_V exists.
    L_V = 2.501e6; # Latent heat of vaporization of water [J.kg-1]
    P0 = 1e5; # Reference surface pressure [Pa]
    # 1) Get non-dimensional pressure differences (p_tilde above)
    PS = np.add( np.multiply( inp[:,300], fdiv[300]), fsub[300])
    # Reference for calculation of d_pressure is cbrain/models.py (e.g. QLayer)
    P = np.add( np.multiply( P0, hyai), \
               np.multiply( PS[:,None], hybi))
    dP = np.subtract( P[:, 1:], P[:, :-1])
    # norm_output = dp_norm * L_V/G so dp_norm = norm_output * G/L_V
    dP_NORM = np.divide( \
                        np.multiply(normq[:30], \
                                    G), L_V)
    # dp_tilde = dp/dp_norm
    dP_TILD = np.divide( dP, dP_NORM)
    # 2) Radiative integrals
    SWVEC = np.multiply( dP_TILD, y[:, 150:180])
    SWINT = np.sum( SWVEC, axis=1)  # SW heating integral
    SWNET = y[:,210]-y[:,211] # FSNT-FSNS
    LWVEC = np.multiply( dP_TILD, y[:, 120:150])
    LWINT = np.sum( LWVEC, axis=1) # LW integral
    LWNET = y[:,213]-y[:,212] # FLNS-FLNT
    return (SWINT-SWNET), (LWINT-LWNET)
# +
import matplotlib.pyplot as plt
from matplotlib import rc
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.figure(num=None, figsize=(20, 13.5), dpi=80, facecolor='w', edgecolor='k')
XMAX = 100; bins = np.linspace(-XMAX, XMAX, 100)
xval, yval = next(gen)
# Histograms of shortwave (left) and longwave (right) radiative-integral
# residuals for each model on one batch.
for index in range (5):
    if index==1: pred = U_rad_5dens.predict_on_batch(xval); lab = 'U';
    elif index==0: pred = C_rad_5dens.predict_on_batch(xval); lab = 'C';
    elif index==2: pred = W001_rad_5dens.predict_on_batch(xval); lab = 'W001';
    elif index==3: pred = W05_rad_5dens.predict_on_batch(xval); lab = 'W05';
    elif index==4: pred = W099_rad_5dens.predict_on_batch(xval); lab = 'W099';
    swres,lwres = radint_check(xval,pred,fsub=fsub,fdiv=fdiv,normq=normq,hyai=hyai,hybi=hybi)
    ax = plt.subplot(5,2,2*index+1)
    ax.hist(swres, bins, alpha=0.5, edgecolor='k', label = lab+' mSQ%i' %np.mean(swres**2)+' stdSQ%i' %np.std(swres**2))
    plt.ylabel(r'Nb samples', fontsize=16)
    plt.xticks(fontsize=14); plt.yticks(fontsize=14)
    plt.ylim((0, 50)); plt.xlim((-XMAX, XMAX));
    ax.legend(loc='upper left', bbox_to_anchor=(0.5, 1.05),
              ncol=5, fancybox=True, shadow=True, fontsize = 20);
    ax = plt.subplot(5,2,2*index+2)
    ax.hist(lwres, bins, alpha=0.5, edgecolor='k', label = lab+' mSQ%i' %np.mean(lwres**2)+' stdSQ%i' %np.std(lwres**2))
    plt.ylabel(r'Nb samples', fontsize=16)
    plt.xticks(fontsize=14); plt.yticks(fontsize=14)
    plt.ylim((0, 50)); plt.xlim((-XMAX, XMAX));
    ax.legend(loc='upper left', bbox_to_anchor=(0.5, 1.05),
              ncol=5, fancybox=True, shadow=True, fontsize = 20);
# #### Step 6: Check positivity of water species
# There are two necessary steps:
# 1) Load the water species concentrations "before physics" from the input vector and unnormalize them
# 2) Invert the output normalization to get the water concentrations "after physics"
#
# $$
# \delta q_{v,i,l}\left(p\right)=\frac{L_{v}\Delta p_{\mathrm{norm}}}{g}\frac{q_{v,i,l}^{a}\left(p\right)-q_{v,i,l}^{b}\left(p\right)}{\Delta t}\ \Rightarrow\ q_{v,i,l}^{a}\left(p\right)=q_{v,i,l}^{b}\left(p\right)+\frac{g\Delta t}{L_{v}\Delta p_{\mathrm{norm}}}\delta q_{v,i,l}\left(p\right)
# $$
def watpos_check(x,y,fsub=fsub,fdiv=fdiv,normq=normq,dt=30*60):
    """Plot histograms of water-species concentrations after physics.

    Un-normalizes the "before physics" concentrations from input x, applies
    the predicted tendencies y over one timestep dt [s], and plots the
    resulting concentrations and changes — a visual check that water species
    stay positive. dt defaults to a 30-minute timestep.
    NOTE: default arguments capture the module-level fsub/fdiv/normq at
    definition time.
    """
    import numpy as np
    # 1) Extract water species concentrations from inputs
    QVB = np.add( np.multiply( x[:, :30], fdiv[ :30]), fsub[ :30])
    QLB = np.add( np.multiply( x[:, 30:60], fdiv[ 30:60]), fsub[ 30:60])
    QSB = np.add( np.multiply( x[:, 60:90], fdiv[ 60:90]), fsub[ 60:90])
    # 2) Inverse output normalization and get water concentration after physics
    QVA = QVB + np.divide( dt*y[:, :30] , normq[:30])
    QLA = QLB + np.divide( dt*y[:, 30:60] , normq[:30])
    QSA = QSB + np.divide( dt*y[:, 60:90] , normq[:30])
    import matplotlib.pyplot as plt
    from matplotlib import rc
    plt.rc('text', usetex=True)
    plt.rc('font', family='serif')
    plt.figure(num=None, figsize=(12, 6), dpi=80, facecolor='w', edgecolor='k')
    # Top row: concentrations after physics; bottom row: changes over dt.
    plt.subplot(231)
    plt.hist(1e3*QVA)
    plt.xlabel(r"$\mathrm{Water\ vapor\ concentration\ \left[g/kg\right]}$", fontsize=16)
    plt.ylabel(r'Number of samples', fontsize=16)
    plt.xticks(fontsize=14); plt.yticks(fontsize=14)
    plt.subplot(232)
    plt.hist(1e3*QLA)
    plt.xlabel(r"$\mathrm{Liquid\ water\ concentration\ \left[g/kg\right]}$", fontsize=16)
    plt.xticks(fontsize=14); plt.yticks(fontsize=14)
    plt.subplot(233)
    plt.hist(1e3*QSA)
    plt.xlabel(r"$\mathrm{Ice\ concentration\ \left[g/kg\right]}$", fontsize=16)
    plt.xticks(fontsize=14); plt.yticks(fontsize=14)
    plt.subplot(234)
    plt.hist(1e3*(QVA-QVB))
    plt.xlabel(r"$\mathrm{Water\ vapor\ change\ \left[g/kg\right]}$", fontsize=16)
    plt.ylabel(r'Number of samples', fontsize=16)
    plt.xticks(fontsize=14); plt.yticks(fontsize=14)
    plt.subplot(235)
    plt.hist(1e3*(QLA-QLB))
    plt.xlabel(r"$\mathrm{Liquid\ water\ change\ \left[g/kg\right]}$", fontsize=16)
    plt.xticks(fontsize=14); plt.yticks(fontsize=14)
    plt.subplot(236)
    plt.hist(1e3*(QSA-QSB))
    plt.xlabel(r"$\mathrm{Ice\ change\ \left[g/kg\right]}$", fontsize=16)
    plt.xticks(fontsize=14); plt.yticks(fontsize=14)
    plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=0.5)
# NOTE(review): `dt` is passed positionally here but does not appear to be
# defined at module scope (the function default is dt=30*60) — verify this
# cell runs, or drop the final argument to use the default.
watpos_check(xval,yval,fsub,fdiv,normq,dt)
|
notebooks/tbeucler_devlog/005_Predicting_convective_and_radiative_heating_profiles.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.9.2 64-bit (''base'': conda)'
# language: python
# name: python392jvsc74a57bd0249cfc85c6a0073df6bca89c83e3180d730f84f7e1f446fbe710b75104ecfa4f
# ---
# STAT 451: Machine Learning (Fall 2021)
# Instructor: <NAME> (<EMAIL>)
# + [markdown] tags=[]
# # L05 - Data Preprocessing and Machine Learning with Scikit-Learn
# + [markdown] tags=[]
# # 5.5 Preparing Training Data & Transformer API
# +
# Code repeated from 5-2-basic-data-handling.ipynb
import pandas as pd
import numpy as np
df = pd.read_csv('data/iris.csv')
# Encode string class labels as integers (setosa=0, versicolor=1, virginica=2)
d = {'Iris-setosa': 0,
     'Iris-versicolor': 1,
     'Iris-virginica': 2}
df['Species'] = df['Species'].map(d)
# Features are columns 1-4 (the four measurements); target is the encoded label
X = df.iloc[:, 1:5].values
y = df['Species'].values
print(f'X.shape: {X.shape}')
print(f'y.shape: {y.shape}')
# + [markdown] tags=[]
# ## Stratification
# -
# - Previously, we wrote our own code to shuffle and split a dataset into training, validation, and test subsets, which had one considerable downside.
# - If we are working with small datasets and split it randomly into subsets, it will affect the class distribution in the samples -- this is problematic since machine learning algorithms/models assume that training, validation, and test samples have been drawn from the same distributions to produce reliable models and estimates of the generalization performance.
# <img src="images/iris-subsampling.png" alt="drawing" width="400"/>
# - To ensure that the class label proportions are the same in each subset after splitting, we use an approach that is usually referred to as "stratification."
# - Stratification is supported in scikit-learn's `train_test_split` method if we pass the class label array to the `stratify` parameter as shown below.
# +
from sklearn.model_selection import train_test_split
# First split: hold out 20% as the test set. `stratify=y` keeps the class
# label proportions equal across the resulting subsets.
X_temp, X_test, y_temp, y_test = \
    train_test_split(X, y, test_size=0.2,
                     shuffle=True, random_state=123, stratify=y)
np.bincount(y_temp)
# +
# Second split: carve a validation set out of the remaining 80%,
# again stratifying on the (temporary) labels.
X_train, X_valid, y_train, y_valid = \
    train_test_split(X_temp, y_temp, test_size=0.2,
                     shuffle=True, random_state=123, stratify=y_temp)
print('Train size', X_train.shape, 'class proportions', np.bincount(y_train))
print('Valid size', X_valid.shape, 'class proportions', np.bincount(y_valid))
print('Test size', X_test.shape, 'class proportions', np.bincount(y_test))
# -
# ## Data Scaling
# - In the case of the Iris dataset, all dimensions were measured in centimeters, hence "scaling" features would not be necessary in the context of *k*NN -- unless we want to weight features differently.
# - Whether or not to scale features depends on the problem at hand and requires your judgement.
# - However, there are several algorithms (especially gradient-descent, etc., which we will cover later in this course), which work much better (are more robust, numerically stable, and converge faster) if the data is centered and has a smaller range.
# - There are many different ways for scaling features; here, we only cover two of the most common "normalization" schemes: min-max scaling and z-score standardization.
# ### Normalization -- Min-max scaling
# - Min-max scaling squashes the features into a [0, 1] range, which can be achieved via the following equation for a single input $i$:
# $$x^{[i]}_{\text{norm}} = \frac{x^{[i]} - x_{\text{min}} }{ x_{\text{max}} - x_{\text{min}} }$$
# - Below is an example of how we can implement and apply min-max scaling on 6 data instances given a 1D input vector (1 feature) via NumPy.
# Six evenly spaced sample values, as floats for the arithmetic below.
x = np.array(range(6), dtype=float)
x
# Min-max scale into [0, 1]; np.ptp ("peak to peak") is max - min.
x_norm = (x - x.min()) / np.ptp(x)
x_norm
# ### Standardization
# - Z-score standardization is a useful standardization scheme if we are working with certain optimization methods (e.g., gradient descent, later in this course).
# - After standardizing a feature, it will have the properties of a standard normal distribution, that is, unit variance and zero mean ($N(\mu=0, \sigma^2=1)$); however, this does not transform a feature from not following a normal distribution to a normal distributed one.
# - The formula for standardizing a feature is shown below, for a single data point $x^{[i]}$.
# $$x^{[i]}_{\text{std}} = \frac{x^{[i]} - \mu_x }{ \sigma_{x} }$$
# Same six sample values as in the min-max example.
x = np.array(range(6), dtype=float)
x
# Z-score standardization: subtract the mean, divide by the population std.
x_std = (x - np.mean(x)) / np.std(x)
x_std
# - Conveniently, NumPy and Pandas both implement a `std` method, which computes the standard devation.
# - Note the different results shown below.
# Toy column to contrast the two standard-deviation conventions below.
df = pd.DataFrame([1, 2, 1, 2, 3, 4])
# Pandas default: sample standard deviation (ddof=1).
df[0].std()
# NumPy default: population standard deviation (ddof=0).
df[0].values.std()
# - The results differ because Pandas computes the "sample" standard deviation ($s_x$), whereas NumPy computes the "population" standard deviation ($\sigma_x$).
# $$s_x = \sqrt{ \frac{1}{n-1} \sum^{n}_{i=1} (x^{[i]} - \bar{x})^2 }$$
#
# $$\sigma_x = \sqrt{ \frac{1}{n} \sum^{n}_{i=1} (x^{[i]} - \mu_x)^2 }$$
# - In the context of machine learning, since we are typically working with large datasets, we typically don't care about Bessel's correction (subtracting one degree of freedom in the denominator).
# - Further, the goal here is not to model a particular distribution or estimate distribution parameters accurately; however, if you like, you can remove the extra degree of freedom via NumPy's `ddof` parameters -- it's not necessary in practice though.
# Bessel-corrected (sample) standard deviation via NumPy's ddof parameter.
df[0].values.std(ddof=1)
# - A concept that is very important though is how we use the estimated normalization parameters (e.g., mean and standard deviation in z-score standardization).
# - In particular, it is important that we re-use the parameters estimated from the training set to transform validation and test sets -- re-estimating the parameters is a common "beginner-mistake" which is why we discuss it in more detail.
# +
# Estimate the normalization parameters on the training split only, then
# apply those same parameters to the validation and test splits.
mu, sigma = X_train.mean(axis=0), X_train.std(axis=0)
X_train_std = (X_train - mu) / sigma
X_valid_std = (X_valid - mu) / sigma
X_test_std = (X_test - mu) / sigma
# -
# - Again, if we standardize the training dataset, we need to keep the parameters (mean and standard deviation for each feature). Then, we’d use these parameters to transform our test data and any future data later on
# - Let’s assume we have a simple training set consisting of 3 samples with 1 feature column (let’s call the feature column “length in cm”):
#
# - example1: 10 cm -> class 2
# - example2: 20 cm -> class 2
# - example3: 30 cm -> class 1
#
# Given the data above, we estimate the following parameters from this training set:
#
# - mean: 20
# - standard deviation: 8.2
#
# If we use these parameters to standardize the same dataset, we get the following z-score values:
#
# - example1: -1.21 -> class 2
# - example2: 0 -> class 2
# - example3: 1.21 -> class 1
#
# Now, let’s say our model has learned the following hypotheses: It classifies samples with a standardized length value < 0.6 as class 2 (and class 1 otherwise). So far so good. Now, let’s imagine we have 3 new unlabeled data points that you want to classify.
#
# - example4: 5 cm -> class ?
# - example5: 6 cm -> class ?
# - example6: 7 cm -> class ?
#
# If we look at the non-standardized "length in cm" values in the training dataset, it is intuitive to say that all of these examples (5, 6, and 7) are likely belonging to class 2 because they are smaller than anything in the training set. However, if we standardize these by re-computing the standard deviation and mean from the new data, we will get similar values as before (i.e., properties of a standard normal distribution) in the training set and our classifier would (probably incorrectly) assign the “class 2” label to the samples 4 and 5.
#
# - example5: -1.21 -> class 2
# - example6: 0 -> class 2
# - example7: 1.21 -> class 1
#
# However, if we use the parameters from the "training set standardization," we will get the following standardized values
#
# - example5: -18.37
# - example6: -17.15
# - example7: -15.92
#
# Note that these values are more negative than the value of example1 in the original training set, which makes much more sense now!
# ### Scikit-Learn Transformer API
# - The transformer API in scikit-learn is very similar to the estimator API; the main difference is that transformers are typically "unsupervised," meaning, they don't make use of class labels or target values.
# <img src="images/transformer-api.png" alt="drawing" width="400"/>
# - Typical examples of transformers in scikit-learn are the `MinMaxScaler` and the `StandardScaler`, which can be used to perform min-max scaling and z-score standardization as discussed earlier.
# +
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
# Fit on the training data only; transform all splits with the same
# parameters learned from X_train.
scaler.fit(X_train)
X_train_std = scaler.transform(X_train)
X_valid_std = scaler.transform(X_valid)
X_test_std = scaler.transform(X_test)
# -
# ## Categorical Data
# - When we preprocess a dataset as input to a machine learning algorithm, we have to be careful how we treat categorical variables.
# - There are two broad categories of categorical variables: nominal (no order implied) and ordinal (order implied).
# Toy data set with an ordinal ('size') and a nominal ('color') column.
df = pd.read_csv('data/categoricaldata.csv')
df
# - In the example above, 'size' would be an example of an ordinal variable; i.e., if the letters refer to T-shirt sizes, it would make sense to come up with an ordering like M < L < XXL.
# - Hence, we can assign increasing values to a ordinal values; however, the range and difference between categories depends on our domain knowledge and judgement.
# - To convert ordinal variables into a proper representation for numerical computations via machine learning algorithms, we can use the now familiar `map` method in Pandas, as shown below.
# +
# Encode the ordinal T-shirt sizes with hand-picked increasing values
# (the spacing reflects domain judgement, not an algorithm).
mapping_dict = {'M': 2,
                'L': 3,
                'XXL': 5}
df['size'] = df['size'].map(mapping_dict)
df
# -
# - Machine learning algorithms do not assume an ordering in the case of class labels.
# - Here, we can use the `LabelEncoder` from scikit-learn to convert class labels to integers as an alternative to using the `map` method
# +
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
# Map string class labels to consecutive integers (fit + transform in one step).
df['classlabel'] = le.fit_transform(df['classlabel'])
df
# -
# - Representing nominal variables properly is a bit more tricky.
# - Since machine learning algorithms usually assume an order if a variable takes on integer values, we need to apply a "trick" here such that the algorithm would not make this assumption.
# - this "trick" is also called "one-hot" encoding -- we binarize a nominal variable, as shown below for the color variable (again, we do this because some ordering like orange < red < blue would not make sense in many applications).
# One-hot encode the remaining nominal column(s); 'color' becomes binary columns.
pd.get_dummies(df)
# - Note that executing the code above produced 3 new variables for "color," each of which takes on binary values.
# - However, there is some redundancy now (e.g., if we know the values for `color_green` and `color_red`, we automatically know the value for `color_blue`).
# - While collinearity may cause problems (i.e., the matrix inverse doesn't exist in e.g., the context of the closed-form of linear regression), again, in machine learning we typically would not care about it too much, because most algorithms can deal with collinearity (e.g., adding constraints like regularization penalties to regression models, which we learn via gradient-based optimization).
# - However, removing collinearity if possible is never a bad idea, and we can do this conveniently by dropping e.g., one of the columns of the one-hot encoded variable.
# Drop one dummy column per variable to remove the linear dependence.
pd.get_dummies(df, drop_first=True)
# Additional categorical encoding schemes are available via the scikit-learn compatible category_encoders library: https://contrib.scikit-learn.org/category_encoders/
# ## Missing Data
# - There are many different ways for dealing with missing data.
# - The simplest approaches are removing entire columns or rows.
# - Another simple approach is to impute missing values via the feature means, medians, mode, etc.
# - There is no rule or best practice, and the choice of the appropriate missing data imputation method depends on your judgement and domain knowledge.
# - Below are some examples for dealing with missing data.
# Toy data set containing NaNs.
df = pd.read_csv('data/missingdata.csv')
df
# +
# missing values per column:
df.isnull().sum()
# +
# drop rows with missing values:
df.dropna(axis=0)
# +
# drop columns with missing values:
df.dropna(axis=1)
# -
# dropna returned copies, so df itself still contains the NaNs:
df
# +
from sklearn.impute import SimpleImputer
# Replace each NaN with the mean of its column.
imputer = SimpleImputer(missing_values=np.nan, strategy='mean')
X = df.values
# (The assignment above is redundant -- fit_transform overwrites X here.)
X = imputer.fit_transform(df.values)
X
# -
# - Check https://scikit-learn.org/stable/modules/impute.html for additional imputation techniques, including the KNNImputer based on a k-Nearest Neighbor approach to impute missing features by nearest neighbors
|
05-dataprocessing/code/5-5_preparing-training-data.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Mapping Te Papa’s collections
#
# This notebook creates some simple maps using the `production.spatial` facet of the Te Papa API to identify places where collection objects were created.
# <div class="alert alert-block alert-warning">
# <p>If you haven't used one of these notebooks before, they're basically web pages in which you can write, edit, and run live code. They're meant to encourage experimentation, so don't feel nervous. Just try running a few cells and see what happens!.</p>
#
# <p>
# Some tips:
# <ul>
# <li>Code cells have boxes around them.</li>
# <li>To run a code cell click on the cell and then hit <b>Shift+Enter</b>. The <b>Shift+Enter</b> combo will also move you to the next cell, so it's a quick way to work through the notebook.</li>
# <li>While a cell is running a <b>*</b> appears in the square brackets next to the cell. Once the cell has finished running the asterisk will be replaced with a number.</li>
# <li>In most cases you'll want to start from the top of notebook and work your way down running each cell in turn. Later cells might depend on the results of earlier ones.</li>
# <li>To edit a code cell, just click on it and type stuff. Remember to run the cell once you've finished editing.</li>
# </ul>
# </p>
# </div>
import requests
import pandas as pd
import altair as alt
import re
import folium
from tqdm.auto import tqdm
from folium.plugins import MarkerCluster
from IPython.display import display, HTML
# ## Get an API key
# [Sign up here](https://data.tepapa.govt.nz/docs/register.html) for your very own API key.
# Insert your API key between the quotes
api_key = 'YOUR API KEY'
# If you don't have an API key yet, you can leave the above blank and we'll pick up a guest token below
print('Your API key is: {}'.format(api_key))
# ## Set some parameters
# +
search_endpoint = 'https://data.tepapa.govt.nz/collection/search'
# Headers sent with every API request; the key goes in 'x-api-key'.
headers = {
    'x-api-key': api_key,
    'Accept': 'application/json'
}
# No key supplied: request a guest token and authenticate with it instead.
if not api_key:
    response = requests.get('https://data.tepapa.govt.nz/collection/search')
    data = response.json()
    guest_token = data['guestToken']
    headers['Authorization'] = 'Bearer {}'.format(guest_token)
# -
# Below we set the search parameters. Currently it will return information about all objects in the collection. You can change the `query` value to limit the result set — try replacing the asterix with some keywords.
#
# The `size` parameter sets the number of places to return — so in this case we're getting the 100 places that have the most objects associated with them.
#
# The `production.spatial.href` facet gives us the API url of the place itself, so we can use it to get more information about the place.
# Search every object record, faceting on place of production (top 100 places).
post_data = {
    'query': '*',
    'filters': [{
        'field': 'type',
        'keyword': 'Object'
    }],
    'facets': [
        {'field': 'production.spatial.href',
         'size': 100}
    ]
}
# ## Get some data
# Make the API request
response = requests.post(search_endpoint, json=post_data, headers=headers)
data = response.json()
# Convert the facets data to a dataframe and do some cleaning up
# We end up with two columns -- one with the place url, and the other with the number of objects associated with that place
places_df = pd.DataFrame(list(data['facets']['production.spatial.href'].items()))
places_df.columns = ['place_id', 'count']
places_df.head()
# ## Add more information about each place
#
# Using the place url we'll get the full record for each place. We'll then save the name of the place, its geospatial coordinates (if any), and its ISO country code (if any) to the dataframe.
# +
# Register tqdm's pandas integration so `.progress_apply` is available.
tqdm.pandas()
def find_country_code(place):
    """Extract an ISO country code from a place record.

    Scans the record's 'alternativeTerms' (if present) for entries that
    start with 'ISO' and returns the text after that prefix. When several
    terms match, the last one wins; returns None when nothing matches.
    Non-sliceable terms (e.g. None) are skipped silently.
    """
    result = None
    for term in place.get('alternativeTerms', []):
        try:
            is_iso = term[:3] == 'ISO'
        except TypeError:
            # term is not a string-like object -- ignore it
            continue
        if is_iso:
            result = term[3:]
    return result
def add_place_info(place_id):
    """Fetch the full place record for `place_id` (an API URL) and return a
    Series of (title, lat, lon, ISO country code) for use with DataFrame.apply.

    lat/lon are None when the record has no 'geoLocation'; the country code
    is None when find_country_code finds no ISO term. Uses the module-level
    `headers` for authentication.
    """
    response = requests.get(place_id, headers=headers)
    place_data = response.json()
    code = find_country_code(place_data)
    if 'geoLocation' in place_data:
        lat = place_data['geoLocation']['lat']
        lon = place_data['geoLocation']['lon']
    else:
        lat = None
        lon = None
    return pd.Series([place_data['title'], lat, lon, code])

# One HTTP request per place; progress_apply displays a tqdm progress bar.
places_df[['title', 'lat', 'lon', 'isocode']] = places_df['place_id'].progress_apply(add_place_info)
places_df.head()
# -
# ## Make a map
# +
import html
# World map, zoomed out to show everything.
m = folium.Map(
    location=[10, 10],
    zoom_start=1.5
)
# We'll cluster the markers for better readability
marker_cluster = MarkerCluster().add_to(m)
# Only places with coordinates can be plotted.
for index, row in places_df.dropna(subset=['lat', 'lon']).iterrows():
    # We can easily change the API url to a web url and use it to link the map to the Te Papa collection web site
    web_url = row['place_id'].replace('/collection/', '/').replace('data', 'collections')
    # Titles are HTML-escaped before being embedded in the popup markup.
    popup = '<b><a target="_blank" href="{}">{}</a></b><br>{} objects'.format(web_url, html.escape(row['title']), row['count'])
    folium.Marker([row['lat'], row['lon']], popup=popup).add_to(marker_cluster)
m
# -
# ## Make another map
#
# Let's try and make the **number** of objects created in each place more obvious.
# +
import html
m = folium.Map(
    location=[10, 10],
    zoom_start=1.5
)
for index, row in places_df.dropna(subset=['lat', 'lon']).iterrows():
    popup = '<b>{}</b><br>{} objects'.format(html.escape(row['title']), row['count'])
    # Circle radius scales with the object count, so bigger = more objects.
    folium.Circle([row['lat'], row['lon']], radius=row['count']*5, popup=popup, color='#de2d26', fill=True).add_to(m)
m
# -
# ## What's missing?
#
# Remember that we're not seeing **all** the places where objects were created. First of all the facet `size` parameter limited our results to the top 100 places. Try changing it to see what happens.
#
# Even amongst the top 100, not every place had geospatial coordinates attached to it. So not everything is on the map. Let's create a list of places without coordinates.
places_df.loc[places_df['lat'].isnull()]
# ----
#
# Created by [<NAME>](https://timsherratt.org/) for the [GLAM Workbench](https://glam-workbench.net/). Support this project by becoming a [GitHub sponsor](https://github.com/sponsors/wragge?o=esb).
|
Mapping-Te-Papa-collections.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
from numpy.random import seed, uniform, randn
from scipy.stats import norm
def ker_reg(x, X, t, sigma):
    """Nadaraya-Watson kernel regression with a Gaussian kernel.

    Parameters
    ----------
    x : ndarray, shape (m,)
        Query points at which to evaluate the regression.
    X : ndarray, shape (n,)
        Training inputs.
    t : ndarray, shape (n,)
        Training targets.
    sigma : float
        Kernel bandwidth (standard deviation of the Gaussian kernel).

    Returns
    -------
    exp : ndarray, shape (m,)
        Conditional mean E[t | x] at each query point.
    CVar : ndarray, shape (m,)
        Conditional standard deviation of t given x.
    """
    # Kernel weight of each training point for each query point; after the
    # transpose, rows index query points. Normalize each row to sum to 1.
    # (The original hard-coded the training-set size as 10; reshape with -1
    # so any number of training points works.)
    pdf_vals = norm.pdf(x - X.reshape(-1, 1), loc=0, scale=sigma).T
    pdf_vals = pdf_vals / (pdf_vals.sum(axis=1, keepdims=True))
    exp = (pdf_vals * t.reshape(1, -1)).sum(axis=1)
    # Conditional std via the law of total variance:
    # kernel variance + weighted second moment - squared mean.
    CVar = np.sqrt(sigma ** 2 + (pdf_vals * t.reshape(1, -1) ** 2).sum(axis=1) - exp ** 2)
    return exp, CVar
seed(1500)
# 10 noisy samples of sin(2*pi*x) at jittered x positions.
X = np.linspace(0, 1, 10) + uniform(size=10) * 0.1
t = np.sin(2 * np.pi * X) + randn(10) * 0.4
target = np.sin(2 * np.pi * X)
plt.scatter(X,t)
plt.show()
x = np.linspace(0,1,100)
# Kernel bandwidths to compare, one subplot each.
sigmas = [0.01, 0.02, 0.04, 0.06, 0.08 , 0.1]
fig, axes = plt.subplots(3,2, figsize=(12,15))
for sigma, axis in zip(sigmas, axes.ravel()):
    exp,CVar = ker_reg(x,X,t,sigma)
    axis.scatter(X,t)
    # Predicted mean in red with a +/- 2 std band; true function in green.
    axis.plot(x,exp,color='red')
    axis.fill_between(x,exp - 2*CVar, exp + 2*CVar, alpha = 0.2, color='red')
    axis.plot(x,np.sin(2*np.pi*x), color="green")
    axis.grid(alpha=0.4)
    axis.set_title("Sigma = " f"{sigma}")
    axis.set_xlim(0, 1);
plt.suptitle("Kernel Regression", fontsize = 20)
|
tutorial5/ker_Reg.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import csv
import os
import json
import codecs
import re
import pandas as pd
import xlrd
# Top tweets with their reaction counts.
posts_data = pd.read_excel('topReactedTweets.xlsx')
posts_data.head()
len(posts_data)
reactions = list(posts_data['reaction'])
# +
import numpy as np
from pprint import pprint
# Gensim
import gensim
import gensim.corpora as corpora
from gensim.utils import simple_preprocess
from gensim.models import CoherenceModel
# spacy for lemmatization
#import spacy
# Plotting tools
import pyLDAvis
import pyLDAvis.gensim # don't skip this
import matplotlib.pyplot as plt
# %matplotlib inline
# Enable logging for gensim - optional
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.ERROR)
import warnings
warnings.filterwarnings("ignore",category=DeprecationWarning)
# -
# Median reaction count (displayed for reference).
np.median(np.array(reactions))
# NOTE(review): the threshold 62 is hard-coded -- presumably the median
# printed above; confirm it still matches the current data.
data = posts_data[posts_data['reaction'] > 62].copy()
data.tail()
# Replace missing tweet text with empty strings before cleaning.
data['text'].fillna('',inplace = True)
txt_lst = list(data['text'])
# Strip non-ASCII characters from every tweet.
# The original loop mutated each string while indexing into it, so the
# indices went stale after a replacement; the resulting IndexError was
# swallowed by a bare except and cleaning silently stopped mid-string.
# Building a filtered copy avoids both problems.
txt_lst = [''.join(ch for ch in tweet if ord(ch) <= 127) for tweet in txt_lst]
txt_lst
from nltk.corpus import stopwords
stop_words = stopwords.words('english')
# Extend NLTK's English stopword list with additional common/filler words.
stop_words.extend(['from', 'subject', 're', 'edu', 'use', 'not', 'would', 'say', 'could', '_', 'be', 'know', 'good', 'go', 'get', 'do', 'done', 'try', 'many', 'some', 'nice', 'thank', 'think', 'see', 'rather', 'easy', 'easily', 'lot', 'lack', 'make', 'want', 'seem', 'run', 'need', 'even', 'right', 'line', 'even', 'also', 'may', 'take', 'come'])
# +
new_data = []
# NOTE(review): the pattern below is fully lazy, so group(2) always matches
# the empty string -- the net effect is that everything from the first
# "http" onward is dropped, not just the URL itself. Confirm that is intended.
for i in range (len(txt_lst)):
    clean_tweet = re.match('(.*?)http.*?\s?(.*?)', txt_lst[i])
    if clean_tweet:
        new_tweet = ''+clean_tweet.group(1) + clean_tweet.group(2)
        new_data.append(new_tweet)
    else:
        new_data.append(txt_lst[i])
txt_lst = new_data
# +
# Remove e-mail-style @mentions.
txt_lst = [re.sub('\S*@\S*\s?', '', sent) for sent in txt_lst]
# Remove new line characters
txt_lst = [re.sub('\s+', ' ', sent) for sent in txt_lst]
# Remove distracting single quotes
txt_lst = [re.sub("\'", "", sent) for sent in txt_lst]
pprint(txt_lst[:1])
# +
def sent_to_words(sentences):
    # Generator: tokenize each sentence with gensim's simple_preprocess.
    for sentence in sentences:
        yield(gensim.utils.simple_preprocess(str(sentence), deacc=True))  # deacc=True removes punctuations

data_words = list(sent_to_words(txt_lst))
print(data_words[:1])
# +
# Build the bigram and trigram models
bigram = gensim.models.Phrases(data_words, min_count=5, threshold=100) # higher threshold fewer phrases.
trigram = gensim.models.Phrases(bigram[data_words], threshold=100)
# Faster way to get a sentence clubbed as a trigram/bigram
bigram_mod = gensim.models.phrases.Phraser(bigram)
trigram_mod = gensim.models.phrases.Phraser(trigram)
# See trigram example
print(trigram_mod[bigram_mod[data_words[0]]])
# +
# Define functions for stopwords, bigrams, trigrams and lemmatization
def remove_stopwords(texts):
    # Tokenize each document and drop tokens found in the module-level stop_words list.
    return [[word for word in simple_preprocess(str(doc)) if word not in stop_words] for doc in texts]

def make_bigrams(texts):
    # Merge frequent word pairs using the fitted bigram Phraser.
    return [bigram_mod[doc] for doc in texts]

def make_trigrams(texts):
    # Apply the bigram model first, then the trigram model.
    return [trigram_mod[bigram_mod[doc]] for doc in texts]

def lemmatization(texts, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV']):
    """https://spacy.io/api/annotation"""
    # NOTE(review): relies on a module-level spaCy `nlp` pipeline that is
    # only loaded in a later cell; the mutable default list is shared
    # across calls (harmless here since it is never mutated).
    texts_out = []
    for sent in texts:
        doc = nlp(" ".join(sent))
        texts_out.append([token.lemma_ for token in doc if token.pos_ in allowed_postags])
    return texts_out
# +
# Remove Stop Words
data_words_nostops = remove_stopwords(data_words)
# Form Bigrams
data_words_bigrams = make_bigrams(data_words_nostops)
# Initialize spacy 'en' model, keeping only tagger component (for efficiency)
# python3 -m spacy download en
# NOTE(review): `spacy` is never imported in this notebook (its import is
# commented out above), so this line raises NameError as written.
nlp = spacy.load('en', disable=['parser', 'ner'])
# Do lemmatization keeping only noun, adj, vb, adv
data_lemmatized = lemmatization(data_words_bigrams, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV'])
print(data_lemmatized[:1])
# -
# Load pre-computed lemmatized tokens from disk instead (this overwrites the
# list built above): one document per line, comma-separated tokens.
data_lemmatized = []
with open('C:\\Users\\16101106\\Desktop\\student2\\data_lem_reaction.txt', "r") as output:
    data_lemmatized.append(output.read())
data_lemmatized = data_lemmatized[0].split('\n')
data_lemmatized = [data_lemmatized[i].split(',') for i in range(len(data_lemmatized))]
data_lemmatized
# +
# Create Dictionary
id2word = corpora.Dictionary(data_lemmatized)
# Create Corpus
texts = data_lemmatized
# Term Document Frequency
corpus = [id2word.doc2bow(text) for text in texts]
# View
print(corpus[:1])
# -
# Train a 20-topic LDA model on the bag-of-words corpus.
lda_model = gensim.models.ldamodel.LdaModel(corpus=corpus,
                                            id2word=id2word,
                                            num_topics=20,
                                            random_state=100,
                                            update_every=1,
                                            chunksize=100,
                                            passes=10,
                                            alpha='auto',
                                            per_word_topics=True)
pprint(lda_model.print_topics())
doc_lda = lda_model[corpus]
# +
# Compute Perplexity
print('\nPerplexity: ', lda_model.log_perplexity(corpus))  # a measure of how good the model is. lower the better.
# Compute Coherence Score
coherence_model_lda = CoherenceModel(model=lda_model, texts=data_lemmatized, dictionary=id2word, coherence='c_v')
coherence_lda = coherence_model_lda.get_coherence()
print('\nCoherence Score: ', coherence_lda)
# -
# Interactive topic visualization inside the notebook.
pyLDAvis.enable_notebook()
vis = pyLDAvis.gensim.prepare(lda_model, corpus, id2word)
vis
import os
# +
# Point gensim's wrapper at a local Mallet installation.
os.environ['MALLET_HOME'] = 'C:\\mallet'
mallet_path = 'C:\\mallet\\bin\\mallet'
ldamallet = gensim.models.wrappers.LdaMallet(mallet_path, corpus=corpus, num_topics=20, id2word=id2word)
# +
# Show Topics
pprint(ldamallet.show_topics(formatted=False))
# Compute Coherence Score
coherence_model_ldamallet = CoherenceModel(model=ldamallet, texts=data_lemmatized, dictionary=id2word, coherence='c_v')
coherence_ldamallet = coherence_model_ldamallet.get_coherence()
print('\nCoherence Score: ', coherence_ldamallet)
# -
def compute_coherence_values(dictionary, corpus, texts, limit, start=2, step=3):
    """
    Compute c_v coherence for various number of topics

    Parameters:
    ----------
    dictionary : Gensim dictionary
    corpus : Gensim corpus
    texts : List of input texts
    limit : Max num of topics
    start : First topic count to try
    step : Increment between topic counts

    Returns:
    -------
    model_list : List of LDA topic models
    coherence_values : Coherence values corresponding to the LDA model with respective number of topics
    """
    coherence_values = []
    model_list = []
    for num_topics in range(start, limit, step):
        # Fix: use the `dictionary` parameter rather than the module-level
        # `id2word` global the original referenced (they happen to be the
        # same object in this notebook, but the parameter was ignored).
        # `mallet_path` is still read from module scope deliberately.
        model = gensim.models.wrappers.LdaMallet(mallet_path, corpus=corpus, num_topics=num_topics, id2word=dictionary)
        model_list.append(model)
        coherencemodel = CoherenceModel(model=model, texts=texts, dictionary=dictionary, coherence='c_v')
        coherence_values.append(coherencemodel.get_coherence())

    return model_list, coherence_values
# Train models for topic counts 2, 8, 14, ..., 38 and record their coherence.
model_list, coherence_values = compute_coherence_values(dictionary=id2word, corpus=corpus, texts=data_lemmatized, start=2, limit=40, step=6)
# Show graph
limit=40; start=2; step=6;
x = range(start, limit, step)
plt.plot(x, coherence_values)
plt.xlabel("Num Topics")
plt.ylabel("Coherence score")
plt.legend(("coherence_values"), loc='best')
plt.show()
# Print the coherence scores
for m, cv in zip(x, coherence_values):
    print("Num Topics =", m, " has Coherence Value of", round(cv, 4))
# Select the model and print the topics
# NOTE(review): index 1 corresponds to num_topics=8 in the sweep above.
optimal_model = model_list[1]
model_topics = optimal_model.show_topics(formatted=False)
pprint(optimal_model.print_topics(num_words=10))
|
student2/student2/reaction_model.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Dates of the conn-log partitions to label.
dates = ["2019-01-19","2019-01-20","2019-01-21","2019-01-22","2019-01-23"]
# +
def f(row):
    """Label a connection row: 1 = malicious (honeypot or blacklist hit,
    without a whitelist hit), 0 = benign (whitelist hit only),
    -1 = ambiguous (conflicting or no evidence)."""
    bad_hit = row['honeypot'] == 1 or row['blacklist'] == 1
    good_hit = row['whitelist'] == 1
    if bad_hit and not good_hit:
        return 1
    if good_hit and not bad_hit:
        return 0
    return -1
#dir is not keyword
def makemydir(whatever):
    """Create the directory `whatever` if possible, then chdir into it.

    Any OSError from makedirs (most commonly "already exists") is ignored;
    if the path truly cannot be created, the chdir below raises and the
    problem propagates from there.
    """
    try:
        os.makedirs(whatever)
    except OSError:
        # Directory may already exist -- best effort only.
        pass
    os.chdir(whatever)
# -
def labeller(dates):
    """For each date, join the raw conn logs with their per-connection
    blacklist/honeypot/whitelist flag CSVs, derive a binary label via f(),
    drop ambiguous rows (label == -1), and write the result to a per-date
    'labelled_' output directory.

    NOTE(review): paths under /scratch/rk9cx/ are hard-coded, and
    makemydir() also chdirs into the output directory as a side effect.
    """
    import pandas as pd
    import os
    import glob
    import time
    for date in dates:
        file_list = []
        file_list_lab = []
        print(date)
        input_dir = "/scratch/rk9cx/conn_log_labelled_runtime/"+date+"/"
        # Collect the raw log files and their label CSV companions.
        for file in glob.glob(os.path.join(input_dir, 'anon.conn_tcp.*.log')):
            file_list.append(file.split(date+"/")[1])
        for file in glob.glob(os.path.join(input_dir, 'anon.conn_tcp.*.csv')):
            file_list_lab.append(file.split(date+"/")[1])
        makemydir("/scratch/rk9cx/conn_log_labelled_runtime/labelled_" + date)
        output_dir = "/scratch/rk9cx/conn_log_labelled_runtime/labelled_"+date+"/"
        for file in file_list:
            print("partition")
            start = time.time()
            # Flag columns for each connection, aligned row-for-row with the log.
            # NOTE(review): str.strip(".log") strips the characters . l o g
            # from both ends rather than removing the suffix -- it works only
            # while the basenames don't start/end with those characters.
            labelled_logs = pd.read_csv(input_dir+file.strip(".log")+"_LABELS.csv",
                                        usecols=[2,3,4,5])
            labelled_logs.columns = ["src_ip_ext","blacklist","honeypot","whitelist"]
            # Raw conn log: tab-separated, first 8 header lines skipped.
            logs = pd.read_csv(input_dir+file, skiprows=8,
                               usecols=[0,2,3,4,5,6,7,8,9,10,11,12,14],
                               header = None, delimiter ="\t")
            logs = logs.rename({0:'ts', 2:"src_ip", 3: "src_port", 4:'dest_ip',
                                5:'dest_port', 6:'duration', 7:'src_bytes',
                                8:'dest_bytes', 9:'conn_state',10:'history',
                                11:'src_pkts', 12:'dest_pkts',
                                14:'local'}, axis = 'columns')
            print(logs.shape, labelled_logs.shape)
            # Column-wise concat assumes identical row order in both files.
            merged_logs = pd.concat([logs, labelled_logs],axis=1, sort=False)
            del logs
            del labelled_logs
            merged_logs["label"] = merged_logs.apply(f, axis=1)
            # Keep only rows with an unambiguous label.
            merged_logs = merged_logs[merged_logs.label != -1]
            merged_logs.to_csv(output_dir + file + "finale.csv", index = False)
            stop = time.time()
            print("Complete. Time elapsed: "+ str(stop - start))
            del merged_logs
# Run the labelling pipeline over all dates.
labeller(dates)
|
Rakesh/Conn Logs/Labelling_function.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# import libraries
import numpy as np
import pandas as pd
import xgboost as xgb
import warnings
warnings.filterwarnings('ignore')
# Load the engineered feature table and drop leftover index columns.
fd = pd.read_csv('this_is_the_one.csv', header=0)
fd = fd.drop('Unnamed: 0', 1)
fd = fd.drop('Unnamed: 0.1', 1)
# Features exclude the address and the target (logerror).
X = fd.drop(['address','logerror'], 1)
y = fd['logerror']
# Express a MAE value as a fraction of the target's full range.
0.06850220739022277 / ((y.max()) + abs(y.min()))
# +
# Dummy classifier
from sklearn.dummy import DummyRegressor
from sklearn.model_selection import train_test_split
# split
X_train, X_test, y_train, y_test = train_test_split (
    X, y, test_size=0.25, random_state=42
)
# build
# Baseline: always predict the training-set mean of y.
dummy_regressor = DummyRegressor(strategy="mean")
# train
dummy_regressor.fit(X_train, y_train)
# +
# score - mean absolute error (MAE)
from sklearn import metrics
y_pred = dummy_regressor.predict(X_test)
mae = metrics.mean_absolute_error(y_test, y_pred)
print(f'Mean Absolute Error: {mae}')
# +
# linear regression
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import MinMaxScaler
from sklearn.pipeline import Pipeline
from sklearn.metrics import mean_absolute_error
# MinMaxScaler
# Scale features to [0, 1]; fit on the training split only.
scaler = MinMaxScaler(feature_range=(0,1))
X_train_scaled = scaler.fit_transform(X_train)
# train
reg = LinearRegression().fit(X_train_scaled, y_train)
# Transform the test split with the training-set scaling parameters.
X_test_scaled = scaler.transform(X_test)
y_pred = reg.predict(X_test_scaled)
# score
mae = metrics.mean_absolute_error(y_test, y_pred)
print(f'Mean Absolute Error: {mae}')
# +
# xgboost
import xgboost as xgb
# # convert data set into optimized data structure Dmatrix
# data_matrix = xgb.DMatrix(data=X, label=y)
# build
# Gradient-boosted trees, trained on the already min-max-scaled features.
xg_reg = xgb.XGBRegressor(objective='reg:squarederror', colsample_bytree=0.3, learning_rate=0.1,
                          max_depth=6, alpha=10, n_estimators=100)
# train
xg_reg.fit(X_train_scaled, y_train)
# score
y_pred = xg_reg.predict(X_test_scaled)
mae = metrics.mean_absolute_error(y_test, y_pred)
print(f'Mean Absolute Error: {mae}')
# +
#Assume logerror is ranging from -4.6 to 4.7 and the error is 0.069, it means on average your
#prediction is 0.069 different from ground truth.
#For example, there is a house, the ground truth is 4.5 logerror for it, the model can
#predict 4.44 or 4.56. Of course, this is the average performance, the model can be very good on
#this data point, e.g., producing perfect prediction or be very bad at this data point, make much
#more error than 0.06.
# -
# ### logerror = Log(Zestimate) - Log(Sales price)
# (0.068718758944467) = log(x) - log(3050000)
# x = 3266960
#
# 3266960 - 3050000
# x = 216960
#8611 Bluffdale Dr, Sun Valley, CA 91352
x = 1071130
x = 71130
0.07113
|
homees/Modeling.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # ADVANCED TEXT MINING
#
# - These materials were created for text-mining research and teaching purposes.
# - If you wish to use them for lectures, please contact the address below first.
# - Unauthorized redistribution of these materials is prohibited.
# - For inquiries about lectures, copyright, publication, patents, or co-authorship, please get in touch.
# - **Contact : ADMIN(<EMAIL>)**
#
# ---
# ## WEEK 02-2. Understanding Python data structures
# - Covers the Python data structures used to handle text data.
#
# ---
# ### 1. Understanding the list data structure
#
# ---
# #### 1.1. List: declares a structure that can store values or other data structures.
#
# ---
# 1) Create a list.
new_list = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
print(new_list)
# 2) Append a new element after the last element of the list.
new_list.append(100)
print(new_list)
# 3) Concatenate two lists with the + operator.
new_list = new_list + [101, 102]
print(new_list)
# 4-1) Remove the first matching occurrence of a given value from the list.
new_list.remove(3)
print(new_list)
# 4-2) Delete the N-th element of the list.
del new_list[3]
print(new_list)
# 5) Replace the value of the N-th element.
new_list[0] = 105
print(new_list)
# 6) Sort all elements of the list in ascending order.
new_list.sort()
#new_list.sort(reverse=False)
print(new_list)
# 7) Sort all elements of the list in descending order.
new_list.sort(reverse=True)
print(new_list)
# 8) Reverse the order of all elements in the list.
new_list.reverse()
print(new_list)
# 9) Get the number of elements in the list.
length = len(new_list)
print(new_list)
# 10-1) Check whether a value exists in the list with the in operator.
print(100 in new_list)
# 10-2) Check whether a value is absent from the list with the not in operator.
print(100 not in new_list)
# #### 1.2. List indexing: access particular elements of a list.
#
# ---
new_list = [0, 1, 2, 3, 4, 5, 6, 7, "hjvjg", 9]
# 1) Access the N-th element of the list.
print("0번째 원소 :", new_list[0])
print("1번째 원소 :", new_list[1])
print("4번째 원소 :", new_list[4])
# 2) Slice elements N through M-1 into a new list.
print("0~3번째 원소 :", new_list[0:3])
print("4~9번째 원소 :", new_list[4:9])
print("2~3번째 원소 :", new_list[2:3])
# 3) Slice from the N-th element through the end of the list.
print("3번째 부터 모든 원소 :", new_list[3:])
print("5번째 부터 모든 원소 :", new_list[5:])
print("9번째 부터 모든 원소 :", new_list[9:])
# 4) Slice every element before the N-th.
print("1번째 이전의 모든 원소 :", new_list[:1])
print("7번째 이전의 모든 원소 :", new_list[:7])
print("9번째 이전의 모든 원소 :", new_list[:9])
# 5) A negative index N counts back from the end (the |N|-1-th element from the last).
print("끝에서 |-1|-1번째 이전의 모든 원소 :", new_list[:-1])
print("끝에서 |-1|-1번째 부터 모든 원소 :", new_list[-1:])
print("끝에서 |-2|-1번째 이전의 모든 원소 :", new_list[:-2])
print("끝에서 |-2|-1번째 부터 모든 원소 :", new_list[-2:])
# #### 1.3. Nested lists: list elements can themselves hold values or other data structures.
#
# ---
# 1-1) A list may mix values and data structures of different types.
new_list = ["텍스트", 0, 1.9, [1, 2, 3, 4], {"서울": 1, "부산": 2, "대구": 3}]
print(new_list)
# 1-2) Check each element's type with the type() function.
print("Type of new_list[0] :", type(new_list[0]))
print("Type of new_list[1] :", type(new_list[1]))
print("Type of new_list[2] :", type(new_list[2]))
print("Type of new_list[3] :", type(new_list[3]))
print("Type of new_list[4] :", type(new_list[4]))
# 2) Nesting several lists as elements builds a multidimensional (NxM) list.
new_list = [[0, 1, 2], [2, 3, 7], [9, 6, 8], [4, 5, 1]]
print("new_list :", new_list)
print("new_list[0] :", new_list[0])
print("new_list[1] :", new_list[1])
print("new_list[2] :", new_list[2])
print("new_list[3] :", new_list[3])
# 3-1) Sorting an NxM list orders by each inner list's first element by default.
new_list.sort()
print("new_list :", new_list)
print("new_list[0] :", new_list[0])
print("new_list[1] :", new_list[1])
print("new_list[2] :", new_list[2])
print("new_list[3] :", new_list[3])
# 3-2) Sort an NxM list by the N-th element of each inner list via a key function.
new_list.sort(key=lambda elem: elem[2])
print("new_list :", new_list)
print("new_list[0] :", new_list[0])
print("new_list[1] :", new_list[1])
print("new_list[2] :", new_list[2])
print("new_list[3] :", new_list[3])
# ### 2. Understanding the dictionary data structure
#
# ---
# #### 2.1. Dictionary: declares a structure that can store values or other data structures.
#
# ---
# 1) Create a dictionary.
new_dict = {"마케팅팀": 98, "개발팀": 78, "데이터분석팀": 83, "운영팀": 33}
print(new_dict)
# 2) Each entry is a KEY:VALUE pair; look up the VALUE mapped to a KEY.
print(new_dict["마케팅팀"])
# 3-1) Add a new KEY:VALUE pair to the dictionary.
new_dict["미화팀"] = 55
print(new_dict)
# 3-2) Keys must be unique, so assigning to an existing KEY overwrites its VALUE.
new_dict["데이터분석팀"] = 100
print(new_dict)
# 4) Values may be of any type or data structure.
new_dict["데이터분석팀"] = {"등급": "A"}
new_dict["운영팀"] = ["A"]
new_dict["개발팀"] = "재평가"
new_dict[0] = "오타"
print(new_dict)
# #### 2.2. Dictionary indexing: retrieve the dictionary's entries in list-like form.
#
# ---
# 1-1) Use keys()/values()/items() to get iterable views of the dictionary.
new_dict = {"마케팅팀": 98, "개발팀": 78, "데이터분석팀": 83, "운영팀": 33}
print("KEY List of new_dict :", new_dict.keys())
print("VALUE List of new_dict :", new_dict.values())
print("(KEY, VALUE) List of new_dict :", new_dict.items())
for i, j in new_dict.items():
    print(i, j)
# 1-2) Convert the retrieved views into actual lists.
print("KEY List of new_dict :", list(new_dict.keys()))
print("VALUE List of new_dict :", list(new_dict.values()))
print("(KEY, VALUE) List of new_dict :", list(new_dict.items()))
|
practice-note/week_02/W02-2_advanced-text-mining_python-data-structure.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import tensorflow as tf
from tensorflow.keras import datasets, layers, models
import matplotlib.pyplot as plt
(train_images, train_labels), (test_images, test_labels) = datasets.cifar10.load_data()
# Scale pixel values into the 0-1 range.
train_images, test_images = train_images / 255.0, test_images / 255.0
class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer',
               'dog', 'frog', 'horse', 'ship', 'truck']
# Plot the first 25 training images with their class labels.
plt.figure(figsize=(10,10))
for i in range(25):
    plt.subplot(5,5,i+1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(train_images[i], cmap=plt.cm.binary)
    plt.xlabel(class_names[train_labels[i][0]])
plt.show()
# Build the convolutional base: Conv/MaxPool stacks over 32x32x3 inputs.
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
# Inspect the architecture so far.
model.summary()
# Add the classifier head (flatten + dense layers; 10 logits, one per class).
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(10))
# Inspect the full architecture.
model.summary()
# Compile and train the model; from_logits=True because the last layer has no softmax.
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
history = model.fit(train_images, train_labels, epochs=10,
                    validation_data=(test_images, test_labels))
# Plot training vs. validation accuracy per epoch.
plt.plot(history.history['accuracy'], label='accuracy')
plt.plot(history.history['val_accuracy'], label = 'val_accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.ylim([0.5, 1])
plt.legend(loc='lower right')
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
|
Machine_Learning/Conv_neu_network/Conv_network.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/cocolleen/OOP-1-2/blob/main/Prelim.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="9seb4iNogd1r"
#
# + colab={"base_uri": "https://localhost:8080/"} id="l9uUaXUqggkq" outputId="ea1f3c18-996c-4c23-d48e-ecf1b9579ec2"
class Student:
    """Simple record of a student that can print a short self-introduction."""

    def __init__(self, Name, Student_No, Age, School, Course):
        # Attribute names intentionally mirror the constructor parameters
        # so existing callers keep working.
        self.Name = Name
        self.Student_No = Student_No
        self.Age = Age
        self.School = School
        self.Course = Course

    def Info(self):
        """Print the introduction paragraph (output text unchanged)."""
        print(f"\n Hi, I am {self.Name} and my student number is {self.Student_No} \n"
              f" I am currently {self.Age} \n"
              f" Studying at {self.School} taking {self.Course}")


# Demo: build one record and print its introduction.
Myself = Student("<NAME>", "202102070", "19 years old", "Cavite State Univeristy - Main Campus", "Bachelor of Science in Computer Engineering")
Myself.Info()
|
Prelim.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
mySet = {1,2,3}
print(mySet)
print(type(mySet))
# Duplicate literals collapse: a set stores unique elements only.
mySet = {1,2,3,1,4}
print(mySet)
mySet = set([1,2,3,4,1,5]) #Set Function will create a set from list
print(mySet)
# An empty set must be made with set(); {} would create a dict.
mySet = set()
print(type(mySet))
mySet = {'a', 56}
# add() inserts a single element.
mySet.add(5)
print(mySet)
# update() inserts every element of one or more iterables.
mySet.update([2,'r',12])
print(mySet)
mySet.update([12,'one'],{4,1,5})
print(mySet)
mySet = {11,12,13,14,15}
print(mySet)
# remove() raises KeyError when the element is absent.
mySet.remove(15)
print(mySet)
# discard() is like remove() but silent when the element is absent.
mySet.discard(14)
print(mySet)
mySet.discard(34)
print(mySet)
mySet = {'abc', 12, 'efg', 1,11}
# pop() removes and returns an arbitrary element (sets are unordered).
mySet.pop()
print(mySet)
mySet.pop()
print(mySet)
# clear() empties the set in place.
mySet.clear()
print(mySet)
mySet1 = {1,2,3,4,5}
mySet2 = {3,4,5,6,7}
# Union: elements in either set.
print(mySet1 | mySet2)
print(mySet1.union(mySet2))
# Intersection: elements in both sets.
print(mySet1 & mySet2)
print(mySet1.intersection(mySet2))
# Difference.  NOTE(review): the operator form below is mySet2 - mySet1 while
# the method form is mySet1.difference(mySet2); the two lines print
# different sets — confirm that is intentional for the demo.
print(mySet2 - mySet1)
print(mySet1.difference(mySet2))
# Symmetric difference: elements in exactly one of the sets.
print(mySet1^mySet2)
print(mySet1.symmetric_difference(mySet2))
a = {1,2,3,4,5,6}
b = {2,4}
print("is set 'a' subset of set 'b' ?", a.issubset(b))
print("is set 'b' subset of set 'a' ?", b.issubset(a))
# frozenset: an immutable set.
mySet1 = frozenset([1,2,3,4,5])
mySet2 = frozenset([4,5,6,7,8])
# NOTE(review): frozenset has no add(); the next line raises AttributeError,
# so a top-to-bottom run stops here.
mySet1.add(9)
# NOTE(review): sets/frozensets do not support indexing; this line raises TypeError.
print(mySet1[4])
print(mySet1 | mySet2)
print(mySet1 & mySet2)
print(mySet1.intersection(mySet2))
print(mySet1^mySet2)
print(mySet1.symmetric_difference(mySet2))
|
Deep Data Dive - Set.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'  # suppress TensorFlow C++ info/warning log output
import progressbar
import tensorflow as tf
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import time
# %matplotlib inline
# Network / training hyper-parameters used throughout the notebook.
filter_dim, filter_dim2 = 11, 1
batch_size = 4
image_dim, result_dim = 96, 86  # inputs are 96x96; targets are 86x86 (no zero padding, per data comments below)
input_layer, first_layer, second_layer, third_layer, output_layer = 4, 17, 9, 4, 1  # channel counts per layer
learning_rate = .01
epochs = 300
# +
def get_variance(training_target):
    """Return the variance over every target pixel.

    Used to normalise the network error against the spread of the data.
    """
    return training_target.flatten().var()
def normalize_input(train_data, test_data):
    """Standardise both splits using statistics from the training set only.

    The per-position mean and standard deviation are computed from
    ``train_data`` and applied to both splits, so the test data receives the
    exact same transform (no leakage of test statistics).
    """
    center = np.mean(train_data, axis=0)
    spread = np.std(train_data, axis=0)
    return (train_data - center) / spread, (test_data - center) / spread
def get_epoch(x, y, n):
    """Split (x, y) into one epoch of shuffled minibatches of size ``n``.

    Returns a dict mapping minibatch number -> [features, targets], where the
    final minibatch holds the remainder when ``len(x)`` is not divisible by
    ``n``.

    Fix: the previous version raised NameError when ``x.shape[0] < n`` —
    the remainder branch referenced the loop variable ``i`` before any full
    batch had been built.  Iterating over contiguous slices of one shuffled
    index array produces the same partition without that edge case.
    """
    input_size = x.shape[0]
    order = np.arange(input_size)
    np.random.shuffle(order)
    batches = {}
    for batch_number, start in enumerate(range(0, input_size, n)):
        chosen = order[start:start + n]  # fancy indexing == np.asarray of the selected rows
        batches[batch_number] = [x[chosen], y[chosen]]
    return batches
# +
# data input
data_path = 'https://raw.githubusercontent.com/michaelneuder/image_quality_analysis/master/data/sample_data/'
# train data --- 500 images, 96x96 pixels
orig_500 = pd.read_csv('{}orig_500.txt'.format(data_path), header=None, delim_whitespace = True)
recon_500 = pd.read_csv('{}recon_500.txt'.format(data_path), header=None, delim_whitespace = True)
# test data --- 140 images, 96x96 pixels
orig_140 = pd.read_csv('{}orig_140.txt'.format(data_path), header=None, delim_whitespace = True)
recon_140 = pd.read_csv('{}recon_140.txt'.format(data_path), header=None, delim_whitespace = True)
# targets --- 86x86 per image (dimension reduction due no zero padding being used)
ssim_500 = pd.read_csv('{}ssim_500_nogauss.csv'.format(data_path), header=None)
ssim_140 = pd.read_csv('{}ssim_140_nogauss.csv'.format(data_path), header=None)
# second target set (older SSIM files, 96x96 per image) kept for comparison plots
ssim_500_old = pd.read_csv('{}SSIM_500.txt'.format(data_path), header=None, delim_whitespace=True)
ssim_140_old = pd.read_csv('{}SSIM_140.txt'.format(data_path), header=None, delim_whitespace=True)
# +
# getting 4 input channels for train and test --- (orig, recon, orig squared, recon squared)
original_images_train = orig_500.values
original_images_train_sq = orig_500.values**2
reconstructed_images_train = recon_500.values
reconstructed_images_train_sq = recon_500.values**2
original_images_test = orig_140.values
original_images_test_sq = orig_140.values**2
reconstructed_images_test = recon_140.values
reconstructed_images_test_sq = recon_140.values**2
# stack inputs
training_input = np.dstack((original_images_train, reconstructed_images_train, original_images_train_sq, reconstructed_images_train_sq))
testing_input = np.dstack((original_images_test, reconstructed_images_test, original_images_test_sq, reconstructed_images_test_sq))
# normalize inputs (training statistics applied to both splits)
training_input_normalized, testing_input_normalized = normalize_input(training_input, testing_input)
# target values
training_target = ssim_500.values
testing_target = ssim_140.values
# target values 2
training_target2 = ssim_500_old.values
testing_target2 = ssim_140_old.values
# +
# get size of training and testing set
train_size = original_images_train.shape[0]
test_size = original_images_test.shape[0]
# reshaping features to (num images, 96x96, 4 channels)
train_features = np.reshape(training_input_normalized, [train_size,image_dim,image_dim,input_layer])
test_features = np.reshape(testing_input_normalized, [test_size,image_dim,image_dim,input_layer])
# reshaping target to --- (num images, 86x86, 1)
train_target = np.reshape(training_target, [train_size, result_dim, result_dim, output_layer])
test_target = np.reshape(testing_target, [test_size, result_dim, result_dim, output_layer])
# reshaping target2 to --- (num images, 96x96, 1)
train_target2 = np.reshape(training_target2, [train_size, image_dim, image_dim, output_layer])
test_target2 = np.reshape(testing_target2, [test_size, image_dim, image_dim, output_layer])
# inverse
# NOTE(review): the features are negated here — presumably to flip image
# polarity for the greyscale plots below; confirm downstream training is
# meant to see negated inputs too.
train_features = -1*train_features
test_features = -1*test_features
# +
# 3x6 grid: the four input channels plus both SSIM targets for three samples
f, axarr = plt.subplots(nrows=3,ncols=6, figsize=(18,9))
for i in range(3):
    axarr[i,0].imshow(train_features[i,:,:,0], cmap='Greys')
    axarr[i,1].imshow(train_features[i,:,:,1], cmap='Greys')
    axarr[i,2].imshow(train_features[i,:,:,2], cmap='Greys')
    axarr[i,3].imshow(train_features[i,:,:,3], cmap='Greys')
    axarr[i,4].imshow(train_target[i,:,:,0], cmap='magma')
    axarr[i,5].imshow(train_target2[i,:,:,0], cmap='magma')
axarr[0,0].set_title('original')
axarr[0,1].set_title('reconstructed')
axarr[0,2].set_title('orig squared')
axarr[0,3].set_title('recon squared')
axarr[0,4].set_title('ssim')
axarr[0,5].set_title('ssim2')
# hide tick labels on every subplot
for ax_row in axarr:
    for ax in ax_row:
        ax.set_xticklabels([])
        ax.set_yticklabels([])
f.suptitle('training data sample', size=20)
plt.savefig('compare_ssim.png')
plt.show()
|
bin/calculations/ssim/check_ssim.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: ALA
# language: python
# name: ala
# ---
# ###################################################################
# #Script Name :
# #Description :
# #Args :
# #Author : <NAME> in R, converted to Python by Nor Raymond
# #Email : <EMAIL>
# ###################################################################
# ### Fail Rate Reports for Pilot
import os
import glob
import pandas as pd
import numpy as np
import yaml
import warnings
from functools import reduce
warnings.filterwarnings("ignore")
# +
# Function to load yaml configuration file
def load_config(config_name):
    """Load a YAML file from the module-level ``config_path`` directory and return its contents."""
    with open(os.path.join(config_path, config_name), 'r') as file:
        config = yaml.safe_load(file)
    return config

config_path = "conf/base"

# Load the catalog and chdir into the configured project root.  If the first
# attempt fails (e.g. the notebook was started one level down), retry from
# the parent directory.
# NOTE(review): the bare ``except`` also swallows unrelated errors
# (KeyError, YAML parse errors) — consider catching FileNotFoundError.
try:
    # load yaml catalog configuration file
    config = load_config("catalog.yml")
    os.chdir(config["project_path"])
    root_path = os.getcwd()
except:
    os.chdir('..')
    # load yaml catalog configuration file
    config = load_config("catalog.yml")
    os.chdir(config["project_path"])
    root_path = os.getcwd()

# import data_processing module
import src.data.data_processing as data_processing
# import data_cleaning module
import src.data.data_cleaning as data_cleaning
# -
def language_selection(languages):
    """Interactively prompt until a valid language index is chosen.

    ``languages`` is a DataFrame whose index holds the selectable numbers and
    whose first column holds the language names.  Loops until the user enters
    an integer within ``languages.index`` bounds, then returns the selected
    language name.

    Cleanup vs. the previous version (prompt texts unchanged): removed the
    unreachable ``else: break`` (every path in the try already continues or
    exits) and the dead ``language_index == ""`` branch — ``int(input(...))``
    raises ValueError on an empty string before that comparison could run.
    """
    while True:
        try:
            language_index = int(input("\nPlease select the number of the Language you are assessing: "))
        except ValueError:
            print("\nYou must enter numerical values only... Please try again")
            continue
        if language_index < min(languages.index) or language_index > max(languages.index):
            print(f"\nYou must enter numbers between {min(languages.index)} - {max(languages.index)}... Please try again")
            continue
        print(f"\nYou have selected {language_index} for {languages.iloc[language_index, 0]}")
        return languages.iloc[language_index, 0]
# #### Functions for Language Modification - getting the overall time taken
# +
# function for Language Modification
def get_time_taken(df, language_selected):
    """Add per-item and per-worker duration columns for one language.

    Filters ``df`` to rows matching ``language_selected``, derives the
    per-item duration in seconds from the ``_started_at``/``_created_at``
    timestamps, and left-merges each worker's total time in minutes.
    """
    # Filter data based on selected language
    dfr = df[df['Language'] == language_selected]
    # Time Taken by Item
    # NOTE(review): .dt.seconds is the seconds *component* (wraps past 24h);
    # fine as long as no single item exceeds a day — confirm.
    dfr["Time_Taken_Seconds"] = (dfr['_created_at'] - dfr['_started_at']).dt.seconds
    # Time Taken Overall
    # NOTE(review): the string lands positionally in GroupBy.sum()'s
    # numeric_only slot (truthy); it does NOT select a column.  Verify against
    # the installed pandas version's signature.
    dfr_grouped = dfr.groupby('_worker_id').sum('Time_Taken_Seconds')
    dfr_grouped["Time_Taken_Minutes_Overall"] = dfr_grouped["Time_Taken_Seconds"] / 60
    dfr_grouped = dfr_grouped.reset_index()
    dfr = pd.merge(dfr, dfr_grouped[["Time_Taken_Minutes_Overall", "_worker_id"]], how = 'left', on = '_worker_id')
    return dfr
def get_time_taken_all(language_selected, rc, v1, v2):
    """Apply ``get_time_taken`` to the rc/v1/v2 frames for one language.

    Returns the three augmented frames in the same (rcR, v1R, v2R) order.
    """
    rcR, v1R, v2R = (get_time_taken(frame, language_selected)
                     for frame in (rc, v1, v2))
    return rcR, v1R, v2R
# -
# #### Functions for calculating Fail Rates
# #### REPORT 1 : "Near Exact Match" - v1_actual_correct_by_question
# +
def v1_fail_rate(v1R):
    """Per-question fail rates for the v1 ("near exact match") task.

    Counts test takers per (question, answer, score) combination, computes
    each combination's share of that question's total takers, and returns
    only the failing (Score == 0) rows sorted by market (Fluency) and
    descending fail rate.
    """
    vR_temp = v1R[['Language', 'Fluency', '_worker_id', '_unit_id', 'question_', 'a_domain', 'a_register',
                   'wordphrase_a', 'b_domain', 'b_register', 'wordphrase_b', 'difficulty', 'Answer', 'Score']]
    # first grouping: takers per (question, answer, score) combination
    vR_grouped = vR_temp.groupby(['Language', 'Fluency', '_unit_id', 'question_', 'a_domain', 'a_register', 'wordphrase_a',
                                  'b_domain', 'b_register', 'wordphrase_b', 'difficulty', 'Answer', 'Score'])['_worker_id'].count().reset_index()
    vR_grouped = vR_grouped.rename(columns = {"_worker_id" : "Count_of_Test_Takers"})
    # second grouping: total takers per question (Answer/Score excluded from the key)
    vR_grouped['Total_Test_Takers'] = vR_grouped.groupby(['Language', 'Fluency', '_unit_id', 'question_', 'a_domain', 'a_register', 'wordphrase_a',
                                                          'b_domain', 'b_register', 'wordphrase_b', 'difficulty'])['Count_of_Test_Takers'].transform('sum')
    vR_grouped['Fail_Rate'] = round((vR_grouped['Count_of_Test_Takers'] / vR_grouped['Total_Test_Takers']), 2)
    # filter Score 0 (failed answers only)
    vR_grouped = vR_grouped[vR_grouped['Score'] == 0]
    # sort values by Market and Fail_rate descending
    vR_grouped = vR_grouped.sort_values(['Fluency', 'Fail_Rate'], ascending=[True, False])
    vR_fail_rates = vR_grouped.reset_index(drop=True) #re-order df index
    return vR_fail_rates
def generate_report_1(v1R):
    """Report 1 ("Near Exact Match"): per-question fail rates for the v1 task."""
    return v1_fail_rate(v1R)
# -
# #### REPORT 2 : "Close Match" - v2_fail_rates
# +
def v2_fail_rate(v2R):
    """Per-question overall fail rates for the v2 ("close match") task.

    Same shape as ``v1_fail_rate`` but keyed on the ``Answers`` column;
    returns the failing (Score == 0) rows sorted by market and unit id,
    with the Score column dropped.
    """
    vR_temp = v2R[['Language', 'Fluency', '_worker_id', '_unit_id', 'question_', 'a_domain', 'a_register', 'wordphrase_a', 'b_domain',
                   'b_register', 'wordphrase_b', 'difficulty', 'Answers', 'Score']]
    # first grouping: takers per (question, answers, score) combination
    vR_grouped = vR_temp.groupby(['Language', 'Fluency', '_unit_id', 'question_', 'a_domain', 'a_register', 'wordphrase_a', 'b_domain',
                                  'b_register', 'wordphrase_b', 'difficulty', 'Answers', 'Score'])['_worker_id'].count().reset_index()
    vR_grouped = vR_grouped.rename(columns = {"_worker_id" : "Count_of_Test_Takers"})
    # second grouping
    # NOTE(review): unlike v1_fail_rate, 'Language' is omitted from this key —
    # harmless if each Fluency maps to a single Language; verify.
    vR_grouped['Total_Test_Takers'] = vR_grouped.groupby(['Fluency', '_unit_id', 'question_', 'a_domain', 'a_register', 'wordphrase_a',
                                                          'b_domain', 'b_register', 'wordphrase_b', 'difficulty'])['Count_of_Test_Takers'].transform('sum')
    vR_grouped['Overall_Fail_Rate'] = round((vR_grouped['Count_of_Test_Takers'] / vR_grouped['Total_Test_Takers']), 2)
    # filter Score 0
    vR_grouped = vR_grouped[vR_grouped['Score'] == 0]
    # sort values by Market and _unit_id
    vR_grouped = vR_grouped.sort_values(['Fluency', '_unit_id'], ascending = [True, True])
    # drop Score column
    vR_grouped = vR_grouped.drop('Score', axis = 1)
    vR_fail_rates = vR_grouped.reset_index(drop=True) #re-order df index
    return vR_fail_rates
def v2_fail_rate_2(v2R):
    """Pivot failing v2 responses so each rater-answer option becomes a column of rates."""
    vR_temp = v2R[['Language', 'Fluency', '_worker_id', '_unit_id', 'question_', 'a_domain', 'a_register', 'wordphrase_a', 'b_domain',
                   'b_register', 'wordphrase_b', 'difficulty', 'rater_answer', 'Answers', 'Score']]
    # first grouping: takers per (question, rater_answer, score) combination
    vR_grouped = vR_temp.groupby(['Language', 'Fluency', '_unit_id', 'question_', 'a_domain', 'a_register', 'wordphrase_a', 'b_domain',
                                  'b_register', 'wordphrase_b', 'difficulty', 'rater_answer', 'Answers', 'Score'])['_worker_id'].count().reset_index()
    vR_grouped = vR_grouped.rename(columns = {"_worker_id" : "Count_of_Test_Takers"})
    # second grouping: total takers per question
    vR_grouped['Total_Test_Takers'] = vR_grouped.groupby(['Fluency', '_unit_id', 'question_', 'a_domain', 'a_register', 'wordphrase_a', 'b_domain',
                                                          'b_register', 'wordphrase_b', 'difficulty'])['Count_of_Test_Takers'].transform('sum')
    vR_grouped['Rate'] = round((vR_grouped['Count_of_Test_Takers'] / vR_grouped['Total_Test_Takers']), 2)
    # filter Score 0
    vR_grouped = vR_grouped[vR_grouped['Score'] == 0]
    # sort values by Market and _unit_id
    # NOTE(review): 'Score' is constant (0) after the filter above, so its
    # presence in the sort key is a no-op.
    vR_grouped = vR_grouped.sort_values(['Fluency', '_unit_id', 'Score', 'Rate'], ascending = [True, True, True, False])
    # drop Score columns
    vR_grouped = vR_grouped.drop(['Score', 'Count_of_Test_Takers', 'Total_Test_Takers'], axis = 1)
    vR_fail_rates = vR_grouped.reset_index(drop=True) #re-order df index
    # one column per rater_answer option, holding that option's failure rate
    vR_fail_rates = pd.pivot_table(vR_fail_rates,
                                   index=['Language', 'Fluency', '_unit_id', 'question_', 'a_domain', 'a_register', 'wordphrase_a', 'b_domain',
                                          'b_register', 'wordphrase_b', 'difficulty', 'Answers'],
                                   values='Rate', columns=['rater_answer']).reset_index()
    vR_fail_rates.columns.name = None # remove name for columns
    # remove duplicate rows in the dataframe
    vR_fail_rates = vR_fail_rates.drop_duplicates()
    return vR_fail_rates
def merge_v2_fail_rates(v2_actual_correct_by_question, v2_actual_correct_by_question_with_answer):
    """Left-join the overall fail-rate table onto the per-answer breakdown.

    The per-answer frame is the left side, so every (question, Answers) row
    keeps its rater-answer rate columns and picks up the matching overall
    fail-rate columns.
    """
    join_keys = ["Language", "Fluency", "_unit_id", "question_", "a_domain",
                 "a_register", "wordphrase_a", "b_domain", "b_register",
                 "wordphrase_b", "difficulty", "Answers"]
    return pd.merge(v2_actual_correct_by_question_with_answer,
                    v2_actual_correct_by_question,
                    how='left', on=join_keys)
def generate_report_2(v2R):
    """Report 2 ("Close Match"): overall fail rates joined with the per-answer breakdown."""
    overall_rates = v2_fail_rate(v2R)
    per_answer_rates = v2_fail_rate_2(v2R)
    return merge_v2_fail_rates(overall_rates, per_answer_rates)
# -
# #### REPORT 3 : "Reading Comprehension" : rc_question_skill_pass_rate
# +
def rc_fail_rate(rcR):
    """Explode graded reading-comprehension responses into one row per question.

    For each of the four questions, a response scores 1 when the rater's
    choice (``question_no_N``) equals the answer key (``Answer_no_N``) and 0
    otherwise; a question whose choices or key column is entirely missing is
    dropped after the explode.  Returns a long-format frame with per-question
    Score / Question / Difficulty / Google_Translate_Error / Skill columns
    alongside the worker/passage identifiers.

    Refactor: the four copy-pasted null-check blocks are collapsed into one
    loop, and the dead local ``rc_answer`` is removed; the pipeline itself is
    unchanged.
    """
    id_cols = ['Language', '_worker_id', '_country', 'Fluency', 'Time_Taken_Seconds',
               '_unit_id', 'title', 'test_', 'register', 'topic', 'text_type',
               'complexity', 'familiarity']
    questions = range(1, 5)
    vR_temp = rcR[id_cols +
                  ['question_{}_difficulty'.format(q) for q in questions] +
                  ['question_{}_google_translate_error'.format(q) for q in questions] +
                  ['Question {} Skill tested'.format(q) for q in questions] +
                  ['question_no_{}'.format(q) for q in questions] +
                  ['Answer_no_{}'.format(q) for q in questions]].copy()

    # Per-question score: 1 if the rater picked the keyed answer, else 0.
    # When either column is entirely null the score stays NaN so the row is
    # dropped after the explode (same behaviour as before).
    score_cols = []
    for q in questions:
        picked = 'question_no_{}'.format(q)
        key = 'Answer_no_{}'.format(q)
        score_col = 'a{}'.format(q)
        if vR_temp[picked].isnull().all() or vR_temp[key].isnull().all():
            vR_temp[score_col] = np.nan
        else:
            vR_temp[score_col] = np.where(vR_temp[picked] == vR_temp[key], 1, 0).astype('str')
        score_cols.append(score_col)

    # Collapse the per-question columns into ';'-joined strings, ready to be
    # exploded into one row per question.
    vR_temp['Score'] = vR_temp[score_cols].astype('str').agg(';'.join, axis=1)
    vR_temp['Question'] = ';'.join('Question {}'.format(q) for q in questions)
    vR_temp['Difficulty'] = vR_temp[['question_{}_difficulty'.format(q) for q in questions]].astype('str').agg(';'.join, axis=1)
    vR_temp['Google_Translate_Error'] = vR_temp[['question_{}_google_translate_error'.format(q) for q in questions]].astype('str').agg(';'.join, axis=1)
    vR_temp['Skill'] = vR_temp[['Question {} Skill tested'.format(q) for q in questions]].astype('str').agg(';'.join, axis=1)

    long_cols = ['Score', 'Question', 'Difficulty', 'Google_Translate_Error', 'Skill']
    vR_temp = vR_temp[id_cols + long_cols]

    # Split each ';'-joined column and explode -> one row per question.
    vR_temp = vR_temp.set_index(id_cols).apply(lambda col: col.str.split(';').explode()).reset_index()

    # Restore real NaNs (str conversion produced the literal 'nan'), drop
    # questions without a score, and make Score numeric again.
    vR_temp[long_cols] = vR_temp[long_cols].replace('nan', np.nan)
    vR_temp = vR_temp.dropna(subset=['Score'])
    vR_temp['Score'] = vR_temp['Score'].astype('int')
    return vR_temp
## Melt RC and categorize question choice with letter and question number
def melt_rc_assign(rc_choices, q_list, choice_list):
    """Tag melted choice rows with their question number and answer letter.

    Filters ``rc_choices`` by the ``variable`` name patterns
    ``question_<q>`` and ``choice_<c>``, stacks the pieces in
    (question, choice) order, and labels choices 1/2/3 as answers
    'a'/'b'/'c'.
    """
    letter_for = {1: 'a', 2: 'b', 3: 'c'}
    pieces = []
    for q in q_list:
        question_rows = rc_choices[rc_choices['variable'].str.contains('question_' + str(q))]
        for c in choice_list:
            piece = question_rows[question_rows['variable'].str.contains('choice_' + str(c))].copy()
            piece['Question'] = 'Question ' + str(q)
            if c in letter_for:
                piece['Answer'] = letter_for[c]
            pieces.append(piece)
    return pd.concat(pieces)
## Melt RC and categorize question choice with letter and question number
def melt_rc(rcR):
    """Melt the 12 per-question choice columns of ``rcR`` into long format.

    Deduplicates the choice columns per passage, melts them, labels each row
    with its question number and answer letter via ``melt_rc_assign``, and
    sorts.  Returns the same labelled frame three times (choices,
    actual-answer view, rater-answer view), matching the original contract.
    """
    id_cols = ['Language', '_unit_id', 'title', 'test_']
    choice_cols = ['question_{}_choice_{}'.format(q, c)
                   for q in (1, 2, 3, 4) for c in (1, 2, 3)]
    deduped = rcR[id_cols + choice_cols].drop_duplicates().reset_index(drop=True)
    rc_choices = pd.melt(deduped, id_vars=id_cols)
    rc_choices = melt_rc_assign(rc_choices, [1, 2, 3, 4], [1, 2, 3])
    rc_choices = rc_choices[id_cols + ['Question', 'Answer', 'variable', 'value']]
    rc_choices = rc_choices.sort_values(['Language', 'title', 'test_', 'Question', 'Answer'])
    return rc_choices, rc_choices, rc_choices
# ## Melt RC into long format with actual answers
def melt_rc_answer_actual(rcR):
    """Melt per-rater RC results into long form: one row per answered question.

    For each of the four questions, the rater's answer is compared with the
    actual answer to produce a per-question Score (1 = correct, 0 = wrong,
    NaN when either answer column is entirely missing). The per-question
    values are ';'-joined, exploded into one row per question, and rows whose
    Score could not be computed are dropped.

    Parameters
    ----------
    rcR : DataFrame
        Wide per-rater RC results (one row per rater x unit).

    Returns
    -------
    DataFrame
        Long-form rows with integer Score plus Rater_Answer, Actual_Answer,
        Question, Difficulty, Google_Translate_Error and Skill columns.
    """
    id_cols = ['Language', '_worker_id', '_country', 'Fluency', 'Time_Taken_Seconds',
               '_unit_id', 'title', 'test_', 'register', 'topic', 'text_type',
               'complexity', 'familiarity']
    vR_temp = rcR[['Language', '_worker_id', '_country', 'Fluency', 'Time_Taken_Seconds', '_unit_id', 'title', 'test_',
                   'question_1_difficulty', 'question_1_google_translate_error', 'Question 1 Skill tested',
                   'question_2_difficulty', 'question_2_google_translate_error', 'Question 2 Skill tested',
                   'question_3_difficulty', 'question_3_google_translate_error', 'Question 3 Skill tested',
                   'question_4_difficulty', 'question_4_google_translate_error', 'Question 4 Skill tested',
                   'register', 'topic', 'text_type', 'complexity', 'familiarity',
                   'question_no_1', 'question_no_2', 'question_no_3', 'question_no_4',
                   'Answer_no_1', 'Answer_no_2', 'Answer_no_3', 'Answer_no_4',
                   'Score']].copy()  # copy: avoid writing into a slice of rcR
    # Score each question: 1 if the rater's answer equals the actual answer,
    # 0 otherwise; NaN when either column is entirely empty.
    # (Replaces four copy-pasted if/else blocks with one loop.)
    for i in range(1, 5):
        q_col, a_col = f'question_no_{i}', f'Answer_no_{i}'
        if vR_temp[q_col].isnull().all() or vR_temp[a_col].isnull().all():
            vR_temp[f'a{i}'] = np.nan
        else:
            vR_temp[f'a{i}'] = np.where(vR_temp[q_col] == vR_temp[a_col], 1, 0).astype('str')
    # Rebuild Score from the per-question results (the incoming total is discarded).
    vR_temp = vR_temp.drop('Score', axis=1)
    # Collapse the four per-question columns into single ';'-delimited columns.
    vR_temp['Score'] = vR_temp[['a1', 'a2', 'a3', 'a4']].astype('str').agg(';'.join, axis=1)
    vR_temp['Rater_Answer'] = vR_temp[['question_no_1', 'question_no_2', 'question_no_3', 'question_no_4']].astype('str').agg(';'.join, axis=1)
    vR_temp['Actual_Answer'] = vR_temp[['Answer_no_1', 'Answer_no_2', 'Answer_no_3', 'Answer_no_4']].astype('str').agg(';'.join, axis=1)
    vR_temp['Question'] = ';'.join(['Question 1', 'Question 2', 'Question 3', 'Question 4'])
    vR_temp['Difficulty'] = vR_temp[['question_1_difficulty', 'question_2_difficulty',
                                     'question_3_difficulty', 'question_4_difficulty']].astype('str').agg(';'.join, axis=1)
    vR_temp['Google_Translate_Error'] = vR_temp[['question_1_google_translate_error',
                                                 'question_2_google_translate_error',
                                                 'question_3_google_translate_error',
                                                 'question_4_google_translate_error']].astype('str').agg(';'.join, axis=1)
    vR_temp['Skill'] = vR_temp[['Question 1 Skill tested', 'Question 2 Skill tested',
                                'Question 3 Skill tested', 'Question 4 Skill tested']].astype('str').agg(';'.join, axis=1)
    # Drop the wide per-question columns now that they are folded in.
    per_question_cols = ([f'question_{i}_difficulty' for i in range(1, 5)]
                         + [f'question_{i}_google_translate_error' for i in range(1, 5)]
                         + [f'Question {i} Skill tested' for i in range(1, 5)]
                         + [f'question_no_{i}' for i in range(1, 5)]
                         + [f'Answer_no_{i}' for i in range(1, 5)]
                         + [f'a{i}' for i in range(1, 5)])
    vR_temp = vR_temp.drop(per_question_cols, axis=1)
    # Explode the ';'-delimited columns into one row per question
    # (DataFrame.explode on multiple columns, pandas >= 1.3; replaces the
    # fragile set_index(...).apply(str.split + explode) recipe).
    value_cols = ['Score', 'Rater_Answer', 'Actual_Answer', 'Question',
                  'Difficulty', 'Google_Translate_Error', 'Skill']
    for col in value_cols:
        vR_temp[col] = vR_temp[col].str.split(';')
    vR_temp = vR_temp[id_cols + value_cols].explode(value_cols).reset_index(drop=True)
    # 'nan' strings came from str-casting NaNs above; restore real NaNs.
    vR_temp[value_cols] = vR_temp[value_cols].replace('nan', np.nan)
    vR_temp = vR_temp.dropna(subset=['Score'])  # drop questions that could not be scored
    vR_temp['Score'] = vR_temp['Score'].astype('int')
    return vR_temp
def rc_q_s_pass_rate(rc_answer):
    """Per-question RC fail rates: share of raters scoring 0 on each question.

    Counts raters per (question, score), normalises by the per-question total,
    keeps only the failing (Score == 0) rows and orders them worst-first
    within each fluency group.
    """
    score_keys = ['Language', 'Fluency', '_unit_id', 'title', 'test_', 'Score',
                  'Question', 'Difficulty', 'register', 'Skill']
    question_keys = [k for k in score_keys if k != 'Score']
    # Raters per (question, score) ...
    tallies = rc_answer.groupby(score_keys)['_worker_id'].count().reset_index()
    tallies = tallies.rename(columns={'_worker_id': 'Count'})
    # ... and per question overall, to turn counts into a rate.
    tallies['Total'] = tallies.groupby(question_keys)['Count'].transform('sum')
    tallies['Fail_Rate'] = round(tallies['Count'] / tallies['Total'], 2)
    # Keep the failing rows only, highest fail rate first per fluency.
    failing = tallies[tallies['Score'] == 0]
    failing = failing.sort_values(['Fluency', 'Fail_Rate'], ascending=[True, False])
    return failing.reset_index(drop=True)
def generate_report_3(rcR):
    """Build Report 3 ("Reading Comprehension"): per-question/skill fail rates.

    Parameters
    ----------
    rcR : DataFrame
        Per-language, time-filtered RC rater data.

    Returns
    -------
    DataFrame of per-question fail rates (see rc_q_s_pass_rate).

    NOTE(review): melt_rc and melt_rc_answer_actual are invoked below but
    their results are not used for the returned table — presumably leftovers
    from an earlier version; confirm before removing.
    """
    # rc_fail_rate is defined earlier in this file (outside this view).
    rc_answer = rc_fail_rate(rcR)
    rc_choices, actual_answer, rater_answer = melt_rc(rcR)
    rc_answer_actual = melt_rc_answer_actual(rcR)
    rc_question_skill_pass_rate = rc_q_s_pass_rate(rc_answer)
    return rc_question_skill_pass_rate
# -
# #### REPORT 4 : "RC with Answers" : rc_question_skill_pass_rate_answer_final
# +
def rc_q_s_pass_rate_answer(rc_answer_actual):
    """Per-question RC fail rates broken out by (actual answer, rater answer) pair.

    Same normalisation as rc_q_s_pass_rate — counts are divided by the total
    raters per question — but the counting key additionally includes the
    answer pair, so each failing row says *which* wrong answer was chosen.
    """
    answer_keys = ['Language', 'Fluency', '_unit_id', 'title', 'test_', 'Actual_Answer',
                   'Rater_Answer', 'Score', 'Question', 'Difficulty', 'register', 'Skill']
    question_keys = ['Language', 'Fluency', '_unit_id', 'title', 'test_',
                     'Question', 'Difficulty', 'register', 'Skill']
    # Raters per (question, answer-pair, score) ...
    tallies = rc_answer_actual.groupby(answer_keys)['_worker_id'].count().reset_index()
    tallies = tallies.rename(columns={'_worker_id': 'Count'})
    # ... normalised by the raters per question.
    tallies['Total'] = tallies.groupby(question_keys)['Count'].transform('sum')
    tallies['Fail_Rate'] = round(tallies['Count'] / tallies['Total'], 2)
    # Failing rows only, worst failures first within each question.
    failing = tallies[tallies['Score'] == 0]
    failing = failing.sort_values(['Fluency', '_unit_id', 'Question', 'Fail_Rate'],
                                  ascending=[True, True, True, False])
    return failing.reset_index(drop=True)
def join_rc_q_s_pass_rate_answer(rc_question_skill_pass_rate_answer, actual_answer, rater_answer):
    """Attach answer texts to the per-answer fail-rate table.

    Two left joins against the melted choice table translate the actual and
    rater answer *letters* into the corresponding answer *texts*, then the
    columns are reordered and renamed for the final report.
    """
    lookup_keys = ["Language", "_unit_id", "title", "test_", "Question", "Answer"]
    base_keys = ["Language", "_unit_id", "title", "test_", "Question"]

    # Resolve the actual answer letter to its text ('value' column).
    merged = pd.merge(rc_question_skill_pass_rate_answer, actual_answer, how='left',
                      left_on=base_keys + ["Actual_Answer"], right_on=lookup_keys)
    merged = merged.drop('Answer', axis=1)
    # Resolve the rater answer letter; the repeated 'value'/'variable' columns
    # pick up _x (actual) / _y (rater) suffixes here.
    merged = pd.merge(merged, rater_answer, how='left',
                      left_on=base_keys + ["Rater_Answer"], right_on=lookup_keys)
    merged = merged.drop('Answer', axis=1)

    merged = merged[['Language', 'Fluency', '_unit_id', 'title', 'test_', 'Difficulty', 'register', 'Skill', 'Question',
                     'Actual_Answer', 'value_x', 'Rater_Answer', 'value_y', 'Count', 'Total', 'Fail_Rate']]
    merged = merged.rename(columns={"Actual_Answer": "Actual_Answer_Letter",
                                    "value_x": "Actual_Answer_Text",
                                    "Rater_Answer": "Rater_Answer_Letter",
                                    "value_y": "Rater_Answer_Text"})
    return merged
def generate_report_4(rcR):
    """Build Report 4 ("RC with Answers"): per-answer fail rates with answer texts.

    Parameters
    ----------
    rcR : DataFrame
        Per-language, time-filtered RC rater data.

    Returns
    -------
    DataFrame with one row per failing (question, answer pair), including
    the actual/rater answer letters and texts.
    """
    # Melted choice table doubles as the letter -> text lookup for both joins.
    rc_choices, actual_answer, rater_answer = melt_rc(rcR)
    rc_answer_actual = melt_rc_answer_actual(rcR)
    rc_question_skill_pass_rate_answer = rc_q_s_pass_rate_answer(rc_answer_actual)
    rc_question_skill_pass_rate_answer_final = join_rc_q_s_pass_rate_answer(rc_question_skill_pass_rate_answer, actual_answer, rater_answer)
    return rc_question_skill_pass_rate_answer_final
# -
# +
def generate_all_fail_rate_reports(rcR, v1R, v2R, rc, v1, v2, run_value):
    """Build all four fail-rate report tables and the raw summary tables.

    Parameters
    ----------
    rcR, v1R, v2R : DataFrame
        Per-language, time-filtered rater data (Reading Comprehension / V1 / V2).
    rc, v1, v2 : DataFrame
        Unfiltered summary tables, passed through for CSV export.
    run_value : str
        Run type ('Deployment' or another run label) used to prefix summary names.

    Returns
    -------
    (list_of_datasets, list_of_summaries)
        Dicts mapping Excel sheet name -> report table and CSV base name ->
        summary table, respectively.
    """
    # generate_report_1 / generate_report_2 are defined earlier in this file.
    # Report 1 - Near Exact Match - v1_actual_correct_by_question
    v1_actual_correct_by_question = generate_report_1(v1R)
    # Report 2 - Close Match - v2_fail_rates
    v2_fail_rates = generate_report_2(v2R)
    # Report 3 - Reading Comprehension - rc_question_skill_pass_rate
    rc_question_skill_pass_rate = generate_report_3(rcR)
    # Report 4 - RC with Answers - rc_question_skill_pass_rate_answer_final
    rc_question_skill_pass_rate_answer_final = generate_report_4(rcR)
    # store all 4 reports into a dictionary set
    list_of_datasets = {"Near Exact Match" : v1_actual_correct_by_question,
                        "Close Match" : v2_fail_rates,
                        "Reading Comprehension" : rc_question_skill_pass_rate,
                        "RC with Answers" : rc_question_skill_pass_rate_answer_final}
    if run_value == 'Deployment':
        # store all 3 summaries into a dictionary set
        # (Deployment deliberately uses a fixed lowercase prefix)
        list_of_summaries = {"deployment_rc" : rc,
                             "deployment_v1" : v1,
                             "deployment_v2" : v2}
    else:
        # store all 3 summaries into a dictionary set
        list_of_summaries = { run_value + "_rc" : rc,
                              run_value + "_v1" : v1,
                              run_value + "_v2" : v2}
    return list_of_datasets, list_of_summaries
def file_check_create(root_path, config, language_selected, run_value, run_value_2):
    """Create (if needed) the deliverable and analysis folder trees for a run.

    Parameters
    ----------
    root_path : str
        Project root under which the report folders live.
    config : dict
        Must contain config['report']['deliverable'] and config['report']['analysis'].
    language_selected : str
        Language folder name for the deliverables.
    run_value : str
        Run type; 'Deployment' uses a shorter path and a different summary tag.
    run_value_2 : str
        Sub-run name, used only for non-Deployment runs.

    Returns
    -------
    (run_folder, analysis_folder, folder_tag)
        The deliverable folder, the analysis summary folder (containing
        RC/V1/V2 subfolders), and the summary tag used for that folder.
    """
    if run_value == 'Deployment':
        run_folder = os.path.join(root_path, config['report']['deliverable'], run_value, language_selected)
        folder_tag = 'Deployment Summary'
    else:
        run_folder = os.path.join(root_path, config['report']['deliverable'], run_value, run_value_2, language_selected)
        folder_tag = 'Grand Summary'
    # exist_ok=True makes the former os.path.exists guards redundant.
    os.makedirs(run_folder, exist_ok=True)
    analysis_folder = os.path.join(root_path, config['report']['analysis'], folder_tag)
    os.makedirs(analysis_folder, exist_ok=True)
    # One subfolder per summary type.
    for sub in ('RC', 'V1', 'V2'):
        os.makedirs(os.path.join(analysis_folder, sub), exist_ok=True)
    return run_folder, analysis_folder, folder_tag
def write_fail_report_to_excel(run_folder, list_of_datasets, encode=None):
    """Write every report DataFrame into one Excel workbook, one sheet per report.

    Parameters
    ----------
    run_folder : str
        Destination folder for 'language_fail_rates.xlsx'.
    list_of_datasets : dict[str, DataFrame]
        Sheet name -> report table.
    encode : unused
        Kept for backward signature compatibility only: DataFrame.to_excel
        dropped its 'encoding' argument in pandas 2.0, so it is no longer
        forwarded (passing it — even as None — raises TypeError there).
    """
    with pd.ExcelWriter(os.path.join(run_folder, 'language_fail_rates.xlsx')) as writer:
        for sheet_name, report in list_of_datasets.items():
            report.to_excel(writer, sheet_name=sheet_name, index=False)
def write_summary_to_csv(analysis_folder, list_of_summaries, encode=None):
    """Write each summary DataFrame to a CSV in its matching RC/V1/V2 subfolder.

    Parameters
    ----------
    analysis_folder : str
        Folder containing the 'RC', 'V1' and 'V2' subfolders.
    list_of_summaries : dict[str, DataFrame]
        CSV base name -> summary table; insertion order must match the
        RC, V1, V2 subfolder order (dicts preserve insertion order).
    encode : str or None
        Forwarded to DataFrame.to_csv as its encoding.
    """
    folders = ['RC', 'V1', 'V2']
    # Unpack directly instead of indexing the (key, value) tuple, and build
    # the path with a single os.path.join call.
    for (name, summary), folder in zip(list_of_summaries.items(), folders):
        summary.to_csv(os.path.join(analysis_folder, folder, name + '.csv'),
                       index=False, encoding=encode)
# -
# #### Run all
# +
def main():
    """End-to-end driver: process data, generate fail-rate reports, write them out.

    Relies on module-level names defined earlier in this file:
    data_processing, language_selection, get_time_taken_all, root_path, config.
    """
    print('\nData processing in progress...')
    # import data from data_processing module
    raters, r1, r2, r3, languages, rc, v1, v2, run_value , run_value_2, survey_selected, survey_files = data_processing.main()
    print('Data processing completed.')
    print("\n")
    print(languages)
    # Get input language selection
    language_selected = language_selection(languages)
    # Get data from language modification processes
    rcR, v1R, v2R = get_time_taken_all(language_selected, rc, v1, v2)
    print('\nGenerating reports ...')
    # Start generating fail rate reports
    list_of_datasets, list_of_summaries = generate_all_fail_rate_reports(rcR, v1R, v2R, rc, v1, v2, run_value)
    # Check the run type and language and create folders in reports > deliverables
    run_folder, analysis_folder, folder_tag = file_check_create(root_path, config, language_selected, run_value, run_value_2)
    # Write reports to excel file in run_folder path
    write_fail_report_to_excel(run_folder, list_of_datasets, encode=None)
    # NOTE(review): this message always mentions run_value_2, even for
    # Deployment runs whose path does not include it — confirm intended.
    print(f"\n1. Language fail rates report completed and stored in reports > deliverables > {run_value} > {run_value_2} > {language_selected}")
    # Write summaries to csv file in analysis_folder path
    write_summary_to_csv(analysis_folder, list_of_summaries, encode=None)
    print(f"\n2. Summary report completed and stored in analysis > {folder_tag} > RC/V1/V2")


if __name__ == "__main__":
    main()
# -
|
notebooks/backup/Version-1.4/4.2-report-item-fail-rates-pilot.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Semi-Supervised Architope (SnP 500)
# ---
# - This code Implements Algorithm 3.2 of the "Architopes" paper.
# #### Mode: Code-Testing Parameter(s)
# When True, run with reduced settings for quick code testing.
trial_run = True

# ### Meta-parameters

# Test-size Ratio
test_size_ratio = 0.8  # fraction of the data held out for testing
min_height = 50        # lower bound on network height applied per part

# ### Hyperparameters
#
# Only turn off if running code directly here; typically this script should be called by other notebooks.

# load dataset
# Output locations for fitted models / result tables, and input data folders.
results_path = "./outputs/models/"
results_tables_path = "./outputs/results/"
raw_data_path_folder = "./inputs/raw/"
data_path_folder = "./inputs/data/"

# ### Import

# Load Packages/Modules
# NOTE(review): exec(open(...).read()) runs each helper script directly in
# this module's namespace; names used later (e.g. X_train, CV_folds, n_jobs,
# build_ffNN, param_grid_Vanilla_Nets) are defined by these scripts —
# confirm they stay in sync with this notebook.
exec(open('Init_Dump.py').read())
# Load Hyper-parameter Grid
exec(open('Grid_Enhanced_Network.py').read())
# Load Helper Function(s)
exec(open('Helper_Functions.py').read())
# Pre-process Data
exec(open('Financial_Data_Preprocessor.py').read())
# Import time separately
import time
# +
# ## TEMP?
# import sys
# import threading
# import queue
# class ExcThread(threading.Thread):
# def __init__(self, bucket):
# threading.Thread.__init__(self)
# self.bucket = bucket
# def run(self):
# try:
# raise Exception('An error occured here.')
# except Exception:
# self.bucket.put(sys.exc_info())
# def main():
# bucket = queue.Queue()
# thread_obj = ExcThread(bucket)
# thread_obj.start()
# while True:
# try:
# exc = bucket.get(block=False)
# except Queue.Empty:
# pass
# else:
# exc_type, exc_obj, exc_trace = exc
# # deal with the exception
# thread_obj.join(0.1)
# if thread_obj.isAlive():
# continue
# else:
# break
# if __name__ == '__main__':
# main()
# -
# #### Pre-Process:
# - Convert Categorical Variables to Dummies
# - Remove Bad Column
# - Perform Training/Test Split
# # Random Lipschitz Partition Builder
#
# We implement the random paritioning method of [Yair Bartal](https://scholar.google.com/citations?user=eCXP24kAAAAJ&hl=en):
# - [On approximating arbitrary metrices by tree metrics](https://dl.acm.org/doi/10.1145/276698.276725)
#
# The algorithm is summarized as follow:
#
# ---
#
# ## Algorithm:
# 1. Sample $\alpha \in [4^{-1},2^{-1}]$ randomly and uniformly,
# 2. Apply a random shuffle of the data (a random bijection $\pi:\{i\}_{i=1}^X \rightarrow \mathbb{X}$),
# 3. For $i = 1,\dots,I$:
#     - Set $K_i\triangleq B\left(\pi(i),\alpha \Delta \right) - \bigcup_{j=1}^{i-1} K_j$
#
# 4. Remove empty members of $\left\{K_i\right\}_{i=1}^X$.
#
# **Return**: $\left\{K_i\right\}_{i=1}^{\tilde{X}}$.
#
# For more details on the random-Lipschitz partition of Yair Bartal, see this [well-written blog post](https://nickhar.wordpress.com/2012/03/26/lecture-22-random-partitions-of-metric-spaces/).
# ## Define Random Partition Builder
from scipy.spatial import distance_matrix
# Here we use $\Delta_{in} = Q_{q}\left(\Delta(\mathbb{X})\right)$ where $\Delta(\mathbb{X})$ is the vector of (Euclidean) distances between the given data-points, $q \in (0,1)$ is a hyper-parameter, and $Q$ is the empirical quantile function.
def Random_Lipschitz_Partioner(Min_data_size_percentage,q_in, X_train_in,y_train_in, CV_folds_failsafe, min_size):
    """Randomly partition (X_train_in, y_train_in) using Bartal-style random balls.

    A radius is drawn as alpha * Delta_in, where alpha ~ U[1/4, 1/2] and
    Delta_in is the q_in-quantile of the distances of all points to the first
    point; balls around successive centers are peeled off the remaining pool.
    Parts smaller than max(CV_folds_failsafe, min_size) are merged into one
    catch-all part so cross-validation does not fail.

    Parameters
    ----------
    Min_data_size_percentage : float
        Intended stopping threshold on the remaining pool fraction
        (see NOTE on the loop condition below).
    q_in : float
        Quantile in (0, 1) used to set the base radius.
    X_train_in : DataFrame of training features.
    y_train_in : training targets aligned with X_train_in.
    CV_folds_failsafe : int
        Minimum part size so CV can run without error.
    min_size : int
        Additional lower bound on part size.

    Returns
    -------
    [X_parts, y_parts, N_ratios] : per-part feature frames, per-part targets,
    and the pool fraction recorded after each peel.
    """
    #-----------------------#
    # Reset Seed Internally #
    #-----------------------#
    random.seed(2020)
    np.random.seed(2020)

    #-------------------------------------------#
    #-------------------------------------------#
    # 1) Sample radius from unifom distribution #
    #-------------------------------------------#
    #-------------------------------------------#
    alpha = np.random.uniform(low=.25,high=.5,size=1)[0]

    #-------------------------------------#
    #-------------------------------------#
    # 2) Apply Random Bijection (Shuffle) #
    #-------------------------------------#
    #-------------------------------------#
    # NOTE(review): the shuffle is commented out, so the "random bijection"
    # step is currently the identity — confirm whether this is intentional.
    X_train_in_shuffled = X_train_in#.sample(frac=1)
    y_train_in_shuffled = y_train_in#.sample(frac=1)

    #--------------------#
    #--------------------#
    # X) Initializations #
    #--------------------#
    #--------------------#
    # Compute-data-driven radius
    # (column 0 of the distance matrix: distances of every point to the
    # FIRST point only, not all pairwise distances)
    Delta_X = distance_matrix(X_train_in_shuffled,X_train_in_shuffled)[::,0]
    Delta_in = np.quantile(Delta_X,q_in)

    # Initialize Random Radius
    rand_radius = Delta_in*alpha

    # Initialize Data_sizes & ratios
    N_tot = X_train_in.shape[0] #<- Total number of data-points in input data-set!
    N_radios = np.array([])
    N_pool_train_loop = N_tot

    # Initialize List of Dataframes
    X_internal_train_list = list()
    y_internal_train_list = list()

    # Initialize Partioned Data-pool
    X_internal_train_pool = X_train_in_shuffled
    y_internal_train_pool = y_train_in_shuffled

    # Initialize counter
    part_current_loop = 0

    #----------------------------#
    #----------------------------#
    # 3) Iteratively Build Parts #
    #----------------------------#
    #----------------------------#
    # NOTE(review): because this is an 'or', the loop runs until the pool is
    # empty and Min_data_size_percentage never stops it early — presumably
    # 'and' was intended; confirm.
    while ((N_pool_train_loop/N_tot > Min_data_size_percentage) or (X_internal_train_pool.empty == False)):
        # Extract Current Center
        center_loop = X_internal_train_pool.iloc[0]
        # Compute Distances
        ## Training
        distances_pool_loop_train = X_internal_train_pool.sub(center_loop)
        distances_pool_loop_train = np.array(np.sqrt(np.square(distances_pool_loop_train).sum(axis=1)))
        # Evaluate which Distances are less than the given random radius
        Part_train_loop = X_internal_train_pool[distances_pool_loop_train<rand_radius]
        Part_train_loop_y = y_internal_train_pool[distances_pool_loop_train<rand_radius]

        # Remove all data-points which are "too small"
        # NOTE(review): this threshold reads the global CV_folds rather than
        # the CV_folds_failsafe parameter — looks unintended; confirm.
        if X_internal_train_pool.shape[0] > max(CV_folds,4):
            # Append Current part to list
            X_internal_train_list.append(Part_train_loop)
            y_internal_train_list.append(Part_train_loop_y)

        # Remove current part from pool (the center is always inside its own
        # ball, so at least one point is removed per iteration and the loop
        # terminates).
        X_internal_train_pool = X_internal_train_pool[(np.logical_not(distances_pool_loop_train<rand_radius))]
        y_internal_train_pool = y_internal_train_pool[(np.logical_not(distances_pool_loop_train<rand_radius))]

        # Update Current size of pool of training data
        N_pool_train_loop = X_internal_train_pool.shape[0]
        N_radios = np.append(N_radios,(N_pool_train_loop/N_tot))

        # Update Counter
        part_current_loop = part_current_loop +1

        # Update User
        print((N_pool_train_loop/N_tot))

    # Post processing #
    #-----------------#
    # Remove Empty Partitions
    N_radios = N_radios[N_radios>0]

    #-----------------------------------------------------------------#
    # Combine parts which are too small to perform CV without an error
    #-----------------------------------------------------------------#
    # Initialize lists (partitions) with "enough" datums per part
    X_internal_train_list_good = list()
    y_internal_train_list_good = list()
    X_small_parts = list()
    y_small_parts = list()
    # Initialize first list item test
    is_first = True
    # Initialize counter
    goods_counter = 0
    for search_i in range(len(X_internal_train_list)):
        number_of_instances_in_part = len(X_internal_train_list[search_i])
        if number_of_instances_in_part < max(CV_folds_failsafe,min_size):
            # Check if first
            if is_first:
                # Initialize set of small X_parts
                X_small_parts = X_internal_train_list[search_i]
                # Initialize set of small y_parts
                y_small_parts = y_internal_train_list[search_i]
                # Set is_first to false
                is_first = False
            else:
                # NOTE(review): DataFrame.append was removed in pandas 2.0;
                # pd.concat is the modern replacement — confirm pandas version.
                X_small_parts = X_small_parts.append(X_internal_train_list[search_i])
                y_small_parts = np.append(y_small_parts,y_internal_train_list[search_i])
                # y_small_parts = y_small_parts.append(y_internal_train_list[search_i])
        else:
            # Append to current list
            X_internal_train_list_good.append(X_internal_train_list[search_i])
            y_internal_train_list_good.append(y_internal_train_list[search_i])
            # Update goods counter
            goods_counter = goods_counter +1

    # Append final one to good list
    # (if no part was small, this appends the empty initial lists)
    X_internal_train_list_good.append(X_small_parts)
    y_internal_train_list_good.append(y_small_parts)

    # reset is_first to false (inscase we want to re-run this particular block)
    is_first = True

    # Set good lists to regular lists
    X_internal_train_list = X_internal_train_list_good
    y_internal_train_list = y_internal_train_list_good

    # Return Value #
    #--------------#
    return [X_internal_train_list, y_internal_train_list, N_radios]
# # Apply Random Partitioner to the given Dataset
import time

# Time the partitioning step.
partitioning_time_begin = time.time()

# X_train, data_y and CV_folds are defined by the exec'd preprocessing /
# grid scripts loaded earlier in this notebook.
X_parts_list, y_parts_list, N_ratios = Random_Lipschitz_Partioner(Min_data_size_percentage=.1,
                                                                  q_in=.8,
                                                                  X_train_in=X_train,
                                                                  y_train_in=data_y,
                                                                  CV_folds_failsafe=CV_folds,
                                                                  min_size=100)

partitioning_time = time.time() - partitioning_time_begin

# Fixed garbled status message (was 'The_parts_listhe number of parts are:',
# the result of a botched find/replace).
print('The number of parts is: ' + str(len(X_parts_list)) + '.')
# #### Building Training Predictions on each part
# - Train locally (on each "naive part")
# - Generate predictions for (full) training and testings sets respectively, to be used in training the classifer and for prediction, respectively.
# - Generate predictions on all of testing-set (will be selected between later using classifier)
# Time-Elapse (Start) for Training on Each Part
Architope_partition_training_begin = time.time()

# Initialize running max for Parallel time
# (running max over per-part wall times = the hypothetical "parallel"
# training time, i.e. the slowest part if all parts trained concurrently)
# math comes from the exec'd Init_Dump.py — TODO confirm.
Architope_partitioning_max_time_running = -math.inf # Initialize slowest-time at - infinity to force updating!

# Initialize N_parameter counter for Architope
N_params_Architope = 0
# +
# Train one feed-forward network per part; collect its train/test predictions
# as columns of predictions_train / predictions_test and its per-sample
# training errors as columns of training_quality.
for current_part in range(len(X_parts_list)):
    #==============#
    # Timer(begin) #
    #==============#
    current_part_training_time_for_parallel_begin = time.time()

    # Initializations #
    #-----------------#
    # Reload Grid
    # (re-exec resets param_grid_Vanilla_Nets, which is mutated below)
    exec(open('Grid_Enhanced_Network.py').read())
    # Modify heights according to optimal (data-driven) rule (with threshold)
    current_height = np.ceil(np.array(param_grid_Vanilla_Nets['height'])*N_ratios[current_part])
    current_height_threshold = np.repeat(min_height,(current_height.shape[0]))
    current_height = np.maximum(current_height,current_height_threshold)
    current_height = current_height.astype(int).tolist()
    param_grid_Vanilla_Nets['height'] = current_height
    # Automatically Fix Input Dimension
    param_grid_Vanilla_Nets['input_dim'] = [X_train.shape[1]]
    param_grid_Vanilla_Nets['output_dim'] = [1]

    # Update User #
    #-------------#
    print('Status: Current part: ' + str(current_part) + ' out of : '+str(len(X_parts_list)) +' parts.')
    print('Heights to iterate over: '+str(current_height))

    # Generate Prediction(s) on current Part #
    #----------------------------------------#
    # Failsafe (number of data-points)
    CV_folds_failsafe = min(CV_folds,max(1,(X_train.shape[0]-1)))
    # Train Network
    # build_ffNN comes from the exec'd Helper_Functions.py — TODO confirm.
    y_hat_train_full_loop, y_hat_test_full_loop, N_params_Architope_loop = build_ffNN(n_folds = CV_folds_failsafe,
                                                                                     n_jobs = n_jobs,
                                                                                     n_iter = n_iter,
                                                                                     param_grid_in = param_grid_Vanilla_Nets,
                                                                                     X_train= X_parts_list[current_part],
                                                                                     y_train=y_parts_list[current_part],
                                                                                     X_test_partial=X_train,
                                                                                     X_test=X_test)

    # Append predictions to data-frames
    ## If first prediction we initialize data-frames
    if current_part==0:
        # Register quality
        # (column of per-sample absolute training errors for this part's model)
        training_quality = np.array(np.abs(y_hat_train_full_loop-y_train))
        training_quality = training_quality.reshape(training_quality.shape[0],1)

        # Save Predictions
        predictions_train = y_hat_train_full_loop
        predictions_train = predictions_train.reshape(predictions_train.shape[0],1)
        predictions_test = y_hat_test_full_loop
        predictions_test = predictions_test.reshape(predictions_test.shape[0],1)

    ## If not first prediction we append to already initialized dataframes
    else:
        # Register Best Scores
        #----------------------#
        # Write Predictions
        # Save Predictions
        y_hat_train_loop = y_hat_train_full_loop.reshape(predictions_train.shape[0],1)
        predictions_train = np.append(predictions_train,y_hat_train_loop,axis=1)
        y_hat_test_loop = y_hat_test_full_loop.reshape(predictions_test.shape[0],1)
        predictions_test = np.append(predictions_test,y_hat_test_loop,axis=1)

        # Evaluate Errors #
        #-----------------#
        # Training
        prediction_errors = np.abs(y_hat_train_loop.reshape(-1,)-y_train)
        training_quality = np.append(training_quality,prediction_errors.reshape(training_quality.shape[0],1),axis=1)

    #============#
    # Timer(end) #
    #============#
    current_part_training_time_for_parallel = time.time() - current_part_training_time_for_parallel_begin
    Architope_partitioning_max_time_running = max(Architope_partitioning_max_time_running,current_part_training_time_for_parallel)

    #============---===============#
    # N_parameter Counter (Update) #
    #------------===---------------#
    N_params_Architope = N_params_Architope + N_params_Architope_loop

# Update User
#-------------#
print(' ')
print(' ')
print(' ')
print('----------------------------------------------------')
print('Feature Generation (Learning Phase): Score Generated')
print('----------------------------------------------------')
print(' ')
print(' ')
print(' ')
# -
# Time-Elapsed Training on Each Part
# (total sequential wall time; the "parallel" time is the running max above)
Architope_partition_training = time.time() - Architope_partition_training_begin

# ---
# ### Train Classifier

# #### Deep Classifier

# Prepare Labels/Classes

# Time-Elapsed Training Deep Classifier
Architope_deep_classifier_training_begin = time.time()

# Initialize Classes Labels
# Each training point is labelled with the part whose model achieved the
# smallest absolute training error on it.
partition_labels_training_integers = np.argmin(training_quality,axis=-1)
partition_labels_training = pd.DataFrame(pd.DataFrame(partition_labels_training_integers) == 0)
# Build Classes
# (append one boolean column per remaining part -> one-hot indicator matrix)
for part_column_i in range(1,(training_quality.shape[1])):
    partition_labels_training = pd.concat([partition_labels_training,
                                           (pd.DataFrame(partition_labels_training_integers) == part_column_i)
                                          ],axis=1)
# Convert to integers
partition_labels_training = partition_labels_training+0
# Re-Load Grid and Redefine Relevant Input/Output dimensions in dictionary.
# +
# Re-Load Hyper-parameter Grid
exec(open('Grid_Enhanced_Network.py').read())
# Re-Load Helper Function(s)
exec(open('Helper_Functions.py').read())

# Redefine (Dimension-related) Elements of Grid
# (the classifier maps input features to a one-hot vector over the parts)
param_grid_Deep_Classifier['input_dim'] = [X_train.shape[1]]
param_grid_Deep_Classifier['output_dim'] = [partition_labels_training.shape[1]]
# -
# #### Train Deep Classifier
# Train simple deep classifier
# build_simple_deep_classifier comes from the exec'd Helper_Functions.py —
# TODO confirm.
predicted_classes_train, predicted_classes_test, N_params_deep_classifier = build_simple_deep_classifier(n_folds = CV_folds,
                                                                                                        n_jobs = n_jobs,
                                                                                                        n_iter =n_iter,
                                                                                                        param_grid_in=param_grid_Deep_Classifier,
                                                                                                        X_train = X_train,
                                                                                                        y_train = partition_labels_training,
                                                                                                        X_test = X_test)

# Time-Elapsed Training Deep Classifier
Architope_deep_classifier_training = time.time() - Architope_deep_classifier_training_begin

# Make Prediction(s)
# For each sample, pick the prediction column of the part selected by the
# classifier (column index = predicted class).
# Training Set
Architope_prediction_y_train = np.take_along_axis(predictions_train, predicted_classes_train[:,None], axis=1)
# Testing Set
Architope_prediction_y_test = np.take_along_axis(predictions_test, predicted_classes_test[:,None], axis=1)
# #### Write Predictions
# Compute Performance
# +
# Compute Performance
# reporter(...) comes from the exec'd helper scripts — TODO confirm its
# exact metrics; the result is a DataFrame with a 'test' column used below.
performance_Architope = reporter(y_train_hat_in=Architope_prediction_y_train,
                                 y_test_hat_in=Architope_prediction_y_test,
                                 y_train_in=y_train,
                                 y_test_in=y_test)
# Write Performance
performance_Architope.to_latex((results_tables_path+"Architopes_full_performance.tex"))

# Update User
print(performance_Architope)
# -
# ---
# ---
# ---
# ### Model Complexity/Efficiency Metrics
# +
# Compute Parameters for composite models #
#-----------------------------------------#
# Total parameter count: per-part networks plus the deep classifier.
N_params_Architope_full = N_params_Architope + N_params_deep_classifier

# Build AIC-like Metric #
#-----------------------#
# NOTE(review): this is an ad-hoc "AIC-like" score, not the statistical AIC
# (2k - 2 ln L); kept as defined by the authors.
AIC_like = 2*(N_params_Architope_full - np.log((performance_Architope['test']['MAE'])))
AIC_like = np.round(AIC_like,3)
Efficiency = np.log(N_params_Architope_full) *(performance_Architope['test']['MAE'])
Efficiency = np.round(Efficiency,3)

# Build Table #
#-------------#
# L-time = sequential (learning) time; P-time = slowest single part
# (hypothetical parallel time).
Architope_Model_Complexity_full = pd.DataFrame({'L-time': [Architope_partition_training],
                                                'P-time':[Architope_partitioning_max_time_running],
                                                'N_params_expt': [N_params_Architope_full],
                                                'AIC-like': [AIC_like],
                                                'Eff': [Efficiency]})

# Write Required Training Time(s)
Architope_Model_Complexity_full.to_latex((results_tables_path+"Architope_full_model_complexities.tex"))

#--------------======---------------#
# Display Required Training Time(s) #
#--------------======---------------#
print(Architope_Model_Complexity_full)
# -
# # Summary
# Final console summary: echo the performance and complexity tables.
print(' ')
print('#===============#')
print('# Model Summary #')
print('#===============#')
print(' ')
print('---------------------------------------------')
print('Model Performance: Semi-Supervised Architope')
print('---------------------------------------------')
print(performance_Architope)
print(' ')
print('--------------------------------------------')
print('Model Complexity: Semi-Supervised Architope')
print('--------------------------------------------')
print(Architope_Model_Complexity_full)
print(' ')
print(' ')
print('😃😃 Have a wonderful day!! 😃😃')
# ---
# # Fin
# ---
|
GET_FFNN_is_ADAM/Architope_Standalone.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pysnooper
class Solution:
    # NOTE: the @pysnooper.snoop() debug-tracing decorator was removed — it
    # was leftover instrumentation and pulled in a third-party dependency.
    def lengthOfLongestSubstring(self, s: str) -> int:
        """Return the length of the longest substring of *s* with no repeated characters.

        Sliding-window scan: ``start`` marks the left edge of the current
        duplicate-free window and ``last_seen`` maps each character to the
        index of its most recent occurrence. O(len(s)) time, O(alphabet)
        space. (Replaces the original count-dict two-pointer bookkeeping,
        which also shadowed the builtin ``dict``.)
        """
        last_seen = {}  # char -> index of its latest occurrence
        start = 0       # left edge of the current window
        best = 0
        for end, ch in enumerate(s):
            # If ch already occurs inside the window, slide the left edge
            # just past its previous occurrence.
            if ch in last_seen and last_seen[ch] >= start:
                start = last_seen[ch] + 1
            last_seen[ch] = end
            best = max(best, end - start + 1)
        return best
# -
# Ad-hoc notebook checks for the solution.
s = Solution()
s.lengthOfLongestSubstring("abcabcab")

s = Solution()
s.lengthOfLongestSubstring("bbbb")

s = Solution()
s.lengthOfLongestSubstring("au")
# Removed the stray bare expression `a` that followed — it referenced an
# undefined name and raised NameError when the cell ran.
|
LeetCode3.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Spark ML model pipelines on Distributed Deep Neural Nets
#
# This notebook describes how to build machine learning [pipelines with Spark ML](http://spark.apache.org/docs/latest/ml-guide.html) for distributed versions of Keras deep learning models. As a data set, we use the Otto Product Classification challenge from Kaggle. The reason we chose this data is that it is small and very structured. This way, we can focus more on technical components rather than preprocessing intricacies. Also, users with slow hardware or without a full-blown Spark cluster should be able to run this example locally, and still learn a lot about the distributed mode.
#
# Often, the need to distribute computation is not imposed by model training, but rather by building the data pipeline, i.e. ingestion, transformation etc. In training, deep neural networks tend to do fairly well on one or more GPUs on one machine. Most of the time, using gradient descent methods, you will process one batch after another anyway. Even so, it may still be beneficial to use frameworks like Spark to integrate your models with your surrounding infrastructure. On top of that, the convenience provided by Spark ML pipelines can be very valuable (being syntactically very close to what you might know from [```scikit-learn```](http://scikit-learn.org/stable/modules/generated/sklearn.pipeline.Pipeline.html)).
#
# **TL;DR:** We will show how to tackle a classification problem using distributed deep neural nets and Spark ML pipelines in an example that is essentially a distributed version of the one found [here](https://github.com/fchollet/keras/blob/master/examples/kaggle_otto_nn.py).
# ## Using this notebook
# As we are going to use elephas, you will need access to a running Spark context to run this notebook. If you don't have it already, install Spark locally by following the [instructions provided here](https://github.com/maxpumperla/elephas/blob/master/README.md). Make sure to also export ```SPARK_HOME``` to your path and start your ipython/jupyter notebook as follows:
# ```
# IPYTHON_OPTS="notebook" ${SPARK_HOME}/bin/pyspark --driver-memory 4G elephas/examples/Spark_ML_Pipeline.ipynb
# ```
# To test your environment, try to print the Spark context (provided as ```sc```), i.e. execute the following cell.
from __future__ import print_function
print(sc)  # sanity check: the PySpark shell provides the SparkContext as ``sc``
# ## Otto Product Classification Data
# Training and test data is available [here](https://www.kaggle.com/c/otto-group-product-classification-challenge/data). Go ahead and download the data. Inspecting it, you will see that the provided csv files consist of an id column, 93 integer feature columns. ```train.csv``` has an additional column for labels, which ```test.csv``` is missing. The challenge is to accurately predict test labels. For the rest of this notebook, we will assume data is stored at ```data_path```, which you should modify below as needed.
# Base directory for the Kaggle train.csv / test.csv files used below.
data_path = "./" # <-- Make sure to adapt this to where your csv files are.
# Loading data is relatively simple, but we have to take care of a few things. First, while you can shuffle rows of an RDD, it is generally not very efficient. But since data in ```train.csv``` is sorted by category, we'll have to shuffle in order to make the model perform well. This is what the function ```shuffle_csv``` below is for. Next, we read in plain text in ```load_data_rdd```, split lines by comma and convert features to float vector type. Also, note that the last column in ```train.csv``` represents the category, which has a ```Class_``` prefix.
# ### Defining Data Frames
#
# Spark has a few core data structures, among them is the ```data frame```, which is a distributed version of the named columnar data structure many will know from either [R](https://stat.ethz.ch/R-manual/R-devel/library/base/html/data.frame.html) or [Pandas](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.html). We need a so called ```SQLContext``` and an optional column-to-names mapping to create a data frame from scratch.
# +
from pyspark.sql import SQLContext
from pyspark.ml.linalg import Vectors
import numpy as np
import random
sql_context = SQLContext(sc)  # entry point for creating DataFrames from RDDs
def shuffle_csv(csv_file):
    """Shuffle the lines of *csv_file* in place on disk.

    train.csv is sorted by category, which would bias mini-batch training;
    shuffling once on disk is cheaper than shuffling the RDD.
    Uses ``with`` blocks so both file handles are closed deterministically
    (the original left them to the garbage collector).
    """
    with open(csv_file) as fh:
        lines = fh.readlines()
    random.shuffle(lines)
    with open(csv_file, 'w') as fh:
        fh.writelines(lines)
def load_data_frame(csv_file, shuffle=True, train=True):
    """Load a CSV file into a Spark DataFrame with 'features' and 'category' columns.

    Parameters
    ----------
    csv_file : str
        File name relative to ``data_path``.
    shuffle : bool
        Shuffle the file on disk first (train.csv is sorted by category).
    train : bool
        If True, parse the last column as the label; otherwise attach the
        dummy label "Class_1" so train and test frames share one schema.
    """
    if shuffle:
        shuffle_csv(csv_file)
    data = sc.textFile(data_path + csv_file) # This is an RDD, which will later be transformed to a data frame
    # Drop the header row (its first field is the literal 'id'), then split on commas.
    data = data.filter(lambda x: x.split(',')[0] != 'id').map(lambda line: line.split(','))
    if train:
        data = data.map(
            lambda line: (Vectors.dense(np.asarray(line[1:-1]).astype(np.float32)),
                          str(line[-1])))
    else:
        # Test data gets dummy labels. We need the same structure as in Train data
        data = data.map(lambda line: (Vectors.dense(np.asarray(line[1:]).astype(np.float32)), "Class_1"))
    # BUG FIX: the SQLContext defined above is named ``sql_context``; the original
    # referenced ``sqlContext``, which only exists inside the PySpark shell and
    # raises NameError everywhere else.
    return sql_context.createDataFrame(data, ['features', 'category'])
# -
# Let's load both train and test data and print a few rows of data using the convenient ```show``` method.
# +
# Materialise both frames; test rows carry the dummy "Class_1" label.
train_df = load_data_frame("train.csv")
test_df = load_data_frame("test.csv", shuffle=False, train=False) # No need to shuffle test data
print("Train data frame:")
train_df.show(10)
print("Test data frame (note the dummy category):")
test_df.show(10)
# -
# ## Preprocessing: Defining Transformers
#
# Up until now, we basically just read in raw data. Luckily, ```Spark ML``` has quite a few preprocessing features available, so the only thing we will ever have to do is define transformations of data frames.
#
# To proceed, we will first transform category strings to double values. This is done by a so called ```StringIndexer```. Note that we carry out the actual transformation here already, but that is just for demonstration purposes. All we really need is to define ```string_indexer``` to put it into a pipeline later on.
# +
from pyspark.ml.feature import StringIndexer
# Map category strings ("Class_1" ... ) to numeric label indices for training.
string_indexer = StringIndexer(inputCol="category", outputCol="index_category")
fitted_indexer = string_indexer.fit(train_df)
indexed_df = fitted_indexer.transform(train_df)
# -
# Next, it's good practice to normalize the features, which is done with a ```StandardScaler```.
# +
from pyspark.ml.feature import StandardScaler
# Standardise features to zero mean / unit variance before feeding the net.
scaler = StandardScaler(inputCol="features", outputCol="scaled_features", withStd=True, withMean=True)
fitted_scaler = scaler.fit(indexed_df)
scaled_df = fitted_scaler.transform(indexed_df)
# -
print("The result of indexing and scaling. Each transformation adds new columns to the data frame:")
scaled_df.show(10)
# ## Keras Deep Learning model
#
# Now that we have a data frame with processed features and labels, let's define a deep neural net that we can use to address the classification problem. Chances are you came here because you know a thing or two about deep learning. If so, the model below will look very straightforward to you. We build a keras model by choosing a set of three consecutive Dense layers with dropout and ReLU activations. There are certainly much better architectures for the problem out there, but we really just want to demonstrate the general flow here.
# +
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation
from tensorflow.keras.utils import to_categorical, generic_utils
# Derive the network dimensions from the data itself.
nb_classes = train_df.select("category").distinct().count()
input_dim = len(train_df.select("features").first()[0])

# Three 512-unit ReLU blocks with 50% dropout, then a softmax read-out.
model = Sequential()
model.add(Dense(512, input_shape=(input_dim,)))
model.add(Activation('relu'))
model.add(Dropout(0.5))
for _ in range(2):
    model.add(Dense(512))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam')
# -
# ## Distributed Elephas model
#
# To lift the above Keras ```model``` to Spark, we define an ```Estimator``` on top of it. An ```Estimator``` is Spark's incarnation of a model that still has to be trained. It essentially only comes with only a single (required) method, namely ```fit```. Once we call ```fit``` on a data frame, we get back a ```Model```, which is a trained model with a ```transform``` method to predict labels.
#
# We do this by initializing an ```ElephasEstimator``` and setting a few properties. As by now our input data frame will have many columns, we have to tell the model where to find features and labels by column name. Then we provide serialized versions of our Keras model. We can not plug in keras models into the ```Estimator``` directly, as Spark will have to serialize them anyway for communication with workers, so it's better to provide the serialization ourselves. In fact, while pyspark knows how to serialize ```model```, it is extremely inefficient and can break if models become too large. Spark ML is especially picky (and rightly so) about parameters and more or less prohibits you from providing non-atomic types and arrays of the latter. Most of the remaining parameters are optional and rather self explanatory. Plus, many of them you know if you have ever run a keras model before. We just include them here to show the full set of training configuration.
# +
from elephas.ml_model import ElephasEstimator
from tensorflow.keras import optimizers
# NOTE(review): newer TF/Keras renamed ``lr`` to ``learning_rate`` and removed
# ``Model.to_yaml`` (TF >= 2.6) — confirm against the TF version pinned here.
adam = optimizers.Adam(lr=0.01)
opt_conf = optimizers.serialize(adam)  # serialised so Spark can ship it to workers
# Initialize SparkML Estimator and set all relevant properties
estimator = ElephasEstimator()
estimator.setFeaturesCol("scaled_features")  # These two come directly from pyspark,
estimator.setLabelCol("index_category")  # hence the camel case. Sorry :)
estimator.set_keras_model_config(model.to_yaml())  # Provide serialized Keras model
estimator.set_categorical_labels(True)
estimator.set_nb_classes(nb_classes)
estimator.set_num_workers(1)  # We just use one worker here. Feel free to adapt it.
estimator.set_epochs(20)
estimator.set_batch_size(128)
estimator.set_verbosity(1)
estimator.set_validation_split(0.15)
estimator.set_optimizer_config(opt_conf)
estimator.set_mode("synchronous")
estimator.set_loss("categorical_crossentropy")
estimator.set_metrics(['acc'])
# -
# ## SparkML Pipelines
#
# Now for the easy part: Defining pipelines is really as easy as listing pipeline stages. We can provide any configuration of ```Transformers``` and ```Estimators``` really, but here we simply take the three components defined earlier. Note that ```string_indexer``` and ```scaler``` are interchangeable, while ```estimator``` somewhat obviously has to come last in the pipeline.
# +
from pyspark.ml import Pipeline
# Indexing -> scaling -> distributed training, in order; the estimator must come last.
pipeline = Pipeline(stages=[string_indexer, scaler, estimator])
# -
# ### Fitting and evaluating the pipeline
#
# The last step now is to fit the pipeline on training data and evaluate it. We evaluate, i.e. transform, on _training data_, since only in that case do we have labels to check accuracy of the model. If you like, you could transform the ```test_df``` as well.
# +
from pyspark.mllib.evaluation import MulticlassMetrics
fitted_pipeline = pipeline.fit(train_df) # Fit model to data
prediction = fitted_pipeline.transform(train_df) # Evaluate on train data.
# prediction = fitted_pipeline.transform(test_df) # <-- The same code evaluates test data.
pnl = prediction.select("index_category", "prediction")
pnl.show(100)
# BUG FIX: since Spark 2.0 a DataFrame has no ``.map``; convert to the underlying
# RDD first (MulticlassMetrics expects an RDD of (label, prediction) pairs).
prediction_and_label = pnl.rdd.map(lambda row: (row.index_category, row.prediction))
metrics = MulticlassMetrics(prediction_and_label)
# NOTE(review): ``precision()`` without arguments is deprecated in newer Spark
# in favour of ``accuracy`` — confirm against the Spark version in use.
print(metrics.precision())
# -
# ## Conclusion
#
# It may certainly take some time to master the principles and syntax of both Keras and Spark, depending where you come from, of course. However, we also hope you come to the conclusion that once you get beyond the stage of struggling with defining your models and preprocessing your data, the business of building and using SparkML pipelines is quite an elegant and useful one.
#
# If you like what you see, consider helping further improve elephas or contributing to Keras or Spark. Do you have any constructive remarks on this notebook? Is there something you want me to clarify? In any case, feel free to contact me.
|
examples/Spark_ML_Pipeline.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "slide"}
# # Part VI: Managing Fuzzing
#
# This part discusses how to manage fuzzing in the large.
#
# * [Fuzzing in the Large](FuzzingInTheLarge.ipynb) discusses how to create large infrastructures for fuzzing, running millions of tests and managing their results.
#
# * [When to Stop Fuzzing](WhenToStopFuzzing.ipynb) details how to estimate when enough fuzzing is enough – and when you can let your computers work on other tasks.
|
docs/notebooks/06_Managing_Fuzzing.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Notebook for scraping missing country data
#
# This notebook was used to do the following:
# - scrape the country for institutions from the web using Wikipedia and Google
# - save a file of CS institutions that could not be scraped with their information for use in Mturk tasks
# - merge the results of the Mturk task and scraping task to create a new version of inst_full_clean.csv
import os, time, requests, re, json, random, csv
import pandas as pd
import numpy as np
from selenium import webdriver
from bs4 import BeautifulSoup
from datetime import datetime
from geopy.geocoders import Nominatim
from geopy.exc import GeopyError
# +
# get coordinates from wikipedia
def wikipedia_location(row):
    """Scrape a location for one institution from its Wikipedia page.

    row: a row from institution_info.data (module-level singleton).
    return: (location, method) where method is one of "Wiki-Coordinates",
    "Wiki-Country", "Wiki-Headquarters", "Wiki-Location" or "None".
    """
    url = row[institution_info.headers.index("WikiUrl")]
    # Guard against both "" (reloaded CSV) and None (freshly joined wiki URLs);
    # the original only checked "" and would pass None to requests.get.
    if not url:
        return "NO_LOCATION_FOUND", "None"
    # Retry transient connection failures a few times before giving up.
    page = None
    for _ in range(5):
        try:
            page = requests.get(url)
            break
        except requests.exceptions.ConnectionError:
            time.sleep(0.5)
    if page is None:
        print("WARNING: Experience ConnectionError", url)
        return "NO_LOCATION_FOUND", "None"
    soup = BeautifulSoup(page.content, 'html.parser')
    # Try the infobox fields from most to least precise.
    lat_element = soup.find("span", attrs={"class": "latitude"})
    lon_element = soup.find("span", attrs={"class": "longitude"})
    if lat_element is not None and lon_element is not None:
        return (lat_element.text, lon_element.text), "Wiki-Coordinates"
    country_element = soup.find("div", attrs={"class": "country-name"})
    if country_element is not None:
        return country_element.text, "Wiki-Country"
    headquarters_element = soup.find("th", string="Headquarters")
    if headquarters_element is not None:
        return headquarters_element.parent.text.replace("Headquarters", ""), "Wiki-Headquarters"
    location_element = soup.find("th", string="Location")
    if location_element is not None:
        return location_element.parent.text.replace("Location", ""), "Wiki-Location"
    return "NO_LOCATION_FOUND", "None"
# get location from google location
def google_location(driver, row):
    """Scrape a location for one institution from a Google search results page.

    driver: a selenium driver handle
    row: a row from institution_info.data
    return: (location, method)
    """
    name = row[institution_info.headers.index("DisplayName")]
    driver.get(googlify_name(name))
    # The "answer box" div appears when Google knows the headquarters outright.
    # Narrowed from a bare ``except:`` (which also swallowed KeyboardInterrupt);
    # selenium raises NoSuchElementException, a subclass of Exception.
    try:
        element = driver.find_element_by_xpath("//*/div[@class='Z0LcW']")
        return element.text, "Google-Headquarters"
    except Exception:
        pass
    # Fall back to the address line of the knowledge-panel sidebar.
    # (The original had a duplicated ``element = element = ...`` assignment.)
    try:
        element = driver.find_element_by_xpath("//*[@data-dtype='d3ifr']/span[@class='LrzXr']")
        return element.text, "Google-Sidebar"
    except Exception:
        return "NO_LOCATION_FOUND", "None"
# handle needs for different methods to get location
def get_location(driver, row):
    """Resolve (name, location, method, country) for one institution row.

    Uses, in order of preference: the Country already present in the row,
    the institution's Wikipedia page, then a Google search.
    """
    headers = institution_info.headers
    name = row[headers.index("DisplayName")]
    existing_country = row[headers.index("Country")]
    if existing_country != "":
        # Nothing to scrape; the country doubles as the location.
        return (name, existing_country, "Existing", existing_country)
    location, method = wikipedia_location(row)
    if method == "None":
        location, method = google_location(driver, row)
    country = location_to_country(location, method)
    return (name, location, method, country)
def googlify_name(name, appendage="headquarters"):
    """Build a Google search URL for *name*.

    Spaces become '+'; the characters , ' . : ; are stripped since they break
    query-by-URL. If *appendage* is not None it is added as an extra search term.
    """
    # One-pass translation replaces the original chain of str.replace calls.
    name = name.translate(str.maketrans({' ': '+', ',': None, "'": None,
                                         '.': None, ':': None, ';': None}))
    if appendage is not None:
        name += "+{}".format(appendage)
    return "https://www.google.com/search?q=" + name
# +
def location_to_country_coordinates(location):
    """Reverse-geocode a (latitude, longitude) text pair into a country name.

    location: tuple of coordinate strings as scraped from Wikipedia.
    Returns "ERROR_PROCESSING_LOCATION" if Nominatim's answer lacks a country.
    """
    geolocator = Nominatim(user_agent="my-application")
    try:
        address = geolocator.reverse(", ".join(location), language="en", addressdetails=True)
        country = address.raw["address"]["country"]
    except KeyError:
        print("ERROR_PROCESSING_LOCATION", location)
        country = "ERROR_PROCESSING_LOCATION"
    return country
def location_to_country_address(location):
    """Forward-geocode a free-text location string into a country name.

    The scraped string is first split at case boundaries (Wikipedia infobox
    text often concatenates "CityCountry"), then sent to Nominatim.
    Returns "ERROR_PROCESSING_LOCATION" when geocoding yields no match
    (geocode returns None, so .raw raises AttributeError).
    """
    location = split_by_case(location)
    geolocator = Nominatim(user_agent="my-application")
    try:
        address = geolocator.geocode(location, language="en", addressdetails=True)
        country = address.raw["address"]["country"]
    except (AttributeError):
        print("ERROR_PROCESSING_LOCATION", location)
        country = "ERROR_PROCESSING_LOCATION"
    return country
def location_to_country(location, method):
    """Translate a scraped location into a country name, retrying geopy errors.

    *method* decides whether *location* is a coordinate pair or free text;
    "None" short-circuits to "NO_COUNTRY_FOUND".
    """
    if method == "None":
        return "NO_COUNTRY_FOUND"
    resolver = (location_to_country_coordinates
                if method == "Wiki-Coordinates"
                else location_to_country_address)
    # Up to 10 attempts, backing off briefly on transient geopy failures.
    for _ in range(10):
        try:
            return resolver(location)
        except GeopyError:
            time.sleep(0.1)
    return "NO_COUNTRY_FOUND"
def split_by_case(string):
    """Insert ", " at lower-to-upper case boundaries of *string*.

    "NewYork" -> "New, York". Digits count as lowercase, so "Route66North"
    -> "Route66, North". Returns "" for the empty string.
    (The original loop ended with a stray bare ``next`` — a no-op reference
    to the builtin, presumably left over from another language — removed.)
    """
    tokens = list()
    next_token_start = 0
    prev_lower = False
    for i, char in enumerate(string):
        if prev_lower and char.isupper():
            tokens.append(string[next_token_start:i])
            next_token_start = i
        prev_lower = char.islower() or char.isnumeric()
    tokens.append(string[next_token_start:])
    return ", ".join(tokens)
# +
class InstitutionInfo:
    """CSV-backed store of institutions, incrementally enriched with countries.

    Bootstraps from ``inst_full_clean.csv`` on the first run; on later runs it
    resumes from ``inst_full_clean_countries.csv`` so an interrupted scrape can
    pick up where it left off. Checkpoints to disk every 50 modified rows.
    """
    def __init__(self):
        self.input_filename = "inst_full_clean.csv"
        self.output_filename = "inst_full_clean_countries.csv"
        self.unsaved_rows = 0  # rows modified since the last save()
        self.load_data()
    def load_data(self):
        """Load from the output file if present (resume), else bootstrap from the input."""
        self.output_exists = os.path.exists(self.output_filename)
        if self.output_exists:
            with open(self.output_filename, "r") as fh:
                reader = csv.reader(fh, delimiter=",")
                self.headers = next(reader)
                self.data = [line for line in reader]
        else:
            with open(self.input_filename, "r") as fh:
                reader = csv.reader(fh, delimiter=",")
                # The input file has no header row; column names are hard-coded.
                self.headers = ["Key","DisplayName","Type","Region","Country","Url"]
                self.data = [line for line in reader]
            self.add_wikipedia_links()
            # Empty placeholder columns, filled in later by get_country_info().
            self.add_column(["" for row in self.data], header = "Location")
            self.add_column(["" for row in self.data], header = "ScrapedCountry")
            self.add_column(["" for row in self.data], header = "Method")
            self.save()
    def add_wikipedia_links(self):
        """Append a WikiUrl column, joined from inst_fullname.csv by institution key."""
        assert len(self.headers) == 6
        self.headers.append("WikiUrl")
        with open("inst_fullname.csv", "r") as fh:
            reader = csv.reader(fh, delimiter=",")
            # Rows without exactly 5 fields have no wiki URL; map those to None.
            wiki_urls_dict = {row[0]: None if len(row) != 5 else row[4] for row in reader}
        for row in self.data:
            assert len(row) == 6
            row.append(wiki_urls_dict[row[self.headers.index("Key")]])
        self.save()
    def add_column(self, column_data, header):
        """Append one value per row as a new named column."""
        assert len(column_data) == len(self.data)
        new_data = [row + [column_data[i]] for i, row in enumerate(self.data)]
        self.data = new_data
        self.headers.append(header)
    def get_column(self, column_name):
        """Return the values of one column as a list."""
        idx = self.headers.index(column_name)
        return [row[idx] for row in self.data]
    def head(self):
        """Print the header row and the first 10 data rows, tab-separated."""
        print("\t".join(self.headers))
        for row in self.data[:10]:
            print("\t".join(row))
    def save(self, output_filename = None):
        """Write headers + data to CSV (defaults to the resumable output file)."""
        if output_filename == None:
            output_filename = self.output_filename
        with open(output_filename,"w") as fh:
            writer = csv.writer(fh,delimiter=",")
            writer.writerow(self.headers)
            writer.writerows(self.data)
        print("finished saving {} rows".format(self.unsaved_rows))
        self.unsaved_rows = 0
    def get_country_info(self):
        """Scrape location/country for every row that does not have one yet.

        Opens a Chrome driver, fills the Location/ScrapedCountry/Method columns
        via the module-level get_location(), and checkpoints every 50 rows.
        """
        start = datetime.now()
        driver = webdriver.Chrome()
        driver.implicitly_wait(3)
        for row in self.data:
            if row[self.headers.index("ScrapedCountry")] != "":
                continue  # already scraped on a previous (possibly interrupted) run
            if self.unsaved_rows >= 50:
                self.save()
                print("Took",round((datetime.now()-start).total_seconds(),2), "seconds")
                start = datetime.now()
            name, location, method, country = get_location(driver,row)
            row[self.headers.index("Location")] = location
            row[self.headers.index("ScrapedCountry")] = country
            row[self.headers.index("Method")] = method
            self.unsaved_rows += 1
        driver.close()
        self.save()
        print("Took",round((datetime.now()-start).total_seconds(),2), "seconds")
        print("No more rows to fetch")
    def reset(self):
        """Delete the output file and reload everything from scratch."""
        os.remove(self.output_filename)
        self.load_data()
# Module-level singleton used by the scraping helpers above.
institution_info = InstitutionInfo()
# +
# institution_info.get_country_info()
# -
# Summarise how many institutions still lack region/type information
# ("Other" is the placeholder for unknown values in both columns).
print("Missing location",
      len([1 for row in institution_info.get_column("Region") if row == "Other"]))
print("Missing type",
      len([1 for row in institution_info.get_column("Type") if row == "Other"]))
print("Missing location or type",
      len([1 for row in institution_info.data if
           row[institution_info.headers.index("Region")] == "Other" or
           row[institution_info.headers.index("Type")] == "Other"]))
# +
# prevent north american region being interpreted as missing
na_vals = [
    '-1.#IND', '1.#QNAN', '1.#IND',
    '-1.#QNAN', '#N/A','N/A',# 'NA',
    '#NA', 'NULL', 'NaN', '-NaN',
    'nan', '-nan']
# keep_default_na=False stops pandas treating the region code "NA"
# (North America) as NaN; only the explicit na_vals above count as missing.
full_data = pd.read_csv("inst_full_clean_countries.csv", keep_default_na=False, na_values = na_vals)
# +
# Compare scraped country names against the canonical MAG country list,
# so the alias table below can map the mismatches.
MAG_countries = sorted(set(full_data["Country"][np.vectorize(type)(full_data["Country"]) == str]))
# BUG FIX: the Method column stores "Existing" (capitalised, see get_location);
# the original compared against lowercase "existing", so the filter never
# excluded anything. (Also renamed the local "scraped_counries" typo.)
scraped_countries = sorted(set(full_data["ScrapedCountry"][full_data["Method"] != "Existing"]))
print("COUNTRIES NOT ALREADY IN MAG LIST:\n")
for country in scraped_countries:
    if country not in MAG_countries:
        print(country)
# +
# convert scraped countries to MAG equivalents
# Map scraped spellings to the names already used in the MAG data;
# failure sentinels collapse to "No data".
country_aliases = {
    "Czech Republic": "Czechia",
    "Côte d'Ivoire": "Ivory Coast",
    "D.R.": "Dominican Republic",
    "DR Congo": "Democratic Republic of the Congo",
    "PRC": "China",
    "Palestinian Territory": "State of Palestine",
    "RSA": "South Africa",
    "TW": "Taiwan",
    "The Netherlands": "Netherlands",
    "USA": "United States",
    "United States of America": "United States",
    "United States of America (Dry Tortugas territorial waters)": "United States",
    "ERROR_PROCESSING_LOCATION": "No data",
    "NO_COUNTRY_FOUND": "No data"
}
# Identity mapping for any country not in the alias table.
country_func = np.vectorize(
    lambda x: country_aliases[x] if x in country_aliases else x)
full_data["FinalCountry"] = country_func(full_data["ScrapedCountry"])
# +
# Get institutions that have scores in CS venues used in CS metrics.
# The commented block shows how institutions_with_scores.json was produced
# from the per-venue score files; it is kept for provenance.
# dirname = "../../data/scores/"
# institutions = set()
# for file in os.listdir(dirname):
#     if not file.endswith(".json"):
#         continue
#     with open(os.path.join(dirname,file), "r") as fh:
#         institutions.update(json.load(fh).keys())
# institutions = list(institutions)
# with open("institutions_with_scores.json", "w") as fh:
#     json.dump(institutions, fh)
with open("institutions_with_scores.json", "r") as fh:
    institutions = set(json.load(fh))
# Restrict full_data to institutions that actually have scores.
has_score_f = lambda x: x in institutions
has_score_f_vec = np.vectorize(has_score_f)
data = full_data[has_score_f_vec(full_data["Key"])]
# +
def print_scraping_summary(data):
    """Print how many rows had a country already, were scraped, or failed.

    The four categories partition the rows; an internal assert checks that.
    """
    failed = ["ERROR_PROCESSING_LOCATION", "NO_COUNTRY_FOUND"]
    not_scraped_methods = ["None", "Existing"]
    existing = sum(data["Method"] == "Existing")
    not_scraped = sum(data["Method"] == "None")
    # Boolean masks via Series.isin replace the original np.vectorize lambdas.
    was_scraped = ~data["Method"].isin(not_scraped_methods)
    scraped_not_parsed = sum(was_scraped & data["ScrapedCountry"].isin(failed))
    scraped = sum(was_scraped & ~data["ScrapedCountry"].isin(failed))
    total = len(data)
    assert total == existing + not_scraped + scraped_not_parsed + scraped
    print("total:",total)
    print("existing:",existing, " {}%".format(round(100*existing/total,2)))
    print("not scraped:",not_scraped, " {}%".format(round(100*not_scraped/total,2)))
    print("scraped not parsed:",scraped_not_parsed, " {}%".format(round(100*scraped_not_parsed/total,2)))
    print("scraped:",scraped, " {}%".format(round(100*scraped/total,2)))
    print()
# Report coverage for the full table and for the score-bearing subset.
print("full data:")
print_scraping_summary(full_data)
print("relevant data:")
print_scraping_summary(data)
# -
# # link countries to regions
# Country -> continent lookup (skip the header row of the mapping file).
with open("country_continent.csv", "r") as fh:
    reader = csv.reader(fh)
    next(reader)
    country_to_region = {country: region for country, region in reader}
country_to_region.keys()
# NOTE(review): ``countries_with_nan_region`` is not defined anywhere in this
# file — presumably it was built in a notebook cell that has since been
# removed. This line raises NameError as written; confirm before re-running.
full_data[np.vectorize(lambda x: x in countries_with_nan_region)(full_data["FinalCountry"])]
set(full_data["Region"])
# +
# Save data to use for mturk tasks
condition = np.vectorize(lambda x: x=="No data")
# NOTE(review): ``data`` is a filtered view of full_data; assigning a new
# column here may trigger pandas' SettingWithCopyWarning — confirm intended.
data["GoogleUrl"] = np.vectorize(lambda x: googlify_name(x, appendage="Headquarters"))(data["Key"])
# Only institutions whose country is still unknown go to the Mturk task.
mturk_data = data[condition(data["FinalCountry"])][["DisplayName", "Url","WikiUrl","GoogleUrl","FinalCountry"]]
mturk_data.to_csv("data_for_mturk.csv", index=False)
mturk_data
# +
## Combine results from Mturk with scraped results
mturk_results = dict()
with open("Mturk batch result - Batch_3804034_batch_results.csv", "r") as fh:
    reader = csv.reader(fh, delimiter=",")
    next(reader)
    for row in reader:
        # Column 27 holds the institution name and column 32 the worker's
        # answer (positions specific to this Mturk batch-results export).
        institute = row[27]
        country = row[32]
        # Tally votes per institution: {institution: {country: votes}}
        if institute not in mturk_results:
            mturk_results[institute] = {country: 1}
        else:
            if country in mturk_results[institute]:
                mturk_results[institute][country] += 1
            else:
                mturk_results[institute][country] = 1
print(len(mturk_results))
# Keep only institutions where one country got a strict majority of the votes,
# mapping each to its most-voted country (the truthiness of the non-empty
# list implements the majority test).
mturk_results = {inst: max(countries.items(), key=lambda x: x[1])[0] for inst, countries in mturk_results.items()
                 if len([k for k,v in countries.items() if v > sum(countries.values()) / 2]) }
print(len(mturk_results), mturk_results)
# +
# assign mturk selected countries to FinalCountry
solved_with_mturk = np.vectorize(lambda inst: inst in mturk_results)
lookup_mturk_answer = np.vectorize(lambda inst: mturk_results[inst] if inst in mturk_results else "No data")
# Manual patches for entries missing from the country->region table.
country_to_region["No data"] = "No data"
country_to_region["Aland Islands"] = "EU"
lookup_region = np.vectorize(lambda country: country_to_region[country])
# set final country for mturk results
full_data.loc[solved_with_mturk(full_data["DisplayName"]),"FinalCountry"] = lookup_mturk_answer(
    full_data[solved_with_mturk(full_data["DisplayName"])]["DisplayName"])
# set method used to find location as mturk
full_data.loc[solved_with_mturk(full_data["DisplayName"]),"Method"] = "Mturk"
full_data["FinalRegion"] = lookup_region(full_data["FinalCountry"])
# +
# Overwrite the cleaned institutions file with the enriched region/country
# columns (headerless, matching the original input format) ...
full_data.to_csv("inst_full_clean.csv",
                 columns=["Key","DisplayName","Type","FinalRegion","FinalCountry","Url"],
                 header=False,
                 index = False)
# ... and keep a second copy with all scraping metadata for auditing.
full_data.to_csv("inst_full_clean_with_scraping_info.csv",
                 header=True,
                 index = False)
# -
# Inspect the rows whose country was supplied by the Mturk task.
full_data[solved_with_mturk(full_data["DisplayName"])]
|
app/data/produce_institution_info.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# [Money Creation Examples](http://www.siebenbrunner.com/moneycreation/) > **Example 7**:
# # Lending in Central Bank Digital Currency
# We present two possible implementations of a Central Bank Digital Currency (CBDC) in which loans are denoted in CBDC. As discussed in the paper, there is an alternative way to implement CBDCs, where lending would be conducted in commercial bank money, and customers could choose to swap their bank deposits into CBDC at a later stage. This implementation would be completely analogous in terms of booking statements to the change between cash and deposits in the initialization example ([Example 1](http://www.siebenbrunner.com/moneycreation/Ex1_Initialization.html)) and is thus not presented separately here. The two alternatives presented herein are:
#
# - **Direct transfer of CBDC**: the bank grants a loan directly in CBDC by transferring a previously owned stock of CBDC to the debtor (Version 1).
# - **Loan transfer from the central bank**: the loan is created in CBDC by the central bank and subsequently transferred to a commercial bank (Version 2).
#
# We start by importing required utilities.
# +
import os
import sys
base_path = os.path.realpath(os.getcwd()+"/../..")
sys.path.append(base_path)
from abcFinance import Ledger, Account, AccountSide
# -
# We further define a function that computes the money stocks according to our defined taxonomy. Note that we do not count CBDC held by banks as part of the total money stock, in analogy to physical cash and in accordance with the definition adopted in our paper. In practice, central banks will have to decide whether to include CBDC held by banks in the money stock.
# +
from contextlib import suppress
from IPython.core.display import SVG
from IPython.display import display_svg
def print_money_stocks():
    """Print commercial-bank money, central-bank money (CBDC), and their total.

    Reads the module-level ``private_agent`` ledger; CBDC held by banks is
    deliberately excluded, in analogy to physical cash (see the text above).
    """
    # Bank money: bank liabilities that are money
    bank_money = private_agent.get_balance('Deposits')[1]
    central_bank_money = private_agent.get_balance('CBDC')[1]
    # A second private agent only exists in some of the examples; the broad
    # suppress is a deliberate best-effort, not an error swallow.
    with suppress(Exception):
        bank_money += private_agent2.get_balance('Deposits')[1]
        central_bank_money += private_agent2.get_balance('CBDC')[1]
    print("Commercial Bank Money:",bank_money)
    print("Central Bank Money:",central_bank_money)
    print("Total (Bank) Money:",central_bank_money+bank_money)
def print_balance_sheets_and_money_stocks():
    """Render the bank, central bank and private agent balance sheets as SVG,
    then print the money stocks.

    The private agent's sheet is only drawn once it holds any assets, so an
    empty balance sheet does not clutter the output.
    """
    sheets = [
        SVG(bank.draw_balance_sheet("Bank Balance Sheet")),
        SVG(central_bank.draw_balance_sheet("Central Bank Balance Sheet")),
    ]
    if private_agent.get_total_assets() > 0:
        sheets.append(SVG(private_agent.draw_balance_sheet("Private Agent Balance Sheet")))
    display_svg(*sheets)
    print_money_stocks()
# -
# ## Version 1: Direct Transfer of CBDC from Commercial Bank to Private Sector Agent
# In this implementation of a CBDC, banks grant loans directly in CBDC by transferring a stock of CBDC which they already own to the borrower. CBDC lending in this implementation is similar to (but not the same as) how loanable-funds theories describe lending: commercial banks are exogeneously constrained in their ability to grant loans by the availability of CBDC. In contrast to loanable funds theories, however, the limiting resource - CBDC instead of deposits - is an asset for the bank, not a liability.
#
# We start the example by defining a bank, central bank and private sector agent.
# +
bank = Ledger(residual_account_name="Equity")
central_bank = Ledger(residual_account_name="Equity")
private_agent = Ledger(residual_account_name="Equity")
# Accounts mirror each agent's balance-sheet structure: a loan is an asset
# for the bank and a liability for the borrower; CBDC is a central bank
# liability held as an asset by the other agents.
bank.make_asset_accounts(['CBDC','Loans'])
bank.make_liability_accounts(['Deposits'])
private_agent.make_asset_accounts(['CBDC','Deposits'])
private_agent.make_liability_accounts(['Loans'])
central_bank.make_asset_accounts(['Assets'])
central_bank.make_liability_accounts(['CBDC'])
# -
# We initialize the system by endowing the commercial and central bank with some initial stock of money.
bank.book(debit=[('CBDC',100)],credit=[('Equity',100)])
central_bank.book(debit=[('Assets',100)],credit=[('CBDC',100)])
print_balance_sheets_and_money_stocks()
# The bank now uses parts of its pre-existing stock of CBDC to grant a loan. Note that since we excluded the CBDC held by the bank from the money supply, this transaction has the effect of increasing the money stock.
# Each transfer is booked twice: once on the bank's ledger, once on the
# borrower's, keeping both ledgers in balance.
bank.book(debit=[('Loans',50)],credit=[('CBDC',50)])
private_agent.book(debit=[('CBDC',50)],credit=[('Loans',50)])
print_balance_sheets_and_money_stocks()
# ## Version 2: Loan transfer from the Central Bank to Commercial Bank
# An alternative implementation of lending in CBDC would consist in the central bank initially granting the loan, using the same booking statements that commercial banks use for granting loans in bank money currently. In the second step, the loan would be transferred to a commercial bank, in exchange for CBDC. As we will demonstrate, this implementation of CBDC lending results in the same final state if there are no defaults (Case 2a). The booking statements, however, are different, and as we will demonstrate this means that the central bank bears a type of settlement risk similar to [Herstatt risk](https://en.wikipedia.org/wiki/Settlement_risk) (Case 2b).
#
# We start by defining a set of agents again. We include a second private agent and flow accounts for the other agents. These additions will be needed to book the effects of a materialization of Herstatt risk for the central bank in case 2b.
# +
bank = Ledger(residual_account_name="Equity")
central_bank = Ledger(residual_account_name="Equity")
private_agent = Ledger(residual_account_name="Equity")
private_agent2 = Ledger(residual_account_name="Equity")
bank.make_asset_accounts(['CBDC','Loans'])
bank.make_liability_accounts(['Deposits'])
# Flow (P&L) accounts are needed in case 2b to record the expense,
# revenue and impairment flows triggered by the shocks.
bank.make_flow_accounts(['Expenses','Impairments'])
private_agent.make_asset_accounts(['CBDC','Deposits'])
private_agent.make_liability_accounts(['Loans'])
private_agent.make_flow_accounts(['Expenses'])
private_agent2.make_asset_accounts(['CBDC','Deposits'])
private_agent2.make_flow_accounts(['Revenues','Impairments'])
central_bank.make_asset_accounts(['Assets','Loans'])
central_bank.make_liability_accounts(['CBDC'])
central_bank.make_flow_accounts(['Impairments'])
# -
# ### Case 2a: Favourable case
# We first start by endowing the agents with some initial money allocations, similar to the first example:
bank.book(debit=[('CBDC',100)],credit=[('Deposits',100)])
private_agent.book(debit=[('Deposits',100)],credit=[('Equity',100)])
central_bank.book(debit=[('Assets',100)],credit=[('CBDC',100)])
print_balance_sheets_and_money_stocks()
# Now the loan granting happens: note that the central bank expands its balance sheet by creating new CBDC, just as a commercial bank would do when creating a deposit in the course of granting a loan, and thereby increases the total money stock:
central_bank.book(debit=[('Loans',50)],credit=[('CBDC',50)])
private_agent.book(debit=[('CBDC',50)],credit=[('Loans',50)])
print_balance_sheets_and_money_stocks()
# The commercial bank now uses some of its stock of CBDC to purchase the loan from the central bank. This process destroys CBDC, it does not impact total money stocks, however, as the CBDC held by the bank was excluded from the total money stock by definition. This transaction thus has no more impact on money stocks, and the resulting balance sheets look the same as in the first version of CBDC lending, where the bank directly transferred its pre-existing CBDC stock to the borrower.
bank.book(debit=[('Loans',50)],credit=[('CBDC',50)])
central_bank.book(debit=[('CBDC',50)],credit=[('Loans',50)])
print_balance_sheets_and_money_stocks()
# ### Case 2b: Unfavourable case (Herstatt-type risk materialization)
# In order to demonstrate the economic difference between the two versions of CBDC lending, we now assume that the central bank creates another loan for the same private agent. The initial loan creation booking statement and the effect on the money stock are identical.
central_bank.book(debit=[('Loans',50)],credit=[('CBDC',50)])
private_agent.book(debit=[('CBDC',50)],credit=[('Loans',50)])
print_balance_sheets_and_money_stocks()
# However, before the bank can purchase the loan from the central bank, it is met with an unexpected shock in the form of having to make a payment to another agent, depleting its stock of CBDC:
bank.book(debit=[('Expenses',50)],credit=[('CBDC',50)])
private_agent2.book(debit=[('CBDC',50)],credit=[('Revenues',50)])
# Close the flow accounts into equity before drawing the balance sheets.
bank.book_end_of_period()
private_agent2.book_end_of_period()
display_svg(SVG(private_agent2.draw_balance_sheet("Private Agent 2 Balance Sheet")))
print_balance_sheets_and_money_stocks()
# With the realization of this shock, the balance sheet of the central bank now looks different from the first version and also from the previous case. However, this event alone does not mean that the central bank is facing losses. It is now only faced with a credit risk which it was expecting to be able to transfer to the commercial bank. We now assume that this credit risk materializes as the first private agent, who has taken out two loans worth 100 in total so far, is also met with an unexpected shock and has to make a payment to another private agent.
# The debtor spends all of its assets (100 CBDC + 100 deposits) at once.
private_agent.book(debit=[('Expenses',200)],credit=[('CBDC',100),('Deposits',100)])
private_agent2.book(debit=[('CBDC',100),('Deposits',100)],credit=[('Revenues',200)])
private_agent.book_end_of_period()
private_agent2.book_end_of_period()
display_svg(SVG(private_agent.draw_balance_sheet("Private Agent Balance Sheet")))
display_svg(SVG(private_agent2.draw_balance_sheet("Private Agent 2 Balance Sheet")))
# Since the debtor is now insolvent and has no more assets, both its creditors (the central bank and the commercial bank) now have to write down their exposures. Note that this impairment means that the commercial bank is now insolvent. Hence, all creditors of the commercial bank now have to write down their exposures towards the commercial bank, and we observe the start of a small default cascade. Note that in practice, under current regulations, the default of the commercial bank might trigger a number of bail-in or bail-out mechanisms, e.g. a deposit insurance fund or the government bailing out depositors or all creditors.
bank.book(debit=[('Impairments',50)],credit=[('Loans',50)])
private_agent2.book(debit=[('Impairments',100)],credit=[('Deposits',100)])
central_bank.book(debit=[('Impairments',50)],credit=[('Loans',50)])
bank.book_end_of_period()
private_agent2.book_end_of_period()
central_bank.book_end_of_period()
display_svg(SVG(private_agent2.draw_balance_sheet("Private Agent 2 Balance Sheet")))
print_balance_sheets_and_money_stocks()
|
examples/money_creation/Ex7_CBDC.ipynb
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# # Dynamic Linear Models and Kalman Filter
# ### <NAME>
# In this notebook I will show some examples of how to apply Kalman Filter to dynamic linear models (a subclass of state-space models) in R using the "dlm" package. The notation is based on Petris et al. "Dynamic Linear models with R" (2007).
# ## Introduction
# State-space models consider a time series as the output of a dynamic system perturbed by random disturbances. As we shall see, they allow a natural interpretation of a time series as the result of several components, such as trend, seasonal or
# regressive components. At the same time, they have an elegant and powerful probabilistic structure, offering a flexible framework for a very wide range of applications. Computations can be implemented by recursive algorithms. The problems of estimation and forecasting are solved by recursively computing the conditional distribution of the quantities of interest, given the available information. In this sense, they are quite naturally treated from a Bayesian approach. State-space models can be used for modeling univariate or multivariate time series, also in presence of non-stationarity, structural changes, irregular
# patterns. State-space models include ARMA models as special case, but as we shall see, they can be applied to nonstationary time series without requiring a preliminary transformation of the data to get stationarity.
# ## Dynamic Linear Models
# The first, important class of state-space models is given by Gaussian linear state-space models, also called dynamic linear models (DLM). These models are specified by means of two equations:
#
# The state equation, which describes the state vector $\theta_t$ (rx1), which is a Markov Chain:
#
# $\theta_t = G_t \theta_{t-1} + w_t, w_t \sim \mathcal{N_r}(0,W_t)$
#
# The observation equation, which assumes the observation vector $Y_t$ (nx1) is a function of the state vector plus a random disturbance:
#
# $Y_t = F_t \theta_t + v_t, v_t \sim \mathcal{N_n}(0,V_t)$
# The random disturbances, ($v_T$) and ($w_T$), are two independent white noise sequences. Since the Kalman filter is a recursive algorithm, the first step must be usually specified. In particular, it is also assumed:
#
# $\theta_0 \sim \mathcal{N_r}(m_0,C_0)$
#
# for some non-random vector $m_0$ and matrix $C_0$, and it is independent on ($v_T$) and ($w_T$).
# ## Kalman Filter
# Let us now report the Kalman filter recursive formulas for the general dynamic linear model described earlier.
#
# If $\theta_{t-1} \mid \mathcal{D}_{t-1} \sim \mathcal{N_r}(m_{t-1},C_{t-1})$, where $t \geq 1$, then:
# (a) the one-step-ahead state predictive density of $\theta_t \mid \mathcal{D}_{t-1}$ is Gaussian, with parameters:
#
# $a_t = \mathbb{E}[\theta_t \mid \mathcal{D}_{t-1}] = G_t m_{t-1}$
#
# $R_t = \mathbb{V}[\theta_t \mid \mathcal{D}_{t-1}] = G_t C_{t-1} G_t^\intercal + W_t$
# (b) the one-step-ahead predictive density of $Y_t \mid \mathcal{D}_{t-1}$ is Gaussian, with parameters:
#
# $f_t = \mathbb{E}[\theta_t \mid \mathcal{D}_{t-1}] = F_t a_{t}$
#
# $Q_t = \mathbb{V}[\theta_t \mid \mathcal{D}_{t-1}] = F_t R_{t} F_t^\intercal + V_t$
# (c) the filtering density of $\theta_t \mid \mathcal{D}_{t}$ is Gaussian, with parameters:
#
# $m_t = \mathbb{E}[\theta_t \mid \mathcal{D}_{t}] = a_t + R_t F_{t}^\intercal Q_t^{-1} (Y_t - f_t)$
#
# $C_t = \mathbb{V}[\theta_t \mid \mathcal{D}_{t}] = R_{t} - R_t F_{t}^\intercal Q_t^{-1} F_t R_t$
# Note that the expression of $m_t$ has the intuitive estimation-correction form
# ”filter mean equal to the prediction mean at plus a correction depending on
# how much the new observation differs from its prediction”. The weight of the
# correction term is given by the gain matrix:
#
# $ K_t = R_t F_{t}^\intercal Q_t^{-1}$
# The evaluation of the posterior variances Ct (and consequently also of Rt and Qt) using the previous iterative updating formulae, as simple as it may appear, suffers from numerical instability that may lead to nonsymmetric and even negative definite calculated variance matrices. Alternative, stabler, algorithms have been developed to overcome this issue. Apparently,
# the most widely used, at least in the Statistics literature, is the square root filter, which provides formulae for the sequential update of a square root of Ct. References for the square root filter are <NAME> Kailath (1975) and Anderson and Moore (1979, Ch. 6). A more robust algorithm is the one based on sequentially updating the singular value decomposition6 (SVD) of Ct. The details of the algorithm can be found in Oshman and Bar-Itzhack (1986) and Wang et al. (1992). The dlm package use this algorithm to update variance matrices.
# #### Example: Linear Regression
# Linear regression can be thought as a DLM. Moreover, in the DLM structure, it can be extended to have time-varying coefficients. Let us give the state and observation equations that describe this process. For simplicity, let us assume that the $\beta$ vector contains the constant as a first component and hence the $x_t$ vector contains 1 as first component.
# $\beta_t = \beta_{t-1} + w_t, w_t \sim \mathcal{N_r}(0,W_t)$
#
# $y_t = x_t'\beta_t + v_t, v_t \sim \mathcal{N_n}(0,V_t)$
# We also assume our prior distribution on $\beta_0$ to be:
#
# $\beta_0 \sim \mathcal{N_2}(b_0,C_0)$
#
# Let us now see an application of this model for the dynamic estimation of Google's $\beta$.
# Loading useful libraries
library(tidyverse)
library(tseries)
library(dlm)
library(forecast)
library(quantmod)
library(broom)
library(timetk)
# Set ggplot theme
# Shared across all charts below: minimal base theme with compact title,
# axis, legend and facet-strip text sizes.
th <- theme_minimal() +
     theme(plot.title = element_text(size=12, face="bold"),
          axis.title.x = element_text(size=7),
          axis.title.y = element_text(size=7),
          legend.text = element_text(size=5),
          legend.title = element_text(size=7),
          axis.text = element_text(size=9),
          strip.text = element_text(size=8, face = "italic"))
# Downloading data
# y = Google (the stock whose beta we estimate), x = S&P 500 (the market).
y_raw <- getSymbols("GOOG",src="yahoo", auto.assign = FALSE)
x_raw <- getSymbols("^GSPC",src="yahoo", auto.assign = FALSE)
# Focus on adjusted close price
y <- y_raw[,"GOOG.Adjusted"]
x <- x_raw[,"GSPC.Adjusted"]
# +
# Modify time window
y <- window(y, start = "2010-01-01", end = "2020-07-01")
x <- window(x, start = "2010-01-01", end = "2020-07-01")
# interpolate NAs linearly so both series are gap-free
y <- na.approx(y)
x <- na.approx(x)
# -
# Monthly frequency (each price corresponds to the first observed each month)
x <- apply.monthly(x, first)
index(x) <- as.yearmon(index(x))
y <- apply.monthly(y, first)
index(y) <- as.yearmon(index(y))
# Make sure series have same length and plot
length(x) == length(y)
autoplot(merge(x,y)) + th
# Log return series (first differences of log prices)
x <- na.omit(diff(log(x)))
y <- na.omit(diff(log(y)))
# Check beta estimate for classical (static) time-series regression
lin_reg <- lm(y ~ x)
tidy(lin_reg)
# According to the classical time-series regression, the beta of Google is estimated to be around 1.04. Let's now try to look at the beta estimate when we allow this parameter to vary through time.
# +
# In order to define the model, we only need the regressor; the regressand is going to enter the model once we use a filter or a smoother
# We can use the estimates from the simple regression as initial guesses for the state parameters (alpha and beta)
# All the other parameters are going to be left as default
dlm_lin_reg <- dlmModReg(x, m0 = tidy(lin_reg)["estimate"][[1]])
# Create filter and smoother objects
dlm_lin_reg_filt <- dlmFilter(y, dlm_lin_reg)
dlm_lin_reg_smooth <- dlmSmooth(dlm_lin_reg_filt)
# +
# Plot the filtered estimate of beta through time.
# m[-1,2]: drop the time-0 prior row; column 2 is the slope (beta) state.
t <- time(x)
filter_beta <- zoo(dlm_lin_reg_filt$m[-1,2])
time(filter_beta) <- t
smoother_beta <- zoo(dlm_lin_reg_smooth$s[-1,2])
time(smoother_beta) <- t
autoplot(filter_beta) + th
# -
# End-of-sample estimate of beta:
filter_beta[length(filter_beta)]
# As we can see the beta estimate is very close to the classic regression model's one. Moreover, we can see that Google's beta has been pretty constant over time. Note that the initial variance is due to the very diffuse prior we had for the state variable at time 0. Note that for this simple example we kept all the parameters to default. In general, the parameters of the model (for example the white noise process covariance matrix) can and should be estimated via maximum likelihood or bayesian methods. Let's try to estimate the parameters of the model by mle.
# Recall that in the dynamic regression model we just specified, using the terminology of the last section, the unknown parameters are $W_t, V_t$, the variance matrices (scalars in the $V_t$ case) of the error terms. These were set to the default:
print(dlm_lin_reg$V)
print(dlm_lin_reg$W)
# As we can see, the default value for W was actually a null matrix. This implied alpha and beta constant through time because their innovation process does not have variance. If we really wanted time-varying parameters, we should have specified this matrix to be non-null. On the other hand, the default value for V was 1, which is totally unrelated to our problem, so we need to change this too. How do we choose the right parameters? A very convenient solution is given by MLE estimation. We first have to define the matrices as functions of some parameters, then the dlm library will automatically maximize the log likelihood of the model. The dlmModReg function assumes uncorrelated innovations for the state variables, alpha and beta, hence the matrix W is diagonal and we only need to estimate 3 parameters.
# +
# define a parameter vector (you can also define it directly in the dlmMLE function)
parm <- c(0,0,0) # V, W_1_1, W_2_2
# define a dlm as a function of the params (we take e^param so we impose variances to be positive)
build <- function(parm) {
  dlmModReg(x, m0 = tidy(lin_reg)["estimate"][[1]], dV = exp(parm[1]),
            dW = c(exp(parm[2]),exp(parm[3])))
}
# MLE: maximize the model log-likelihood over the variance parameters
dlm_lin_reg_mle <- dlmMLE(y, parm, build)
# -
dlm_lin_reg_mle
# Convergence equal to zero means that convergence to a log-likelihood maximum has been achieved. We can now build the actual model and visualize filtered and smoothed estimates for the beta, our variable of interest.
# Build the model using the MLE parameters
dlm_lin_reg <- build(dlm_lin_reg_mle$par)
# Visualize estimated variance matrices:
print(dlm_lin_reg$V)
print(dlm_lin_reg$W)
# +
# Build a filter and visualize it
# Create filter and smoother objects
dlm_lin_reg_filt <- dlmFilter(y, dlm_lin_reg)
dlm_lin_reg_smooth <- dlmSmooth(dlm_lin_reg_filt)
# Plot the filtered estimate of beta through time
# (drop the time-0 prior row; column 2 is the slope state)
t <- time(x)
filter_beta <- zoo(dlm_lin_reg_filt$m[-1,2])
time(filter_beta) <- t
smoother_beta <- zoo(dlm_lin_reg_smooth$s[-1,2])
time(smoother_beta) <- t
autoplot(filter_beta) + th
# -
autoplot(smoother_beta[2:length(t)]) + th
# As we can see, the smoother chart indicates that Google's beta has slightly increased over the last 10 years. Let us look at our final beta estimate:
filter_beta[length(t)]
# The beta estimate is the same as before, meaning that the beta has really been stable over time, as the simple regression points in the same direction as the dynamic regression.
# ## Trend models
# DLMs have the very nice property of allowing us to split a time series into several components and model each of these components as a separate DLM. For example, instead of modelling the differenced series separately from the level series, one could specify a DLM that describes the trend, one that describes the deviations from the trend (supposedly stationary) and add them up together to forecast directly the series of interest. Let us look at how we could model a trend component. To model a trend component, one could draw from many models; one example of such models is the random walk plus noise, or local level model. Note that in such a model, if we set the noise variance to 0, the whole exercise we just described would just amount to modelling the differenced series using a DLM, as the forecast for the trend component would simply be the value of the series at time t.
# ### Random Walk plus Noise
# The random walk plus noise, or local level model, is defined by the following two equations.
#
# $\mu_t = \mu_{t-1} + w_t, w_t \sim \mathcal{N}(0,W)$
#
# $Y_t = \mu_t + v_t, v_t \sim \mathcal{N}(0,V)$
#
# The behavior of the process (Yt) is greatly influenced by the signal-to-noise ratio r = W/V, the ratio between the two error variances. This model is very helpful where no clear upward or downward trend is detected. Let's look at the Nile river flows as an example.
# Let's look at the Nile river flows
y <- Nile
autoplot(y, size = 1) + th
# The series doesn't show any trends and seems to fluctuate around some local level. The random walk plus noise is an appropriate model to describe this series.
# +
# define a parameter vector (you can also define it directly in the dlmMLE function)
parm <- c(0,0) # V, W
# define a dlm as a function of the params (we take e^param so we impose variances to be positive)
# We set the initial estimate of the local level to be the average of the series
build <- function(parm) {
  dlmModPoly(order = 1, m0 = mean(y), dV = exp(parm[1]),
             dW = exp(parm[2]))
}
# MLE
dlm_lin_reg_mle <- dlmMLE(y, parm, build)
# -
dlm_lin_reg_mle
# The log-likelihood maximization has worked. We can go on and plot the filtered series, which represents the local levels around which the series fluctuates.
# +
loc_level <- build(dlm_lin_reg_mle$par)
# Build a filter and visualize it
# Create filter and smoother objects
loc_level_filt <- dlmFilter(y, loc_level)
loc_level_smooth <- dlmSmooth(loc_level_filt)
# Plot the filtered estimate of the local level through time.
# NOTE(review): `t` and `filter` shadow base R's t() and stats::filter()
# in the global environment — works here, but rename if refactoring.
t <- time(y)
filter <- zoo(loc_level_filt$m[-1])
time(filter) <- t
smoother <- zoo(loc_level_smooth$s[-1])
# +
# Plot the series, the filtered local level with 95% confidence bounds
# Reconstruct filtering variances from the SV decomposition stored by dlmFilter
var_filter <- ts(with(loc_level_filt, unlist(dlmSvd2var(U.C, D.C))), start = start(filter))
sd_filter <- sqrt(var_filter)
# Confidence interval under normality of errors
# ([-1] drops the time-0 prior variance so lengths match the filtered series)
upper <- filter+qnorm(0.975)*sd_filter[-1]
lower <- filter-qnorm(0.975)*sd_filter[-1]
time = time(filter)
data <- tibble(time, y, filter, upper, lower)
ggplot(data, aes(x = time)) +
  th +
  ggtitle("Filtering Estimates and Credible Intervals") +
  scale_y_continuous(expand = c(0,0)) +
  xlab("") + ylab("") +
  geom_line(aes(y = filter),col = c("red"), size = 2) +
  geom_line(aes(y = y),col = c("black")) +
  geom_ribbon(aes(ymin = lower, ymax = upper), alpha=0.3, linetype=1, colour="lightgreen", size=0.1,fill="green")
# -
# The series seems to show a breakpoint just before 1900. As a matter of fact, the annual flows drop in 1898 because the first Ashwan dam was built. Let us confirm this historical fact by testing for structural change.
# Nile data with one breakpoint: the annual flows drop in 1898
library(strucchange)
bp.nile <- breakpoints(y ~ 1)
summary(bp.nile)
plot(bp.nile)
# As we can see, the lowest BIC happens when 1 breakpoint is considered, and that breakpoint actually corresponds to 1898, in line with our historical knowledge. How can we incorporate this information to improve the local level model? We should allow a structural break during 1898, so that the filtered level can more quickly adapt to the abrupt change. This is usually done by modifying the W parameter and making it time dependent; in particular we will set it to a certain value, $W_0$, for all periods besides 1898, where it will have a value of $W_0*k$, where k is a multiplier greater than 1. We will estimate $W_0$ and $k$ by maximum likelihood.
# +
# define a dlm as a function of the params (we take e^param so we impose variances to be positive)
# We set the initial estimate of the local level to be the average of the series
build_sb <- function(parm) {
  m <- dlmModPoly(order = 1, m0 = mean(y), dV = exp(parm[1]))
  m$JW <- matrix(1) # Makes W be time dependent
  # X holds the time-varying W values: constant exp(parm[2]) everywhere...
  m$X <- matrix(exp(parm[2]), ncol=1, nrow=length(y))
  j <- which(time(Nile) == 1898)
  # ...except in 1898, where it is inflated by the factor (1 + exp(parm[3]))
  m$X[j,1] <- m$X[j,1] * (1 + exp(parm[3]))
  return(m)
}
dlm_lin_reg_sb_mle <- dlmMLE(y, c(0,0,0), build_sb)
loc_level_sb <- build_sb(dlm_lin_reg_sb_mle$par)
# -
# Build a filter and visualize it
# Create filter and smoother objects
loc_level_filt <- dlmFilter(y, loc_level_sb)
loc_level_smooth <- dlmSmooth(loc_level_filt)
# Plot the filtered estimate of the local level through time
t <- time(y)
filter <- zoo(loc_level_filt$m[-1])
time(filter) <- t
smoother <- zoo(loc_level_smooth$s[-1])
# +
# Plot the series, the smoothed local level with 95% confidence bounds
# Reconstruct smoothing variances from the SV decomposition stored by dlmSmooth
var_smoother <- ts(with(loc_level_smooth, unlist(dlmSvd2var(U.S, D.S))), start = start(smoother))
sd_smoother <- sqrt(var_smoother)
# Confidence interval under normality of errors
# ([-1] drops the time-0 prior variance so lengths match the smoothed series)
upper <- smoother+qnorm(0.975)*sd_smoother[-1]
lower <- smoother-qnorm(0.975)*sd_smoother[-1]
time <- time(smoother)
data <- tibble(time, y, filter, smoother, upper, lower)
ggplot(data, aes(x = time)) +
  th +
  ggtitle("Smoothing Estimates and Credible Intervals") +
  scale_y_continuous(expand = c(0,0)) +
  xlab("") + ylab("") +
  geom_line(aes(y = smoother),col = c("red"), size = 2) +
  geom_line(aes(y = y),col = c("black")) +
  geom_ribbon(aes(ymin = lower, ymax = upper), alpha=0.3, linetype=1, colour="lightgreen", size=0.1,fill="green")
# -
# ### Linear Growth Model
# The local level model is a special case of a class of models called polynomial models of order n. For instance, the random walk plus noise is a polynomial model of order 1. In general, polynomial models of order n have the following forecast function:
#
# $ f_t(k) = E(Y_{t+k} \mid \mathcal{D}_t) = a_{t,0} + a_{t,1}k + · · · + a_{t,n-1}k^{n−1}, k \geq 0$
#
# where $a_{t,0}, ..., a_{t,n-1}$ are linear functions of $ m_t = E(Y_{t} \mid \mathcal{D}_t)$ and are independent of k. Thus, the forecast function is a polynomial of order (n − 1) in k. In the random walk plus noise, the forecast function was simply:
#
# $f_t(k) = E(Y_{t+k} \mid \mathcal{D}_t) = a_{t,0} = m_t$
#
# A polynomial model of order 2 is defined linear growth model, or local linear trend model. The forecast function for a linear growth model has the following form:
#
# $f_t(k) = E(Y_{t+k} \mid \mathcal{D}_t) = a_{t,0} + a_{t,0} k$
#
# As we can see, this forecast function extrapolates and projects into the future a linear trend of the data. The local linear trend model is defined by the following equations:
#
# $Y_t = \mu_t + v_t, v_t \sim \mathcal{N}(0,V)$
#
# $\mu_t = \mu_{t-1} + \beta_{t-1} + w_{1,t}, w_{1,t} \sim \mathcal{N}(0,\sigma^2_{w_1})$
#
# $\beta_t = \beta_{t-1} + w_{2,t}, w_{2,t} \sim \mathcal{N}(0,\sigma^2_{w_2})$
#
# With uncorrelated errors. Let's try to fit this model to data that show a linear trend.
#
data("USeconomic")
log_gnp <- USeconomic[,"log(GNP)"]
autoplot(log_gnp, size = 1) + th
# The log of the GNP seems a very good time series to fit a linear growth model to, as the trend component.
# +
# Model builder: order-2 polynomial DLM (local linear trend);
# parm = log-variances of (V, W_level, W_slope)
build <- function(parm){
  dlmModPoly(order = 2, dV = exp(parm[1]), dW = c(exp(parm[2]), exp(parm[3])))
}
ll_mle <- dlmMLE(log_gnp, c(0,0,0), build)
ll <- build(ll_mle$par)
# +
# Filter estimates
ll_filt <- dlmFilter(log_gnp, ll)
# Forecast 20 quarters ahead
ll_forecast <- dlmForecast(ll_filt, 20)
# +
forecast <- ll_forecast$f
var_forecast <- unlist(ll_forecast$Q)
sd_forecast <- sqrt(var_forecast)
# Confidence interval under normality of errors
upper <- forecast+qnorm(0.975)*sd_forecast
lower <- forecast-qnorm(0.975)*sd_forecast
data <- ts.union(log_gnp, forecast, upper, lower)
data <- tk_tbl(data, rename_index = "time")
ggplot(data, aes(x = time)) +
  th +
  ggtitle("Local linear trend Forecast with credible intervals") +
  scale_y_continuous(expand = c(0,0)) +
  xlab("") + ylab("") +
  geom_line(aes(y = forecast),col = c("red"), size = 1) +
  geom_line(aes(y = log_gnp),col = c("black"), size = 1) +
  geom_ribbon(aes(ymin = lower, ymax = upper), alpha=0.3, linetype=1, colour="lightgreen", size=0.1,fill="green")
# -
# ## Seasonality models
# Once we understood how to model trend using DLMs, it is time to learn how to model seasonalities. One of the coolest things about DLMs is the fact that you can specify different DLMs to describe different components and then simply combine them together to get a compound model. This is done seamlessly in dlm with the use of the + operator.
# Suppose that we also want to model the seasonalities of the US log GNP series. This can be done in a varieties of ways but I will just focus on the main 2 ways. I will model the detrended log gnp series as an example, then I will sum up the two models.
# Get detrended series: subtract the filtered level (first state column).
# NOTE(review): relies on ts-alignment when subtracting — ll_filt$m has one
# extra (time-0) row; confirm the intended alignment if refactoring.
log_gnp_detrended <- log_gnp - ll_filt$m[,1]
print(mean(log_gnp_detrended))
autoplot(log_gnp_detrended, size = 1) + th
# ### Seasonal Factor Models
# This model takes data with a certain frequency, say 1/n, and estimates the n deviations from the mean that repeat every cycle of n observations. In our example, we have quarterly data (frequency = 1/4), so the seasonal factor model will estimate 4 factors, each related to a specific quarter, that will add up to the mean (in this case 0, since the series is detrended). In the static seasonal model, wt is degenerate on a vector of zeros (i.e., Wt = 0). More generally, the seasonal effects might change in time, so that Wt is nonzero and has to be carefully specified.
# +
# Model builder: static seasonal factors (dW = 0), only dV is estimated
build <- function(parm){
  dlmModSeas(frequency = 4, dV = exp(parm[1]), dW = rep(0,3))
}
seas_mle <- dlmMLE(log_gnp_detrended, c(0,0,0), build)
seas <- build(seas_mle$par)
# -
seas_fil <- dlmFilter(log_gnp_detrended, seas)
one_step_ahead_forecasts <- seas_fil$f
# +
time = time(log_gnp_detrended)
data <- tibble(time, log_gnp_detrended, one_step_ahead_forecasts)
ggplot(data, aes(x = time)) +
  th +
  ggtitle("Seasonal Model with One step ahead forecasts") +
  scale_y_continuous(expand = c(0,0)) +
  xlab("") + ylab("") +
  geom_line(aes(y = one_step_ahead_forecasts),col = c("red"), size = 1) +
  geom_line(aes(y = log_gnp_detrended),col = c("black"), size = 1)
# -
# ### Fourier Seasonality Model
# These kinds of models are especially useful when the frequency of the data is very low, as instead of estimating n seasonal factors, using cyclical functions leads to a more parsimonious representation.
# +
# Model builder for the Fourier-form (trigonometric) seasonal component.
# s = frequency of the data, q = number of harmonics (periodic functions).
# Only the observation variance dV is estimated; W defaults to zero,
# i.e. a static seasonal pattern.
build <- function(parm){
  dlmModTrig(s = 4, q = 2, dV = exp(parm[1]))
}
seas_fourier_mle <- dlmMLE(log_gnp_detrended, c(0,0,0), build)
# BUG FIX: build the model from this fit's own parameters
# (seas_fourier_mle$par), not from the seasonal-factor fit's seas_mle$par —
# the original line silently discarded the Fourier MLE results.
seas_fourier <- build(seas_fourier_mle$par)
# +
seas_fourier_fil <- dlmFilter(log_gnp_detrended, seas_fourier)
one_step_ahead_forecasts <- seas_fourier_fil$f
time = time(log_gnp_detrended)
# `filter(...)` here resolves to dplyr::filter despite the earlier `filter`
# zoo object, because R skips non-function bindings when calling a function.
data <- tibble(time, log_gnp_detrended, one_step_ahead_forecasts) %>%
  filter(time > 1955)
ggplot(data, aes(x = time)) +
  th +
  ggtitle("Seasonal Model with One step ahead forecasts") +
  scale_y_continuous(expand = c(0,0)) +
  xlab("") + ylab("") +
  geom_line(aes(y = one_step_ahead_forecasts),col = c("red"), size = 1) +
  geom_line(aes(y = log_gnp_detrended),col = c("black"), size = 1)
# -
# As we said before, the dlm package seamlessly allows us to sum two models. For example, let's add the linear trend model to the seasonal model and plot forecasts with credible intervals. In order to do so, we are going to reuse the parameters we estimated previously.
# +
# New model as a sum of models: local linear trend (parameters from parm)
# plus the static seasonal component (variance fixed at its earlier MLE).
build <- function(parm){
  m1 <- dlmModPoly(order = 2, dV = exp(parm[1]), dW = c(exp(parm[2]), exp(parm[3])))
  m2 <- dlmModSeas(frequency = 4, dV = exp(seas_mle$par[1]), dW = rep(0,3))
  return(m1 + m2)
}
# Reuse the trend parameters estimated earlier (no refitting here)
ll_seas <- build(ll_mle$par)
# +
# Filter estimates
ll_seas_filt <- dlmFilter(log_gnp, ll_seas)
# Forecast 20 quarters ahead
ll_seas_forecast <- dlmForecast(ll_seas_filt, 20)
# +
forecast <- ll_seas_forecast$f
var_forecast <- unlist(ll_seas_forecast$Q)
sd_forecast <- sqrt(var_forecast)
# Confidence interval under normality of errors
upper <- forecast+qnorm(0.975)*sd_forecast
lower <- forecast-qnorm(0.975)*sd_forecast
data <- ts.union(log_gnp, forecast, upper, lower)
data <- tk_tbl(data, rename_index = "time")
ggplot(data, aes(x = time)) +
  th +
  ggtitle("Local linear trend Forecast with credible intervals") +
  scale_y_continuous(expand = c(0,0)) +
  xlab("") + ylab("") +
  geom_line(aes(y = forecast),col = c("red"), size = 1) +
  geom_line(aes(y = log_gnp),col = c("black"), size = 1) +
  geom_ribbon(aes(ymin = lower, ymax = upper), alpha=0.3, linetype=1, colour="lightgreen", size=0.1,fill="green")
# -
# Let's run diagnostics on the one-step-ahead forecast residuals for this model
# (the first observations are dropped to skip the burn-in of the diffuse prior)
e <- log_gnp - ll_seas_filt$f
checkresiduals(e[30:length(e)])
|
Dynamic Linear Models and Kalman Filter.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="ei6FwQgd96lu"
# # Titanic: Machine Learning from Disaster (TensorFlow Linear Classifier)
#
# My first attempt to build a machine learning model to predict the survival of passengers on Kaggle's Titanic competition.
#
# The model implements a linear classifier in TensorFlow.
#
#
# + [markdown] id="fCwoqmiy3OcM"
# ### Setup and Imports
# + id="4RVHURKJ3U4V"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
# Colab-specific: opens an upload dialog so train.csv / test.csv land in the runtime.
from google.colab import files
files.upload()
# + [markdown] id="0lPz83XF3o3h"
# ### Data
# + id="p3tj2Jjo3q4v"
# Construct pandas dataframes from the csv files uploaded above.
dftrain = pd.read_csv('train.csv')
dfeval = pd.read_csv('test.csv')
# Preprocess the dataframes
def preprocess(df):
    """Clean a Titanic dataframe: derive Deck, drop free-text columns,
    and impute missing values (numeric -> column mean, categorical -> 'M')."""
    # The first letter of the cabin code identifies the deck.
    df['Deck'] = df['Cabin'].str.get(0)
    df = df.drop(columns=['Name', 'Ticket', 'Cabin'])
    for numeric_col in ('Age', 'Fare'):
        df[numeric_col] = df[numeric_col].fillna(df[numeric_col].mean())
    for categorical_col in ('Deck', 'Embarked'):
        df[categorical_col] = df[categorical_col].fillna('M')
    return df
dftrain = preprocess(dftrain)
# pop() separates the label column from the feature frame.
y_train = dftrain.pop('Survived')
dfeval = preprocess(dfeval)
# Kaggle's test.csv has no labels; add a NaN placeholder so the evaluation
# input pipeline still has a label tensor to feed.
dfeval['Survived'] = np.nan
y_eval = dfeval.pop('Survived')
# Keep passenger ids aside for the submission file.
ids = dfeval.pop('PassengerId')
# + id="qOSc4rlKx-zA"
dftrain.head(10) # Check the first 10 entries in the training dataset
# + [markdown] id="WGTv3ArY_oKj"
# Plot out statistical data
# + id="KZh5J4lJGRiD"
# Histogram of fares.
dftrain.Fare.hist(bins=80).set_xlabel('Fare')
# + id="diBE_h60GZ67"
# Category balance of Sex and Pclass.
dftrain.Sex.value_counts().plot(kind='pie')
# + id="u69fkSAfGhyd"
dftrain.Pclass.value_counts().plot(kind='pie')
# + id="ms8m69bxHwQQ"
# Empirical survival rate grouped by each categorical feature (labels are
# re-joined to the features for the groupby).
pd.concat([dftrain, y_train], axis=1).groupby('Sex').Survived.mean().plot(kind='barh').set_xlabel('% survive')
# + id="8noSWVFiS1D8"
pd.concat([dftrain, y_train], axis=1).groupby('Embarked').Survived.mean().plot(kind='barh').set_xlabel('% survive')
# + id="y0J5L9DPTAcm"
pd.concat([dftrain, y_train], axis=1).groupby('Deck').Survived.mean().plot(kind='barh').set_xlabel('% survive')
# + [markdown] id="xQ8xg_u5JBUV"
# ### Feature Columns
#
# Set up the relevant features that will be used in our linear classifier model
# + id="IXPu897EJNgU"
categorical_columns = ['Pclass', 'Sex', 'SibSp', 'Parch', 'Embarked']
numeric_columns = ['Age', 'Fare']
# Categorical features: one vocabulary column per attribute, with the
# vocabulary taken from the values observed in the training set.
feature_columns = [
    tf.feature_column.categorical_column_with_vocabulary_list(name, dftrain[name].unique())
    for name in categorical_columns
]
# Numeric features are passed through as float32 columns.
feature_columns += [
    tf.feature_column.numeric_column(name, dtype=tf.float32)
    for name in numeric_columns
]
# + [markdown] id="5EvybFGv8bvJ"
# ### Input Function
#
# Constructs input functions out of our dataframes so they may be processed with TensorFlow. The linear classifier uses mini-batch regression, so further expand and process the data by duplicating and shuffling the entries.
# + id="sI3wZc2c8fGF"
def make_input_fn(data_df, label_df, num_epochs=10, shuffle=True, batch_size=32):
    """Return a zero-argument input_fn yielding (features, label) batches.

    The returned callable builds a tf.data pipeline from the dataframes,
    optionally shuffling, then batching and repeating for num_epochs.
    """
    def input_function():
        dataset = tf.data.Dataset.from_tensor_slices((dict(data_df), label_df))
        if shuffle:
            dataset = dataset.shuffle(1000)
        return dataset.batch(batch_size).repeat(num_epochs)
    return input_function

train_input_fn = make_input_fn(dftrain, y_train)
# Evaluation: single pass, original row order, so predictions line up with rows.
eval_input_fn = make_input_fn(dfeval, y_eval, num_epochs=1, shuffle=False)
# + [markdown] id="6t08ZuSrA4As"
# Train the linear classifier.
# + id="PEcKslcO6waV"
# Train a TF Estimator linear classifier over the feature columns defined above.
linear_est = tf.estimator.LinearClassifier(feature_columns=feature_columns)
linear_est.train(input_fn=train_input_fn)
# + [markdown] id="iJFTuFmnA6t7"
# Make predictions on the test data with our trained linear classifier.
# + id="e5pJc0bDaLW7"
# Materialize the prediction generator, then extract the hard class id
# predicted for each passenger.
predictions = list(linear_est.predict(eval_input_fn))
class_list = [pred['class_ids'][0] for pred in predictions]
len(class_list)
# + [markdown] id="Rfk3bnNvBI1g"
# Convert the test dataframe and predictions into a single csv file for submission.
# + id="G3YuUECIsM9Y"
# Assemble the submission file: PassengerId + predicted Survived class.
submission = pd.DataFrame(ids)
submission['Survived'] = class_list
# Bug fix: reset_index returns a new frame; the original discarded the result,
# making the call a no-op.
submission = submission.reset_index(drop=True)
submission.to_csv('titanicLC_submission.csv', index=False)
files.download('titanicLC_submission.csv')
|
Titanic_LC.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import mpl_toolkits
from sklearn import preprocessing
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.feature_selection import RFE
from sklearn import linear_model
# Bug fix: sklearn.cross_validation was deprecated and removed in
# scikit-learn 0.20; train_test_split lives in sklearn.model_selection
# (consistent with the import already used later in this notebook).
from sklearn.model_selection import train_test_split
# %matplotlib inline
pwd
# cd '/Users/Chanti/Desktop/Cookbook/Chapter 10'
pwd
# Load the King County house-sales dataset and take a first look.
dataframe = pd.read_csv("kc_house_data.csv", header='infer')
list(dataframe)
dataframe.head()
dataframe.tail()
dataframe.describe()
# Exploratory plots of the raw features.
dataframe['bedrooms'].value_counts().plot(kind='bar')
plt.title('No. of bedrooms')
plt.xlabel('Bedrooms')
plt.ylabel('Count')
# Bug fix: sns.despine is a function; the bare name was a silent no-op.
sns.despine()
dataframe['bedrooms'].value_counts().plot(kind='pie')
plt.title('No. of bedrooms')
dataframe['floors'].value_counts().plot(kind='bar')
plt.title('Number of floors')
plt.xlabel('No. of floors')
plt.ylabel('Count')
# Bug fix: same missing-call no-op as above.
sns.despine()
# NOTE(review): x is fed latitude values and y longitude values, but the axis
# labels say the opposite -- confirm which orientation was intended.
plt.figure(figsize=(20,20))
sns.jointplot(x=dataframe.lat.values, y=dataframe.long.values, size=9)
plt.xlabel('Longitude', fontsize=10)
plt.ylabel('Latitude', fontsize=10)
plt.show()
sns.despine()
plt.figure(figsize=(20,20))
sns.jointplot(x=dataframe.lat.values, y=dataframe.long.values, size=9)
plt.xlabel('Longitude', fontsize=10)
plt.ylabel('Latitude', fontsize=10)
plt.show()
sns.despine()
# Price against the main numeric predictors.
plt.figure(figsize=(8,8))
plt.scatter(dataframe.price, dataframe.sqft_living)
plt.xlabel('Price')
plt.ylabel('Square feet')
plt.show()
plt.figure(figsize=(5,5))
plt.bar(dataframe.condition, dataframe.price)
plt.xlabel('Condition')
plt.ylabel('Price')
plt.show()
plt.figure(figsize=(8,8))
plt.scatter(dataframe.zipcode, dataframe.price)
plt.xlabel('Zipcode')
plt.ylabel('Price')
plt.show()
plt.figure(figsize=(10,10))
plt.scatter(dataframe.grade, dataframe.price)
plt.xlabel('Grade')
plt.ylabel('Price')
plt.show()
# Drop identifier/date columns that carry no predictive signal for price.
x_df = dataframe.drop(['id', 'date'], axis=1)
x_df
y = dataframe[['price']].copy()
y_df = pd.DataFrame(y)
y_df
# Pearson correlation of price against each candidate predictor,
# printed one per line with a human-readable label.
_corr_columns = [
    ('Bedrooms', 'bedrooms'),
    ('Bathrooms', 'bathrooms'),
    ('Living Area', 'sqft_living'),
    ('Plot Area', 'sqft_lot'),
    ('No. of floors', 'floors'),
    ('Waterfront property', 'waterfront'),
    ('View', 'view'),
    ('Grade', 'grade'),
    ('Condition', 'condition'),
    ('Sqft Above', 'sqft_above'),
    ('Basement Area', 'sqft_basement'),
    ('Year Built', 'yr_built'),
    ('Year Renovated', 'yr_renovated'),
    ('Zipcode', 'zipcode'),
    ('Latitude', 'lat'),
    ('Longitude', 'long'),
]
for _label, _col in _corr_columns:
    print('Price Vs %s: %s' % (_label, x_df['price'].corr(x_df[_col])))
x_df.corr().iloc[:,-19]
# Scatter price against every retained predictor in a single figure.
# NOTE(review): seaborn renamed `size` to `height` in v0.9 -- confirm the
# installed seaborn version still accepts `size`.
sns.pairplot(data=x_df,
x_vars=['price'],
y_vars=['bedrooms', 'bathrooms', 'sqft_living',
'sqft_lot', 'floors', 'waterfront','view',
'grade','condition','sqft_above','sqft_basement',
'yr_built','yr_renovated','zipcode','lat','long'],
size = 5)
# Fit an ordinary-least-squares model on a 60/40 train/test split.
x_df2 = x_df.drop(['price'], axis = 1)
reg=linear_model.LinearRegression()
x_train,x_test,y_train,y_test = train_test_split(x_df2,y_df,test_size=0.4,random_state=4)
reg.fit(x_train,y_train)
reg.coef_
predictions=reg.predict(x_test)
predictions
# R^2 on the held-out set.
reg.score(x_test,y_test)
import xgboost
# Gradient-boosted trees as a stronger baseline than linear regression.
new_model = xgboost.XGBRegressor(n_estimators=750, learning_rate=0.01, gamma=0, subsample=0.55, colsample_bytree=1, max_depth=10)
from sklearn.model_selection import train_test_split
# NOTE(review): traindf/testdf are never used below -- the model is fit on the
# earlier x_train/x_test split. Confirm this extra split is intentional.
traindf, testdf = train_test_split(x_train, test_size = 0.2)
new_model.fit(x_train,y_train)
from sklearn.metrics import explained_variance_score
predictions = new_model.predict(x_test)
# Bug fix: explained_variance_score expects (y_true, y_pred); the original
# swapped the arguments, which changes the reported score.
print(explained_variance_score(y_test, predictions))
|
Chapter08/CH08/Code/Real+Estate+Prediction.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <!-- dom:TITLE: Oscillations -->
# # Oscillations
# <!-- dom:AUTHOR: [<NAME>](http://mhjgit.github.io/info/doc/web/) at Department of Physics and Astronomy and National Superconducting Cyclotron Laboratory, Michigan State University, USA & Department of Physics, University of Oslo, Norway -->
# <!-- Author: -->
# **[<NAME>](http://mhjgit.github.io/info/doc/web/)**, Department of Physics and Astronomy and National Superconducting Cyclotron Laboratory, Michigan State University, USA and Department of Physics, University of Oslo, Norway
# <!-- dom:AUTHOR: [<NAME>](https://pa.msu.edu/profile/pratts/) at Department of Physics and Astronomy and National Superconducting Cyclotron Laboratory, Michigan State University, USA -->
# <!-- Author: --> **[<NAME>](https://pa.msu.edu/profile/pratts/)**, Department of Physics and Astronomy and National Superconducting Cyclotron Laboratory, Michigan State University, USA
# <!-- dom:AUTHOR: [<NAME>](https://pa.msu.edu/profile/schmidt/) at Department of Physics and Astronomy, Michigan State University, USA -->
# <!-- Author: --> **[<NAME>](https://pa.msu.edu/profile/schmidt/)**, Department of Physics and Astronomy, Michigan State University, USA
#
# Date: **Feb 10, 2020**
#
# Copyright 1999-2020, [<NAME>](http://mhjgit.github.io/info/doc/web/). Released under CC Attribution-NonCommercial 4.0 license
#
#
#
#
#
#
# ## Harmonic Oscillator
#
# The harmonic oscillator is omnipresent in physics. Although you may think
# of this as being related to springs, it, or an equivalent
# mathematical representation, appears in just about any problem where a
# mode is sitting near its potential energy minimum. At that point,
# $\partial_x V(x)=0$, and the first non-zero term (aside from a
# constant) in the potential energy is that of a harmonic oscillator. In
# a solid, sound modes (phonons) are built on a picture of coupled
# harmonic oscillators, and in relativistic field theory the fundamental
# interactions are also built on coupled oscillators positioned
# infinitesimally close to one another in space. The phenomena of a
# resonance of an oscillator driven at a fixed frequency plays out
# repeatedly in atomic, nuclear and high-energy physics, when quantum
# mechanically the evolution of a state oscillates according to
# $e^{-iEt}$ and exciting discrete quantum states has very similar
# mathematics as exciting discrete states of an oscillator.
#
# The potential energy for a single particle as a function of its position $x$ can be written as a Taylor expansion about some point $x_0$
# <!-- Equation labels as ordinary links -->
# <div id="_auto1"></div>
#
# $$
# \begin{equation}
# V(x)=V(x_0)+(x-x_0)\left.\partial_xV(x)\right|_{x_0}+\frac{1}{2}(x-x_0)^2\left.\partial_x^2V(x)\right|_{x_0}
# +\frac{1}{3!}\left.\partial_x^3V(x)\right|_{x_0}+\cdots
# \label{_auto1} \tag{1}
# \end{equation}
# $$
# If the position $x_0$ is at the minimum of the resonance, the first two non-zero terms of the potential are
# $$
# \begin{eqnarray}
# V(x)&\approx& V(x_0)+\frac{1}{2}(x-x_0)^2\left.\partial_x^2V(x)\right|_{x_0},\\
# \nonumber
# &=&V(x_0)+\frac{1}{2}k(x-x_0)^2,~~~~k\equiv \left.\partial_x^2V(x)\right|_{x_0},\\
# \nonumber
# F&=&-\partial_xV(x)=-k(x-x_0).
# \end{eqnarray}
# $$
# Put into Newton's 2nd law (assuming $x_0=0$),
# $$
# \begin{eqnarray}
# m\ddot{x}&=&-kx,\\
# x&=&A\cos(\omega_0 t-\phi),~~~\omega_0=\sqrt{k/m}.
# \end{eqnarray}
# $$
# Here $A$ and $\phi$ are arbitrary. Equivalently, one could have
# written this as $A\cos(\omega_0 t)+B\sin(\omega_0 t)$, or as the real
# part of $Ae^{i\omega_0 t}$. In this last case $A$ could be an
# arbitrary complex constant. Thus, there are 2 arbitrary constants
# (either $A$ and $B$ or $A$ and $\phi$, or the real and imaginary part
# of one complex constant. This is the expectation for a second order
# differential equation, and also agrees with the physical expectation
# that if you know a particle's initial velocity and position you should
# be able to define its future motion, and that those two arbitrary
# conditions should translate to two arbitrary constants.
#
# A key feature of harmonic motion is that the system repeats itself
# after a time $T=1/f$, where $f$ is the frequency, and $\omega=2\pi f$
# is the angular frequency. The period of the motion is independent of
# the amplitude. However, this independence is only exact when one can
# neglect higher terms of the potential, $x^3, x^4\cdots$. One can
# neglect these terms for sufficiently small amplitudes, and for larger
# amplitudes the motion is no longer purely sinusoidal, and even though
# the motion repeats itself, the time for repeating the motion is no
# longer independent of the amplitude.
#
# One can also calculate the velocity and the kinetic energy as a function of time,
# $$
# \begin{eqnarray}
# \dot{x}&=&-\omega_0A\sin(\omega_0 t-\phi),\\
# \nonumber
# K&=&\frac{1}{2}m\dot{x}^2=\frac{m\omega_0^2A^2}{2}\sin^2(\omega_0t-\phi),\\
# \nonumber
# &=&\frac{k}{2}A^2\sin^2(\omega_0t-\phi).
# \end{eqnarray}
# $$
# The total energy is then
# <!-- Equation labels as ordinary links -->
# <div id="_auto2"></div>
#
# $$
# \begin{equation}
# E=K+V=\frac{1}{2}m\dot{x}^2+\frac{1}{2}kx^2=\frac{1}{2}kA^2.
# \label{_auto2} \tag{2}
# \end{equation}
# $$
# The total energy then goes as the square of the amplitude.
#
#
# A pendulum is an example of a harmonic oscillator. By expanding the
# kinetic and potential energies for small angles find the frequency for
# a pendulum of length $L$ with all the mass $m$ centered at the end by
# writing the eq.s of motion in the form of a harmonic oscillator.
#
# The potential energy and kinetic energies are (for $x$ being the displacement)
# $$
# \begin{eqnarray*}
# V&=&mgL(1-\cos\theta)\approx mgL\frac{x^2}{2L^2},\\
# K&=&\frac{1}{2}mL^2\dot{\theta}^2\approx \frac{m}{2}\dot{x}^2.
# \end{eqnarray*}
# $$
# For small $x$ Newton's 2nd law becomes
# $$
# m\ddot{x}=-\frac{mg}{L}x,
# $$
# and the spring constant would appear to be $k=mg/L$, which makes the
# frequency equal to $\omega_0=\sqrt{g/L}$. Note that the frequency is
# independent of the mass.
#
#
# ## Damped Oscillators
#
# We consider only the case where the damping force is proportional to
# the velocity. This is counter to dragging friction, where the force is
# proportional in strength to the normal force and independent of
# velocity, and is also inconsistent with wind resistance, where the
# magnitude of the drag force is proportional the square of the
# velocity. Rolling resistance does seem to be mainly proportional to
# the velocity. However, the main motivation for considering damping
# forces proportional to the velocity is that the math is more
# friendly. This is because the differential equation is linear,
# i.e. each term is of order $x$, $\dot{x}$, $\ddot{x}\cdots$, or even
# terms with no mention of $x$, and there are no terms such as $x^2$ or
# $x\ddot{x}$. The equations of motion for a spring with damping force
# $-b\dot{x}$ are
# <!-- Equation labels as ordinary links -->
# <div id="_auto3"></div>
#
# $$
# \begin{equation}
# m\ddot{x}+b\dot{x}+kx=0.
# \label{_auto3} \tag{3}
# \end{equation}
# $$
# Just to make the solution a bit less messy, we rewrite this equation as
# <!-- Equation labels as ordinary links -->
# <div id="eq:dampeddiffyq"></div>
#
# $$
# \begin{equation}
# \label{eq:dampeddiffyq} \tag{4}
# \ddot{x}+2\beta\dot{x}+\omega_0^2x=0,~~~~\beta\equiv b/2m,~\omega_0\equiv\sqrt{k/m}.
# \end{equation}
# $$
# Both $\beta$ and $\omega$ have dimensions of inverse time. To find solutions (see appendix C in the text) you must make an educated guess at the form of the solution. To do this, first realize that the solution will need an arbitrary normalization $A$ because the equation is linear. Secondly, realize that if the form is
# <!-- Equation labels as ordinary links -->
# <div id="_auto4"></div>
#
# $$
# \begin{equation}
# x=Ae^{rt}
# \label{_auto4} \tag{5}
# \end{equation}
# $$
# that each derivative simply brings out an extra power of $r$. This
# means that the $Ae^{rt}$ factors out and one can simply solve for an
# equation for $r$. Plugging this form into Eq. ([4](#eq:dampeddiffyq)),
# <!-- Equation labels as ordinary links -->
# <div id="_auto5"></div>
#
# $$
# \begin{equation}
# r^2+2\beta r+\omega_0^2=0.
# \label{_auto5} \tag{6}
# \end{equation}
# $$
# Because this is a quadratic equation there will be two solutions,
# <!-- Equation labels as ordinary links -->
# <div id="_auto6"></div>
#
# $$
# \begin{equation}
# r=-\beta\pm\sqrt{\beta^2-\omega_0^2}.
# \label{_auto6} \tag{7}
# \end{equation}
# $$
# We refer to the two solutions as $r_1$ and $r_2$ corresponding to the
# $+$ and $-$ roots. As expected, there should be two arbitrary
# constants involved in the solution,
# <!-- Equation labels as ordinary links -->
# <div id="_auto7"></div>
#
# $$
# \begin{equation}
# x=A_1e^{r_1t}+A_2e^{r_2t},
# \label{_auto7} \tag{8}
# \end{equation}
# $$
# where the coefficients $A_1$ and $A_2$ are determined by initial
# conditions.
#
# The roots listed above, $\sqrt{\beta^2-\omega_0^2}$, will be
# imaginary if the damping is small and $\beta<\omega_0$. In that case,
# $r$ is complex and the factor $e^{rt}$ will have some oscillatory
# behavior. If the roots are real, there will only be exponentially
# decaying solutions. There are three cases:
#
#
#
# ### Underdamped: $\beta<\omega_0$
# $$
# \begin{eqnarray}
# x&=&A_1e^{-\beta t}e^{i\omega't}+A_2e^{-\beta t}e^{-i\omega't},~~\omega'\equiv\sqrt{\omega_0^2-\beta^2}\\
# \nonumber
# &=&(A_1+A_2)e^{-\beta t}\cos\omega't+i(A_1-A_2)e^{-\beta t}\sin\omega't.
# \end{eqnarray}
# $$
# Here we have made use of the identity
# $e^{i\omega't}=\cos\omega't+i\sin\omega't$. Because the constants are
# arbitrary, and because the real and imaginary parts are both solutions
# individually, we can simply consider the real part of the solution
# alone:
# <!-- Equation labels as ordinary links -->
# <div id="eq:homogsolution"></div>
#
# $$
# \begin{eqnarray}
# \label{eq:homogsolution} \tag{9}
# x&=&B_1e^{-\beta t}\cos\omega't+B_2e^{-\beta t}\sin\omega't,\\
# \nonumber
# \omega'&\equiv&\sqrt{\omega_0^2-\beta^2}.
# \end{eqnarray}
# $$
# ### Critical damping: $\beta=\omega_0$
#
# In this case the two terms involving $r_1$ and $r_2$ are identical
# because $\omega'=0$. Because we need two arbitrary constants, there
# needs to be another solution. This is found by simply guessing, or by
# taking the limit of $\omega'\rightarrow 0$ from the underdamped
# solution. The solution is then
# <!-- Equation labels as ordinary links -->
# <div id="eq:criticallydamped"></div>
#
# $$
# \begin{equation}
# \label{eq:criticallydamped} \tag{10}
# x=Ae^{-\beta t}+Bte^{-\beta t}.
# \end{equation}
# $$
# The critically damped solution is interesting because the solution
# approaches zero quickly, but does not oscillate. For a problem with
# zero initial velocity, the solution never crosses zero. This is a good
# choice for designing shock absorbers or swinging doors.
#
# ### Overdamped: $\beta>\omega_0$
# $$
# \begin{eqnarray}
# x&=&A_1e^{-(\beta+\sqrt{\beta^2-\omega_0^2})t}+A_2e^{-(\beta-\sqrt{\beta^2-\omega_0^2})t}
# \end{eqnarray}
# $$
# This solution will also never pass the origin more than once, and then
# only if the initial velocity is strong and initially toward zero.
#
#
#
#
# Given $b$, $m$ and $\omega_0$, find $x(t)$ for a particle whose
# initial position is $x=0$ and has initial velocity $v_0$ (assuming an
# underdamped solution).
#
# The solution is of the form,
# $$
# \begin{eqnarray*}
# x&=&e^{-\beta t}\left[A_1\cos(\omega' t)+A_2\sin\omega't\right],\\
# \dot{x}&=&-\beta x+\omega'e^{-\beta t}\left[-A_1\sin\omega't+A_2\cos\omega't\right].\\
# \omega'&\equiv&\sqrt{\omega_0^2-\beta^2},~~~\beta\equiv b/2m.
# \end{eqnarray*}
# $$
# From the initial conditions, $A_1=0$ because $x(0)=0$ and $\omega'A_2=v_0$. So
# $$
# x=\frac{v_0}{\omega'}e^{-\beta t}\sin\omega't.
# $$
# ## Our Sliding Block Code
# Here we study first the case without additional friction term and scale our equation
# in terms of a dimensionless time $\tau$.
#
# Let us remind ourselves about the differential equation we want to solve (the general case with damping due to friction)
# $$
# m\frac{d^2x}{dt^2} + b\frac{dx}{dt}+kx(t) =0.
# $$
# We divide by $m$ and introduce $\omega_0=\sqrt{k/m}$ and obtain
# $$
# \frac{d^2x}{dt^2} + \frac{b}{m}\frac{dx}{dt}+\omega_0^2x(t) =0.
# $$
# Thereafter we introduce a dimensionless time $\tau = t\omega_0$ (check
# that the dimensionality is correct) and rewrite our equation as
# $$
# \frac{d^2x}{d\tau^2} + \frac{b}{m\omega_0^2}\frac{dx}{d\tau}+x(\tau) =0,
# $$
# which gives us
# $$
# \frac{d^2x}{d\tau^2} + \frac{b}{k}\frac{dx}{d\tau}+x(\tau) =0.
# $$
# We then define $\gamma = b/2k$ and rewrite our equations as
# $$
# \frac{d^2x}{d\tau^2} + 2\gamma\frac{dx}{d\tau}+x(\tau) =0.
# $$
# This is the equation we will code below. The first version employs the Euler-Cromer method.
# +
# %matplotlib inline
# Common imports
import numpy as np
import pandas as pd
from math import *
import matplotlib.pyplot as plt
import os

# Where to save the figures and data files
PROJECT_ROOT_DIR = "Results"
FIGURE_ID = "Results/FigureFiles"
DATA_ID = "DataFiles/"
# Create the output directories on first run.
if not os.path.exists(PROJECT_ROOT_DIR):
    os.mkdir(PROJECT_ROOT_DIR)
if not os.path.exists(FIGURE_ID):
    os.makedirs(FIGURE_ID)
if not os.path.exists(DATA_ID):
    os.makedirs(DATA_ID)

def image_path(fig_id):
    """Return the full path for a figure file named *fig_id*."""
    return os.path.join(FIGURE_ID, fig_id)

def data_path(dat_id):
    """Return the full path for a data file named *dat_id*."""
    return os.path.join(DATA_ID, dat_id)

def save_fig(fig_id):
    """Save the current matplotlib figure as a PNG under FIGURE_ID."""
    plt.savefig(image_path(fig_id) + ".png", format='png')

from pylab import plt, mpl
# NOTE(review): the 'seaborn' style name was removed in matplotlib >= 3.6;
# confirm the pinned matplotlib version still accepts it.
plt.style.use('seaborn')
mpl.rcParams['font.family'] = 'serif'
# Integration step in the dimensionless time tau = omega_0 * t.
DeltaT = 0.001
#set up arrays
tfinal = 30 # total integration time in dimensionless units (not years)
n = ceil(tfinal/DeltaT)
# set up arrays for t, v, and x
t = np.zeros(n)
v = np.zeros(n)
x = np.zeros(n)
# Initial conditions: released from x0 = 1 at rest.
x0 = 1.0
v0 = 0.0
x[0] = x0
v[0] = v0
# gamma = b/(2k); 0.5 is the critically damped case discussed in the text.
gamma = 0.5
# Integrate with the Euler-Cromer method: the position update below uses the
# *updated* velocity v[i+1], which is what distinguishes Euler-Cromer from
# plain forward Euler.
for i in range(n-1):
    # Set up the acceleration (damped oscillator in dimensionless form)
    # Here you could have defined your own function for this
    a = -2*gamma*v[i]-x[i]
    # update velocity first, then position from the new velocity
    v[i+1] = v[i] + DeltaT*a
    x[i+1] = x[i] + DeltaT*v[i+1]
    t[i+1] = t[i] + DeltaT
# Plot position as function of time
fig, ax = plt.subplots()
#ax.set_xlim(0, tfinal)
ax.set_ylabel('x[m]')
ax.set_xlabel('t[s]')
ax.plot(t, x)
fig.tight_layout()
save_fig("BlockEulerCromer")
plt.show()
# -
# When setting up the value of $\gamma$ we see that for $\gamma=0$ we get the simple oscillatory motion with no damping.
# Choosing $\gamma < 1/2$ leads to the classical underdamped case with oscillatory motion, but where the motion comes to an end.
#
# Choosing $\gamma =1/2$ leads to what normally is called critical damping and $\gamma> 1/2$ leads to critical overdamping.
# Try it out and try also to change the initial position and velocity.
#
# ## Sinusoidally Driven Oscillators
#
# Here, we consider the force
# <!-- Equation labels as ordinary links -->
# <div id="_auto8"></div>
#
# $$
# \begin{equation}
# F=-kx-b\dot{x}+F_0\cos\omega t,
# \label{_auto8} \tag{11}
# \end{equation}
# $$
# which leads to the differential equation
# <!-- Equation labels as ordinary links -->
# <div id="eq:drivenosc"></div>
#
# $$
# \begin{equation}
# \label{eq:drivenosc} \tag{12}
# \ddot{x}+2\beta\dot{x}+\omega_0^2x=(F_0/m)\cos\omega t.
# \end{equation}
# $$
# Consider a single solution with no arbitrary constants, which we will
# call a {\it particular solution}, $x_p(t)$. It should be emphasized
# that this is {\bf A} particular solution, because there exists an
# infinite number of such solutions because the general solution should
# have two arbitrary constants. Now consider solutions to the same
# equation without the driving term, which include two arbitrary
# constants. These are called either {\it homogenous solutions} or {\it
# complementary solutions}, and were given in the previous section,
# e.g. Eq. ([9](#eq:homogsolution)) for the underdamped case. The
# homogenous solution already incorporates the two arbitrary constants,
# so any sum of a homogenous solution and a particular solution will
# represent the {\it general solution} of the equation. The general
# solution incorporates the two arbitrary constants $A$ and $B$ to
# accommodate the two initial conditions. One could have picked a
# different particular solution, i.e. the original particular solution
# plus any homogenous solution with the arbitrary constants $A_p$ and
# $B_p$ chosen at will. When one adds in the homogenous solution, which
# has adjustable constants with arbitrary constants $A'$ and $B'$, to
# the new particular solution, one can get the same general solution by
# simply adjusting the new constants such that $A'+A_p=A$ and
# $B'+B_p=B$. Thus, the choice of $A_p$ and $B_p$ are irrelevant, and
# when choosing the particular solution it is best to make the simplest
# choice possible.
#
# To find a particular solution, one first guesses at the form,
# <!-- Equation labels as ordinary links -->
# <div id="eq:partform"></div>
#
# $$
# \begin{equation}
# \label{eq:partform} \tag{13}
# x_p(t)=D\cos(\omega t-\delta),
# \end{equation}
# $$
# and rewrite the differential equation as
# <!-- Equation labels as ordinary links -->
# <div id="_auto9"></div>
#
# $$
# \begin{equation}
# D\left\{-\omega^2\cos(\omega t-\delta)-2\beta\omega\sin(\omega t-\delta)+\omega_0^2\cos(\omega t-\delta)\right\}=\frac{F_0}{m}\cos(\omega t).
# \label{_auto9} \tag{14}
# \end{equation}
# $$
# One can now use angle addition formulas to get
# $$
# \begin{eqnarray}
# D\left\{(-\omega^2\cos\delta+2\beta\omega\sin\delta+\omega_0^2\cos\delta)\cos(\omega t)\right.&&\\
# \nonumber
# \left.+(-\omega^2\sin\delta-2\beta\omega\cos\delta+\omega_0^2\sin\delta)\sin(\omega t)\right\}
# &=&\frac{F_0}{m}\cos(\omega t).
# \end{eqnarray}
# $$
# Both the $\cos$ and $\sin$ terms need to equate if the expression is to hold at all times. Thus, this becomes two equations
# $$
# \begin{eqnarray}
# D\left\{-\omega^2\cos\delta+2\beta\omega\sin\delta+\omega_0^2\cos\delta\right\}&=&\frac{F_0}{m}\\
# \nonumber
# -\omega^2\sin\delta-2\beta\omega\cos\delta+\omega_0^2\sin\delta&=&0.
# \end{eqnarray}
# $$
# After dividing by $\cos\delta$, the lower expression leads to
# <!-- Equation labels as ordinary links -->
# <div id="_auto10"></div>
#
# $$
# \begin{equation}
# \tan\delta=\frac{2\beta\omega}{\omega_0^2-\omega^2}.
# \label{_auto10} \tag{15}
# \end{equation}
# $$
# Using the identities $\tan^2\delta+1=\sec^2\delta$ and $\sin^2\delta+\cos^2\delta=1$, one can also express $\sin\delta$ and $\cos\delta$,
# $$
# \begin{eqnarray}
# \sin\delta&=&\frac{2\beta\omega}{\sqrt{(\omega_0^2-\omega^2)^2+4\omega^2\beta^2}},\\
# \nonumber
# \cos\delta&=&\frac{(\omega_0^2-\omega^2)}{\sqrt{(\omega_0^2-\omega^2)^2+4\omega^2\beta^2}}
# \end{eqnarray}
# $$
# Inserting the expressions for $\cos\delta$ and $\sin\delta$ into the expression for $D$,
# <!-- Equation labels as ordinary links -->
# <div id="eq:Ddrive"></div>
#
# $$
# \begin{equation}
# \label{eq:Ddrive} \tag{16}
# D=\frac{F_0/m}{\sqrt{(\omega_0^2-\omega^2)^2+4\omega^2\beta^2}}.
# \end{equation}
# $$
# For a given initial condition, e.g. initial displacement and velocity,
# one must add the homogenous solution then solve for the two arbitrary
# constants. However, because the homogenous solutions decay with time
# as $e^{-\beta t}$, the particular solution is all that remains at
# large times, and is therefore the steady state solution. Because the
# arbitrary constants are all in the homogenous solution, all memory of
# the initial conditions are lost at large times, $t>>1/\beta$.
#
# The amplitude of the motion, $D$, is linearly proportional to the
# driving force ($F_0/m$), but also depends on the driving frequency
# $\omega$. For small $\beta$ the maximum will occur at
# $\omega=\omega_0$. This is referred to as a resonance. In the limit
# $\beta\rightarrow 0$ the amplitude at resonance approaches infinity.
#
# ## Alternative Derivation for Driven Oscillators
#
# Here, we derive the same expressions as in Equations ([13](#eq:partform)) and ([16](#eq:Ddrive)) but express the driving forces as
# $$
# \begin{eqnarray}
# F(t)&=&F_0e^{i\omega t},
# \end{eqnarray}
# $$
# rather than as $F_0\cos\omega t$. The real part of $F$ is the same as before. For the differential equation,
# <!-- Equation labels as ordinary links -->
# <div id="eq:compdrive"></div>
#
# $$
# \begin{eqnarray}
# \label{eq:compdrive} \tag{17}
# \ddot{x}+2\beta\dot{x}+\omega_0^2x&=&\frac{F_0}{m}e^{i\omega t},
# \end{eqnarray}
# $$
# one can treat $x(t)$ as an imaginary function. Because the operations
# $d^2/dt^2$ and $d/dt$ are real and thus do not mix the real and
# imaginary parts of $x(t)$, Eq. ([17](#eq:compdrive)) is effectively 2
# equations. Because $e^{\omega t}=\cos\omega t+i\sin\omega t$, the real
# part of the solution for $x(t)$ gives the solution for a driving force
# $F_0\cos\omega t$, and the imaginary part of $x$ corresponds to the
# case where the driving force is $F_0\sin\omega t$. It is rather easy
# to solve for the complex $x$ in this case, and by taking the real part
# of the solution, one finds the answer for the $\cos\omega t$ driving
# force.
#
# We assume a simple form for the particular solution
# <!-- Equation labels as ordinary links -->
# <div id="_auto11"></div>
#
# $$
# \begin{equation}
# x_p=De^{i\omega t},
# \label{_auto11} \tag{18}
# \end{equation}
# $$
# where $D$ is a complex constant.
#
# From Eq. ([17](#eq:compdrive)) one inserts the form for $x_p$ above to get
# $$
# \begin{eqnarray}
# D\left\{-\omega^2+2i\beta\omega+\omega_0^2\right\}e^{i\omega t}=(F_0/m)e^{i\omega t},\\
# \nonumber
# D=\frac{F_0/m}{(\omega_0^2-\omega^2)+2i\beta\omega}.
# \end{eqnarray}
# $$
# The norm and phase for $D=|D|e^{-i\delta}$ can be read by inspection,
# <!-- Equation labels as ordinary links -->
# <div id="_auto12"></div>
#
# $$
# \begin{equation}
# |D|=\frac{F_0/m}{\sqrt{(\omega_0^2-\omega^2)^2+4\beta^2\omega^2}},~~~~\tan\delta=\frac{2\beta\omega}{\omega_0^2-\omega^2}.
# \label{_auto12} \tag{19}
# \end{equation}
# $$
# This is the same expression for $\delta$ as before. One then finds $x_p(t)$,
# <!-- Equation labels as ordinary links -->
# <div id="eq:fastdriven1"></div>
#
# $$
# \begin{eqnarray}
# \label{eq:fastdriven1} \tag{20}
# x_p(t)&=&\Re\frac{(F_0/m)e^{i\omega t-i\delta}}{\sqrt{(\omega_0^2-\omega^2)^2+4\beta^2\omega^2}}\\
# \nonumber
# &=&\frac{(F_0/m)\cos(\omega t-\delta)}{\sqrt{(\omega_0^2-\omega^2)^2+4\beta^2\omega^2}}.
# \end{eqnarray}
# $$
# This is the same answer as before.
# If one wished to solve for the case where $F(t)= F_0\sin\omega t$, the imaginary part of the solution would work
# <!-- Equation labels as ordinary links -->
# <div id="eq:fastdriven2"></div>
#
# $$
# \begin{eqnarray}
# \label{eq:fastdriven2} \tag{21}
# x_p(t)&=&\Im\frac{(F_0/m)e^{i\omega t-i\delta}}{\sqrt{(\omega_0^2-\omega^2)^2+4\beta^2\omega^2}}\\
# \nonumber
# &=&\frac{(F_0/m)\sin(\omega t-\delta)}{\sqrt{(\omega_0^2-\omega^2)^2+4\beta^2\omega^2}}.
# \end{eqnarray}
# $$
# Consider the damped and driven harmonic oscillator worked out above. Given $F_0, m,\beta$ and $\omega_0$, solve for the complete solution $x(t)$ for the case where $F=F_0\sin\omega t$ with initial conditions $x(t=0)=0$ and $v(t=0)=0$. Assume the underdamped case.
#
# The general solution including the arbitrary constants includes both the homogenous and particular solutions,
# $$
# \begin{eqnarray*}
# x(t)&=&\frac{F_0}{m}\frac{\sin(\omega t-\delta)}{\sqrt{(\omega_0^2-\omega^2)^2+4\beta^2\omega^2}}
# +A\cos\omega't e^{-\beta t}+B\sin\omega't e^{-\beta t}.
# \end{eqnarray*}
# $$
# The quantities $\delta$ and $\omega'$ are given earlier in the
# section, $\omega'=\sqrt{\omega_0^2-\beta^2}$ and
# $\delta=\tan^{-1}\left(2\beta\omega/(\omega_0^2-\omega^2)\right)$. Here, solving
# the problem means finding the arbitrary constants $A$ and
# $B$. Satisfying the initial conditions for the initial position and
# velocity:
# $$
# \begin{eqnarray*}
# x(t=0)=0&=&-\eta\sin\delta+A,\\
# v(t=0)=0&=&\omega\eta\cos\delta-\beta A+\omega'B,\\
# \eta&\equiv&\frac{F_0}{m}\frac{1}{\sqrt{(\omega_0^2-\omega^2)^2+4\beta^2\omega^2}}.
# \end{eqnarray*}
# $$
# The problem is now reduced to 2 equations and 2 unknowns, $A$ and $B$. The solution is
# $$
# \begin{eqnarray}
# A&=& \eta\sin\delta ,~~~B=\frac{-\omega\eta\cos\delta+\beta\eta\sin\delta}{\omega'}.
# \end{eqnarray}
# $$
# ## Resonance Widths; the $Q$ factor
#
# From the previous two sections, the particular solution for a driving force, $F=F_0\cos\omega t$, is
# $$
# \begin{eqnarray}
# x_p(t)&=&\frac{F_0/m}{\sqrt{(\omega_0^2-\omega^2)^2+4\omega^2\beta^2}}\cos(\omega t-\delta),\\
# \nonumber
# \delta&=&\tan^{-1}\left(\frac{2\beta\omega}{\omega_0^2-\omega^2}\right).
# \end{eqnarray}
# $$
# If one fixes the driving frequency $\omega$ and adjusts the
# fundamental frequency $\omega_0=\sqrt{k/m}$, the maximum amplitude
# occurs when $\omega_0=\omega$ because that is when the term from the
# denominator $(\omega_0^2-\omega^2)^2+4\omega^2\beta^2$ is at a
# minimum. This is akin to dialing into a radio station. However, if one
# fixes $\omega_0$ and adjusts the driving frequency, one minimizes with
# respect to $\omega$, e.g. sets
# <!-- Equation labels as ordinary links -->
# <div id="_auto13"></div>
#
# $$
# \begin{equation}
# \frac{d}{d\omega}\left[(\omega_0^2-\omega^2)^2+4\omega^2\beta^2\right]=0,
# \label{_auto13} \tag{22}
# \end{equation}
# $$
# and one finds that the maximum amplitude occurs when
# $\omega=\sqrt{\omega_0^2-2\beta^2}$. If $\beta$ is small relative to
# $\omega_0$, one can simply state that the maximum amplitude is
# <!-- Equation labels as ordinary links -->
# <div id="_auto14"></div>
#
# $$
# \begin{equation}
# x_{\rm max}\approx\frac{F_0}{2m\beta \omega_0}.
# \label{_auto14} \tag{23}
# \end{equation}
# $$
# The width of the resonance can be characterized by the driving
# frequencies at which the squared amplitude falls to half of its peak
# value. For small damping this half-maximum condition reads
# $$
# \begin{eqnarray}
# \frac{4\omega^2\beta^2}{(\omega_0^2-\omega^2)^2+4\omega^2\beta^2}=\frac{1}{2}.
# \end{eqnarray}
# $$
# For small damping this occurs when $\omega=\omega_0\pm \beta$, so the $FWHM\approx 2\beta$. For the purposes of tuning to a specific frequency, one wants the width to be as small as possible. The ratio of $\omega_0$ to $FWHM$ is known as the {\it quality} factor, or $Q$ factor,
# <!-- Equation labels as ordinary links -->
# <div id="_auto15"></div>
#
# $$
# \begin{equation}
# Q\equiv \frac{\omega_0}{2\beta}.
# \label{_auto15} \tag{24}
# \end{equation}
# $$
# <!-- !split -->
# ## Principle of Superposition and Periodic Forces (Fourier Transforms)
#
# If one has several driving forces, $F(t)=\sum_n F_n(t)$, one can find
# the particular solution to each $F_n$, $x_{pn}(t)$, and the particular
# solution for the entire driving force is
# <!-- Equation labels as ordinary links -->
# <div id="_auto16"></div>
#
# $$
# \begin{equation}
# x_p(t)=\sum_nx_{pn}(t).
# \label{_auto16} \tag{25}
# \end{equation}
# $$
# This is known as the principle of superposition. It only applies when
# the homogenous equation is linear. If there were an anharmonic term
# such as $x^3$ in the homogenous equation, then when one summed various
# solutions, terms such as $(\sum_n x_n)^3$ would generate cross
# terms. Superposition is especially useful when $F(t)$ can be written
# as a sum of sinusoidal terms, because the solutions for each
# sinusoidal term is analytic, and are given in the previous two
# subsections.
#
# Driving forces are often periodic, even when they are not
# sinusoidal. Periodicity implies that for some time $\tau$
# $$
# \begin{eqnarray}
# F(t+\tau)=F(t).
# \end{eqnarray}
# $$
# One example of a non-sinusoidal periodic force is a square wave. Many
# components in electric circuits are non-linear, e.g. diodes, which
# makes many wave forms non-sinusoidal even when the circuits are being
# driven by purely sinusoidal sources.
#
# For the sinusoidal example studied in the previous subsections the
# period is $\tau=2\pi/\omega$. However, higher harmonics can also
# satisfy the periodicity requirement. In general, any force that
# satisfies the periodicity requirement can be expressed as a sum over
# harmonics,
# <!-- Equation labels as ordinary links -->
# <div id="_auto17"></div>
#
# $$
# \begin{equation}
# F(t)=\frac{f_0}{2}+\sum_{n>0} f_n\cos(2n\pi t/\tau)+g_n\sin(2n\pi t/\tau).
# \label{_auto17} \tag{26}
# \end{equation}
# $$
# From the previous subsection, one can write down the answer for
# $x_{pn}(t)$, by substituting $f_n/m$ or $g_n/m$ for $F_0/m$ into Eq.s
# ([20](#eq:fastdriven1)) or ([21](#eq:fastdriven2)) respectively. By
# writing each factor $2n\pi t/\tau$ as $n\omega t$, with $\omega\equiv
# 2\pi/\tau$,
# <!-- Equation labels as ordinary links -->
# <div id="eq:fourierdef1"></div>
#
# $$
# \begin{equation}
# \label{eq:fourierdef1} \tag{27}
# F(t)=\frac{f_0}{2}+\sum_{n>0}f_n\cos(n\omega t)+g_n\sin(n\omega t).
# \end{equation}
# $$
# The solutions for $x(t)$ then come from replacing $\omega$ with
# $n\omega$ for each term in the particular solution in Equations
# ([13](#eq:partform)) and ([16](#eq:Ddrive)),
# $$
# \begin{eqnarray}
# x_p(t)&=&\frac{f_0}{2k}+\sum_{n>0} \alpha_n\cos(n\omega t-\delta_n)+\beta_n\sin(n\omega t-\delta_n),\\
# \nonumber
# \alpha_n&=&\frac{f_n/m}{\sqrt{((n\omega)^2-\omega_0^2)^2+4\beta^2n^2\omega^2}},\\
# \nonumber
# \beta_n&=&\frac{g_n/m}{\sqrt{((n\omega)^2-\omega_0^2)^2+4\beta^2n^2\omega^2}},\\
# \nonumber
# \delta_n&=&\tan^{-1}\left(\frac{2\beta n\omega}{\omega_0^2-n^2\omega^2}\right).
# \end{eqnarray}
# $$
# Because the forces have been applied for a long time, any non-zero
# damping eliminates the homogenous parts of the solution, so one need
# only consider the particular solution for each $n$.
#
# The problem will considered solved if one can find expressions for the
# coefficients $f_n$ and $g_n$, even though the solutions are expressed
# as an infinite sum. The coefficients can be extracted from the
# function $F(t)$ by
# <!-- Equation labels as ordinary links -->
# <div id="eq:fourierdef2"></div>
#
# $$
# \begin{eqnarray}
# \label{eq:fourierdef2} \tag{28}
# f_n&=&\frac{2}{\tau}\int_{-\tau/2}^{\tau/2} dt~F(t)\cos(2n\pi t/\tau),\\
# \nonumber
# g_n&=&\frac{2}{\tau}\int_{-\tau/2}^{\tau/2} dt~F(t)\sin(2n\pi t/\tau).
# \end{eqnarray}
# $$
# To check the consistency of these expressions and to verify
# Eq. ([28](#eq:fourierdef2)), one can insert the expansion of $F(t)$ in
# Eq. ([27](#eq:fourierdef1)) into the expression for the coefficients in
# Eq. ([28](#eq:fourierdef2)) and see whether
# $$
# \begin{eqnarray}
# f_n&=?&\frac{2}{\tau}\int_{-\tau/2}^{\tau/2} dt~\left\{
# \frac{f_0}{2}+\sum_{m>0}f_m\cos(m\omega t)+g_m\sin(m\omega t)
# \right\}\cos(n\omega t).
# \end{eqnarray}
# $$
# Immediately, one can throw away all the terms with $g_m$ because they
# convolute an even and an odd function. The term with $f_0/2$
# disappears because $\cos(n\omega t)$ is equally positive and negative
# over the interval and will integrate to zero. For all the terms
# $f_m\cos(m\omega t)$ appearing in the sum, one can use angle addition
# formulas to see that $\cos(m\omega t)\cos(n\omega
# t)=(1/2)(\cos[(m+n)\omega t]+\cos[(m-n)\omega t])$. This will integrate
# to zero unless $m=n$. In that case the $m=n$ term gives
# <!-- Equation labels as ordinary links -->
# <div id="_auto18"></div>
#
# $$
# \begin{equation}
# \int_{-\tau/2}^{\tau/2}dt~\cos^2(m\omega t)=\frac{\tau}{2},
# \label{_auto18} \tag{29}
# \end{equation}
# $$
# and
# $$
# \begin{eqnarray}
# f_n&=?&\frac{2}{\tau}\int_{-\tau/2}^{\tau/2} dt~f_n/2\\
# \nonumber
# &=&f_n~\checkmark.
# \end{eqnarray}
# $$
# The same method can be used to check for the consistency of $g_n$.
#
#
# Consider the driving force:
# <!-- Equation labels as ordinary links -->
# <div id="_auto19"></div>
#
# $$
# \begin{equation}
# F(t)=At/\tau,~~-\tau/2<t<\tau/2,~~~F(t+\tau)=F(t).
# \label{_auto19} \tag{30}
# \end{equation}
# $$
# Find the Fourier coefficients $f_n$ and $g_n$ for all $n$ using Eq. ([28](#eq:fourierdef2)).
#
# Because $F(t)$ is an odd function, the cosine coefficients vanish by symmetry, i.e. $f_n=0$. One can find $g_n$ integrating by parts,
# <!-- Equation labels as ordinary links -->
# <div id="eq:fouriersolution"></div>
#
# $$
# \begin{eqnarray}
# \label{eq:fouriersolution} \tag{31}
# g_n&=&\frac{2}{\tau}\int_{-\tau/2}^{\tau/2}dt~\sin(n\omega t) \frac{At}{\tau}\\
# \nonumber
# u&=&t,~dv=\sin(n\omega t)dt,~v=-\cos(n\omega t)/(n\omega),\\
# \nonumber
# g_n&=&\frac{-2A}{n\omega \tau^2}\int_{-\tau/2}^{\tau/2}dt~\cos(n\omega t)
# +\left.2A\frac{-t\cos(n\omega t)}{n\omega\tau^2}\right|_{-\tau/2}^{\tau/2}.
# \end{eqnarray}
# $$
# The first term is zero because $\cos(n\omega t)$ will be equally
# positive and negative over the interval. Using the fact that
# $\omega\tau=2\pi$,
# $$
# \begin{eqnarray}
# g_n&=&-\frac{2A}{2n\pi}\cos(n\omega\tau/2)\\
# \nonumber
# &=&-\frac{A}{n\pi}\cos(n\pi)\\
# \nonumber
# &=&\frac{A}{n\pi}(-1)^{n+1}.
# \end{eqnarray}
# $$
# ## Response to Transient Force
#
# Consider a particle at rest in the bottom of an underdamped harmonic
# oscillator, that then feels a sudden impulse, or change in momentum,
# $I=F\Delta t$ at $t=0$. This increases the velocity immediately by an
# amount $v_0=I/m$ while not changing the position. One can then solve
# the trajectory by solving Eq. ([9](#eq:homogsolution)) with initial
# conditions $v_0=I/m$ and $x_0=0$. This gives
# <!-- Equation labels as ordinary links -->
# <div id="_auto20"></div>
#
# $$
# \begin{equation}
# x(t)=\frac{I}{m\omega'}e^{-\beta t}\sin\omega't, ~~t>0.
# \label{_auto20} \tag{32}
# \end{equation}
# $$
# Here, $\omega'=\sqrt{\omega_0^2-\beta^2}$. For an impulse $I_i$ that
# occurs at time $t_i$ the trajectory would be
# <!-- Equation labels as ordinary links -->
# <div id="_auto21"></div>
#
# $$
# \begin{equation}
# x(t)=\frac{I_i}{m\omega'}e^{-\beta (t-t_i)}\sin[\omega'(t-t_i)] \Theta(t-t_i),
# \label{_auto21} \tag{33}
# \end{equation}
# $$
# where $\Theta(t-t_i)$ is a step function, i.e. $\Theta(x)$ is zero for
# $x<0$ and unity for $x>0$. If there were several impulses linear
# superposition tells us that we can sum over each contribution,
# <!-- Equation labels as ordinary links -->
# <div id="_auto22"></div>
#
# $$
# \begin{equation}
# x(t)=\sum_i\frac{I_i}{m\omega'}e^{-\beta(t-t_i)}\sin[\omega'(t-t_i)]\Theta(t-t_i)
# \label{_auto22} \tag{34}
# \end{equation}
# $$
# Now one can consider a series of impulses at times separated by
# $\Delta t$, where each impulse is given by $F_i\Delta t$. The sum
# above now becomes an integral,
# <!-- Equation labels as ordinary links -->
# <div id="eq:Greeny"></div>
#
# $$
# \begin{eqnarray}\label{eq:Greeny} \tag{35}
# x(t)&=&\int_{-\infty}^\infty dt'~F(t')\frac{e^{-\beta(t-t')}\sin[\omega'(t-t')]}{m\omega'}\Theta(t-t')\\
# \nonumber
# &=&\int_{-\infty}^\infty dt'~F(t')G(t-t'),\\
# \nonumber
# G(\Delta t)&=&\frac{e^{-\beta\Delta t}\sin[\omega' \Delta t]}{m\omega'}\Theta(\Delta t)
# \end{eqnarray}
# $$
# The quantity
# $e^{-\beta(t-t')}\sin[\omega'(t-t')]/m\omega'\Theta(t-t')$ is called a
# Green's function, $G(t-t')$. It describes the response at $t$ due to a
# force applied at a time $t'$, and is a function of $t-t'$. The step
# function ensures that the response does not occur before the force is
# applied. One should remember that the form for $G$ would change if the
# oscillator were either critically- or over-damped.
#
# When performing the integral in Eq. ([35](#eq:Greeny)) one can use
# angle addition formulas to factor out the part with the $t'$
# dependence in the integrand,
# <!-- Equation labels as ordinary links -->
# <div id="eq:Greeny2"></div>
#
# $$
# \begin{eqnarray}
# \label{eq:Greeny2} \tag{36}
# x(t)&=&\frac{1}{m\omega'}e^{-\beta t}\left[I_c(t)\sin(\omega't)-I_s(t)\cos(\omega't)\right],\\
# \nonumber
# I_c(t)&\equiv&\int_{-\infty}^t dt'~F(t')e^{\beta t'}\cos(\omega't'),\\
# \nonumber
# I_s(t)&\equiv&\int_{-\infty}^t dt'~F(t')e^{\beta t'}\sin(\omega't').
# \end{eqnarray}
# $$
# If the time $t$ is beyond any time at which the force acts,
# $F(t'>t)=0$, the coefficients $I_c$ and $I_s$ become independent of
# $t$.
#
#
# Consider an undamped oscillator ($\beta\rightarrow 0$), with
# characteristic frequency $\omega_0$ and mass $m$, that is at rest
# until it feels a force described by a Gaussian form,
# $$
# \begin{eqnarray*}
# F(t)&=&F_0 \exp\left\{\frac{-t^2}{2\tau^2}\right\}.
# \end{eqnarray*}
# $$
# For large times ($t\gg\tau$), where the force has died off, find
# $x(t)$. Solve for the coefficients $I_c$ and $I_s$ in
# Eq. ([36](#eq:Greeny2)). Because the Gaussian is an even function,
# $I_s=0$, and one need only solve for $I_c$,
# $$
# \begin{eqnarray*}
# I_c&=&F_0\int_{-\infty}^\infty dt'~e^{-t^{\prime 2}/(2\tau^2)}\cos(\omega_0 t')\\
# &=&\Re F_0 \int_{-\infty}^\infty dt'~e^{-t^{\prime 2}/(2\tau^2)}e^{i\omega_0 t'}\\
# &=&\Re F_0 \int_{-\infty}^\infty dt'~e^{-(t'-i\omega_0\tau^2)^2/(2\tau^2)}e^{-\omega_0^2\tau^2/2}\\
# &=&F_0\tau \sqrt{2\pi} e^{-\omega_0^2\tau^2/2}.
# \end{eqnarray*}
# $$
# The third step involved completing the square, and the final step used the fact that the integral
# $$
# \begin{eqnarray*}
# \int_{-\infty}^\infty dx~e^{-x^2/2}&=&\sqrt{2\pi}.
# \end{eqnarray*}
# $$
# To see that this integral is true, consider the square of the integral, which you can change to polar coordinates,
# $$
# \begin{eqnarray*}
# I&=&\int_{-\infty}^\infty dx~e^{-x^2/2}\\
# I^2&=&\int_{-\infty}^\infty dxdy~e^{-(x^2+y^2)/2}\\
# &=&2\pi\int_0^\infty rdr~e^{-r^2/2}\\
# &=&2\pi.
# \end{eqnarray*}
# $$
# Finally, the expression for $x$ from Eq. ([36](#eq:Greeny2)) is
# $$
# \begin{eqnarray*}
# x(t>>\tau)&=&\frac{F_0\tau}{m\omega_0} \sqrt{2\pi} e^{-\omega_0^2\tau^2/2}\sin(\omega_0t).
# \end{eqnarray*}
# $$
# ## Sliding Block tied to a Wall
# Another classical case is that of simple harmonic oscillations, here represented by a block sliding on a horizontal frictionless surface. The block is tied to a wall with a spring. If the spring is not compressed or stretched too far, the force on the block at a given position $x$ is
# $$
# F=-kx.
# $$
# The negative sign means that the force acts to restore the object to an equilibrium position. Newton's equation of motion for this idealized system is then
# $$
# m\frac{d^2x}{dt^2}=-kx,
# $$
# or we could rephrase it as
# <!-- Equation labels as ordinary links -->
# <div id="eq:newton1"></div>
#
# $$
# \frac{d^2x}{dt^2}=-\frac{k}{m}x=-\omega_0^2x,
# \label{eq:newton1} \tag{37}
# $$
# with the angular frequency $\omega_0^2=k/m$.
#
# The above differential equation has the advantage that it can be solved analytically with solutions on the form
# $$
# x(t)=A\cos(\omega_0 t+\nu),
# $$
# where $A$ is the amplitude and $\nu$ the phase constant. This provides in turn an important test for the numerical
# solution and the development of a program for more complicated cases which cannot be solved analytically.
#
#
#
#
# ## Simple Example, Block tied to a Wall
#
# With the position $x(t)$ and the velocity $v(t)=dx/dt$ we can reformulate Newton's equation in the following way
# $$
# \frac{dx(t)}{dt}=v(t),
# $$
# and
# $$
# \frac{dv(t)}{dt}=-\omega_0^2x(t).
# $$
# We are now going to solve these equations using first the standard forward Euler method. Later we will try to improve upon this.
#
#
# ## Simple Example, Block tied to a Wall
#
# Before proceeding however, it is important to note that in addition to the exact solution, we have at least two further tests which can be used to check our solution.
#
# Since functions like $cos$ are periodic with a period $2\pi$, then the solution $x(t)$ has also to be periodic. This means that
# $$
# x(t+T)=x(t),
# $$
# with $T$ the period defined as
# $$
# T=\frac{2\pi}{\omega_0}=\frac{2\pi}{\sqrt{k/m}}.
# $$
# Observe that $T$ depends only on $k/m$ and not on the amplitude of the solution.
#
#
# ## Simple Example, Block tied to a Wall
#
# In addition to the periodicity test, the total energy has also to be conserved.
#
# Suppose we choose the initial conditions
# $$
# x(t=0)=1\hspace{0.1cm} \mathrm{m}\hspace{1cm} v(t=0)=0\hspace{0.1cm}\mathrm{m/s},
# $$
# meaning that block is at rest at $t=0$ but with a potential energy
# $$
# E_0=\frac{1}{2}kx(t=0)^2=\frac{1}{2}k.
# $$
# The total energy at any time $t$ has however to be conserved, meaning that our solution has to fulfil the condition
# $$
# E_0=\frac{1}{2}kx(t)^2+\frac{1}{2}mv(t)^2.
# $$
# ## Simple Example, Block tied to a Wall
#
# An algorithm which implements these equations is included below.
# * Choose the initial position and speed, with the most common choice $v(t=0)=0$ and some fixed value for the position.
#
# * Choose the method you wish to employ in solving the problem.
#
# * Subdivide the time interval $[t_i,t_f] $ into a grid with step size
# $$
# h=\frac{t_f-t_i}{N},
# $$
# where $N$ is the number of mesh points.
# * Calculate now the total energy given by
# $$
# E_0=\frac{1}{2}kx(t=0)^2=\frac{1}{2}k.
# $$
# * Choose ODE solver to obtain $x_{i+1}$ and $v_{i+1}$ starting from the previous values $x_i$ and $v_i$.
#
# * When we have computed $x(v)_{i+1}$ we upgrade $t_{i+1}=t_i+h$.
#
# * This iterative process continues till we reach the maximum time $t_f$.
#
# * The results are checked against the exact solution. Furthermore, one has to check the stability of the numerical solution against the chosen number of mesh points $N$.
#
# ## Simple Example, Block tied to a Wall, python code
#
# The following python program performs essentially the same calculations as the previous c++ code.
# +
#
# This program solves Newtons equation for a block sliding on
# an horizontal frictionless surface.
# The block is tied to the wall with a spring, so N's eq takes the form:
#
# m d^2x/dt^2 = - kx
#
# In order to make the solution dimless, we set k/m = 1.
# This results in two coupled diff. eq's that may be written as:
#
# dx/dt = v
# dv/dt = -x
#
# The user has to specify the initial velocity and position,
# and the number of steps. The time interval is fixed to
# t \in [0, 4\pi) (two periods)
#
# Note that this is a highly simplifyed rk4 code, intended
# for conceptual understanding and experimentation.
import sys
import numpy, math
#Global variables
ofile = None;   # output file handle; opened in the main program below
E0 = 0.0        # initial total (dimensionless) energy; set in the main program below
def sim(x_0, v_0, N):
    """Integrate the dimensionless oscillator dx/dt = v, dv/dt = -x with RK4.

    Parameters
    ----------
    x_0, v_0 : float
        Initial position and velocity.
    N : int
        Number of time steps over the fixed interval t in [0, 4*pi)
        (two full periods of the scaled oscillator).

    At every step the pre-update values are written to the global output
    file via output().
    """
    ts = 0.0
    te = 4*math.pi
    h = (te-ts)/float(N)
    t = ts;
    x = x_0
    v = v_0
    while (t < te):
        # Classic fourth-order Runge-Kutta stages (each k already carries
        # a factor of h).
        kv1 = -h*x
        kx1 = h*v
        kv2 = -h*(x+kx1/2)
        kx2 = h*(v+kv1/2)
        kv3 = -h*(x+kx2/2)
        kx3 = h*(v+kv2/2)
        # Bug fix: the fourth stage is evaluated at the FULL step
        # (x+kx3, v+kv3), not at the half step, otherwise the scheme
        # is not fourth order.
        kv4 = -h*(x+kx3)
        kx4 = h*(v+kv3)
        #Write the old values to file
        output(t,x,v)
        #Update
        x = x + (kx1 + 2*(kx2+kx3) + kx4)/6
        v = v + (kv1 + 2*(kv2+kv3) + kv4)/6
        t = t+h
def output(t, x, v):
    """Append one sample line: t, x, v, the exact solution cos(t), and the
    deviation of the total energy from its initial value E0."""
    energy_drift = 0.5 * x ** 2 + 0.5 * v ** 2 - E0
    line = "%15.8E %15.8E %15.8E %15.8E %15.8E\n" % (t, x, v, math.cos(t), energy_drift)
    ofile.write(line)
#MAIN PROGRAM:
#Get input: output filename, initial position/velocity, number of steps
if len(sys.argv) == 5:
    ofilename = sys.argv[1]
    x_0 = float(sys.argv[2])
    v_0 = float(sys.argv[3])
    N = int(sys.argv[4])
else:
    # Bug fix: print is a function in Python 3 (the declared kernel);
    # the old statement form was a syntax error.
    print("Usage:", sys.argv[0], "ofilename x0 v0 N")
    sys.exit(0)
#Setup: open the output file and record the initial total energy
ofile = open(ofilename, 'w')
E0 = 0.5*x_0**2+0.5*v_0**2
#Run simulation
sim(x_0, v_0, N)
#Cleanup
ofile.close()
# -
# ## The classical pendulum and scaling the equations
#
# The angular equation of motion of the pendulum is given by
# Newton's equation and with no external force it reads
# <!-- Equation labels as ordinary links -->
# <div id="_auto23"></div>
#
# $$
# \begin{equation}
# ml\frac{d^2\theta}{dt^2}+mgsin(\theta)=0,
# \label{_auto23} \tag{38}
# \end{equation}
# $$
# with an angular velocity and acceleration given by
# <!-- Equation labels as ordinary links -->
# <div id="_auto24"></div>
#
# $$
# \begin{equation}
# v=l\frac{d\theta}{dt},
# \label{_auto24} \tag{39}
# \end{equation}
# $$
# and
# <!-- Equation labels as ordinary links -->
# <div id="_auto25"></div>
#
# $$
# \begin{equation}
# a=l\frac{d^2\theta}{dt^2}.
# \label{_auto25} \tag{40}
# \end{equation}
# $$
# ## More on the Pendulum
#
# We do however expect that the motion will gradually come to an end due to a viscous drag torque acting on the pendulum.
# In the presence of the drag, the above equation becomes
# <!-- Equation labels as ordinary links -->
# <div id="eq:pend1"></div>
#
# $$
# \begin{equation}
# ml\frac{d^2\theta}{dt^2}+\nu\frac{d\theta}{dt} +mgsin(\theta)=0, \label{eq:pend1} \tag{41}
# \end{equation}
# $$
# where $\nu$ is now a positive constant parameterizing the viscosity
# of the medium in question. In order to maintain the motion against
# viscosity, it is necessary to add some external driving force.
# We choose here a periodic driving force. The last equation becomes then
# <!-- Equation labels as ordinary links -->
# <div id="eq:pend2"></div>
#
# $$
# \begin{equation}
# ml\frac{d^2\theta}{dt^2}+\nu\frac{d\theta}{dt} +mgsin(\theta)=Asin(\omega t), \label{eq:pend2} \tag{42}
# \end{equation}
# $$
# with $A$ and $\omega$ two constants representing the amplitude and
# the angular frequency respectively. The latter is called the driving frequency.
#
#
#
#
# ## More on the Pendulum
#
# We define
# $$
# \omega_0=\sqrt{g/l},
# $$
# the so-called natural frequency and the new dimensionless quantities
# $$
# \hat{t}=\omega_0t,
# $$
# with the dimensionless driving frequency
# $$
# \hat{\omega}=\frac{\omega}{\omega_0},
# $$
# and introducing the quantity $Q$, called the *quality factor*,
# $$
# Q=\frac{mg}{\omega_0\nu},
# $$
# and the dimensionless amplitude
# $$
# \hat{A}=\frac{A}{mg}
# $$
# ## More on the Pendulum
#
# We have
# $$
# \frac{d^2\theta}{d\hat{t}^2}+\frac{1}{Q}\frac{d\theta}{d\hat{t}}
# +sin(\theta)=\hat{A}cos(\hat{\omega}\hat{t}).
# $$
# This equation can in turn be recast in terms of two coupled first-order differential equations as follows
# $$
# \frac{d\theta}{d\hat{t}}=\hat{v},
# $$
# and
# $$
# \frac{d\hat{v}}{d\hat{t}}=-\frac{\hat{v}}{Q}-sin(\theta)+\hat{A}cos(\hat{\omega}\hat{t}).
# $$
# These are the equations to be solved. The factor $Q$ represents the number of oscillations of the undriven system that must occur before its energy is significantly reduced due to the viscous drag. The amplitude $\hat{A}$ is measured in units of the maximum possible gravitational torque while $\hat{\omega}$ is the angular frequency of the external torque measured in units of the pendulum's natural frequency.
|
doc/pub/harmonic/ipynb/.ipynb_checkpoints/harmonic-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # flavio tutorial
#
# ## Part 3: Wilson coefficients
# + [markdown] slideshow={"slide_type": "slide"}
# ### Assumptions about new physics (NP) in flavio
#
# - NP enters observables via local interactions (operators) among known particles
# - NP only affects *predictions*, not *measurements*
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Operators & Wilson coefficients
#
# - The *Wilson coefficients* are the couplings strengths of the local operators
# - NP is dealt with in flavio by providing the numerical values of these Wilson coefficients
# - In flavour physics we usually deal with Wilson coefficients in the weak effective theory (WET) below the electroweak scale
# - Electroweak precision observables or observables in Higgs physics cannot be defined in the WET and require the Standard Model effective field theory (SMEFT) above the electroweak scale
# - SMEFT can also be used for flavour physics, in which case the running and matching to the WET is done automatically
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Hierarchy of effective field theories (EFTs)
#
# 
# + [markdown] slideshow={"slide_type": "slide"}
# ## Wilson coefficient exchange format (WCxf)
#
# Before discussing how to specify Wilson coefficients in flavio, let's have a look at a **new** data format defined in this write-up:
#
# > "WCxf: an exchange format for Wilson coefficients beyond the Standard Model"
# > <NAME>.
# > [arXiv:1712.05298](https://arxiv.org/abs/1712.05298)
#
# <https://wcxf.github.io>
# + [markdown] slideshow={"slide_type": "subslide"}
# ### WCxf: motivation
#
# - WCxf allows to unambiguously define EFTs and bases of Wilson coefficients by depositing YAML definition files in a public repository
# - This allows codes like flavio to work with different bases, conventions, and even EFTs
# - flavio defines its own WCxf basis for WET, WET-4, and WET-3
#
# See <https://wcxf.github.io/bases.html>
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Basis facts about WCs in WCxf and flavio
#
# - Wilson coefficients are always numbers (not matrices)
# - can be real or complex depending on operator
# - Wilson coefficients refer to **NP contributions only**, i.e. vanish in the SM
#
# Example: `C9_bsmumu = -1` means that $C_9=C_9^\text{SM} - 1$
#
# - Dimensionful coefficients are in appropriate powers of GeV
# + [markdown] slideshow={"slide_type": "slide"}
# ## `Wilson` class
#
# - Implementation of Wilson coefficients is based on the `Wilson` class of the Python package `wilson`
# - `wilson` is automatically installed together with `flavio`
# - `wilson` uses WCxf and can deal with various EFTs and bases
# - Running in the SMEFT and WET as well as matching between them is also done by `wilson`
# + [markdown] slideshow={"slide_type": "subslide"}
# We can define a Wilson coefficient in terms of an instance of the class `Wilson` from the `wilson` packages
# + slideshow={"slide_type": "-"}
# Construct new-physics Wilson coefficients in the WET 'flavio' basis at a
# scale of 160 GeV (values are NP contributions only, vanishing in the SM).
from wilson import Wilson
w = Wilson({'C9_bsmumu': -1, 'CVLR_bsbs': 1e-6}, scale=160, eft='WET', basis='flavio')
w  # notebook rich display of the Wilson object
# + [markdown] slideshow={"slide_type": "subslide"}
# A `Wilson` instance stores the Wilson coefficient in the attribute `wc` in terms of an instance of the `WC` class
# + slideshow={"slide_type": "-"}
w.wc
# + [markdown] slideshow={"slide_type": "subslide"}
# The `match_run` method of a `Wilson` instance can be used to run the Wilson coefficients to a different scale (or match them to a different EFT)
# -
# Run the coefficients down to 4.2 GeV, keeping only the 'sbsb' sector.
wc = w.match_run(scale=4.2, eft='WET', basis='flavio', sectors=('sbsb',))
wc  # note: match_run returns a WC instance, not a Wilson instance
# + [markdown] slideshow={"slide_type": "fragment"}
# The `match_run` method returns a `WC` instance
# + [markdown] slideshow={"slide_type": "subslide"}
# The `WC` instance can be used to construct a `wilson` instance
# + slideshow={"slide_type": "-"}
Wilson.from_wc(wc)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Using SMEFT
#
# The `wilson` Python package allows to automatically match SMEFT Wilson coefficients to the WET in the flavio basis.
#
# Example: $O_{qq}^{(1), 2323} = \left( \bar q_2 \gamma_\mu q_3 \right) \left( \bar q_2 \gamma^\mu q_3 \right)$
# -
# SMEFT coefficient qq1_2323 in the Warsaw basis at 160 GeV, matched onto
# the WET in the flavio basis at the same scale.
w_smeft = Wilson({'qq1_2323': 1e-6}, scale=160,eft='SMEFT', basis='Warsaw')
wc_wet = w_smeft.match_run(scale=160, eft='WET', basis='flavio')
w_wet = Wilson.from_wc(wc_wet)
# + slideshow={"slide_type": "-"}
w_smeft  # the SMEFT input
# + slideshow={"slide_type": "subslide"}
w_wet  # the resulting WET coefficients after matching
# + [markdown] slideshow={"slide_type": "slide"}
# ## NP predictions
#
# Having defined a `Wilson` instance, predictions can be computed in the presence of NP.
#
# Example: $R_K=\text{BR}(B\to K\mu\mu)/\text{BR}(B\to Kee)$
# -
import flavio
# SM prediction for R_K = BR(B->K mu mu)/BR(B->K ee) in the 1-6 GeV^2 bin.
flavio.sm_prediction('<Rmue>(B+->Kll)', q2min=1, q2max=6)
# NP benchmark: shift C9 for muons only, defined at the b-quark scale.
w_np = Wilson({'C9_bsmumu': -1.2}, scale=4.2, eft='WET', basis='flavio')
flavio.np_prediction('<Rmue>(B+->Kll)', q2min=1, q2max=6, wc_obj=w_np)
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Impact of NP on uncertatinties
#
# In $R_K$, form factor uncertainties cancel out exactly in the SM, but not in the presence of NP in numerator *or* denominator *only*
# -
flavio.sm_uncertainty('<Rmue>(B+->Kll)', q2min=1, q2max=6)  # small: form factors cancel in the SM
flavio.np_uncertainty('<Rmue>(B+->Kll)', q2min=1, q2max=6, wc_obj=w_np)  # larger: cancellation broken by NP in one mode
# + [markdown] slideshow={"slide_type": "slide"}
# ## Interfacing with other codes
#
# - In flavio, NP always enters through Wilson coefficients of local operators
# - To investigate dynamical NP *models*, Wilson coefficienst can be computed "by hand" or with a number of public codes
# - Some codes already support WCxf, e.g.: `SARAH`/`FlavorKit`, `SPheno`, `FormFlavor`
# (for full list of codes see https://wcxf.github.io/codes.html)
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Example: importing a WCxf file
# -
# Import Wilson coefficients from a WCxf YAML file.
with open('files/wcxf-flavio-example.yml', 'r') as f:
    w = Wilson.load_wc(f)
w  # the imported coefficients
# + [markdown] slideshow={"slide_type": "slide"}
# ### Exercise:
#
# add the binned prediction in the presence of new physics to your $P_5'$ plot
#
# + [markdown] slideshow={"slide_type": "slide"}
# Next: <a href="4 Likelihoods.ipynb">Likelihoods</a>
|
3 Wilson coefficients.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Write a program that asks for a filename, then opens that file and reads inside the file by searching for lines of the file:
#
# X-DSPAM-Confidence: 0.8475
# Count all rows and keep the Float values you find. Then calculate the mean of these values. Then create a printout as shown below. Do not use the function sum () or a variable named sum in your solution.
#
#
#
# Desired Output:
#
# Average X-DSPAM-Confidence: 0.7507185185185187
# Average the X-DSPAM-Confidence header values found in the mailbox file.
prefix = "X-DSPAM-Confidence:"
totalValue = 0.0
counter = 0
fname = "mbox-short.txt"
with open(fname) as fh:  # context manager guarantees the file is closed
    for line in fh:
        if not line.startswith(prefix):
            continue
        # Bug fix: parse everything after the header name instead of
        # slicing from the first '0' character, which fails for values
        # such as "1.0000". (The per-line debug print is also removed so
        # the output matches the desired printout.)
        totalValue += float(line[len(prefix):])
        counter += 1
if counter:
    print("Average X-DSPAM-Confidence:", totalValue / counter)
else:
    # Guard against ZeroDivisionError when no matching lines exist.
    print("No X-DSPAM-Confidence lines found in", fname)
|
3.3_codeHS.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Build 20 random (name, value) records and order them by name.
from collections import namedtuple
import random

users = 'ABCD'
Entry = namedtuple('Entry', ['name', 'value'])
# One random.choice followed by one random.randrange per record, exactly as
# in the explicit loop version, so the generated sequence is identical.
lst = [Entry(random.choice(users), random.randrange(101, 222)) for _ in range(20)]
lst.sort(key=lambda entry: entry.name)
lst
# -
# ##### How to find complete neighborhoods
# Given a set of objects with the properties "name" and some "value" I need to find out which full set of objects are close together.
# This example should make things clearer. The example objects are:
#
# +
from collections import namedtuple
# A labeled measurement: the series it belongs To ('name') and its value.
Entry = namedtuple('Entry',['name', 'value'])
# Example data: each name has values at several scales (tens, thousands,
# ten-thousands). The task described below is to pick one value per name
# such that the chosen values cluster together.
entries = [
    Entry(name='A', value=12),
    Entry(name='A', value=1012),
    Entry(name='A', value=10012),
    Entry(name='B', value=12),
    Entry(name='B', value=13),
    Entry(name='B', value=1013),
    Entry(name='B', value=10013),
    Entry(name='C', value=14),
    Entry(name='C', value=1014),
    Entry(name='C', value=10014),
    Entry(name='D', value=15),
    Entry(name='D', value=10016),
    Entry(name='D', value=10017),
]
# Result should be
# NOTE(review): D value 1016 does not occur in `entries` (D only has 15,
# 10016, 10017) — presumably 10016 was intended; confirm.
[
    Entry(name='A', value=1012),
    Entry(name='B', value=1013),
    Entry(name='C', value=1014),
    Entry(name='D', value=1016),
]
# How to find the group of all nearest neighbors
# How to find the group of all nearest neighbors
# -
# So the algorithm should find groups like these:
#
#     A12 B12 C14 D15
# A12 B13 C14 D15
# A10012 B10013 C10014 D10016
# A10012 B10013 C10014 D10017
#
# But not A1012 B1013 C1014 D15, because the D object would be way too far apart.
#
# Ideally the algorithm would return just one full group of ABCD objects which are closest together "naturally", so they would be A10012, B10013, C10014 and D10016, since they are "naturally closer" together than A12 B12 C14 D15.
#
# My main problem is that any recursive loops are taking way too much time, since in reality I have about 20 different names where each of those named objects have about 100 different values.
#
# Which functions in scipy/numpy or similar libraries could I use to solve my problem?
import statistics

# Quick sanity check: the sample standard deviation of three consecutive
# integers is exactly 1.0, so these mid-range values cluster very tightly.
statistics.stdev((1012, 1013, 1014))
# # stackoverflow posting
# https://stackoverflow.com/questions/67723507/
# Given a set of objects with the properties "name" and some "value" I need to find out which full set of objects are close together.
# This example should make things clearer. The example objects are (in Python):
#
# from collections import namedtuple
#
# Entry = namedtuple('Entry',['name', 'value'])
#
# entries = [
# Entry(name='A', value=12),
# Entry(name='A', value=1012),
# Entry(name='A', value=10012),
#
# Entry(name='B', value=12),
# Entry(name='B', value=13),
# Entry(name='B', value=1013),
# Entry(name='B', value=10013),
#
# Entry(name='C', value=14),
# Entry(name='C', value=1014),
# Entry(name='C', value=10014),
#
# Entry(name='D', value=15),
# Entry(name='D', value=10016),
# Entry(name='D', value=10017),
# ]
#
# So the algorithm should find fully populated groups (i.e. containing all "names") like these:
#
#     A-12 B-12 C-14 D-15
# A-12 B-13 C-14 D-15
# A-10012 B-10013 C-10014 D-10016
# A-10012 B-10013 C-10014 D-10017
#
# But not `A-1012 B-1013 C-1014 D-15`, because the `D` object would be way too far apart from the other three objects.
#
# **Ideally** the algorithm would return just one full group of `A B C D` objects which are closest together "naturally", so they would be `A-10012 B-10013 C-10014 D-10016`, since they are "naturally closer" together than `A-12 B-12 C-14 D-15`.
#
# My main problem is that any recursive loops I tried are taking way too much time, since in reality I have about 50 different names where each of those named objects have about 100 different values.
#
# Which functions in scipy/numpy or similar libraries could I use to solve (or just get near to a solution to) my problem?
|
devstuff/nearest_neighbors.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="dOdCkKKdx_Fq"
# _Lambda School Data Science — Model Validation_
#
# # Begin the modeling process
#
# Objectives
# - Train/Validate/Test split
# - Cross-Validation
# - Begin with baselines
# + [markdown] colab_type="text" id="UJBBzSCmKtoC"
# ## Why care about model validation?
# + [markdown] colab_type="text" id="GenzpPA2Z_dt"
# <NAME>, [How (and why) to create a good validation set](https://www.fast.ai/2017/11/13/validation-sets/)
#
# > An all-too-common scenario: a seemingly impressive machine learning model is a complete failure when implemented in production. The fallout includes leaders who are now skeptical of machine learning and reluctant to try it again. How can this happen?
#
# > One of the most likely culprits for this disconnect between results in development vs results in production is a poorly chosen validation set (or even worse, no validation set at all).
# + [markdown] colab_type="text" id="YRu59wbmLMBa"
# <NAME>, [Winning Data Science Competitions](https://www.slideshare.net/OwenZhang2/tips-for-data-science-competitions/8)
#
# > Good validation is _more important_ than good models.
# + [markdown] colab_type="text" id="3dHlyVq2KNZY"
# <NAME>, <NAME>, [An Introduction to Statistical Learning](http://www-bcf.usc.edu/~gareth/ISL/), Chapter 2.2, Assessing Model Accuracy
#
# > In general, we do not really care how well the method works training on the training data. Rather, _we are interested in the accuracy of the predictions that we obtain when we apply our method to previously unseen test data._ Why is this what we care about?
#
# > Suppose that we are interested test data in developing an algorithm to predict a stock’s price based on previous stock returns. We can train the method using stock returns from the past 6 months. But we don’t really care how well our method predicts last week’s stock price. We instead care about how well it will predict tomorrow’s price or next month’s price.
#
# > On a similar note, suppose that we have clinical measurements (e.g. weight, blood pressure, height, age, family history of disease) for a number of patients, as well as information about whether each patient has diabetes. We can use these patients to train a statistical learning method to predict risk of diabetes based on clinical measurements. In practice, we want this method to accurately predict diabetes risk for _future patients_ based on their clinical measurements. We are not very interested in whether or not the method accurately predicts diabetes risk for patients used to train the model, since we already know which of those patients have diabetes.
# + [markdown] colab_type="text" id="JgayVUFCyC7f"
# ### We'll look at 4 methods of model validation
#
# - Performance estimation
# - 2-way holdout method (**train/test split**)
# - (Repeated) k-fold **cross-validation without independent test set**
# - Model selection (hyperparameter optimization) and performance estimation ← ***We usually want to do this***
# - 3-way holdout method (**train/validation/test split**)
# - (Repeated) k-fold **cross-validation with independent test set**
#
# <img src="https://sebastianraschka.com/images/blog/2018/model-evaluation-selection-part4/model-eval-conclusions.jpg" width="600">
#
# Source: https://sebastianraschka.com/blog/2018/model-evaluation-selection-part4.html
# + [markdown] colab_type="text" id="X0OFS5mjRTm9"
# ## Why begin with baselines?
# + [markdown] colab_type="text" id="O8wqAD-E3Dhf"
# [My mentor](https://www.linkedin.com/in/jason-sanchez-62093847/) [taught me](https://youtu.be/0GrciaGYzV0?t=40s):
#
# >***Your first goal should always, always, always be getting a generalized prediction as fast as possible.*** You shouldn't spend a lot of time trying to tune your model, trying to add features, trying to engineer features, until you've actually gotten one prediction, at least.
#
# > The reason why that's a really good thing is because then ***you'll set a benchmark*** for yourself, and you'll be able to directly see how much effort you put in translates to a better prediction.
#
# > What you'll find by working on many models: some effort you put in, actually has very little effect on how well your final model does at predicting new observations. Whereas some very easy changes actually have a lot of effect. And so you get better at allocating your time more effectively.
#
# My mentor's advice is echoed and elaborated in several sources:
#
# [Always start with a stupid model, no exceptions](https://blog.insightdatascience.com/always-start-with-a-stupid-model-no-exceptions-3a22314b9aaa)
#
# > Why start with a baseline? A baseline will take you less than 1/10th of the time, and could provide up to 90% of the results. A baseline puts a more complex model into context. Baselines are easy to deploy.
#
# [Measure Once, Cut Twice: Moving Towards Iteration in Data Science](https://blog.datarobot.com/measure-once-cut-twice-moving-towards-iteration-in-data-science)
#
# > The iterative approach in data science starts with emphasizing the importance of getting to a first model quickly, rather than starting with the variables and features. Once the first model is built, the work then steadily focuses on continual improvement.
#
# [*Data Science for Business*](https://books.google.com/books?id=4ZctAAAAQBAJ&pg=PT276), Chapter 7.3: Evaluation, Baseline Performance, and Implications for Investments in Data
#
# > *Consider carefully what would be a reasonable baseline against which to compare model performance.* This is important for the data science team in order to understand whether they indeed are improving performance, and is equally important for demonstrating to stakeholders that mining the data has added value.
# + [markdown] colab_type="text" id="89PieuUx0fbN"
# ## Baseline is an overloaded term
#
# Baseline has multiple meanings, as discussed in the links above.
#
# #### The score you'd get by guessing a single value
#
# > A baseline for classification can be the most common class in the training dataset.
#
# > A baseline for regression can be the mean of the training labels. —[<NAME>](https://twitter.com/koehrsen_will/status/1088863527778111488)
#
# #### The score you'd get by guessing in a more granular way
#
# > A baseline for time-series regressions can be the value from the previous timestep.
#
# #### Fast, first models that beat guessing
#
# What my mentor was talking about.
#
# #### Complete, tuned "simpler" model
#
# Can be simpler mathematically and computationally. For example, Logistic Regression versus Deep Learning.
#
# Or can be simpler for the data scientist, with less work. For example, a model with less feature engineering versus a model with more feature engineering.
#
# #### Minimum performance that "matters"
#
# To go to production and get business value.
#
# #### Human-level performance
#
# Your goal may to be match, or nearly match, human performance, but with better speed, cost, or consistency.
#
# Or your goal may to be exceed human performance.
#
# + [markdown] colab_type="text" id="EdHSPwdZx3nI"
# ## Weather data — mean baseline
#
# Let's try baselines for regression.
#
# You can [get Past Weather by Zip Code from Climate.gov](https://www.climate.gov/maps-data/dataset/past-weather-zip-code-data-table). I downloaded the data for my town: Normal, Illinois.
# + colab={"base_uri": "https://localhost:8080/", "height": 290} colab_type="code" id="2O3o-fTgx-93" outputId="17e32da1-7a6e-4cc6-f05e-f69048f7e282"
# Render matplotlib figures inline in the notebook.
# %matplotlib inline
import matplotlib.pyplot as plt
import pandas as pd
# Daily weather observations for Normal, IL. Indexing by DATE enables the
# year-based slice used below.
url = 'https://raw.githubusercontent.com/rrherr/baselines/master/weather/weather-normal-il.csv'
weather = pd.read_csv(url, parse_dates=['DATE']).set_index('DATE')
# Plot four years of daily high temperatures (TMAX).
weather['2015':'2018'].TMAX.plot()
plt.title('Daily high temperature in Normal, IL');
# + [markdown] colab_type="text" id="F8Z8FTwbzfol"
# Over the years, across the seasons, the average daily high temperature in my town is about 63 degrees.
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="PEKdZ7YZzbnO" outputId="70990740-e9ad-48a2-c52d-c8a1880ec16a"
# Mean daily high over the whole record — the "guess one number" baseline.
weather.TMAX.mean()
# + [markdown] colab_type="text" id="p5bzsFbfznUt"
# If I predicted that every day, the high will be 63 degrees, I'd be off by about 19 degrees on average.
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="rpf8ubzaziiY" outputId="e464085b-33fc-4d4c-ba60-4c92c160bc0a"
from sklearn.metrics import mean_absolute_error
# Predict the overall mean for every day, then measure how far off that
# constant prediction is on average.
predicted = [weather.TMAX.mean()] * len(weather)
mean_absolute_error(weather.TMAX, predicted)
# + [markdown] colab_type="text" id="sJi-_JL1zywP"
# But, with time series data like this, we can get a better baseline.
#
# *Data Science for Business* explains,
#
# > Weather forecasters have two simple—but not simplistic—baseline models that they compare against. ***One (persistence) predicts that the weather tomorrow is going to be whatever it was today.*** The other (climatology) predicts whatever the average historical weather has been on this day from prior years. Each model performs considerably better than random guessing, and both are so easy to compute that they make natural baselines of comparison. Any new, more complex model must beat these.
# + [markdown] colab_type="text" id="VETZ_gJhz5qR"
# Let's predict that the weather tomorrow is going to be whatever it was today. Which is another way of saying that the weather today is going to be whatever it was yesterday.
#
# We can engineer this feature with one line of code, using the pandas [`shift`](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.shift.html) function.
#
# This new baseline is off by less than 6 degress on average.
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="1DKR_LoCzp6V" outputId="b2cde72d-1e28-42e8-9fd6-3ea499050683"
# Persistence baseline: predict today's high with yesterday's high.
weather['TMAX_yesterday'] = weather.TMAX.shift(1)
weather.dropna(inplace=True) # Drops the first date, because it doesn't have a "yesterday"
mean_absolute_error(weather.TMAX, weather.TMAX_yesterday)
# + [markdown] colab_type="text" id="FhRix-ZG36c-"
# ## Adult Census Income — Train/Test Split — majority class baseline
# + [markdown] colab_type="text" id="fHtoULts4TBE"
# Load the data. It has already been split into train and test.
#
# https://archive.ics.uci.edu/ml/datasets/adult
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="UZX4c6Gjx9pf" outputId="88a3ab3e-5bf4-4045-b0d7-7f1ae9d134cb"
# Column names for the UCI Adult dataset (the raw files have no header row).
names = ['age',
         'workclass',
         'fnlwgt',
         'education',
         'education-num',
         'marital-status',
         'occupation',
         'relationship',
         'race',
         'sex',
         'capital-gain',
         'capital-loss',
         'hours-per-week',
         'native-country',
         'income']
# adult.data / adult.test come pre-split; skiprows=[0] skips the first,
# non-data line of adult.test.
train = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data', header=None, names=names)
test = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.test', header=None, names=names, skiprows=[0])
train.shape, test.shape
# + [markdown] colab_type="text" id="hZqdQTlb4ybZ"
# Assign to X and y
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="mQVulKmi4pC0" outputId="c767a215-c224-4f0d-8f0a-149500de2ded"
# Features = every column except income; target = True when income >50K.
X_train = train.drop(columns='income')
y_train = train.income == ' >50K'
# Note: labels in the test file carry a trailing period (' >50K.'),
# hence the different comparison string.
X_test = test.drop(columns='income')
y_test = test.income == ' >50K.'
X_train.shape, X_test.shape, y_train.shape, y_test.shape
# + [markdown] colab_type="text" id="NQ2Ibkax5DOw"
# Majority class baseline
# + colab={} colab_type="code" id="GqfFfKEm42b5"
import numpy as np
# Majority-class baseline: predict the most frequent training label for
# every test example.
majority_class = y_train.mode()[0]
y_pred = np.full(shape=y_test.shape, fill_value=majority_class)
# + [markdown] colab_type="text" id="KNMUHV3W5W5V"
# `y_pred` has the same shape as `y_test`
# + colab={} colab_type="code" id="A6mIx4PY5Qeo"
# Sanity check: predictions align one-to-one with the test labels.
y_pred.shape, y_test.shape
# + [markdown] colab_type="text" id="mWG0oTHD5eBw"
# all predictions are the majority class
# + colab={} colab_type="code" id="Q3IIjxGt5WVZ"
# Every prediction is the single majority class.
all(y_pred == majority_class)
# + [markdown] colab_type="text" id="tUsgtt5T5qjR"
# Baseline accuracy is 76% by guessing the majority class for every prediction
# + colab={} colab_type="code" id="LGm7MelQ5c8w"
from sklearn.metrics import accuracy_score
# Baseline accuracy — the score any real model must beat.
accuracy_score(y_test, y_pred)
# + [markdown] colab_type="text" id="Crq-dk5u57o6"
# This makes sense, because the majority class occurs 76% of the time in the test dataset
# + colab={} colab_type="code" id="0FJBFVUE5r4q"
# Class balance of the test labels; the majority share equals the
# majority-class baseline accuracy.
y_test.value_counts(normalize=True)
# + [markdown] colab_type="text" id="886PbzRJ6USc"
# ## Adult Census Income — Train/Test Split — fast first models
# + [markdown] colab_type="text" id="TgsO1op66j3b"
# Scikit-learn expects no nulls...
# + colab={} colab_type="code" id="cyfwVBt354U1"
def no_nulls(df):
    """Return True when *df* contains no missing (NaN/None) values."""
    null_counts = df.isnull().sum()
    return not null_counts.any()
# + [markdown] colab_type="text" id="tbVhqsUP622c"
# Scikit-learn also expects all numeric features. (No strings / "object" datatypes.) ...
# + colab={} colab_type="code" id="VqOoYwO16vmu"
def all_numeric(df):
    """Return True when every column of *df* has a numeric dtype."""
    # Local import keeps the notebook cell self-contained.
    from pandas.api.types import is_numeric_dtype
    for column in df:
        if not is_numeric_dtype(df[column]):
            return False
    return True
# + [markdown] colab_type="text" id="16s97jma7ULN"
# Instead of encoding nonnumeric features, we can just exclude them from the training data.
# + colab={} colab_type="code" id="V34xVYqD7Ayb"
# Keep only numeric columns instead of encoding the categorical ones.
X_train_numeric = X_train.select_dtypes(np.number)
all_numeric(X_train_numeric)
# + [markdown] colab_type="text" id="0KIrZHZm8pXn"
# We'll do the same with the test data.
# + colab={} colab_type="code" id="RehsWU5t8na0"
# Apply the same numeric-only column selection to the test features.
X_test_numeric = X_test.select_dtypes(np.number)
# + [markdown] colab_type="text" id="gEmZztuZ71zC"
# Then fit a Logistic Regression on the training data (only the numeric features).
#
# Test accuracy improves to 80%
# + colab={} colab_type="code" id="nlSO3C9t7ogI"
from sklearn.linear_model import LogisticRegression
# First real model: logistic regression on the numeric features only.
model = LogisticRegression(solver='lbfgs')
model.fit(X_train_numeric, y_train)
y_pred = model.predict(X_test_numeric)
accuracy_score(y_test, y_pred)
# + [markdown] colab_type="text" id="BKKgC2XJ8LfH"
# Let's try scaling our data first.
#
# Test accuracy improves to 81%
# + colab={} colab_type="code" id="KkYRW6Jm77Y0"
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
# Standardize before fitting; the pipeline ties the scaler's fitted
# statistics to the model, so the test set is transformed with
# training-set parameters.
pipeline = make_pipeline(StandardScaler(), LogisticRegression(solver='lbfgs'))
pipeline.fit(X_train_numeric, y_train)
y_pred = pipeline.predict(X_test_numeric)
accuracy_score(y_test, y_pred)
# + [markdown] colab_type="text" id="gGsNNA9k9BMr"
# One-hot-encode all the categorical featues.
# + [markdown] colab_type="text" id="wrIvN4X89dSJ"
# Install the Category Encoder library
#
# https://github.com/scikit-learn-contrib/categorical-encoding
# + colab={} colab_type="code" id="5tpuEpDa9YFr"
import category_encoders as ce
# + [markdown] colab_type="text" id="0LGitjUH9zFh"
# ##### Add the library's OneHotEncoder to a pipeline.
#
# Before, we fit on `X_train_numeric` and predict on `X_test_numeric`.
#
# Now, we fit on `X_train` and predict on `X_test` (the original dataframes which include categorical columns)
#
# Test accuracy improves to 85%
# + colab={} colab_type="code" id="s-WbXKo_9caL"
# Full pipeline: one-hot-encode the categorical columns, standardize, then
# fit logistic regression on the original (mixed-type) dataframes.
pipeline = make_pipeline(
    ce.OneHotEncoder(use_cat_names=True),
    StandardScaler(),
    LogisticRegression(solver='lbfgs')
)
pipeline.fit(X_train, y_train)
y_pred = pipeline.predict(X_test)
# y_true first, matching scikit-learn's signature and every other cell in
# this notebook (accuracy is symmetric, so the value is unchanged).
accuracy_score(y_test, y_pred)
# + [markdown] colab_type="text" id="uljTyg29_QUd"
# Visualize coefficients
# + colab={} colab_type="code" id="ExAx_96L-FPX"
# Plot each encoded feature's logistic-regression coefficient, sorted.
plt.figure(figsize=(10,30))
plt.title('Coefficients')
coefficients = pipeline.named_steps['logisticregression'].coef_[0]
# Take the column names from the encoder step so they line up with the
# coefficient vector.
feature_names = pipeline.named_steps['onehotencoder'].transform(X_train).columns
pd.Series(coefficients, feature_names).sort_values().plot.barh(color='gray');
# + [markdown] colab_type="text" id="tlFfn7Em_oag"
# ## Adult Census Income — Cross Validation with independent test set — fast first models
# + [markdown] colab_type="text" id="4SGmGHzsD4Jc"
# `cross_val_score(pipeline, X_train, y_train, cv=10)` repeats this process 10 times:
# - Use 9/10 of the training data to fit the model pipeline
# - Use 1/10 of the training data to predict and score the model pipeline
#
# The test data is not used here — it remains independent, held out.
# + colab={} colab_type="code" id="P5vYr738_Ujw"
from sklearn.model_selection import cross_val_score
# 10-fold cross-validation on the training data only; the test set stays
# held out.
scores = cross_val_score(pipeline, X_train, y_train,
                         scoring='accuracy', cv=10)
# + [markdown] colab_type="text" id="QMsxJIT4Bz7j"
# `cross_val_score(cv=10)` returns 10 scores
# + colab={} colab_type="code" id="qPMbMk2_Am4y"
# One accuracy score per fold.
scores
# + [markdown] colab_type="text" id="ATPvQ3CoBAEm"
# <img src="https://sebastianraschka.com/images/blog/2016/model-evaluation-selection-part3/loocv.png" width="400">
#
# Source: https://sebastianraschka.com/blog/2016/model-evaluation-selection-part3.html
# + [markdown] colab_type="text" id="2V7LTdNkBuCw"
# The scores have low variance.
# + colab={} colab_type="code" id="r52ICJ6OA66W"
# Fold-to-fold mean and spread of the accuracy scores.
scores.mean(), scores.std()
# + [markdown] colab_type="text" id="PCEEVP0wB9F3"
# <img src="https://sebastianraschka.com/images/blog/2016/model-evaluation-selection-part2/visual_bias_variance.png" width="400">
#
# Source: https://sebastianraschka.com/blog/2016/model-evaluation-selection-part2.html
# + [markdown] colab_type="text" id="RJWSU31EF-gY"
# ## Adult Census Income — Train/Validation/Test split — fast first models
# + [markdown] colab_type="text" id="4_95UcNHGL0H"
# How to get from a two-way split ...
# + colab={} colab_type="code" id="oKJPJ9YNCADY"
# Current two-way split shapes.
X_train.shape, X_test.shape, y_train.shape, y_test.shape
# + [markdown] colab_type="text" id="GIYe9n1CGRTq"
# ... to a three-way split?
#
# Can use the `sklearn.model_selection.train_test_split` function to split the training data into training and validation data.
# + colab={} colab_type="code" id="xj0gOpt3GKaB"
from sklearn.model_selection import train_test_split
# Carve a validation set out of the training data (sklearn's default is a
# 75/25 split).
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train)
# + colab={} colab_type="code" id="_bVXKAMaHQwu"
# Three-way split shapes: train / validation / test.
X_train.shape, X_val.shape, X_test.shape, y_train.shape, y_val.shape, y_test.shape
# + [markdown] colab_type="text" id="xwP-E21FncyL"
# Fit on the training set.
#
# Predict and score with the validation set.
#
# Do not use the test set.
# + colab={} colab_type="code" id="PCYdaXn_nDUq"
# Fit on train, score on validation; the test set is left untouched.
pipeline.fit(X_train, y_train)
y_pred = pipeline.predict(X_val)
accuracy_score(y_val, y_pred)
# + [markdown] colab_type="text" id="qWesgBNF1lqi"
# ## What to do with the test set?
#
# Hold it out. Keep it in a "vault." Don't touch it until you're done with your models.
# + [markdown] colab_type="text" id="X4-zR44iJrS0"
# Here's one way to save your test set for later, with the feather file format.
#
# https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#io-feather
# + colab={} colab_type="code" id="Rdcr60ytnQqb"
# Install the feather format so the test set can be saved to disk.
# !pip install -U feather-format
# + colab={} colab_type="code" id="U6OT4MKAJy4E"
# Save the held-out test set. reset_index(drop=True) restores a default
# RangeIndex — presumably because feather can't store a custom index;
# TODO confirm against the feather docs.
X_test.reset_index(drop=True).to_feather('X_test.feather')
pd.DataFrame(y_test).reset_index(drop=True).to_feather('y_test.feather')
# + [markdown] colab_type="text" id="-vqa0dYPJ5Zs"
# You can save the files from Google Colab to your local machine
# + colab={} colab_type="code" id="_sNBr8djJ3hO"
# Download the saved files from the Colab VM to the local machine.
from google.colab import files
files.download('X_test.feather')
files.download('y_test.feather')
# + [markdown] colab_type="text" id="DanimdzNJ-Vd"
# You can delete the variables from your notebook's runtime
# + colab={} colab_type="code" id="24YTaftEKBeP"
# Remove the test set from the notebook session so it can't be peeked at.
del X_test, y_test
# + [markdown] colab_type="text" id="xSXSLYtmJWzN"
# ## Why hold out an independent test set?
# + [markdown] colab_type="text" id="yTQB9VIaI3yu"
# <NAME>, [Winning Data Science Competitions](https://www.slideshare.net/OwenZhang2/tips-for-data-science-competitions)
#
# - There are many ways to overfit. Beware of "multiple comparison fallacy." There is a cost in "peeking at the answer."
# - Good validation is _more important_ than good models. Simple training/validation split is _not_ enough. When you looked at your validation result for the Nth time, you are training models on it.
# - If possible, have "holdout" dataset that you do not touch at all during model build process. This includes feature extraction, etc.
# - What if holdout result is bad? Be brave and scrap the project.
# + [markdown] colab_type="text" id="YR1WLnpHJQ86"
# Hastie, Tibshirani, and Friedman, [The Elements of Statistical Learning](http://statweb.stanford.edu/~tibs/ElemStatLearn/), Chapter 7: Model Assessment and Selection
#
# > If we are in a data-rich situation, the best approach is to randomly divide the dataset into three parts: a training set, a validation set, and a test set. The training set is used to fit the models; the validation set is used to estimate prediction error for model selection; the test set is used for assessment of the generalization error of the final chosen model. Ideally, the test set should be kept in a "vault," and be brought out only at the end of the data analysis. Suppose instead that we use the test-set repeatedly, choosing the model with the smallest test-set error. Then the test set error of the final chosen model will underestimate the true test error, sometimes substantially.
# + [markdown] colab_type="text" id="NZ2D0fiapf2X"
# <NAME> and <NAME>, [Introduction to Machine Learning with Python](https://books.google.com/books?id=1-4lDQAAQBAJ&pg=PA270)
#
# > The distinction between the training set, validation set, and test set is fundamentally important to applying machine learning methods in practice. Any choices made based on the test set accuracy "leak" information from the test set into the model. Therefore, it is important to keep a separate test set, which is only used for the final evaluation. It is good practice to do all exploratory analysis and model selection using the combination of a training and a validation set, and reserve the test set for a final evaluation - this is even true for exploratory visualization. Strictly speaking, evaluating more than one model on the test set and choosing the better of the two will result in an overly optimistic estimate of how accurate the model is.
# + [markdown] colab_type="text" id="v5-2adVxpCBx"
# <NAME>, [R for Data Science](https://r4ds.had.co.nz/model-intro.html#hypothesis-generation-vs.hypothesis-confirmation)
#
# > There is a pair of ideas that you must understand in order to do inference correctly:
#
# > 1. Each observation can either be used for exploration or confirmation, not both.
#
# > 2. You can use an observation as many times as you like for exploration, but you can only use it once for confirmation. As soon as you use an observation twice, you’ve switched from confirmation to exploration.
#
# > This is necessary because to confirm a hypothesis you must use data independent of the data that you used to generate the hypothesis. Otherwise you will be over optimistic. There is absolutely nothing wrong with exploration, but you should never sell an exploratory analysis as a confirmatory analysis because it is fundamentally misleading.
#
# > If you are serious about doing an confirmatory analysis, one approach is to split your data into three pieces before you begin the analysis.
# + [markdown] colab_type="text" id="ZSZBWmkMMbJR"
# ## Should you shuffle?
#
# Sometimes yes, sometimes no. Experts may seem to give conflicting advice! Partly, that perception is true. Partly, the right choices depend on your data and your goals.
#
# [Hastie, Tibshirani, and Friedman](http://statweb.stanford.edu/~tibs/ElemStatLearn/) write,
# > randomly divide the dataset into three parts: a training set, a validation set, and a test set.
#
# But [<NAME>](
# https://www.fast.ai/2017/11/13/validation-sets/) asks, "When is a random subset not good enough?" and gives many good examples.
# > If your data is a time series, choosing a random subset of the data will be both too easy (you can look at the data both before and after the dates you are trying to predict) and not representative of most business use cases (where you are using historical data to build a model for use in the future).
#
# > In the Kaggle distracted driver competition, the independent data are pictures of drivers at the wheel of a car, and the dependent variable is a category such as texting, eating, or safely looking ahead. If you were the insurance company building a model from this data, note that you would be most interested in how the model performs on drivers you haven’t seen before (since you would likely have training data only for a small group of people).
#
# > A similar dynamic was at work in the Kaggle fisheries competition to identify the species of fish caught by fishing boats in order to reduce illegal fishing of endangered populations. The test set consisted of boats that didn’t appear in the training data. This means that you’d want your validation set to include boats that are not in the training set.
#
#
#
#
# [<NAME>](https://www.slideshare.net/OwenZhang2/tips-for-data-science-competitions/10) recommends using your most recent data for your hold out test set, instead of choosing your test set with random shuffling. But he says you are free to use random splits to train and tune models.
#
# > Make validation dataset as realistic as possible. Usually this means "out-of-time" validation. You are free to use "in-time" random split to build models, tune parameters, etc. But hold out data should be out-of-time.
#
# Note that Owen Zhang's slide could be confusing, because of ambiguous terminology:
# - What he calls "validation dataset" and "hold out data" is what we're calling "test set."
# - He also says "cross-validation" is an "exception to the rule" to use "when data is extremely small." There he is talking about "Cross-validation _without_ independent test set", _not_ "Cross-validation _with_ independent test set."
# + [markdown] colab_type="text" id="ipwGmpd7Kk94"
# **Sometimes you need to shuffle, like in this next example:**
# + [markdown] colab_type="text" id="9Doto5c61sNj"
# ## Iris flowers — Train/Validation/Test split?
# + [markdown] colab_type="text" id="ow3Ev3JqK0cj"
# Load the Iris dataset
# + colab={} colab_type="code" id="ShfEoOxNKjV9"
from sklearn.datasets import load_iris
iris = load_iris()
X = iris.data
y = iris.target
# + [markdown] colab_type="text" id="Obd9wa7iK-ID"
# What would happen if you didn't shuffle this data?
# + colab={} colab_type="code" id="-TF5eIYeK_Rt"
# Display the labels — note they appear grouped by class, which is why an
# unshuffled split is discussed below.
y
# + [markdown] colab_type="text" id="ftRJA9b1LHEG"
# Let's try it!
#
# We'll do a train/validation/test split, with and without random shuffling.
#
# [<NAME>]( https://www.fast.ai/2017/11/13/validation-sets/) points out that "sklearn has a `train_test_split` method, but no `train_validation_test_split`."
#
# So we can write our own:
#
# + colab={} colab_type="code" id="8RKcKFA9Ll7z"
def train_validation_test_split(
        X, y, train_size=0.8, val_size=0.1, test_size=0.1,
        random_state=None, shuffle=True):
    """Split features and target into train / validation / test sets.

    Performs two successive ``train_test_split`` calls: the first carves
    off the test set, the second divides the remainder into train and
    validation portions.

    Parameters
    ----------
    X, y : array-like
        Features and target with matching first dimension.
    train_size, val_size, test_size : float
        Fractions of the data for each split; must sum to 1.
    random_state : int or None
        Seed forwarded to both underlying splits.
    shuffle : bool
        Whether to shuffle before splitting. Use ``False`` for ordered
        (e.g. time-based) data.

    Returns
    -------
    tuple
        ``(X_train, X_val, X_test, y_train, y_val, y_test)``

    Raises
    ------
    ValueError
        If the three size fractions do not sum to 1.
    """
    # Tolerant comparison: an exact `== 1` test can fail for perfectly
    # reasonable fractions (e.g. 0.7 + 0.15 + 0.15) due to binary float
    # rounding, and `assert` is stripped under `python -O`.
    if abs(train_size + val_size + test_size - 1.0) > 1e-9:
        raise ValueError(
            "train_size + val_size + test_size must sum to 1")
    # First split: hold out the test set.
    X_train_val, X_test, y_train_val, y_test = train_test_split(
        X, y, test_size=test_size, random_state=random_state, shuffle=shuffle)
    # Second split: divide the remainder. val_size is rescaled because
    # train_test_split interprets test_size as a fraction of its input.
    X_train, X_val, y_train, y_val = train_test_split(
        X_train_val, y_train_val, test_size=val_size / (train_size + val_size),
        random_state=random_state, shuffle=shuffle)
    return X_train, X_val, X_test, y_train, y_val, y_test
# + [markdown] colab_type="text" id="C6rtSKRjM-IJ"
# Split Iris data into train/validation/test sets, _without_ random shuffling
# + colab={} colab_type="code" id="XZH057JSLmwH"
X_train, X_val, X_test, y_train, y_val, y_test = train_validation_test_split(
X, y, shuffle=False)
# + [markdown] colab_type="text" id="GbW1sgSTUa3M"
# Look at the train, validation, and test targets
# + colab={} colab_type="code" id="kRWwDV4CLyZ2"
y_train
# + colab={} colab_type="code" id="5idO3xNcUXpe"
y_val
# + colab={} colab_type="code" id="H-uGeLEvUY1p"
y_test
# + [markdown] colab_type="text" id="0MqgrHtdUz3p"
# Split Iris data into train/validation/test sets, _with_ random shuffling
# + colab={} colab_type="code" id="LcCr7C3OUZ94"
X_train, X_val, X_test, y_train, y_val, y_test = train_validation_test_split(
X, y, shuffle=True)
# + [markdown] colab_type="text" id="FJG8yg-bVBqa"
# Look at the train, validation, and test targets
# + colab={} colab_type="code" id="R0gY3R9LUk5t"
y_train
# + colab={} colab_type="code" id="G50zgzBkUuDF"
y_val
# + colab={} colab_type="code" id="hN-767fxUvHF"
y_test
# + [markdown] colab_type="text" id="a5E-ApbhVK7S"
# That's better, but there's just not enough data for a three-way split. In fact, there's not much data for a two-way split. The Iris dataset is a rare example where you probably do want to use cross-validation _without_ an independent test set.
# + [markdown] colab_type="text" id="IlR-JFMz1uJD"
# ## Bank Marketing — shuffled or split by time?
# + [markdown] colab_type="text" id="66D5zQJZVsOw"
# https://archive.ics.uci.edu/ml/datasets/Bank+Marketing
#
# The data is related with direct marketing campaigns of a Portuguese banking institution. The marketing campaigns were based on phone calls. Often, more than one contact to the same client was required, in order to access if the product (bank term deposit) would be ('yes') or not ('no') subscribed.
#
# bank-additional-full.csv with all examples (41188) and 20 inputs, **ordered by date (from May 2008 to November 2010)**
# + colab={} colab_type="code" id="FNu4xmu1Vvtj"
# !wget https://archive.ics.uci.edu/ml/machine-learning-databases/00222/bank-additional.zip
# + colab={} colab_type="code" id="cGsCdBSoVwZU"
# !unzip bank-additional.zip
# + colab={} colab_type="code" id="FmtEAZeWVx6B"
# %cd bank-additional
# + colab={} colab_type="code" id="8ucARvXRV2VG"
# The UCI file is semicolon-delimited, hence sep=';'.
bank = pd.read_csv('bank-additional-full.csv', sep=';')
X = bank.drop(columns='y')
# Boolean target: True when the client subscribed to the term deposit.
y = bank['y'] == 'yes'
# -
bank.head()
# + [markdown] colab_type="text" id="dLpHSzvOV7VV"
# ### Shuffled split?
# + colab={} colab_type="code" id="o73gJvtCWEEP"
X_train, X_val, X_test, y_train, y_val, y_test = train_validation_test_split(X, y, shuffle=True)
# + colab={} colab_type="code" id="-1uTNhM1WHzY"
[array.shape for array in (X_train, X_val, X_test, y_train, y_val, y_test)]
# + colab={} colab_type="code" id="qLOpqjRnWIYG"
y_train.mean(), y_val.mean(), y_test.mean()
# + [markdown] colab_type="text" id="FmyRVRrhWP9W"
# ### Split by time?
# + colab={} colab_type="code" id="PJMAPBolWUFs"
X_train, X_val, X_test, y_train, y_val, y_test = train_validation_test_split(X, y, shuffle=False)
# + colab={} colab_type="code" id="vbw2Sg-JWU-b"
[array.shape for array in (X_train, X_val, X_test, y_train, y_val, y_test)]
# + colab={} colab_type="code" id="QF7DiZbFWWGo"
y_train.mean(), y_val.mean(), y_test.mean()
# + colab={} colab_type="code" id="7rWfygKtWZPg"
y.rolling(500).mean().plot();
# + [markdown] colab_type="text" id="uD3xRARIWbnW"
# ***The "right" choice here is unclear, but either way, it will make a big difference!***
# + [markdown] colab_type="text" id="Ibhax45nNcDJ"
# # ASSIGNMENT options
#
# - Replicate the lesson code. [Do it "the hard way" or with the "Benjamin Franklin method."](https://docs.google.com/document/d/1ubOw9B3Hfip27hF2ZFnW3a3z9xAgrUDRReOEo-FHCVs/edit)
# - Apply the lesson to other datasets you've worked with before, and compare results.
# - Choose how to split the Bank Marketing dataset. Train and validate baseline models.
# - Get weather data for your own area and calculate both baselines. _"One (persistence) predicts that the weather tomorrow is going to be whatever it was today. The other (climatology) predicts whatever the average historical weather has been on this day from prior years."_ What is the mean absolute error for each baseline? What if you average the two together?
# - When would this notebook's pipelines fail? How could you fix them? Add more [preprocessing](https://scikit-learn.org/stable/modules/preprocessing.html) and [imputation](https://scikit-learn.org/stable/modules/impute.html) to your [pipelines](https://scikit-learn.org/stable/modules/compose.html) with scikit-learn.
# - [This example from scikit-learn documentation](https://scikit-learn.org/stable/auto_examples/compose/plot_column_transformer_mixed_types.html) demonstrates its improved `OneHotEncoder` and new `ColumnTransformer` objects, which can replace functionality from third-party libraries like category_encoders and sklearn-pandas. Adapt this example, which uses Titanic data, to work with another dataset.
#
#
#
#
#
|
module-1-begin-modeling-process/LS_DS_241_Begin_modeling_process.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # The basics
#
# This tutorial demonstrates how to setup and solve a finite element modelling problem using OpenCMISS-Iron in python. For the purpose of this tutorial, we will be solving a Laplace equation over a 1-3 dimensional domain. In mathematics and physics, Laplace's equation is a second-order partial differential equation named after Pierre-Simon Laplace, who first studied its properties. In this basics tutorial, we will be making calls to the OpenCMISS-Iron's Python-bindings API to illustrate how we can interact with the library, while demonstrating the functionalities of OpenCMISS-Iron.
#
# See the [OpenCMISS-Iron tutorial documenation page](https://opencmiss-iron-tutorials.readthedocs.io/en/latest/tutorials.html#how-to-run-tutorials) for instructions on how to run this tutorial.
#
# ## Learning outcomes
#
# - Understand how to create a problem using OpenCMISS-Iron commands.
#
# - Solve a Laplace problem using the finite element method in OpenCMISS-Iron.
#
# ## The Laplace problem
#
# In this example we are solving the standard Laplace equation which is a member of the classical field equations set class and the Laplace equation type.
#
# $$\displaystyle \nabla ^{2}f={\frac {\partial ^{2}f}{\partial x^{2}}}+{\frac {\partial ^{2}f}{\partial y^{2}}}+{\frac {\partial ^{2}f}{\partial z^{2}}}=0.$$
#
#
# ## Conceptual visualisation of tutorial steps
#
# The figure below is a conceptual visualisation of the problem set up that this tutorial will walk you through. Here are key points that will help you interpret the diagram and appreciate the steps involved in creating a problem:
#
# 1. The numbering in the figure corresponds to the steps that we will take to set up the problem.
#
# 2. Each box represents a type of object that will be created for the problem.
#
# 3. A box within a box represents an object that is a component of the parent object. For example, the Generated Mesh object falls within the Region object because a Generated Mesh object must be contained within a Region object.
#
# 4. The arrows show the inter-object relationships. For example, a Generated Mesh object is associated with a Region object but it also needs a Basis Function object. The Decomposition object can only be created if there is a Mesh object associated with it.
#
# 5. Lets define a few objects that are shown in the figure. More details about each of the objects are given as the tutorial progresses. $f$ in the laplace equation above is the *Dependent Field* that we want to solve for over the *Geometric Field* defined by $x$, $y$, and $z$ coordinates. To solve for $f$ we need to define the *Coordinate System*, and the *Basis Functions* we want to use as the interpolation scheme. The *Region* object is a parent environment object in which the fields, geometry of the domain and equations are defined. We need to define the *mesh* that describes the geometry that we need to solve the equations over. *Equation set* and *Equations* objects that represent the discretised form, matrix form of the finite element implementation of the laplace problem are then set up using the different fields, mesh and basis functions we have defined. The *Problem* object contains information about the solution process used to solve the equations defined in the region. *Boundary Conditions* objects contain the boundary conditions that are defined for a particular simulation.
# + [markdown] pycharm={"name": "#%% md\n"}
# 
# -
# ## Loading the OpenCMISS-Iron library
#
# In order to use OpenCMISS we have to first import the opencmiss.iron module from the opencmiss package.
# Intialise OpenCMISS-Iron.
from opencmiss.iron import iron
# + [markdown] pycharm={"name": "#%% md\n"}
# Assuming OpenCMISS has been correctly built with the Python bindings by following the instructions in the installation instructions, we can now access
# all the OpenCMISS functions, classes and constants under the iron namespace. The next section describes how we can interact with the OpenCMISS-Iron library through an object-oriented API.
#
# ## Step by step guide
#
# ### 1. Creating a coordinate system
#
# First we construct a coordinate system that will be used to describe the geometry in our problem. The 3D geometry will exist in a 3D space, so we need a 3D coordinate system.
# +
# Create coordinate system object.
# User numbers uniquely identify OpenCMISS objects; re-creating an object
# with the same number requires Destroy() first (see note below).
coordinate_system_user_number = 1
# Set to 1 or 2 to run the lower-dimensional variants described at the end.
number_of_dimensions = 3
coordinate_system = iron.CoordinateSystem()
coordinate_system.CreateStart(coordinate_system_user_number)
coordinate_system.DimensionSet(number_of_dimensions)
coordinate_system.CreateFinish()
# + [markdown] pycharm={"name": "#%% md\n"}
# Note that once an OpenCMISS-Iron object has been created with a specific user number, you will need to destroy it (i.e. `coordinate_system.Destroy`) before you can recreate it again with the same user number. This means that if you are running this tutorial as a Jupyter notebook, then trying to re-run cells will result in an error saying that an 'OpenCMISS-Iron object with that username has already been created'. Simply restart the Jupyter notebook kernel (resets the python interpreter), and use the Jupyter notebook shortcut to rerun all the cells above the current point to continue with where you left off. Note that restarting the Kernel will remove all variables that you have previously created from memory).
# + [markdown] pycharm={"name": "#%% md\n"}
# ### 2. Creating basis functions
#
# The finite element description of our fields requires a basis function to interpolate field values over elements, so we create a 3D basis with linear Lagrange interpolation in both $\xi$ directions. Note that in coding practice, the greek symbol $\xi$ is represented as "Xi" ("Xi" is not read as "X sub i").
#
# - The `xi_interpolation` variable defines the basis interpolation scheme to use along each spatial direction.
# - The `number_of_guass_xi` variable defines the number of Gauss points in each finite element coordinate direction (xi) for numerical integration operations.
# + pycharm={"name": "#%%\n"}
# Define basis parameters.
# One interpolation-scheme entry and one Gauss-point count per xi
# (element coordinate) direction.
# NOTE: "guass" is a long-standing typo for "Gauss", kept for consistency.
if number_of_dimensions == 1:
    xi_interpolation = [iron.BasisInterpolationSpecifications.LINEAR_LAGRANGE]
    number_of_guass_xi = [3]
elif number_of_dimensions == 2:
    xi_interpolation = [
        iron.BasisInterpolationSpecifications.LINEAR_LAGRANGE,
        iron.BasisInterpolationSpecifications.LINEAR_LAGRANGE]
    number_of_guass_xi = [3, 3]
elif number_of_dimensions == 3:
    xi_interpolation = [
        iron.BasisInterpolationSpecifications.LINEAR_LAGRANGE,
        iron.BasisInterpolationSpecifications.LINEAR_LAGRANGE,
        iron.BasisInterpolationSpecifications.LINEAR_LAGRANGE]
    number_of_guass_xi = [3, 3, 3]
# Create basis object: tensor-product Lagrange/Hermite family, with the
# interpolation and quadrature chosen above applied per xi direction.
basis_user_number = 1
basis = iron.Basis()
basis.CreateStart(basis_user_number)
basis.TypeSet(iron.BasisTypes.LAGRANGE_HERMITE_TP)
basis.NumberOfXiSet(number_of_dimensions)
basis.InterpolationXiSet(xi_interpolation)
basis.QuadratureNumberOfGaussXiSet(number_of_guass_xi)
basis.CreateFinish()
# -
# ### 3. Creating a region
#
# Next we create a region that our fields will be defined on and tell it to use the 2D coordinate system we created previously. The CreateStart method for a region requires another region as a parameter. We use the world region that is created by default so that our region is a subregion of the world region.
# + pycharm={"name": "#%%\n"}
# Create region object.
# The region is a subregion of the default WorldRegion and uses the
# coordinate system created above; fields and equations live inside it.
region_user_number = 1
region = iron.Region()
region.CreateStart(region_user_number, iron.WorldRegion)
region.CoordinateSystemSet(coordinate_system)
region.LabelSet("Region")
region.CreateFinish()
# + [markdown] pycharm={"name": "#%% md\n"}
# ### 4. Setting up a simple cuboid mesh
#
# In this example we will use the GeneratedMesh class capabilities of OpenCMISS to create a 3D geometric mesh on which to solve the Laplace problem. We will create a regular mesh of size width x height x length and divide the mesh into a number of elements in the X, Y, and Z directions (specified in a `number_of_elements` variable). We will then tell it to use the basis we created previously:
# + pycharm={"name": "#%%\n"}
# Define mesh parameters: one element count and one extent entry per
# coordinate direction.
if number_of_dimensions == 1:
    number_of_elements = [2]
    extent = [1.0] # Along X.
# The 2D branch was a bare `if`; `elif` keeps the chain mutually
# exclusive and consistent with the basis-parameter block above.
elif number_of_dimensions == 2:
    number_of_elements = [2, 2]
    extent = [1.0, 1.0] # Along X and Y.
elif number_of_dimensions == 3:
    number_of_elements = [2, 2, 2]
    extent = [1.0, 1.0, 1.0] # Along X, Y, Z.
# Create iron.GeneratedMesh object: a regular (structured) mesh of size
# `extent`, divided into `number_of_elements` per direction.
generated_mesh_user_number = 1
generated_mesh = iron.GeneratedMesh()
generated_mesh.CreateStart(generated_mesh_user_number, region) # Notice how the mesh initialisation is associated with region.
generated_mesh.TypeSet(iron.GeneratedMeshTypes.REGULAR)
generated_mesh.BasisSet([basis]) # List form: multiple bases may be supplied.
generated_mesh.ExtentSet(extent)
generated_mesh.NumberOfElementsSet(number_of_elements)
# + [markdown] pycharm={"name": "#%% md\n"}
# Note the use of a list type to pass in the basis as an argument in the `BasisSet` method. We will see the power of this in the finite elasticity tutorial, where meshes with multiple bases will be simultaneously generated.
#
# The generated mesh is not itself a mesh, but is used to create a mesh. We construct the mesh object when we call the CreateFinish method of the generated mesh and pass in the mesh. This mesh object is just the same as if we had manually created the regular mesh.
#
# Here we have initialised a mesh but not called CreateStart or CreateFinish, instead the mesh creation is done when finishing the creation of the generated mesh.
# +
# Create mesh object from generated_mesh object.
# The mesh itself has no CreateStart/CreateFinish calls: finishing the
# generated mesh populates the mesh object's attributes.
mesh_user_number = 1
mesh = iron.Mesh()
generated_mesh.CreateFinish(mesh_user_number,mesh) #the GeneratedMesh object contains attributes that are then used to define the mesh object attributes at this line.
# + [markdown] pycharm={"name": "#%% md\n"}
# ### 5. Decomposing the mesh
#
# Once the mesh has been created we can decompose it into a number of domains in order to allow for parallelism. We choose the options to let OpenCMISS calculate the best way to break up the mesh. We also set the number of domains to be equal to the number of computational nodes this example is running on. Note that if MPI infrastructure is not used, only single domain will be created. Look for our parallelisation example for an illustration of how to execute simulations using parallel processing techniques.
# + pycharm={"name": "#%%\n"}
# Perform mesh decomposition.
# Splits the mesh into domains for parallel execution; with no MPI
# infrastructure a single domain is created (defaults are used here).
decomposition_user_number = 1
decomposition = iron.Decomposition()
decomposition.CreateStart(decomposition_user_number, mesh)
decomposition.CreateFinish()
# + [markdown] pycharm={"name": "#%% md\n"}
# ### 6. Creating a geometric field
#
# Now that the mesh has been decomposed we are in a position to create fields. The first field we need to create is the geometric field. Here we create a field and partition the field to different computational nodes using the mesh decomposition that we have just created. Once we have finished creating the field we can change the field DOFs to give us our geometry. Since the mesh has been generated we can use the generated mesh object to calculate the geometric parameters of the regular mesh.
# + pycharm={"name": "#%%\n"}
# Create a field for the geometry.
# Field component values are zero until GeometricParametersCalculate is
# called on the generated mesh (next cell).
geometric_field_user_number = 1
geometric_field = iron.Field()
geometric_field.CreateStart(geometric_field_user_number, region) #notice that the geometric field is associated with region in this function call.
geometric_field.LabelSet('Geometry')
geometric_field.MeshDecompositionSet(decomposition)
geometric_field.CreateFinish()
# -
# We have created a geometric field but all the field component values are currently set to zero by default. We can define the geometry using the generated mesh we created earlier:
# Set geometric field values from the generated mesh.
generated_mesh.GeometricParametersCalculate(geometric_field)
# + [markdown] pycharm={"name": "#%% md\n"}
# ### Visualising the geometry
#
# We now visualise the geometry using pythreejs.
# + pycharm={"name": "#%%\n"}
import sys
sys.path.insert(1, '../../tools/')
import threejs_visualiser
renderer = threejs_visualiser.visualise(
mesh, geometric_field, number_of_dimensions, xi_interpolation,
variable=iron.FieldVariableTypes.U, node_labels=True)
# + [markdown] pycharm={"name": "#%% md\n"}
# ### 7. Creating the dependent field
#
# For the Laplace equation we need a dependent field (our solution) to describe our dependent variable $f(x,y)$. Here we do not call the Field.CreateStart method to construct the dependent field ourselves: we only initialise the field object below, and OpenCMISS automatically constructs an appropriate dependent field for the Laplace equations when the equations set is created. Once the fields have been created we can set the field DOF values.
# + pycharm={"name": "#%%\n"}
# Create dependent field.
dependent_field_user_number = 2
dependent_field = iron.Field()
# -
# ### 8. Defining an equation set field
#
# We also need to create a new field called the equation set field, whose purpose is defined in the next section.
#
# +
# Initialise equation set field object.
equations_set_field_user_number = 3
equations_set_field = iron.Field()
# + [markdown] pycharm={"name": "#%% md\n"}
# ### 9. Defining the Laplace equation set
#
# We are now in a position to define the type of physics that we wish to solve. This is done by creating an equations set which is a container object for all the parameters we need to describe the physics. The specific equation set we are solving is defined by a list in the fourth argument to the CreateStart method. This list needs to contain the equations set class, type and subtype.
#
# The equation set field that we defined in the previous section is used by the OpenCMISS-Iron library to identify multiple equations sets of the same type on a region. As we only have one equation set in this example, we do not have to populate this field. All we need to do is pass the equation set field user number and the equation set field object when creating the equation set. Its field values will be automatically defined once the equation set is finalised.
# + pycharm={"name": "#%%\n"}
equations_set_user_number = 1
# Define Laplace equation specification.
# The specification list is [class, type, subtype] and selects the
# physics this equations set describes.
equations_set_specification = [
    iron.EquationsSetClasses.CLASSICAL_FIELD,
    iron.EquationsSetTypes.LAPLACE_EQUATION,
    iron.EquationsSetSubtypes.STANDARD_LAPLACE]
# Create equation set object.
# The equations-set field distinguishes multiple equation sets of the
# same type on a region; with a single set it is filled in automatically.
equations_set = iron.EquationsSet()
equations_set.CreateStart(
    equations_set_user_number, region, geometric_field,
    equations_set_specification, equations_set_field_user_number,
    equations_set_field)
# Let the equations set construct the dependent (solution) field.
equations_set.DependentCreateStart(
    dependent_field_user_number, dependent_field)
equations_set.DependentCreateFinish()
equations_set.CreateFinish()
# + [markdown] pycharm={"name": "#%% md\n"}
# Once the equations set is defined, we create the equations that use our fields to construct equations matrices and vectors.
# + pycharm={"name": "#%%\n"}
# Create equations object.
equations = iron.Equations()
equations_set.EquationsCreateStart(equations)
equations_set.EquationsCreateFinish()
# -
# We can initialise our solution with a value we think will be close to the final solution. A field in OpenCMISS can contain multiple field variables, and each field variable can have multiple components. For the standard Laplace equation, the dependent field only has a U variable which has one component. Field variables can also have different field parameter sets, for example we can store values at a previous time step in dynamic problems. In this example we are only interested in the VALUES parameter set:
# + pycharm={"name": "#%%\n"}
# Initialise dependent field.
dependent_field.ComponentValuesInitialiseDP(
iron.FieldVariableTypes.U, iron.FieldParameterSetTypes.VALUES, 1, 0.5)
# + [markdown] pycharm={"name": "#%% md\n"}
# ### 10. Defining the problem
#
# Now that we have defined all the equations we will need we can create our problem to be solved by OpenCMISS. We create a standard Laplace problem, which is a member of the classical field problem class and Laplace equation problem type:
# + pycharm={"name": "#%%\n"}
# Create problem object.
# The [class, type, subtype] specification mirrors the equations-set
# specification and determines the control loop / solver structure.
problem_user_number = 1
problem = iron.Problem()
problem_specification = [
    iron.ProblemClasses.CLASSICAL_FIELD,
    iron.ProblemTypes.LAPLACE_EQUATION,
    iron.ProblemSubtypes.STANDARD_LAPLACE]
problem.CreateStart(problem_user_number, problem_specification)
problem.CreateFinish()
# + [markdown] pycharm={"name": "#%% md\n"}
# ### 11. Defining control loops
#
# The problem type defines a control loop structure that is used when solving the problem. The OpenCMISS control loop is a "supervisor" for the computational process. We may have multiple control loops with nested sub loops, and control loops can have different types, for example load incremented loops or time loops for dynamic problems. In this example a simple, single iteration loop is created without any sub loops. If we wanted to access the control loop and modify it we would use the problem.ControlLoopGet method before finishing the creation of the control loops, but we will just leave it with the default configuration:
# + pycharm={"name": "#%%\n"}
# Create control loops.
problem.ControlLoopCreateStart()
problem.ControlLoopCreateFinish()
# + [markdown] pycharm={"name": "#%% md\n"}
# ### 12. Defining solvers
#
# After defining the problem structure we can create the solvers that will be run to actually solve our problem. The problem type defines the solvers to be set up so we call problem.SolversCreateStart() to create the solvers and then we can access the solvers to modify their properties. An iterative solver is used by default.
# + pycharm={"name": "#%%\n"}
# Create problem solvers.
# The solver is created by SolversCreateStart() and retrieved (not
# created) by SolverGet; solver index 1 of the root control loop.
solver = iron.Solver()
problem.SolversCreateStart()
problem.SolverGet([iron.ControlLoopIdentifiers.NODE], 1, solver)
# Print solver output during the solve.
solver.OutputTypeSet(iron.SolverOutputTypes.SOLVER)
problem.SolversCreateFinish()
# + [markdown] pycharm={"name": "#%% md\n"}
# Note that we initialised a solver but didn't create it directly by calling its CreateStart() method, it was created with the call to SolversCreateStart() and then we obtain it with the call to SolverGet(). If we look at the help for the SolverGet method we see it takes three parameters:
#
# controlLoopIdentifiers: A list of integers used to identify the control loop to get a solver for. This always starts with the root control loop, given by CMISS.ControlLoopIdentifiers.NODE. In this example we only have the one control loop and no sub loops.
#
# solverIndex: The index of the solver to get, as a control loop may have multiple solvers. In this case there is only one solver in our root control loop.
#
# solver: An initialised solver object that hasn't been created yet, and on return it will be the solver that we asked for.
#
# Once we've obtained the solver we then set various properties before finishing the creation of all the problem solvers. A list of solver methods to configure the solver can be found [here](http://opencmiss.org/documentation/apidoc/iron/latest/python/classiron_1_1_solver.html)
#
# ### 13. Defining solver equations
#
# After defining our solver we can create the equations for the solver to solve by adding our equations sets to the solver equations. In this example we have just one equations set to add but for coupled problems we may have multiple equations sets in the solver equations.
# + pycharm={"name": "#%%\n"}
# Create solver equations object and add equations set object to it.
# Coupled problems could add several equation sets here; this example
# has just the one Laplace set.
solver = iron.Solver()
solver_equations = iron.SolverEquations()
problem.SolverEquationsCreateStart()
problem.SolverGet([iron.ControlLoopIdentifiers.NODE], 1, solver)
solver.SolverEquationsGet(solver_equations)
solver_equations.EquationsSetAdd(equations_set)
problem.SolverEquationsCreateFinish()
# -
# ### 14. Defining the boundary conditions
#
# The final step in configuring the problem is to define the boundary conditions to be satisfied. The Dirichlet problem for Laplace's equation consists of finding a solution φ on some domain D such that φ on the boundary of D is equal to some given function. Since the Laplace operator appears in the heat equation, one physical interpretation of this problem is as follows: fix the temperature on the boundary of the domain according to the given specification of the boundary condition. Allow heat to flow until a stationary state is reached in which the temperature at each point on the domain doesn't change anymore. The temperature distribution in the interior will then be given by the solution to the corresponding Dirichlet problem.
#
# We will set the dependent field value at the first node to be 0, and at the last node to be 1.0. These nodes will correspond to opposite corners in our geometry.
#
# These values are set using the SetNode() method. The arguments to the SetNode() method are the field, field variable type, node version number, node user number, node derivative number, field component number, boundary condition type and boundary condition value. The version and derivative numbers are one as we aren't using versions and we are setting field values rather than derivative values. We can also only set derivative boundary conditions when using a Hermite basis type. There are a wide number of boundary condition types that can be set but many are only available for certain equation set types and in this example we simply want to fix the field value.
#
# When solverEquations.BoundaryConditionsCreateFinish() is called OpenCMISS will construct the solver matrices and vectors.
# + pycharm={"name": "#%%\n"}
# Identify first and last node number.
firstNodeNumber = 1
nodes = iron.Nodes()
region.NodesGet(nodes)
lastNodeNumber = nodes.NumberOfNodesGet()
# Create boundary conditions object and set first and last nodes to 0.0 and 1.0
# SetNode arguments: field, variable type, node version (1), node
# derivative (1), node user number, field component (1), condition type,
# and the fixed value. Versions/derivatives are 1 because we fix plain
# field values on a Lagrange basis.
boundary_conditions = iron.BoundaryConditions()
solver_equations.BoundaryConditionsCreateStart(boundary_conditions)
boundary_conditions.SetNode(
    dependent_field, iron.FieldVariableTypes.U, 1, 1, firstNodeNumber,
    1, iron.BoundaryConditionsTypes.FIXED, 0.0)
boundary_conditions.SetNode(
    dependent_field, iron.FieldVariableTypes.U, 1, 1, lastNodeNumber,
    1, iron.BoundaryConditionsTypes.FIXED, 1.0)
# Finishing here constructs the solver matrices and vectors.
solver_equations.BoundaryConditionsCreateFinish()
# -
# ### 15. Solving the problem
#
# After our problem solver equations have been fully defined we are now ready to solve our problem. When we call the Solve method of the problem it will loop over the control loops and control loop solvers to solve our problem:
# + pycharm={"name": "#%%\n"}
# Solve the problem.
problem.Solve()
# -
# ## Visualising results
#
# We can now visualise the resulting solution using pythreejs.
# + pycharm={"name": "#%%\n"}
renderer = threejs_visualiser.visualise(
mesh, geometric_field, number_of_dimensions, xi_interpolation,
dependent_field=dependent_field,
variable=iron.FieldVariableTypes.U,
colour_map_dependent_component_number=1, resolution=8)
# -
# ## Exporting solutions
#
# Now we want to have the results of the run be stored for visualisation in Cmgui.
# + pycharm={"name": "#%%\n"}
# Export results in Exfile format.
# Writes laplace_equation.exnode (nodal solution values) and
# laplace_equation.exelem (mesh topology) for visualisation in Cmgui.
fields = iron.Fields()
fields.CreateRegion(region)
fields.NodesExport("laplace_equation", "FORTRAN")
fields.ElementsExport("laplace_equation", "FORTRAN")
fields.Finalise()
# + [markdown] pycharm={"name": "#%% md\n"}
# The simulation results should stored in the local directory as two files: `laplace_equation.exnode` and laplace_equation.exelem. The `laplace_equation.exnode` contains the data of the solution $f(x,y)$ associated with each node. The `laplace_equation.exelem` file contains the topology of the mesh, and associates each element with its corresponding nodes.
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Finalising session
#
# Let the library know that you are done with computations and the resources allocated for the problem can now be released.
# + pycharm={"name": "#%%\n"}
# Release the OpenCMISS objects created above (required before objects
# with the same user numbers could be recreated), then shut the library down.
problem.Destroy()
coordinate_system.Destroy()
region.Destroy()
basis.Destroy()
iron.Finalise()
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Modifying the simulation from the 3D to 2D Laplace problem
# To run a 1D or 2D Laplace problem using OpenCMISS-Iron, simple changes need to be made to above code in the `iron.CoordinateSystem()` class, the `iron.Basis()` class, and the `iron.GeneratedMesh()` class. However, for completeness, we summarise the code changes required to convert from a 3D Laplace problem to a 2D problem in the table below:
#
# | 2D | 3D |
# |-----------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------|
# | `coordinate_system.DimensionSet(2)` | `coordinate_system.DimensionSet(3)` |
# | `basis.NumberOfXiSet(2)` | `basis.NumberOfXiSet(3)` |
# | `basis.InterpolationXiSet([iron.BasisInterpolationSpecifications.LINEAR_LAGRANGE]\*2)` | `basis.InterpolationXiSet([iron.BasisInterpolationSpecifications.LINEAR_LAGRANGE]\*3)` |
# | `basis.quadratureNumberOfGaussXi([3,3])` | `basis.quadratureNumberOfGaussXi([3,3,3])` |
# | `generated_mesh.ExtentSet([width, height])` | `generated_mesh.ExtentSet([width, height, length])` |
# | `generated_mesh.NumberOfElementsSet([number_global_x_elements,number_global_y_elements])` | `generated_mesh.NumberOfElementsSet([number_global_x_elements,number_global_y_elements,number_global_z_elements])` |
#
# The same Boundary Conditions can be defined in this example as it is based on the first and last node. However, in general, care must be taken in how the boundary conditions are defined for the users problem.
#
# These changes have already been incorporated into this tutorial (you can see the required changes in the python `if` statements used in their corresponding sections of the code above). Simply change the `number_of_dimensions` variable defined when creating the OpenCMISS-Iron basis object to switch between 1D, 2D, or 3D Laplace problems. You can then restart the Jupyter notebook kernel and re-run the tutorial.
# -
|
tutorials/basics/basics.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import matplotlib.pyplot as plt
# Load the engineered loan dataset produced upstream; `int_rate`
# (interest-rate bucket) is the classification target throughout.
df = pd.read_csv("final_df_project_3_0502c.csv")
df
# Inspect the target classes and overall dataset size.
print(df["int_rate"].unique())
print(df.shape)
# +
# Inspect the raw bankruptcy counts before recoding.
print(df["pub_rec_bankruptcies"].unique())
print(df["pub_rec_bankruptcies"].value_counts())
# -
# Cast the bankruptcy count column to int.
df["pub_rec_bankruptcies"] = df[["pub_rec_bankruptcies"]].astype(int)
df
#change bankruptcies to hot encoding: collapse counts 2-7 into 1 so the
#column becomes a binary "any repeated bankruptcies" indicator (0/1 kept).
df["pub_rec_bankruptcies"].replace({2: 1, 3: 1, 4: 1, 5: 1, 6: 1, 7: 1}, inplace=True)
print(df["pub_rec_bankruptcies"].value_counts())
# # Stats Summary
df.describe()
import seaborn as sns
# Class balance of the interest-rate target.
sns.countplot(df["int_rate"], label="Count")
plt.show()
# select features which will be used as X values
test_df = df[["int_rate","annual_inc", "dti", "fico_range_low", "term",
              "open_acc", "revol_bal", "loan_amnt","pub_rec_bankruptcies", "credit_start_date"]]
test_df
# +
# run a scatter matrix to see if there's any relationship between features
from pandas.plotting import scatter_matrix
scatter_matrix(test_df.drop("int_rate", axis=1), figsize=(30,20))
plt.show()
# -
#Model feature - test 1 - income, dti, fico score, open accounts, revolving balance, and loan amount
feature_names = ["annual_inc", "dti", "fico_range_low", "open_acc", "revol_bal", "loan_amnt"]
X = test_df[feature_names]
y = test_df["int_rate"]
X
y
# # Create training and test sets
# +
from sklearn.model_selection import train_test_split
# Hold out a test split (sklearn's default 25%) with a fixed seed so the
# comparison across models below is reproducible.
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# -
X_train
X_test
# # Apply scaling
#try MinMaxScaling
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
# Fit the scaler on the training split only, then reuse the fitted transform
# on the test split to avoid leaking test-set statistics into training.
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
X_train_scaled
X_test_scaled
# # Build Models
# +
## Logistic Regression
from sklearn.linear_model import LogisticRegression
logreg = LogisticRegression()
logreg.fit(X_train_scaled, y_train)
# Train vs. test accuracy printed side by side to spot overfitting.
print('Accuracy of logistic regression on training', logreg.score(X_train_scaled, y_train))
print('Accuracy of logistic regression on testing', logreg.score(X_test_scaled, y_test))
# +
## Decision Tree
from sklearn.tree import DecisionTreeClassifier
dt = DecisionTreeClassifier()
dt.fit(X_train_scaled, y_train)
print('Accuracy of Decision tree on training', dt.score(X_train_scaled, y_train))
print('Accuracy of Decision tree on testing', dt.score(X_test_scaled, y_test))
# -
# Setting max decision tree depth to help avoid overfitting
dt2 = DecisionTreeClassifier(max_depth=5)
dt2.fit(X_train_scaled, y_train)
print('Accuracy of Decision tree on training', dt2.score(X_train_scaled, y_train))
print('Accuracy of Decision tree on testing', dt2.score(X_test_scaled, y_test))
# Setting max decision tree depth to help avoid overfitting
dt3 = DecisionTreeClassifier(max_depth=10)
dt3.fit(X_train_scaled, y_train)
print('Accuracy of Decision tree on training', dt3.score(X_train_scaled, y_train))
print('Accuracy of Decision tree on testing', dt3.score(X_test_scaled, y_test))
# K-nearest neighbor
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier()
knn.fit(X_train_scaled, y_train)
print('Accuracy of Knn on training', knn.score(X_train_scaled, y_train))
print('Accuracy of Knn on testing', knn.score(X_test_scaled, y_test))
# Linear Discriminant Analysis
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
lda = LinearDiscriminantAnalysis()
lda.fit(X_train_scaled, y_train)
# Bug fix: these two prints previously said "Knn" — a copy-paste remnant of
# the KNN cell above — which mislabeled the reported scores. They report the
# LDA model's train/test accuracy.
print('Accuracy of LDA on training', lda.score(X_train_scaled, y_train))
print('Accuracy of LDA on testing', lda.score(X_test_scaled, y_test))
# Gaussian Naive Bayes
from sklearn.naive_bayes import GaussianNB
gnb = GaussianNB()
gnb.fit(X_train_scaled, y_train)
# Train vs. test accuracy for the Naive Bayes baseline.
print('Accuracy of GNB on training', gnb.score(X_train_scaled, y_train))
print('Accuracy of GNB on testing', gnb.score(X_test_scaled, y_test))
#Random Forest
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(n_estimators=200)
rf = rf.fit(X_train_scaled, y_train)
# Fix: removed a redundant, unassigned `rf.score(X_test_scaled, y_test)` call
# that evaluated the test set and discarded the result before the print below
# evaluated it again.
print('Accuracy of Random Forest on training', rf.score(X_train_scaled, y_train))
print('Accuracy of Random Forest on testing', rf.score(X_test_scaled, y_test))
# Support Vector Machine
from sklearn.svm import SVC
svm = SVC()
svm.fit(X_train_scaled, y_train)
# Train vs. test accuracy for the (default RBF-kernel) SVM.
print('Accuracy of SVM on training', svm.score(X_train_scaled, y_train))
print('Accuracy of SVM on testing', svm.score(X_test_scaled, y_test))
# Overall, none of the results seemed accurate enough. We decided to test using a different mix of features.
|
models/Model 1 test - first attempt.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Japanese Text Overlay #
# This is a Japanese hiragana text overlay.
#
# First you download the bit file.
# +
from pynq.overlays.video import *
from pynq.lib.video import *
# Load the video overlay bitstream and grab handles to both HDMI endpoints.
base = VideoOverlay("video.bit")
hdmi_in = base.video.hdmi_in
hdmi_out = base.video.hdmi_out
# -
# Then start up the PRControl, the video will not work otherwise. It initializes the Video AXI Switch so HDMI runs through the VDMA.
from pynq.overlays.video import PRControl
pr_inst = PRControl()
# The best video sources are computers where you can control the resolution.
# Bring up the pipeline: configure input first, mirror its mode on the
# output, start both ends, then tie input to output for live passthrough.
hdmi_in.configure()
hdmi_out.configure(hdmi_in.mode,PIXEL_BGR)
hdmi_out.start()
hdmi_in.start()
hdmi_in.tie(hdmi_out)
# Here is a frame in VDMA.
import PIL.Image
import cv2
# Capture one frame, convert BGR -> RGB for display in the notebook.
frame = hdmi_in.readframe()
frame = cv2.cvtColor(frame,cv2.COLOR_BGR2RGB)
image = PIL.Image.fromarray(frame)
image
# The Japanese Filter has to be loaded in. In can fit into L0,M0,M1,M2,S0,S1,S2,S3,S4,S5
# Connect the HDMI_IN to L0 and L0 to VDMA and VDMA to HDMI_OUT
pr_inst.connect("HDMI_IN","L0")
pr_inst.connect("L0","VDMA")
pr_inst.connect("VDMA","HDMI_OUT")
# Partial reconfiguration: load the Japanese text filter into slot L0.
PartialBitstream("japanese_l0.bit").download()
# Romaji -> glyph-index lookup table used by the hardware text filter.
# Lowercase keys are the small kana variants (e.g. 'tu' vs 'TU').
hirigana = {' ': 0,
            'a':1,'A':2,'i':3,'I':4,'u': 5, 'U':6,'e':7,'E':8,'o':9,'O':10,
            'KA':11,'GA':12,'KI':13,'GI':14,'KU':15,'GU':16,'KE':17,'GE':18,'KO':19,'GO':20,
            'SA':21,'ZA':22,'SI':23,'ZI':24,'SU':25,'ZU':26,'SE':27,'ZE':28,'SO':29,'ZO':30,
            'TA':31,'DA':32,'TI':33,'DI':34,'tu':35,'TU':36,'DU':37,'TE':38,'DE':39,'TO':40, 'DO':41,
            'NA':42,'NI':43,'NU':44,'NE':45,'NO':46,
            'HA':47,'BA':48,'PA':49 ,
            'HI':50,'BI':51,'PI':52,
            'HU':53,'BU':54,'PU':55,
            'HE':56,'BE':57,'PE':58,
            'HO':59,'BO':60,'PO':61,
            'MA':62,'MI':63,'MU':64,'ME':65,'MO':66,
            'ya':67,'YA':68,'yu':69,'YU':70,'yo':71,'YO':72,
            'RA':73,'RI':74,'RU':75,'RE':76,'RO':77,
            'wa':78,'WA':79,'WI':80,'WE':81,'WO':82,
            'N' :83,'VU':84 }
# Program the filter (see the command table below): cmd 3 = font size,
# cmd 6 = string length, cmd 5 = reset write index, cmd 4 = write one glyph.
pr_inst.filter_cmd("L0",3,8)
pr_inst.filter_cmd("L0",6,5)
pr_inst.filter_cmd("L0",5,0)
pr_inst.filter_cmd("L0",4,hirigana["KO"])
pr_inst.filter_cmd("L0",4,hirigana["N"])
pr_inst.filter_cmd("L0",4,hirigana["NI"])
pr_inst.filter_cmd("L0",4,hirigana["TI"])
pr_inst.filter_cmd("L0",4,hirigana["HA"])
import PIL.Image
import cv2
# Grab another frame to see the overlaid text.
frame = hdmi_in.readframe()
frame = cv2.cvtColor(frame,cv2.COLOR_BGR2RGB)
image = PIL.Image.fromarray(frame)
image
# Now we need to load a string into the filter
# #### JAPANESE Filter Settings ####
# * 0 Set X Offset
# * 1 Set Y Offset
# * 2 Set Color
# * 3 Set Font Size
# * 4 Write Char & increment write index
# * 5 Set Write Index
# * 6 Set Str Size
#
# pr_inst.filter_cmd("filter name",cmd,data)
# Replace the displayed string with a new 4-glyph message.
pr_inst.filter_cmd("L0",3,8)
pr_inst.filter_cmd("L0",6,4)
pr_inst.filter_cmd("L0",5,0)
pr_inst.filter_cmd("L0",4,hirigana["YO"])
pr_inst.filter_cmd("L0",4,hirigana["U"])
pr_inst.filter_cmd("L0",4,hirigana["KU"])
pr_inst.filter_cmd("L0",4,hirigana["SO"])
import PIL.Image
import cv2
frame = hdmi_in.readframe()
frame = cv2.cvtColor(frame,cv2.COLOR_BGR2RGB)
image = PIL.Image.fromarray(frame)
image
# Shut the pipeline down cleanly (output first, then input).
hdmi_out.close()
hdmi_in.close()
|
Video_notebooks/Japanese Text Overlay.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
os.chdir('../')
from DeepPurpose import dataset
import numpy as np
import pandas as pd
# -

# Load the DAVIS benchmark and the BindingDB Kd data the model was pretrained on.
SMILES, Target_seq, y = dataset.load_process_DAVIS()
X_drug, X_target, y = dataset.process_BindingDB('../data/BindingDB_All.tsv',
                                                y = 'Kd',
                                                binary = False,
                                                convert_to_log = True)

# Count drugs shared between DAVIS and the pretraining set.
# Fix: membership is now tested against a set instead of re-scanning the
# np.unique(X_drug) array for every SMILES string (was O(n*m)); the
# resulting list is identical.
pretrained_drugs = set(np.unique(X_drug))
x = [i for i in np.unique(SMILES) if i in pretrained_drugs]
len(x)

# ## This means DAVIS and our pretrained dataset is not overlapped.

# Build a (SMILES, target sequence, label) frame and sample 1000 pairs
# for evaluation (note: no random_state, so the sample is not reproducible).
df_data = pd.DataFrame(zip(SMILES, Target_seq, y))
df_data.rename(columns={0:'SMILES',
                        1: 'Target Sequence',
                        2: 'Label'},
               inplace=True)
df_1000 = df_data.sample(n = 1000, replace = False).reset_index(drop = True)

# Virtual screening with the pretrained BindingDB model (mean-max aggregation).
from DeepPurpose import oneliner
oneliner.virtual_screening(df_1000['Target Sequence'].values, target_name = None,
                           X_repurpose = df_1000['SMILES'].values,
                           drug_names = None,
                           save_dir = './save_folder',
                           pretrained_dir = './save_folder/pretrained_models/DeepPurpose_BindingDB/',
                           convert_y = True,
                           subsample_frac = 1,
                           pretrained = True,
                           split = 'random',
                           frac = [0.7,0.1,0.2],
                           agg = 'agg_mean_max',
                           output_len = 30)

# +
# Load the saved screening predictions, convert nM -> p-scale, and correlate
# against the true DAVIS labels.
import pickle
from utils import convert_y_unit
with open('./save_folder/results_aggregation/logits_VS_mean_max.pkl', 'rb') as f:
    list_ = pickle.load(f)
result = convert_y_unit(list_, 'nM', 'p')
from scipy.stats import pearsonr
pearsonr(result, df_1000.Label.values)

# +
import seaborn as sns
import matplotlib.pyplot as plt

fontsize = 17
sns.regplot(x=result, y=df_1000.Label.values, line_kws={"color":"r","alpha":0.7,"lw":5})
plt.xlabel('Predicted Kd', fontsize = fontsize)
plt.ylabel('True Kd', fontsize = fontsize)
# NOTE(review): the annotation value below is hard-coded; confirm it matches
# the pearsonr result computed above before publishing the figure.
plt.text(4.6, 9.5, "R-Squared = 0.7789", horizontalalignment='left', size='medium', color='red', fontsize = 15)
plt.savefig('./correlation.pdf')
# -
|
DEMO/Make-DAVIS-Correlation-Figure.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + active=""
# title = "SimplePETSc"
# date = "2021-11-09"
# author = "<NAME>"
# notebook = "SimplePETSc.ipynb"
# [menu]
# [menu.benchmarks]
# parent = "elliptic"
# <!--eofm-->
# -
# The following shows running a simple steady-state diffusion benchmark running on 2 cores.
# +
import os
# Resolve the OGS project file and output directory from environment
# variables, falling back to in-repo defaults.
prj_name = "square_1e1_neumann"
data_dir = os.environ.get('OGS_DATA_DIR', '../../../Data')
prj_file = f"{data_dir}/EllipticPETSc/{prj_name}.prj"
out_dir = os.environ.get('OGS_TESTRUNNER_OUT_DIR', '_out')
if not os.path.exists(out_dir):
    os.makedirs(out_dir)
os.chdir(out_dir)
# Run the solver on 2 MPI ranks; stdout is captured in out.txt.
print(f"mpirun -np 2 ogs {prj_file} > out.txt")
# ! mpirun -np 2 ogs {prj_file} > out.txt
from datetime import datetime
print(datetime.now())
# +
import vtuIO
# Read the simulation output and sample the pressure field at two probe
# points via linear interpolation, then plot pressure over time.
pvdfile = vtuIO.PVDIO(f"{prj_name}.pvd", dim=2)
time = pvdfile.timesteps
points={'pt0': (0.3,0.5,0.0), 'pt1': (0.24,0.21,0.0)}
pressure_linear = pvdfile.read_time_series("pressure", points)
import matplotlib.pyplot as plt
plt.plot(time, pressure_linear["pt0"], "b-", label="pt0 linear interpolated")
plt.plot(time, pressure_linear["pt1"], "r-", label="pt1 linear interpolated")
plt.legend()
plt.xlabel("t")
plt.ylabel("p")
|
Tests/Data/Notebooks/SimplePETSc.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="RUDVcP-N517W" colab={"base_uri": "https://localhost:8080/"} outputId="49dc4879-0038-46c8-e8cd-d302012cf790"
# !pip install -q -U tensorflow-text --quiet
# + id="D8ymeT4J53fp" colab={"base_uri": "https://localhost:8080/"} outputId="f696ec96-bb1d-46c3-f11a-76a7d272dfaa"
# !pip install -q tf-models-official
# + id="0hDHNrjA54uH"
import os
import shutil
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_text as text
from official.nlp import optimization # to create AdamW optimizer
import matplotlib.pyplot as plt
tf.get_logger().setLevel('ERROR')
# + id="_RZ4eR_g561e" colab={"base_uri": "https://localhost:8080/"} outputId="57ae7816-cdde-425b-f0be-75015afdd461"
url = 'https://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz'
# keras uitlity to download a file
dataset = tf.keras.utils.get_file('aclImdb_v1.tar.gz', url, untar=True, cache_dir='.', cache_subdir='')
# + id="8Hmf0i3t59au"
dataset_dir = '/content/aclImdb'
train_dir = os.path.join(dataset_dir,'train')
# + id="CmydkF-M5_1h"
# remove unused folders to make it easier to load the data
remove_dir = os.path.join(train_dir, 'unsup')
shutil.rmtree(remove_dir)
# + colab={"base_uri": "https://localhost:8080/"} id="O22kKmkX6RG1" outputId="cfbc5d6d-ab1a-4b3b-ad46-29e32e0bfe20"
AUTOTUNE = tf.data.AUTOTUNE
batch_size = 32
seed = 123
raw_train_ds = tf.keras.preprocessing.text_dataset_from_directory(
'aclImdb/train',
batch_size=batch_size,
validation_split=0.2,
subset='training',
seed=seed)
class_names = raw_train_ds.class_names
train_ds = raw_train_ds.cache().prefetch(buffer_size=AUTOTUNE)
val_ds = tf.keras.preprocessing.text_dataset_from_directory(
'aclImdb/train',
batch_size=batch_size,
validation_split=0.2,
subset='validation',
seed=seed)
val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)
test_ds = tf.keras.preprocessing.text_dataset_from_directory(
'aclImdb/test',
batch_size=batch_size)
test_ds = test_ds.cache().prefetch(buffer_size=AUTOTUNE)
# + colab={"base_uri": "https://localhost:8080/"} id="l3ImaMmH6bfa" outputId="5e74f771-73d7-4fe0-e2dc-5ce29397a849"
for text_batch, label_batch in train_ds.take(1):
for i in range(3):
print(f'Review: {text_batch.numpy()[i]}')
label = label_batch.numpy()[i]
print(f'Label : {label} ({class_names[label]})')
# + colab={"base_uri": "https://localhost:8080/"} id="Fm8sVgBMmG12" outputId="2a40dc6f-1789-4479-c6f4-7939bf0c6a45"
#@title Choose a BERT model to fine-tune
bert_model_name = 'bert_en_uncased_L-12_H-768_A-12' #@param ["bert_en_uncased_L-12_H-768_A-12", "bert_en_cased_L-12_H-768_A-12", "bert_multi_cased_L-12_H-768_A-12", "small_bert/bert_en_uncased_L-2_H-128_A-2", "small_bert/bert_en_uncased_L-2_H-256_A-4", "small_bert/bert_en_uncased_L-2_H-512_A-8", "small_bert/bert_en_uncased_L-2_H-768_A-12", "small_bert/bert_en_uncased_L-4_H-128_A-2", "small_bert/bert_en_uncased_L-4_H-256_A-4", "small_bert/bert_en_uncased_L-4_H-512_A-8", "small_bert/bert_en_uncased_L-4_H-768_A-12", "small_bert/bert_en_uncased_L-6_H-128_A-2", "small_bert/bert_en_uncased_L-6_H-256_A-4", "small_bert/bert_en_uncased_L-6_H-512_A-8", "small_bert/bert_en_uncased_L-6_H-768_A-12", "small_bert/bert_en_uncased_L-8_H-128_A-2", "small_bert/bert_en_uncased_L-8_H-256_A-4", "small_bert/bert_en_uncased_L-8_H-512_A-8", "small_bert/bert_en_uncased_L-8_H-768_A-12", "small_bert/bert_en_uncased_L-10_H-128_A-2", "small_bert/bert_en_uncased_L-10_H-256_A-4", "small_bert/bert_en_uncased_L-10_H-512_A-8", "small_bert/bert_en_uncased_L-10_H-768_A-12", "small_bert/bert_en_uncased_L-12_H-128_A-2", "small_bert/bert_en_uncased_L-12_H-256_A-4", "small_bert/bert_en_uncased_L-12_H-512_A-8", "small_bert/bert_en_uncased_L-12_H-768_A-12", "albert_en_base", "electra_small", "electra_base", "experts_pubmed", "experts_wiki_books", "talking-heads_base"]
map_name_to_handle = {
'bert_en_uncased_L-12_H-768_A-12':
'https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/4'
}
map_model_to_preprocess = {
'bert_en_uncased_L-12_H-768_A-12':
'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3'
}
tfhub_handle_encoder = map_name_to_handle[bert_model_name]
tfhub_handle_preprocess = map_model_to_preprocess[bert_model_name]
print(f'BERT model selected : {tfhub_handle_encoder}')
print(f'Preprocess model auto-selected: {tfhub_handle_preprocess}')
# + id="AuKl1PlCng4W"
# Standalone preprocessing layer, used here only to inspect its outputs.
bert_preprocess_model = hub.KerasLayer(tfhub_handle_preprocess)
# + colab={"base_uri": "https://localhost:8080/"} id="YGW0m_U7ng6w" outputId="0d7b2a2a-cf75-4e6c-c2d3-4ee7aaa5fa9d"
# Run two sample sentences through preprocessing and inspect the tokenized
# tensors the encoder will consume.
text_test = ['this is such an amazing movie!. I hate the movie', 'hello world']
text_preprocessed = bert_preprocess_model(text_test)
print(f'Keys : {list(text_preprocessed.keys())}')
print(f'Shape : {text_preprocessed["input_word_ids"].shape}')
print(f'Word Ids : {text_preprocessed["input_word_ids"][0, :12]}')
print(f'Input Mask : {text_preprocessed["input_mask"]}')
print(f'Type Ids : {text_preprocessed["input_type_ids"]}')
# + id="ULHQA7UZng9F"
# Standalone encoder, again only for inspection before building the model.
bert_model = hub.KerasLayer(tfhub_handle_encoder)
# + colab={"base_uri": "https://localhost:8080/"} id="sF6dR0j6n_aY" outputId="aaf98826-755f-4caf-e2a4-82ad7cee7dcf"
bert_results = bert_model(text_preprocessed)
print(f'Loaded BERT: {tfhub_handle_encoder}')
print(f'Pooled Outputs Shape:{bert_results["pooled_output"].shape}')
print(f'Pooled Outputs Values:{bert_results["pooled_output"][0, :12]}')
print(f'Sequence Outputs Shape:{bert_results["sequence_output"].shape}')
print(f'Sequence Outputs Values:{bert_results["sequence_output"][0, :12]}')
# + id="XUlRERZ8n_cz"
def build_classifier_model():
    """Assemble the end-to-end BERT text classifier.

    Wires a raw-string input through the TF-Hub preprocessing layer and
    the BERT encoder (fine-tunable), then applies dropout and a
    single-unit dense head. The model outputs a raw logit — apply a
    sigmoid to obtain a probability.
    """
    raw_text = tf.keras.layers.Input(shape=(), dtype=tf.string, name='text')
    preprocess = hub.KerasLayer(tfhub_handle_preprocess, name='preprocessing')
    encoder = hub.KerasLayer(tfhub_handle_encoder, trainable=True, name='BERT_encoder')
    pooled = encoder(preprocess(raw_text))['pooled_output']
    dropped = tf.keras.layers.Dropout(0.1)(pooled)
    logit = tf.keras.layers.Dense(1, activation=None, name='classifier')(dropped)
    return tf.keras.Model(raw_text, logit)
# + colab={"base_uri": "https://localhost:8080/"} id="GF9IMhdyn_fL" outputId="37f9769a-87e3-48a4-d0e1-c0cce0ca7b67"
# Smoke test: run the untrained classifier on the sample sentences.
classifier_model = build_classifier_model()
bert_raw_result = classifier_model(tf.constant(text_test))
print(tf.sigmoid(bert_raw_result))
# + id="YhrWeHPKn_hZ"
# Binary classification on raw logits (from_logits=True matches the model head).
loss = tf.keras.losses.BinaryCrossentropy(from_logits=True)
metrics = tf.metrics.BinaryAccuracy()
# + id="JlismRqpn_jp"
# AdamW with linear warmup over the first 10% of training steps.
epochs = 3
steps_per_epoch = tf.data.experimental.cardinality(train_ds).numpy()
num_train_steps = steps_per_epoch * epochs
num_warmup_steps = int(0.1*num_train_steps)
init_lr = 3e-5
optimizer = optimization.create_optimizer(init_lr=init_lr,
                                          num_train_steps=num_train_steps,
                                          num_warmup_steps=num_warmup_steps,
                                          optimizer_type='adamw')
# + id="-6nEkJZknhAM"
classifier_model.compile(optimizer=optimizer,
                         loss=loss,
                         metrics=metrics)
# + colab={"base_uri": "https://localhost:8080/"} id="L6DdiV24nhB5" outputId="4e338ec5-18aa-419e-bbee-3077acca5e90"
print(f'Training model with {tfhub_handle_encoder}')
history = classifier_model.fit(x=train_ds,
                               validation_data=val_ds,
                               epochs=epochs)
# + colab={"base_uri": "https://localhost:8080/"} id="mSb8WyY8pHMg" outputId="87cde8bd-f55a-42a8-8518-58d31928b1b0"
loss, accuracy = classifier_model.evaluate(test_ds)
print(f'Loss: {loss}')
print(f'Accuracy: {accuracy}')
# + id="sY82BHYWEK_G"
# dataset_name = 'imdb'
# saved_model_path = './{}_bert'.format(dataset_name.replace('/', '_'))
# classifier_model.save(saved_model_path, include_optimizer=False)
# + id="8cWWoggaELmR"
# reloaded_model = tf.saved_model.load(saved_model_path)
# + id="Jo002Lq8kg7X"
import tensorflow_datasets as tfds
# + id="BBLaGTbdD8AT"
# Collect all test batches (782 batches of 32 = 25k reviews) as raw bytes.
text = []
for text_batch, label_batch in test_ds.take(782):
    text.append(text_batch.numpy())
# + id="ULveLk1mkS3G"
# Flatten the list of batches into one flat list of reviews.
review = []
for i in range(782):
    for x in text[i].tolist():
        review.append(x)
# + id="uj684KOQ4yAF"
# classifier_model.predict(stringlist[:100])
# + id="1iNoa3Bl4tfW"
stringlist = [x.decode('utf-8') for x in review]
# + id="u5AhXMCq4uG2"
# Score every test review in one forward pass.
bert_raw_result = classifier_model(tf.constant(stringlist))
# + id="aXu0Hu4j_mPt"
pred_test = tf.sigmoid(bert_raw_result)
# + id="LsmgjRZvYQrh" colab={"base_uri": "https://localhost:8080/"} outputId="16f2e93f-8f42-4b91-b3aa-0e2f6135825b"
pred_test
# + id="U26o4NCEjOcu"
# Collect the matching ground-truth labels, batch by batch.
label = []
for text_batch, label_batch in test_ds.take(782):
    label.append(label_batch.numpy())
# + id="WVM0I9MCjHW-"
real = []
for i in range(782):
    for x in label[i].tolist():
        real.append(x)
# + id="HEF3y-KHkYOw"
import pandas as pd
# NOTE(review): pred_test is the sigmoid output tensor, presumably shaped
# (n, 1); pandas generally requires 1-D column data (e.g. .numpy().ravel()) —
# confirm this cell ran as-is.
df = pd.DataFrame({"review": review, "pred": pred_test})
# + id="RI4uO5SNkaSU"
# Export predictions and download them from the Colab runtime.
from google.colab import files
df.to_csv('output.csv', encoding = 'utf-8-sig')
files.download('output.csv')
|
415-final-project-sentiment-analysis-imdb-main/BERT/bert_final.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Two-dimensional heterogeneous table: `pandas.DataFrame`
# +
# %matplotlib widget
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Register CJK-capable fonts (highest priority first) so Chinese labels
# render correctly in matplotlib figures.
plt.rcParams['font.serif'] = ['STSong', 'SimSun', 'SimSun-ExtB'] + plt.rcParams['font.serif']
plt.rcParams['font.serif'] = ['STFangson', 'FangSong'] + plt.rcParams['font.serif']
plt.rcParams['font.serif'] = ['STKaiti', 'KaiTi'] + plt.rcParams['font.serif']
plt.rcParams['font.sans-serif'] = ['SimHei'] + plt.rcParams['font.sans-serif']
plt.rcParams['font.sans-serif'] = ['Microsoft YaHei'] + plt.rcParams['font.sans-serif']
plt.rcParams['font.sans-serif'] = ['STXihei'] + plt.rcParams['font.sans-serif']
# -
# ___
# ## Loading data
# ### Load data from a `.csv` file
df = pd.read_csv("https://download.mlcc.google.cn/mledu-datasets/california_housing_train.csv", sep=",")
df
# ### Build a DataFrame from `pandas.Series` columns
# Columns: city name and land area in square kilometres.
s1 = pd.Series(['北京', '上海', '广州'])
s2 = pd.Series([16410, 6430, 7434])
df = pd.DataFrame({'城市': s1, '占地(平方千米)': s2})
df
# ___
# ## Displaying data
# ### Show summary statistics (`DataFrame.describe()`)
df.describe()
# ### Show the first few rows (`DataFrame.head()`)
df.head()
# ### Plot a histogram for each column (`DataFrame.hist()`)
df.hist()
# ___
# ## Accessing data
# ### Access one column
df['城市']
# ### Access one item
df['城市'][1]
# ### Access one row
df[0:1]
# ___
# ## Manipulating data
# ### Add a column (population, in units of 10,000 people)
df['人口(万人)'] = pd.Series([2153.6, 2428.14, 1530.59])
df
# ### Add a row
df.loc[df.shape[0]] = {'城市': '深圳', '占地(平方千米)': 1997, '人口(万人)': 1343.88}
df
# ### Randomly reindex (shuffle the rows)
# Note: reindex returns a new DataFrame; df itself is left unchanged here.
df.reindex(np.random.permutation(df.index))
|
rrpandas/ipynb/DataFrame.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
# %matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
# Exam-results dataset: one row per student with demographic columns and
# math/reading/writing scores.
st = pd.read_csv('./data/StudentsPerformance.csv')
st.head()
st.hist()
# Rename columns to snake_case so they can be used as attribute accessors
# (e.g. st.math_score below).
st = st.rename(columns={"parental level of education" : 'parental_level_of_education',
                        'race/ethnicity' : 'race',
                        'test preparation course':'test_preparation_course',
                        'math score':'math_score',
                        'reading score':'reading_score',
                        'writing score':'writing_score'})
st
plt.figure()
st.math_score.hist(bins=50, color="g")
st.plot(kind="bar", stacked=True, alpha=0.5)
# NOTE(review): pandas' bar plot normally takes kind="barh" for horizontal
# bars; confirm the "orientation" keyword here has the intended effect.
st.plot(kind="bar", stacked=True, alpha=0.5, orientation="horizontal")
# Per-component colors for the box plot below.
color= {
    "boxes": "Red",
    "whiskers": "Orange",
    "medians": "DarkBlue",
    "caps": "Blue",
}
st.plot.box(color=color, vert=False,)
st.plot.area( alpha=0.2)
st.plot(x="parental_level_of_education",y="math_score")
st.plot.hexbin(x="math_score", y="writing_score", gridsize=25)
st.plot.scatter(x="math_score", y="writing_score")
|
st17_mathplotlib.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_python3
# language: python
# name: conda_python3
# ---
# # Predicting driving speed violations with the Amazon SageMaker DeepAR algorithm
# _**Using the Amazon SageMaker DeepAR algorithm to predict streets where motorists are most likely to drive above speed limits at different times of the year**_
#
# ---
#
# ---
#
#
# ## Contents
#
# 1. [Background](#Background)
# 1. [Setup](#Setup)
# 1. [Data](#Data)
# 1. [Train](#Train)
# 1. [Host](#Host)
# 1. [Evaluate](#Evaluate)
# 1. [Extensions](#Extensions)
#
# ---
#
# ## Background
#
# This notebook demonstrates time series forecasting using the Amazon SageMaker DeepAR algorithm by analyzing city of Chicago’s [Speed Camera Violation dataset](https://data.cityofchicago.org/Transportation/Red-Light-Camera-Violations/spqx-js37). The dataset is hosted by [Data.gov](https://data.gov), and is managed by the [U.S. General Services Administration, Technology Transformation Service](http://www.gsa.gov/portal/category/25729).
#
# These violations are captured by camera systems and available to improve the lives of public through the [city of Chicago data portal](https://data.cityofchicago.org/). The [Speed Camera Violation dataset](https://data.cityofchicago.org/Transportation/Red-Light-Camera-Violations/spqx-js37) can be used to discern patterns in the data and gain meaningful insights.
#
# The dataset contains multiple camera locations and daily violation counts. Each daily violations for a camera can be considered a separate time series. Amazon SageMaker’s DeepAR algorithm can be used to train a model for multiple streets simultaneously, and predict violation for multiple street cameras using the Amazon SageMaker’s [DeepAR algorithm](https://docs.aws.amazon.com/sagemaker/latest/dg/deepar.html).
#
# ---
#
# ## Setup
#
# This notebook was created and tested on an ml.m4.xlarge notebook instance.
#
# Let's start by specifying:
#
# - The S3 bucket and prefix that you want to use for training and model data. This should be within the same region as the notebook instance, training, and hosting.
# - The IAM role arn used to give training and hosting access to your data. See the [documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html) for how to create these. Note, if more than one role is required for notebook instances, training, and/or hosting, please replace `sagemaker.get_execution_role()` with the appropriate full IAM role arn string(s).
# +
import sagemaker
# SageMaker session, default S3 bucket/prefix, region, and execution role
# used by the rest of the notebook.
sess = sagemaker.Session()
bucket = sess.default_bucket()
prefix = 'sagemaker/DEMO-deepar-chicago' # change to your desired S3 prefix
region = sess.boto_region_name
role = sagemaker.get_execution_role()
# -
# Now we import Python libraries like s3fs, matplotlib, pandas and numpy
# +
import io
import json
import requests
import time
import matplotlib.pyplot as plt
import pandas as pd
import s3fs
# -
# ---
#
# ## Data
#
#
#
# The speed violations are captured by camera systems and available to improve the lives of public from the city of Chicago data portal. The Speed Camera Violation dataset can be used to discern patterns in the data and gain meaningful insights.
#
# The dataset contains multiple camera locations and daily violation counts. If we imagine that each daily violations for a camera as one time series, we can use Amazon SageMaker’s DeepAR algorithm to train a model for multiple streets simultaneously, and predict violation for multiple street cameras using the Amazon SageMaker’s DeepAR algorithm.
#
# The dataset contains several columns, we use the address, violation date, violations for the forecasting.
#
# +
datafile = 'Chicago_Speed_Camera_Violations.csv'
# Bug fix: `datafile` already carries the .csv extension, so the previous
# '{}/{}/{}.csv' format produced an S3 key ending in '...csv.csv'.
speeding_violation_data_path = '{}/{}/{}'.format(bucket, prefix, datafile)
s3_output_path = '{}/{}/output'.format(bucket, prefix)
train_data_path = '{}/{}/train/train.json'.format(bucket, prefix)
test_data_path = '{}/{}/test/test.json'.format(bucket, prefix)

url = 'https://data.cityofchicago.org/api/views/hhkd-xvj4/rows.csv?accessType=DOWNLOAD'
# get the data from City of Chicago site; use a context manager so the file
# handle is closed deterministically (the original leaked it).
r = requests.get(url, allow_redirects=True)
with open(datafile, 'wb') as f:
    f.write(r.content)

# read the input file, and display sample rows/columns
pd.set_option('display.max_columns', 500)
pd.set_option('display.max_rows', 50)
# read_csv accepts the path directly — no need to leak an open file object.
df = pd.read_csv(datafile, encoding='utf-8')
# print first 10 lines to look at part of the dataset
df[['ADDRESS', 'VIOLATION DATE', 'VIOLATIONS']][0:10]
# -
# -
# We convert the violation date from string format to date format, determine the range of violation dates, and look at how many unique street addresses/cameras we have in our dataset.
#
# The dataset contains multiple camera locations and daily violation counts. If we imagine that each camera's daily violations as one time series, we can use Amazon SageMaker’s DeepAR algorithm to train a model for multiple streets simultaneously, and predict the violation count for multiple street cameras using the Amazon SageMaker’s DeepAR algorithm.
#
# As described in [Amazon SageMaker DeepAR input/output interface](https://docs.aws.amazon.com/sagemaker/latest/dg/deepar.html#deepar-inputoutput) section, we will convert the data into array, and use 0 for the violation count when data for a given camera on a given date is not available. Using the Matplotlib library we display each camera location as a timeseries to visualize the data
#
# +
# Parse violation dates and build one daily time series per camera address,
# reindexed onto the full date range with missing days filled with 0.
df['VIOLATION_DT'] = pd.to_datetime(df['VIOLATION DATE'])
df[['ADDRESS', 'VIOLATION_DT', 'VIOLATIONS']]
unique_addresses = df.ADDRESS.unique()
idx = pd.date_range(df.VIOLATION_DT.min(), df.VIOLATION_DT.max())
number_of_addresses = len(unique_addresses)
print('Unique Addresses {}'.format(number_of_addresses))
print('Minimum violation date is {}, maximum violation date is {}'.format(df.VIOLATION_DT.min(), df.VIOLATION_DT.max()))
violation_list = []
for key in unique_addresses:
    temp_df = df[['VIOLATION_DT', 'VIOLATIONS']][df.ADDRESS == key]
    temp_df.set_index(['VIOLATION_DT'], inplace=True)
    temp_df.index = pd.DatetimeIndex(temp_df.index)
    # Fill days with no recorded violations with 0 (required by DeepAR).
    temp_df = temp_df.reindex(idx, fill_value=0)
    violation_list.append(temp_df['VIOLATIONS'])
# Plot every camera's series on one figure for a visual overview.
plt.figure(figsize=(12,6), dpi=100, facecolor='w')
for key, address in enumerate(unique_addresses):
    plt.plot(violation_list[key], label=address)
plt.ylabel('Violations')
plt.xlabel('Date')
plt.title('Chicago Speed Camera Violations')
plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.05), shadow=False, ncol=4)
plt.show()
# -
# -
# We define prediction length as 30 days, and split the data with last 30 days of data as test data. We use rest of the data for training of the model. We can use the last 30 days of data to evaluate the accuracy of our trained model. We write the training and test data files in JSON format in the S3 bucket.
# +
# Forecast horizon: 30 days.
prediction_length = 30
# Split the data for training and validation/hold out: the training series
# drop the final `prediction_length` days, which the test set keeps.
violation_list_training = []
for i in violation_list:
    violation_list_training.append((i[:-prediction_length]))
def series_to_obj(ts, cat=None):
    """Convert a pandas Series into a DeepAR JSON object.

    Returns ``{'start': <first index value as str>, 'target': <values>}``,
    plus a ``'cat'`` field when a truthy category is supplied (note: a
    category of 0 is dropped by the truthiness test, matching the
    original behavior).
    """
    payload = {'start': str(ts.index[0]), 'target': list(ts)}
    if cat:
        payload['cat'] = cat
    return payload


def series_to_jsonline(ts, cat=None):
    """Serialize a Series as one JSON line in DeepAR's input format."""
    return json.dumps(series_to_obj(ts, cat))
encoding = 'utf-8'
s3filesystem = s3fs.S3FileSystem()
# Write the training and test sets to S3 as newline-delimited JSON, one
# camera's time series per line (DeepAR's expected input format).
with s3filesystem.open(train_data_path, 'wb') as fp:
    for ts in violation_list_training:
        fp.write(series_to_jsonline(ts).encode(encoding))
        fp.write('\n'.encode(encoding))
with s3filesystem.open(test_data_path, 'wb') as fp:
    for ts in violation_list:
        fp.write(series_to_jsonline(ts).encode(encoding))
        fp.write('\n'.encode(encoding))
# -
# ---
#
# ## Train
#
# We use [SageMaker Python SDK](https://sagemaker.readthedocs.io/en/stable/) to create an [estimator](https://sagemaker.readthedocs.io/en/stable/estimators.html) object to kick off training job. The train_use_spot parameter indicates the use of [managed spot training](https://docs.aws.amazon.com/sagemaker/latest/dg/model-managed-spot-training.html). The training will run at most 1 hour (3600 seconds).
#
# We use the [Automatic Model Tuning](https://docs.aws.amazon.com/sagemaker/latest/dg/automatic-model-tuning.html) or Hyperparameter optimization for identifying the best values for the [DeepAR hyperparameters](https://docs.aws.amazon.com/sagemaker/latest/dg/deepar_hyperparameters.html). The Automatic Model Tuning job will kick of 10 parallel jobs (set by by max_parallel_jobs) to search the best hyperparameters for this dataset. The jobs will try to minimize the root mean square error on the test dataset using predicted and actual values.
#
# You can consider increasing the max_parallel_jobs and train_max_run and train_max_wait parameters to allow for finding better hyperparameters, and allow additional tuning of the hyperparameters.
#
#
# +
from sagemaker.tuner import IntegerParameter, CategoricalParameter, ContinuousParameter, HyperparameterTuner
from sagemaker.amazon.amazon_estimator import get_image_uri

# Resolve the region-specific DeepAR training image.
container = get_image_uri(region_name=region,
                          repo_name='forecasting-deepar')

# Managed-spot training estimator: at most 1h of training, 1h spot wait.
# Bug fix: output_path previously used 's3://{}/{}'.format(bucket, s3_output_path),
# but s3_output_path already begins with the bucket name (see the Data section),
# which produced 's3://<bucket>/<bucket>/<prefix>/output'.
deepar = sagemaker.estimator.Estimator(container,
                                       role,
                                       train_instance_count=1,
                                       train_instance_type='ml.m4.xlarge',
                                       train_use_spot_instances=True, # use spot instances
                                       train_max_run=3600, # max training time in seconds
                                       train_max_wait=3600, # seconds to wait for spot instance
                                       output_path='s3://{}'.format(s3_output_path),
                                       sagemaker_session=sess)

# Daily frequency; look back 30 days of context to forecast the next 30.
freq = 'D'
context_length = 30
deepar.set_hyperparameters(time_freq=freq,
                           context_length=str(context_length),
                           prediction_length=str(prediction_length))

# Search space for automatic model tuning (Bayesian search minimizing
# RMSE on the test channel, 10 jobs run in parallel).
hyperparameter_ranges = {'mini_batch_size': IntegerParameter(100, 400),
                         'epochs': IntegerParameter(200, 400),
                         'num_cells': IntegerParameter(30,100),
                         'likelihood': CategoricalParameter(['negative-binomial', 'student-T']),
                         'learning_rate': ContinuousParameter(0.0001, 0.1)}
objective_metric_name = 'test:RMSE'
tuner = HyperparameterTuner(deepar,
                            objective_metric_name,
                            hyperparameter_ranges,
                            max_jobs=10,
                            strategy='Bayesian',
                            objective_type='Minimize',
                            max_parallel_jobs=10,
                            early_stopping_type='Auto')

# Channel inputs: the JSON-lines files written in the Data section.
s3_input_train = sagemaker.s3_input(s3_data='s3://{}/{}/train/'.format(bucket, prefix),
                                    content_type='json')
s3_input_test = sagemaker.s3_input(s3_data='s3://{}/{}/test/'.format(bucket, prefix),
                                   content_type='json')
tuner.fit({'train': s3_input_train, 'test': s3_input_test},
          include_cls_metadata=False)
tuner.wait()
# -
# -
# ---
#
# ## Host
#
# We use the [HyperParameterTuner](https://sagemaker.readthedocs.io/en/stable/tuner.html) to host the best model using a single ml.m4.xlarge instance.
# Deploy the model from the best tuning job to a real-time endpoint,
# reusing the training-job name as the endpoint name.
best_tuning_job_name = tuner.best_training_job()
endpoint_name = tuner.deploy(initial_instance_count=1,
                             endpoint_name=best_tuning_job_name,
                             instance_type='ml.m4.xlarge',
                             wait=True)
# ### Evaluate
#
# To evaluate the model, we define a DeepARPredictor class. This class extends the [RealTimePredictor](https://sagemaker.readthedocs.io/en/stable/predictors.html) class. Implementing encode and decode functions helps us make requests using `pandas.Series` objects rather than raw JSON strings.
#
# +
class DeepARPredictor(sagemaker.predictor.RealTimePredictor):
    """RealTimePredictor that exchanges `pandas.Series` objects with a
    DeepAR endpoint instead of raw JSON strings."""

    def set_prediction_parameters(self, freq, prediction_length):
        """Set the time frequency and prediction length parameters. This method **must** be
        called before being able to use `predict`.

        Parameters:
        freq -- string indicating the time frequency
        prediction_length -- integer, number of predicted time points

        Return value: none.
        """
        self.freq = freq
        self.prediction_length = prediction_length

    def predict(self, ts, cat=None, encoding='utf-8', num_samples=100, quantiles=['0.1', '0.5', '0.9']):
        """Request forecasts for every series in `ts`, each with the
        (optional) corresponding category in `cat`.

        Parameters:
        ts -- list of `pandas.Series` objects, the time series to predict
        cat -- list of integers (default: None)
        encoding -- string, encoding used for the request (default: 'utf-8')
        num_samples -- integer, sample paths drawn at prediction time (default: 100)
        quantiles -- list of strings, quantiles to return (default: ['0.1', '0.5', '0.9'])

        Return value: list of `pandas.DataFrame` objects, one per input series.
        """
        # Each forecast starts one time step after the last observed point.
        forecast_starts = [series.index[-1] + series.index.freq for series in ts]
        request_body = self.__encode_request(ts, cat, encoding, num_samples, quantiles)
        raw_response = super(DeepARPredictor, self).predict(request_body)
        return self.__decode_response(raw_response, forecast_starts, encoding)

    def __encode_request(self, ts, cat, encoding, num_samples, quantiles):
        # Build the JSON request body expected by the DeepAR inference endpoint.
        instances = []
        for idx, series in enumerate(ts):
            instances.append(series_to_obj(series, cat[idx] if cat else None))
        configuration = {'num_samples': num_samples,
                         'output_types': ['quantiles'],
                         'quantiles': quantiles}
        body = {'instances': instances, 'configuration': configuration}
        return json.dumps(body).encode(encoding)

    def __decode_response(self, response, prediction_times, encoding):
        # Produce one DataFrame per requested series, indexed by forecast dates.
        payload = json.loads(response.decode(encoding))
        frames = []
        for start, prediction in zip(prediction_times, payload['predictions']):
            horizon = pd.date_range(start=start, freq=self.freq, periods=self.prediction_length)
            frames.append(pd.DataFrame(data=prediction['quantiles'], index=horizon))
        return frames
# Attach a DeepARPredictor to the freshly deployed endpoint; requests and
# responses are exchanged as JSON.
predictor = DeepARPredictor(endpoint=best_tuning_job_name,
                            sagemaker_session=sess,
                            content_type='application/json')
# -
# Now we can use the previously created `predictor` object. We will predict only the first few time series, and compare the results with the actual data we kept in the test set.
predictor.set_prediction_parameters(freq, prediction_length)
# Forecast the first five series and compare with the held-out actuals.
list_of_df = predictor.predict(violation_list_training[:5])
actual_data = violation_list[:5]
for k, forecast in enumerate(list_of_df):
    plt.figure(figsize=(12,6), dpi=75, facecolor='w')
    plt.ylabel('Violations')
    plt.xlabel('Date')
    plt.title('Chicago Speed Camera Violations:' + unique_addresses[k])
    actual_data[k][-prediction_length-context_length:].plot(label='target')
    lower = forecast['0.1']
    upper = forecast['0.9']
    plt.fill_between(lower.index, lower, upper, color='y', alpha=0.5,label='80% confidence interval')
    forecast['0.5'].plot(label='prediction median')
    plt.legend()
    plt.show()
# ---
#
# ### Clean-up
#
# At the end of this exercise, delete the endpoint to avoid accumulating charges in your account.
# Tear down the real-time endpoint so it stops accruing charges.
predictor.delete_endpoint(endpoint_name)
|
introduction_to_applying_machine_learning/deepar_chicago_traffic_violations/deepar_chicago_traffic_violations.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <a href="http://cocl.us/pytorch_link_top">
# <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DL0110EN/notebook_images%20/Pytochtop.png" width="750" alt="IBM Product " />
# </a>
# <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DL0110EN/notebook_images%20/cc-logo-square.png" width="200" alt="cognitiveclass.ai logo" />
# <h1>Prebuilt Datasets and Transforms</h1>
# <h2>Table of Contents</h2>
# <p>In this lab, you will use a prebuilt dataset and then use some prebuilt dataset transforms.</p>
# <ul>
# <li><a href="#Prebuilt_Dataset">Prebuilt Datasets</a></li>
# <li><a href="#Torchvision">Torchvision Transforms</a></li>
# </ul>
# <p>Estimated Time Needed: <strong>10 min</strong></p>
#
# <hr>
# <h2>Preparation</h2>
# The following are the libraries we are going to use for this lab. The <code>torch.manual_seed()</code> is for forcing the random function to give the same number every time we try to recompile it.
# +
# These are the libraries will be used for this lab.
import torch
import matplotlib.pylab as plt
import numpy as np
# Fix the PRNG seed so runs of this notebook are reproducible.
torch.manual_seed(0)
# -
# This is the function for displaying images.
# +
# Show data by diagram
def show_data(data_sample, shape = (28, 28)):
    """Render one (image_tensor, label_tensor) sample as a grayscale image,
    with the numeric label shown in the plot title."""
    image_tensor, label_tensor = data_sample[0], data_sample[1]
    pixels = image_tensor.numpy().reshape(shape)
    plt.imshow(pixels, cmap='gray')
    plt.title('y = ' + str(label_tensor.item()))
# -
# <!--Empty Space for separating topics-->
# <h2 id="Prebuilt_Dataset">Prebuilt Datasets</h2>
# You will focus on the following libraries:
# +
# Run the command below when you do not have torchvision installed
# # !conda install -y torchvision
import torchvision.transforms as transforms
import torchvision.datasets as dsets
# -
# We can import a prebuilt dataset. In this case, use MNIST. You'll work with several of these parameters later by placing a transform object in the argument <code>transform</code>.
# +
# Import the prebuilt dataset into variable dataset
# Download the MNIST *test* split (train=False); ToTensor converts each
# PIL image to a 1x28x28 float tensor on access.
dataset = dsets.MNIST(
    root = './data',
    train = False,
    download = True,
    transform = transforms.ToTensor()
)
# -
# Each element of the dataset object contains a tuple. Let us see whether the first element in the dataset is a tuple and what is in it.
# +
# Examine whether the elements in dataset MNIST are tuples, and what is in the tuple?
# Each sample is an (image_tensor, label) pair; the prints inspect both parts.
print("Type of the first element: ", type(dataset[0]))
print("The length of the tuple: ", len(dataset[0]))
print("The shape of the first element in the tuple: ", dataset[0][0].shape)
print("The type of the first element in the tuple", type(dataset[0][0]))
print("The second element in the tuple: ", dataset[0][1])
print("The type of the second element in the tuple: ", type(dataset[0][1]))
print("As the result, the structure of the first element in the dataset is (tensor([1, 28, 28]), tensor(7)).")
# -
# As shown in the output, the first element in the tuple is a cuboid tensor. As you can see, there is a dimension with only size 1, so basically, it is a rectangular tensor.<br>
# The second element in the tuple is a number tensor, which indicates the real number the image shows. As the second element in the tuple is <code>tensor(7)</code>, the image should show a hand-written 7.
# <!--Empty Space for separating topics-->
# Let us plot the first element in the dataset:
# +
# Plot the first element in the dataset (a hand-written 7, per the label above)
show_data(dataset[0])
# -
# As we can see, it is a 7.
# Plot the second sample:
# +
# Plot the second element in the dataset
show_data(dataset[1])
# -
# <!--Empty Space for separating topics-->
# <h2 id="Torchvision"> Torchvision Transforms </h2>
# We can apply some image transform functions on the MNIST dataset.
# As an example, the images in the MNIST dataset can be cropped and converted to a tensor. We can use <code>transform.Compose</code> we learned from the previous lab to combine the two transform functions.
# +
# Combine two transforms: crop and convert to tensor. Apply the compose to MNIST dataset
# CenterCrop(20) keeps the central 20x20 pixels, so tensors become 1x20x20.
croptensor_data_transform = transforms.Compose([transforms.CenterCrop(20), transforms.ToTensor()])
dataset = dsets.MNIST(root = './data', train = False, download = True, transform = croptensor_data_transform)
print("The shape of the first element in the first tuple: ", dataset[0][0].shape)
# -
# We can see the image is now 20 x 20 instead of 28 x 28.
# <!--Empty Space for separating topics-->
# Let us plot the first image again. Notice that the black space around the <b>7</b> become less apparent.
# +
# Plot the first element in the dataset; pass shape=(20, 20) because the
# images were center-cropped above.
show_data(dataset[0],shape = (20, 20))
# +
# Plot the second element in the dataset
show_data(dataset[1],shape = (20, 20))
# -
# In the below example, we horizontally flip the image, and then convert it to a tensor. Use <code>transforms.Compose()</code> to combine these two transform functions. Plot the flipped image.
# +
# Construct the compose. Apply it on MNIST dataset. Plot the image out.
# RandomHorizontalFlip(p = 1) flips every image deterministically.
fliptensor_data_transform = transforms.Compose([transforms.RandomHorizontalFlip(p = 1),transforms.ToTensor()])
dataset = dsets.MNIST(root = './data', train = False, download = True, transform = fliptensor_data_transform)
show_data(dataset[1])
# -
# <!--Empty Space for separating topics-->
# <h3>Practice</h3>
# Try to use the <code>RandomVerticalFlip</code> (vertically flip the image) with horizontally flip and convert to tensor as a compose. Apply the compose on image. Use <code>show_data()</code> to plot the second image (the image as <b>2</b>).
# +
# Practice: Combine vertical flip, horizontal flip and convert to tensor as a compose. Apply the compose on image. Then plot the image
# Type your code here
# -
# Double-click __here__ for the solution.
# <!--
# my_data_transform = transforms.Compose([transforms.RandomVerticalFlip(p = 1), transforms.RandomHorizontalFlip(p = 1), transforms.ToTensor()])
# dataset = dsets.MNIST(root = './data', train = False, download = True, transform = my_data_transform)
# show_data(dataset[1])
# -->
# <!--Empty Space for separating topics-->
# <a href="http://cocl.us/pytorch_link_bottom">
# <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DL0110EN/notebook_images%20/notebook_bottom%20.png" width="750" alt="PyTorch Bottom" />
# </a>
# <h2>About the Authors:</h2>
#
# <a href="https://www.linkedin.com/in/joseph-s-50398b136/"><NAME></a> has a PhD in Electrical Engineering, his research focused on using machine learning, signal processing, and computer vision to determine how videos impact human cognition. Joseph has been working for IBM since he completed his PhD.
# Other contributors: <a href="https://www.linkedin.com/in/michelleccarey/"><NAME></a>, <a href="www.linkedin.com/in/jiahui-mavis-zhou-a4537814a"><NAME></a>
# <hr>
# Copyright © 2018 <a href="cognitiveclass.ai?utm_source=bducopyrightlink&utm_medium=dswb&utm_campaign=bdu">cognitiveclass.ai</a>. This notebook and its source code are released under the terms of the <a href="https://bigdatauniversity.com/mit-license/">MIT License</a>.
|
EdX/IBM DL0110EN - Deep Learning with Python and PyTorch/1.3.2_pre-Built Datasets_and_transforms_v2.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
# +
import argparse
from fastai.conv_learner import *
# -
from dataloader import PartDataset
from pointnet import PointNetDenseCls2
# +
# Segmentation datasets: 512 points sampled per ShapeNet 'Airplane' model.
num_points = 512
data_dir = 'E:/PROJECTS/NTUT/PointNet/pointnet1_pytorch/DATA/Shapenet/shapenetcore_partanno_segmentation_benchmark_v0'
trn_ds = PartDataset(root=data_dir, npoints=num_points, classification=False, class_choice=['Airplane'])
val_ds = PartDataset(root=data_dir, npoints=num_points, classification=False, class_choice=['Airplane'], train=False)
# Number of per-point segmentation classes -- presumably the Airplane part
# count; TODO confirm against the dataset metadata.
num_classes = 5
trn_dl = DataLoader(trn_ds, batch_size=4, shuffle=True, num_workers=0, pin_memory=True)
val_dl = DataLoader(val_ds, batch_size=32, shuffle=False, num_workers=0, pin_memory=True)
tes_dl = None
# fastai ModelData bundle wrapping the train/validation loaders.
model_data = ModelData('DATA/Shapenet/shapenetcore_partanno_segmentation_benchmark_v0', trn_dl, val_dl)
# +
# Per-point (dense) classifier trained with Adam + cross-entropy via fastai.
model = PointNetDenseCls2(num_points=num_points, k=num_classes)
optimizer = optim.Adam
criterion = F.cross_entropy
learner = Learner(model_data, BasicModel(to_gpu(model)), opt_fn=optimizer, crit=criterion)
# -
# LR range test: plot loss vs. learning rate to justify the value chosen below.
learner.lr_find()
learner.sched.plot()
lr = 5e-3
learner.fit(lrs=lr, n_cycle=2, cycle_len=1)
# pred_logits, y = learner.TTA(is_test=False) # is_test=False -> test on validation dataset; is_test=True -> test on test dataset
pred_log = learner.predict(is_test=False)
# NOTE(review): argmax over axis=1 and np.exp assume `predict` returns
# log-probabilities with classes on axis 1 -- confirm for the dense
# (per-point) segmentation output shape.
pred_labels = np.argmax(pred_log, axis=1)
pred_probs = np.exp(pred_log) # measure probability values
acc = accuracy_np(preds=pred_probs, targs=model_data.val_ds.targs) # evaluate accuracy(only available on valid dataset)
metrics.log_loss(y_true=model_data.val_y, y_pred=pred_probs) # evaluate error(only available on valid dataset)
pred_logits, y = learner.TTA(is_test=False)
|
pointnet_fastai.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:rqalpha]
# language: python
# name: conda-env-rqalpha-py
# ---
# +
#coding=utf-8
from rqalpha.api import *
import traceback
from datetime import *
from sqlalchemy import create_engine
import pandas as pd
from rqalpha.utils.logger import user_log
def dependency():
    """Return the list of upstream factors this factor depends on (none)."""
    return []
def market(market=90):
    """Map a JYDB SecuMarket code to its RQAlpha exchange suffix.

    83 -> 'XSHG' (Shanghai), 90 -> 'XSHE' (Shenzhen, the default);
    any other code yields an empty string.
    """
    suffix_by_code = {83: 'XSHG', 90: 'XSHE'}
    return suffix_by_code.get(market, "")
def compute(startdt,enddt,context):
    '''
    Compute daily total market value (TotalMV) per stock from JYDB.

    :param startdt: datetime, inclusive start of the trading-day range
    :param enddt: datetime, inclusive end of the trading-day range
    :param context: factor context whose config supplies the JYDB connection
    :return: DataFrame indexed by date, one column per stock code
             (codes suffixed with .XSHG / .XSHE)
    '''
    # context.config corresponds to the "extra" section of the configuration
    jydbConf = context.config.jydb
    _jyConnStr = "mysql+pymysql://%s:%s@%s:%s/%s"%(jydbConf.user,jydbConf.passwd,jydbConf.host,jydbConf.port,jydbConf.db)
    engine = create_engine(_jyConnStr)
    # Security-category / listed-sector filters -- presumably A shares on the
    # main/SME/ChiNext boards; TODO confirm these JYDB code meanings.
    _category = [1, ]
    _sectors = [1, 2, 6]
    # NOTE(review): SQL is assembled by string interpolation. The values are
    # internal constants and formatted dates here, but switch to bound
    # parameters if any input ever becomes user-controlled.
    _sql = "SELECT p.TradingDay as date,p.TotalMV as value,a.SecuCode as code,a.SecuMarket" \
           " FROM LC_DIndicesForValuation as p inner join secumain as a "\
           "on a.innerCode=p.innerCode where a.SecuMarket in (83,90) " \
           "and a.SecuCategory in (%s) and a.ListedSector in (%s) " \
           "and a.ListedState!=9 and p.TradingDay between '%s' and '%s' order by p.TradingDay asc" % (
        ",".join([str(i) for i in _category]), ",".join([str(i) for i in _sectors])
        ,startdt.strftime('%Y-%m-%d'),
        enddt.strftime('%Y-%m-%d'))
    # print(_sql)
    _res = pd.read_sql(sql=_sql,con = engine)
    # market = {90:"XSHE",83:"XSHG"}
    # Suffix each code with its exchange, then pivot to date x code.
    _res.code = _res.code + "." + _res.SecuMarket.apply(market)
    _res = _res.drop(['SecuMarket'],axis=1).set_index(['date','code']).unstack(level=-1)
    _res.columns = _res.columns.droplevel(level=0)
    return _res
# -
# test
# Smoke-test `compute` over a one-month window (credentials anonymised).
config= {"extra":{"jydb":{"host":"172.18.44.5","port":3306,"user":"liangh","passwd":"<PASSWORD>","db":"jydb"}}
    ,"factor_data_path":"../../factor_data","factor_data_init_date": "2017-01-01"}
from rqalpha.utils import RqAttrDict
conf = RqAttrDict(config)
from rqalpha.mod.rqalpha_mod_alphaStar_factors.factor_context import FactorContext
context = FactorContext(conf)
context.registerDepending(dependency())
res = compute(datetime(2017,1,1),datetime(2017,2,1),context)
res
# +
# evaluateFileDemo
# Run the factor-evaluation pipeline over this notebook file for 2010-2018.
from rqalpha.mod.rqalpha_mod_alphaStar_factors import evaluate_file
config = {
    "base": {
        "start_date": "2010-01-01",
        "end_date": "2018-04-01",
    },
}
factor_file_path = "./market_value.ipynb"
evaluate_file(factor_file_path=factor_file_path,config=config,config_file = "../config_factor.yml")
|
ipynbs/factors/market_value.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# To enable plotting graphs in Jupyter notebook
# %matplotlib inline
# +
# Numerical libraries
import numpy as np
# Import Linear Regression machine learning library
from sklearn.linear_model import LinearRegression
# to handle data in form of rows and columns
import pandas as pd
# importing ploting libraries
import matplotlib.pyplot as plt
import matplotlib.style
plt.style.use('classic')
#importing seaborn for statistical plots
import seaborn as sns
# -
# reading the CSV file into pandas dataframe
mpg_df = pd.read_csv("car-mpg.csv")
# Check top few records to get a feel of the data structure
mpg_df.head(50)
# drop the car name column as it is useless for the model
mpg_df = mpg_df.drop('car_name', axis=1)
mpg_df
# Replace the numbers in categorical variables with the actual country names in the origin col
mpg_df['origin'] = mpg_df['origin'].replace({1: 'america', 2: 'europe', 3: 'asia'})
mpg_df
# Convert categorical variable into dummy/indicator variables. As many columns will be created as distinct values
# This is also known as one-hot encoding. The column names will be America, Europe and Asia... with one-hot encoding
mpg_df = pd.get_dummies(mpg_df, columns=['origin'])
mpg_df
#Let's analyse the distribution of the dependent (mpg) column
mpg_df.describe().transpose()
mpg_df.dtypes
# +
# Note: HP column is missing the describe output. That indicates something is not right with that column
# +
#Check if the hp column contains anything other than digits
# run the "isdigit() check on 'hp' column of the mpg_df dataframe. Result will be True or False for every row
# capture the result in temp dataframe and dow a frequency count using value_counts()
# There are six records with non digit values in 'hp' column
temp = pd.DataFrame(mpg_df.hp.str.isdigit()) # if the string is made of digits store True else False in the hp column
# in temp dataframe
temp[temp['hp'] == False] # from temp take only those rows where hp has false
# -
# On inspecting records number 32, 126 etc, we find "?" in the columns. Replace them with "nan"
#Replace them with nan and remove the records from the data frame that have "nan"
mpg_df = mpg_df.replace('?', np.nan)
# +
#Let us see if we can get those records with nan
mpg_df[mpg_df.isnull().any(axis=1)]
# +
# There are various ways to handle missing values. Drop the rows, replace missing values with median values etc.
# +
#of the 398 rows 6 have NAN in the hp column. We will drop those 6 rows. Not a good idea under all situations
#note: HP is missing because of the non-numeric values in the column.
#mpg_df = mpg_df.dropna()
# -
#instead of dropping the rows, lets replace the missing values with median value.
# NOTE(review): 'hp' is still an object (string) column here, so its median
# is not meaningful until the astype('float64') conversion below -- confirm
# the fillna actually reaches 'hp' on the pandas version in use.
mpg_df.median()
# +
# replace the missing values in 'hp' with median value of 'hp' :Note, we do not need to specify the column names
# every column's missing value is replaced with that column's median respectively (axis =0 means columnwise)
#mpg_df = mpg_df.fillna(mpg_df.median())
mpg_df = mpg_df.apply(lambda x: x.fillna(x.median()),axis=0)
# -
mpg_df.dtypes
mpg_df['hp'] = mpg_df['hp'].astype('float64') # converting the hp column from object / string type to float
mpg_df.describe()
# +
# Let us do a correlation analysis among the different dimensions and also each dimension with the dependent dimension
# This is done using scatter matrix function which creates a dashboard reflecting useful information about the dimensions
# The result can be stored as a .png file and opened in say, paint to get a larger view
# Pairwise scatter/density panel over the first ten columns for a quick
# visual correlation check.
mpg_df_attr = mpg_df.iloc[:, 0:10]
#axes = pd.plotting.scatter_matrix(mpg_df_attr)
#plt.tight_layout()
#plt.savefig('d:\greatlakes\mpg_pairpanel.png')
sns.pairplot(mpg_df_attr, diag_kind='kde') # to plot density curve instead of histogram
#sns.pairplot(mpg_df_attr) # to plot histogram, the default
# +
#The data distribution across various dimensions except 'Acc' do not look normal
#Close observation between 'mpg' and other attributes indicate the relationship is not really linear
#relation between 'mpg' and 'hp' show hetroscedacity... which will impact model accuracy
#How about 'mpg' vs 'yr' surprising to see a positive relation
# +
# Copy all the predictor variables into X dataframe. Since 'mpg' is dependent variable drop it
# Copy all the predictor variables into X dataframe. Since 'mpg' is dependent variable drop it
X = mpg_df.drop('mpg', axis=1)
X = X.drop({'origin_america', 'origin_asia' ,'origin_europe'}, axis=1)
# Copy the 'mpg' column alone into the y dataframe. This is the dependent variable
y = mpg_df[['mpg']]
# +
#Let us break the X and y dataframes into training set and test set. For this we will use
#Sklearn package's data splitting function which is based on random function
from sklearn.model_selection import train_test_split
# +
# Split X and y into training and test set in 70:30 ratio (test_size=0.30)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30 , random_state=1)
# +
# invoke the LinearRegression function and find the bestfit model on training data
regression_model = LinearRegression()
regression_model.fit(X_train, y_train)
# +
# Let us explore the coefficients for each of the independent attributes
for idx, col_name in enumerate(X_train.columns):
    print("The coefficient for {} is {}".format(col_name, regression_model.coef_[0][idx]))
# +
# Let us check the intercept for the model
intercept = regression_model.intercept_[0]
print("The intercept for our model is {}".format(intercept))
# -
# R^2 on the training data
regression_model.score(X_train, y_train)
# +
# Model score - R2 or coeff of determination, on the held-out test data
# R^2 = 1 - RSS/TSS  (equivalently ESS/TSS)
regression_model.score(X_test, y_test)
# +
# So the model explains 85% of the variability in Y using X
# +
# ---------------------------------- Using Statsmodel library to get R type outputs -----------------------------
# +
# R^2 is not a reliable metric as it always increases with addition of more attributes even if the attributes have no
# influence on the predicted variable. Instead we use adjusted R^2 which removes the statistical chance that improves R^2
# Scikit does not provide a facility for adjusted R^2... so we use
# statsmodel, a library that gives results similar to
# what you obtain in R language
# This library expects the X and Y to be given in one single dataframe
# Fit an OLS model with statsmodels for R-style inferential output
# (statsmodels wants X and y in one dataframe).
data_train = pd.concat([X_train, y_train], axis=1)
data_train.head()
# -
import statsmodels.formula.api as smf
lm1 = smf.ols(formula= 'mpg ~ cyl+disp+hp+wt+acc+yr+car_type', data = data_train).fit()
lm1.params
print(lm1.summary()) #Inferential statistics
# +
# Let us check the sum of squared errors by predicting value of y for test cases and
# subtracting from the actual y for the test cases
mse = np.mean((regression_model.predict(X_test)-y_test)**2)
# +
# square root of mean_sq_error is the RMSE, i.e. avg deviation between predicted and actual
import math
math.sqrt(mse)
# +
# so there is avg of 3.0 (roundoff) mpg difference from real mpg on an avg
# -
# predict mileage (mpg) for a set of attributes not in the training or test set
y_pred = regression_model.predict(X_test)
# +
# Since this is regression, plot the predicted y value vs actual y values for the test data
# A good model's prediction will be close to actual leading to high R and R2 values
#plt.rcParams['figure.dpi'] = 500
plt.scatter(y_test['mpg'], y_pred)
# +
# ------------------------------------------------- ITERATION 2 ---------------------------------------------------
# -
# How do we improve the model? the R^2 is .844, how do we improve it
# The indpendent attributes have different units and scales of measurement
# It is always a good practice to scale all the dimensions using z-scores or some other method to address the problem of different scales
# +
from scipy.stats import zscore
# Standardise every column to z-scores so all attributes share one scale.
mpg_df_scaled = mpg_df.apply(zscore)
# +
#convert the numpy array back into a dataframe
mpg_df_scaled = pd.DataFrame(mpg_df_scaled, columns=mpg_df.columns)
# +
#browse the contents of the dataframe. Check that all the values are now z scores
mpg_df_scaled
# +
# Copy all the predictor variables into X dataframe. Since 'mpg' is dependent variable drop it
X = mpg_df_scaled.drop('mpg', axis=1)
X = X.drop({'origin_america', 'origin_asia' ,'origin_europe'}, axis=1)
# Copy the 'mpg' column alone into the y dataframe. This is the dependent variable
y = mpg_df_scaled[['mpg']]
# +
# Split X and y into training and test set in 70:30 ratio (test_size=0.30)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=1)
# +
# invoke the LinearRegression function and find the bestfit model on training data
regression_model = LinearRegression()
regression_model.fit(X_train, y_train)
# +
# Let us explore the coefficients for each of the independent attributes
for idx, col_name in enumerate(X_train.columns):
    print("The coefficient for {} is {}".format(col_name, regression_model.coef_[0][idx]))
# +
intercept = regression_model.intercept_[0]
print("The intercept for our model is {}".format(intercept))
# +
# Model score - R2 or coeff of determination, on the test data
# R^2 = 1 - RSS/TSS
regression_model.score(X_test, y_test)
# +
# Let us check the sum of squared errors by predicting value of y for the test cases and
# subtracting from the actual y for the test cases
mse = np.mean((regression_model.predict(X_test)-y_test)**2)
# +
# square root of mean_sq_error is the RMSE, i.e. avg deviation between predicted and actual
import math
math.sqrt(mse)
# -
# predict mileage (mpg) for a set of attributes not in the training or test set
y_pred = regression_model.predict(X_test)
# Since this is regression, plot the predicted y value vs actual y values for the test data
# A good model's prediction will be close to actual leading to high R and R2 values
plt.scatter(y_test['mpg'], y_pred)
# +
#---------------------------------------Iteration 4 (try to fit quadratic curves) -------------------------------------------------------
# +
# Since the scatter matrix indicated a non-linear reverse relation between mpg and disp, hp, wt...
# reflecting that below. Observe the - in front of the disp, hp and wt attributes to indicate negative relation and raised to power 1/2
# may be raised to power 2 to check....
# Convert the year column to age by subtracting the value from year 2000 (anchor)
# Build a transformed feature frame: negative reciprocal powers of disp/hp/wt,
# a positive power of acc, and an 'age' feature derived from the year column.
mpg_df_quadratic = pd.DataFrame([mpg_df["mpg"],mpg_df["cyl"], -(1/mpg_df["disp"]**1.1), -(1/mpg_df["hp"]**1.2), -(1/mpg_df["wt"]**1.3), (mpg_df["acc"]**1.1)]).T
mpg_df_quadratic["age"]= ((2000 - mpg_df["yr"] )- 1900)
# -
mpg_df_quadratic.head()
# +
# Copy all the predictor variables into X dataframe. Since 'mpg' is dependent variable drop it
X = mpg_df_quadratic.drop('mpg', axis=1)
# Copy the 'mpg' column alone into the y dataframe. This is the dependent variable
y = mpg_df_quadratic[['mpg']]
# Split X and y into training and test set in 75:25 ratio
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=1)
# +
# invoke the LinearRegression function and find the bestfit model on training data
#regression_model = LinearRegression()
# NOTE(review): this refits the existing `regression_model` instance rather
# than creating a fresh one; sklearn's fit() discards previous coefficients,
# so the result is the same either way.
regression_model.fit(X_train, y_train)
# +
# Let us check the sum of squared errors on the *training* cases by predicting
# y for X_train and subtracting the actual y_train (the code below uses the
# training split, not the test split).
mse = np.mean((regression_model.predict(X_train)-y_train)**2)
# +
# square root of mean_sq_error is the RMSE, i.e. avg deviation between predicted and actual
import math
math.sqrt(mse)
# +
# Model score - R2 or coeff of determination, on the training data
# R^2 = 1 - RSS/TSS
regression_model.score(X_train, y_train)
# -
# predict mileage (mpg) for a set of attributes not in the training or test set
y_pred = regression_model.predict(X_test)
# Since this is regression, plot the predicted y value vs actual y values for the test data
# A good model's prediction will be close to actual leading to high R and R2 values
plt.scatter(y_test['mpg'], y_pred)
# +
# Model score - R2 or coeff of determination, on the test data
# R^2 = 1 - RSS/TSS
regression_model.score(X_test, y_test)
# -
# NOTE(review): in patsy formulas '-' removes terms and '^' is not numeric
# exponentiation; powers need I(disp**2) etc. Also `data_train` here is the
# iteration-1 (unscaled, non-quadratic) frame -- confirm this is intended.
lm1 = smf.ols(formula= 'mpg ~ cyl-disp^2-hp^2+wt^2+acc+yr+car_type', data = data_train).fit()
lm1.params
print(lm1.summary())
|
foundation/applied-statistics/class_material_day_3/Linear_regression_mpg.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pdb
import os
import json
from datetime import datetime
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow.keras.backend as tf_bk
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.optimizers import (Adam, Nadam, )
from tensorflow.keras.utils import plot_model
from hypermodel_utils import load_args, load_data_chunks
from sklearn.model_selection import StratifiedShuffleSplit
from metrics import margin_loss
import mlflow
from mlflow import keras as mlflow_keras
from promoter_data import PromoterData, DataChunk
from my_generator import AugmentedGeneratorMultipleInputs
import mymodels.parent_models as mymodels
from ml_logs import MlLogs
from promoter_utils import (
train_test,
get_model,
get_test_stats,
)
from hypermodel_utils import (
load_args,
)
import pprint
# Set seeds
# Seed NumPy and TensorFlow so runs are reproducible; the TF seeding API
# moved between TF1 and TF2, hence the version check.
np.random.seed(1337)
if int(str(tf.__version__).split('.')[0]) >= 2:
    from tensorflow import random as tf_random
    tf_random.set_seed(3)
else:
    from tensorflow import set_random_seed
    set_random_seed(3)
# + pycharm={"name": "#%%\n"}
# Load arguments and experiment configurations
config_file_path = 'bacillus_exp_07.yaml'
args_str = '--config {}'.format(config_file_path)
args = load_args(args_str)
# + pycharm={"name": "#%%\n"} endofcell="--"
# -
# Echo every parsed argument with its value for quick inspection.
for name, value in vars(args).items():
    print(name, value)
# --
|
promoter_project.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# !pip install "pydicom<2.0.0"
# !pip install dicompyler-core==0.5.5
# +
from urllib import request
import numpy as np
import matplotlib.pyplot as plt
from dicompylercore import dicomparser, dvh, dvhcalc
# +
dose_url = 'https://zenodo.org/record/4042842/files/RD.2.16.840.1.114337.1.1.1600065398.2_Anonymised.dcm?download=1'
structure_url = 'https://zenodo.org/record/4042842/files/RS.1.2.840.10008.5.1.4.1.1.481.3.1600145017_Anonymised.dcm?download=1'
dose_filepath = 'dose.dcm'
structure_filepath = 'structure.dcm'
# -
request.urlretrieve(dose_url, dose_filepath)
request.urlretrieve(structure_url, structure_filepath)
dp = dicomparser.DicomParser(structure_filepath)
structures = dp.GetStructures()
# +
# structures
# -
structure_names_to_pull = ['11', '12', '13', '14', '15', '16', '17', '18']
name_to_id = {
item['name']: key
for key, item in structures.items()
}
name_to_id
# +
ids_to_pull = [
name_to_id[name]
for name in structure_names_to_pull
]
ids_to_pull
# -
rtdose = dicomparser.DicomParser(dose_filepath)
rtdose.GetDVHs()
# +
dvhs = []
for name in structure_names_to_pull:
structure_id = name_to_id[name]
dvhs.append(dvh.DVH.from_dicom_dvh(rtdose.ds, structure_id))
# -
dd = rtdose.GetDoseData()
# NOTE(review): `pydicom` is used here but never imported in this notebook --
# add `import pydicom` at the top (read_file is the older alias of dcmread).
dose_dataset = pydicom.read_file(dose_filepath, force=True)
structure_dataset = pydicom.read_file(structure_filepath, force=True)
# +
# Names of all ROIs defined in the RT structure set.
structure_names = [
    item.ROIName for item in structure_dataset.StructureSetROISequence
]
structure_names
# -
# NOTE(review): `pymedphys` is also used without an import -- confirm intended.
(dose_z, dose_y, dose_x), dose = pymedphys.dicom.zyx_and_dose_from_dataset(dose_dataset)
# +
def pull_coords_from_contour_sequence(contour_sequence):
    """Split a DICOM ContourSequence into per-slice x, y and z arrays.

    Each item's ContourData is a flat [x0, y0, z0, x1, y1, z1, ...] list;
    elements at offsets 0, 1 and 2 (stride 3) belong to x, y and z.
    Returns three lists of numpy arrays, one entry per contour slice.
    """
    xs, ys, zs = [], [], []
    for item in contour_sequence:
        coords = np.asarray(item.ContourData)
        xs.append(coords[0::3])
        ys.append(coords[1::3])
        zs.append(coords[2::3])
    return xs, ys, zs
def get_roi_contour_sequence_by_name(structure_name, dcm_struct):
    """Return the ROIContourSequence entry whose ROI name matches.

    Looks the (case sensitive) name up in the StructureSetROISequence and
    maps its ROI number to the corresponding contour entry. Raises
    ValueError when the name is not present.
    """
    name_to_number = {}
    for structure_set in dcm_struct.StructureSetROISequence:
        name_to_number[structure_set.ROIName] = structure_set.ROINumber
    number_to_contour = {}
    for contour in dcm_struct.ROIContourSequence:
        number_to_contour[contour.ReferencedROINumber] = contour
    if structure_name not in name_to_number:
        raise ValueError("Structure name not found (case sensitive)")
    return number_to_contour[name_to_number[structure_name]]
def pull_structure(structure_name, dcm_struct):
    """Return the x, y, z contour coordinates of the named structure."""
    roi = get_roi_contour_sequence_by_name(structure_name, dcm_struct)
    return pull_coords_from_contour_sequence(roi.ContourSequence)
# -
# NOTE(review): "stucture_x" is a typo for "structure_x" (used consistently below).
stucture_x, structure_y, structure_z = pull_structure('11', structure_dataset)
# z-coordinate of the (single) slice this contour lies on.
z_value = np.unique(structure_z[0])
z_value
# Index of the dose-grid slice at that z position.
index = np.where(dose_z == z_value)[0][0]
index
# +
# Overlay the structure outline on the dose distribution of that slice.
plt.contourf(dose_x, dose_y, dose[index,:,:], 100)
plt.plot(stucture_x[0], structure_y[0])
plt.ylim([-50, -350])
plt.axis('equal')
# +
# Patient-coordinate -> pixel-index transform built from DICOM geometry tags.
position = dose_dataset.ImagePositionPatient
spacing = dose_dataset.PixelSpacing
orientation = dose_dataset.ImageOrientationPatient
dx, dy = spacing
Cx, Cy, *_ = position
Ox, Oy = orientation[0], orientation[4]
# NOTE(review): `x` and `y` are not defined anywhere in this notebook --
# presumably the structure contour coordinates (stucture_x[0], structure_y[0]);
# confirm before running.
r = (y - Cy) / dy * Oy
c = (x - Cx) / dx * Ox
# -
dose_dataset.pixel_array.shape
# NOTE(review): `skimage` is never imported, and np.array(zip(r, c)) builds a
# 0-d object array on Python 3 -- polygon2mask needs an (N, 2) array such as
# np.column_stack([r, c]).
skimage.draw.polygon2mask(dose_dataset.pixel_array.shape, np.array(zip(r, c)))
|
prototyping/lectures/archive/01-extract-mean-doses-from-structure.ipynb
|
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .cpp
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: C++14
// language: C++14
// name: xcpp14
// ---
// # The `Array` class
// The `Array` template class is very similar to `Matrix` class. However, the former is capable of perform coefficient-wise operations in a fashion that does not makes sense for Linear Algebra, while the last must respect all Linear Algebra requirements. I will provide a brief overview of `Array` class and its the main features.
//
// As usual, all the text is heavily based -- sometimes entirely copied -- from the [Eigen's documentation](https://eigen.tuxfamily.org/dox/group__TutorialArrayClass.html).
// First of all, let's include our necessary libs:
// +
#include <iostream>
#include <eigen3/Eigen/Dense>
using namespace Eigen;
// -
// ## Array types
// As aforecited, the `Array` class is quite similar to `Matrix` class. The template parameters are the same and, like `Matrix`, the first three parameters are mandatory. Just a recall, the `Array` class has:
//
// ```C++
// Array<typename Scalar, int RowsAtCompileTime, int ColsAtCompileTime>
// ```
//
// The last three parameters are omitted, but they are the same as for `Matrix` (see `matrix-class.ipynb`).
//
// Eigen provides some convenience `typedef`s for `Array`s as well. Nonetheless, there are some differences. For 1D arrays, the pattern `ArrayNt` is used, where `N` and `t` are the size and the scalar type, respectively. For 2D arrays, the pattern is `ArrayNNt`, explicit saying the size of the two dimensions. See some examples below. The initialization is the same as for `Matrix`.
// +
ArrayXd arr1 = ArrayXd::Random(3);
std::cout << "arr1 =\n" << arr1;
// +
Array33f arr2;
arr2 << 1, 2, 3,
4, 5, 6,
7, 8, 9;
std::cout << "arr2 =\n" << arr2;
// +
auto arr3 = (Array2f::Random(2)).eval();
std::cout << "arr3 =\n" << arr3;
// -
// ## Accessing values
// As for `Matrix`, (overloaded) parenthesis operator is provided to write and read the coefficients of an array.
// +
Array22f arr4 = Array22f::Random();
std::cout << "Element (1, 1) of arr4 is " << arr4(1, 1) << std::endl;
std::cout << "and the whole arr4 is:\n" << arr4;
// -
// And defining an array:
// +
ArrayXXd arr5(2, 2);
arr5(0, 0) = 2; arr5(0, 1) = -arr5(0, 0);
arr5(1, 0) = 1; arr5(1, 1) = arr5(0, 0) * arr5(0, 1) - arr5(1, 0);
std::cout << "arr5 =\n" << arr5;
// -
// ## Addition and subtraction
// These operations remain the same as for `Matrix`. The only requirement is that the arrays under operation have the same size, so the addition or subtraction is performed coefficient-wise.
// +
ArrayXXf arr6(3, 3), arr7(3, 3);
arr6 << 1, 2, 3, 4, 5, 6, 7, 8, 9;
arr7 << 1, 2, 3,
1, 2, 3,
1, 2, 3;
std::cout << "arr6 + arr7 =\n" << arr6 + arr7;
// -
// A feature which is not directly available for `Matrix` is the addition of a scalar into an array. This is done in coefficient-wise manner. See:
std::cout << "arr6 - 2 =\n" << arr6 - 2;
// ## Array multiplication
// The multiplication of an `Array` by a scalar is the same as for `Matrix`. However, when you multiply two `Array`s, the behavior is not the same as if you multiply two `Matrix`s! For the last, multiplication is interpreted as "matrix product", while for `Array`s it is a coefficient-wise product. See below:
// +
Array22d arr8, arr9;
Matrix2d mat1, mat2;
// Array version
arr8 << 1, 2,
3, 4;
arr9 << 5, 6,
7, 8;
// Matrix version
mat1 << 1, 2,
3, 4;
mat2 << 5, 6,
7, 8;
// -
// * Array multiplication:
std::cout << "arr8 * arr9 =\n" << arr8 * arr9;
// * Matrix multiplication:
std::cout << "mat1 * mat2 =\n" << mat1 * mat2;
// Note that for array multiplication, the dimension of the arrays must match!
// ## Other coefficient-wise operations
// In fact, all operations over `Array`s are done coefficient-wise, including methods. Thus, every operation/method will be performed in a coefficient-wise manner. Check the example below:
// +
ArrayXf arr10 = ArrayXf::Random(5);
std::cout << "arr10 =\n" << arr10;
// -
// * Absolute value of the entries:
std::cout << "abs(arr10) =\n" << arr10.abs();
// * The square of the absolute values:
std::cout << "sqrt(abs(arr10)) =\n" << arr10.abs().sqrt();
// * The min values in each entry of two arrays:
// +
auto arr11 = arr10.abs().sqrt();
std::cout << "min entries =\n" << arr10.min(arr11);
// -
// A list with coefficient-wise operations can be found in the [Eigen documentation](https://eigen.tuxfamily.org/dox/group__QuickRefPage.html).
// ## Converting between `Array` and `Matrix` expressions
// Sometimes you may want to operate like `Array`, in a coefficient-wise fashion, and other times you may need to operate as `Matrix`, in a Linear Algebra context. If this is the case, Eigen provide methods to convert `Array` expressions in `Matrix` expressions and vice-versa. `Array` expressions have the method `.matrix()`, while `Matrix` expressions have `.array()`. Through these methods, you can access all necessary methods which are proper to each class! Let's see a quick example comparing multiplications:
// * A regular matrix multiplication:
// +
MatrixXf a(2, 2), b(2, 2);
a << 1, 2,
3, 4;
b << 5, 6,
7, 8;
std::cout << "a * b =\n" << a * b;
// -
// * A component-wise multiplication through conversion to array:
std::cout << "a * b =\n" << a.array() * b.array();
// * Alternative way to perform coefficient-wise operations with prefix `cwise` in `Matrix`'s methods:
std::cout << "a * b =\n" << a.cwiseProduct(b);
// This last example show a very useful way to perform coefficient-wise operations. Some of the coefficient-wise methods can be accessed by `Matrix` expressions just considering the prefix `cwise`. See the documentation for available methods.
//
// Analogously, you can convert `Array`s. For instance:
// +
ArrayXXf c(2, 2), d(2, 2);
c << 1, 2,
3, 4;
d << 5, 6,
7, 8;
std::cout << "c * d =\n" << c * d;
// -
// Now converted:
// Multiply the arrays as matrices by converting them with .matrix().
// Bug fix: the original reused `a` and `b` here although the label (and the
// surrounding text) refer to the arrays `c` and `d` defined just above.
std::cout << "c * d =\n" << c.matrix() * d.matrix();
// Such conversions can occur in any expression. But be aware that Eigen forbids operations between `Array` objects and `Matrix` objects. You must convert all operands to the same object class.
// It also works in 1D arrays:
// +
ArrayXd test(2);
test << 1, 2;
std::cout << "test =\n" << test;
// -
std::cout << "test as matrix (vector) =\n" << test.matrix();
|
eigen/notebooks/04-array-class.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from sympy import *
import matplotlib.pyplot as plt
import numpy as np
# +
alpha, gamma, a, b, c, d = symbols(
'alpha gamma a b c d', float=True
)
t = Symbol('t')
p = Function('p', is_real = true)(t)
D = Function('D', is_real = true)(p)
S = Function('S', is_real = true)(p)
D = -a*p + b
S = c*p + d
z = Function('z', is_real = true)(p)
z = D - S
class BasicOperationsForGivenODE:
    """
    The constructor prepares helper objects for the autonomous equation
        p'(t) = alpha * F(z(p(t))),
    where z(p) = D(p) - S(p) = (b-d) - (a+c)p,  p = p(t),  t >= 0;
    a, b, c, d > 0 are the parameters of the linear demand and supply
    functions; gamma > 0 is such that p(0) = gamma; F is such that
    F(0) = 0, F(x) = y and sign(x) = sign(y).
    """
    def __init__(self, F):
        # Declare F symbolically first, then bind the concrete expression.
        self.F = Function('F', is_real = true)(z)
        self.F = F
        self.diffeq = Eq(p.diff(t), alpha * self.F)
        # General solution and the solution of the Cauchy problem p(0) = gamma.
        self.sol_non = dsolve(self.diffeq)
        self.sol_chy = dsolve(self.diffeq, ics={p.subs(t, 0): gamma})
    # In what follows:
    #   s   - a mapping of numeric values for each parameter
    #         (may be omitted to obtain the general result)
    #   chy - whether to take the initial condition of the autonomous
    #         equation into account
    def get_solution(self, chy: bool = False, s: dict = {}):
        """
        Solve the given ODE, with or without the Cauchy initial condition.
        """
        # NOTE: the mutable default `s={}` is tolerable here because `s` is
        # only read (passed to .subs), never mutated.
        sol = self.sol_chy if chy else self.sol_non
        if isinstance(sol, Equality):
            return sol.subs(s)
        # dsolve may return a list of solution branches; substitute into each.
        for i, sl in enumerate(sol):
            sol[i] = sl.subs(s)
        return sol
    def get_equation(self, s: dict = {}):
        """
        Return the general form of the differential equation with the
        given parameter values substituted in.
        """
        return factor(self.diffeq).subs(s)
    def get_stable_points(self, s: dict = {}):
        """
        Solve the algebraic equation z(p) = 0 for the price function and
        return the equilibrium point (solution set).
        """
        return solveset(z, p).subs(s)
    @staticmethod
    def rhs_solution_lambdify(diffeq_sol, input_array, alph, params_dict, chy: bool = True):
        """
        Turn the right-hand side of an ODE solution into a numeric function
        and evaluate it on a numpy array.
        """
        #sol = self.sol_chy if chy else self.sol_non
        sol = diffeq_sol
        sol_rhs = sol.rhs.subs(params_dict).subs(
            {alpha: alph}
        )
        return lambdify(t, sol_rhs, 'numpy')(input_array)
# -
def fast_plot(x, array_of_alphas, case_string, ode_cls, sol = None):
    """
    Plot the price trajectory p(t) for each adaptation coefficient in
    array_of_alphas, for the parameter scenario named by case_string.

    NOTE(review): relies on the module-level dict `params_cases` (defined
    later in the notebook) for the parameter lookup -- consider passing it
    in explicitly.
    """
    plt.figure(figsize=(16, 10))
    plt.grid(1)
    plt.xlabel("Time, t", fontdict={'fontsize': 14})
    plt.ylabel("Price, p(t)", fontdict={'fontsize': 14})
    # Solve the Cauchy problem once unless a pre-computed solution is given.
    diffeq_sol = ode_cls.get_solution(chy = True, s = {}) if sol is None else sol
    for alph in array_of_alphas:
        plt.plot(x, ode_cls.rhs_solution_lambdify(diffeq_sol, x, alph, params_cases[case_string]), label='α = %.2f' % alph)
    plt.legend(loc='upper right', prop={'size': 16})
    plt.title(
        "Price behaviour depending on adaptation coefficient change",
        fontdict={'fontsize': 16}
    )
    plt.show()
# +
# Time grid, initial price and the adaptation coefficients to compare.
t_space = np.linspace(0, 1.5, 100)
gamma_global = 10
alphas = [0.25, 1, 1.75]
# Three demand/supply parameter scenarios (a, b, c, d), all sharing p(0) = gamma.
params_cases = {
    'case1': {a: 10, b: 15, c: 5, d: 10, gamma: gamma_global},
    'case2': {a: 8, b: 12, c: 8, d: 10, gamma: gamma_global},
    'case3': {a: 6, b: 5, c: 7, d: 5, gamma: gamma_global}
}
# Two reaction functions: linear F1(z) = z and cubic F2(z) = z^3.
F1 = Function('F1', is_real = true)(z)
F1 = z
F2 = Function('F2', is_real = true)(z)
F2 = z*z*z
# -
sd = BasicOperationsForGivenODE(F1)
F1
sd.get_solution({})
fast_plot(t_space, alphas, 'case1', sd)
hd = BasicOperationsForGivenODE(F2)
F2
sol1, sol2 = hd.get_solution(chy=True, s={})
sol1
Eq(p.diff(t), alpha*(b-a*p)**3)
ss = dsolve(Eq(p.diff(t), alpha*(b-a*p)**3), p)
ss[0]
ss[1]
ssc = dsolve(Eq(p.diff(t), (b-a*p)**3), p, ics = {p.subs(t, 0): 10})
ssc[0]
(1/(2*(a+c)**3))*((gamma-((b-d)/(a+c)))**(-2))
diffeq_sol_z3 = Eq(p, ((b-d)/(a+c)) + 1/sqrt(2*((a+c)**3)*(alpha*t*(1/(2*(a+c)**3))*((gamma-((b-d)/(a+c)))**(-2)))))
diffeq_sol_z3
t_space[0] = t_space[0] - 0.0001
fast_plot(t_space, alphas, 'case1', sd, sol=diffeq_sol_z3)
|
diffeq/diffeq.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Numpy...
import numpy as np
# <img src="https://image.slidesharecdn.com/numpytalksiam-110305000848-phpapp01/95/numpy-talk-at-siam-17-728.jpg?cb=1299283822">
# # Boolean Operations
a = np.arange(1,28).reshape((3,3,3))
a
a % 3 == 0
print(a)
a[a%3==0] #InLine Operation
print(a)
a[(a%3==0) & (a%5==0)] #InLine Operation
print(a)
a[~a%3==0]
print(a)
a[~(a%3==0)]
print(a)
a[a%3!=0]
a = np.arange(1,7).reshape((2,3,1))
a
# ## Fancy Slicing
a = np.arange(10,3*3*3+10).reshape(3,3,3)
print(a)
print(a)
a[1,2,0] #simple Slicing
print(a)
a[::-1]
print(a)
a[:,::-1,:]
print(a)
a[[1,2,0]] # Fancy slicing ---> rearranging #InLine Operation
print(a)
a[[0,1,2],[0,1,2]]
print(a)
a[[0,1,2],[0,1,2],]
print(a)
a[[0,1,2],[0,2,1],]
print(a)
a[[1,2,0]][:,[2,1,0]]
print(a)
a[[1,2,0]][:,[-1,0]]
print(a)
a[[1,2,0]][:,[-1]]
print(a)
a[[1,2,0]][:,::-1]
print(a)
a[[1,2,0]][:,-1]
print(a)
a[[1,2,0],:,::-1]
# +
names = np.array(["Haseeb", "Qasim","Hamza","Ali","Haseeb","Abid"])
data = np.random.randn(6,4)
print(names, data, sep="\n\n\n")
# -
names == "Haseeb"
data[[ True, False, False, False, True, False]]
data[names=="Haseeb"]
data[names=="Ali"]
data[(names=="Haseeb") | (names=="Abid")]
data[(names!="Haseeb") | (names=="Abid")]
data[(names!="Haseeb") | (names!="Abid")]
data[(names=="Haseeb") | (names=="Abid")][:,-1]
data[(names=="Haseeb") | (names=="Abid")][:,[-1]]
data[(names=="Haseeb") | (names=="Abid")][:,[0, -1]]
l = [1,3,5,3,5]
l[:3]
l[:3] = 5 #you can't assign more than 1 value without iteration in list
l = [1,3,5,3,5]
l[0] = 5
l
l = np.array([1,3,5,3,5])
l[:3]
l[:3] = 5 #you can assign more than 1 value without iteration in np-array
l
l
# +
a = np.arange(8*4).reshape((8,4))
print(a)
# Assigning a scalar to row a[i] broadcasts it across the whole row.
for i in range(8):
    a[i] = i
print(a)
# +
a = np.arange(8*4).reshape((8,4))
print(a)
for i in range(len(a)):
a[i] = i
print(a)
# +
a = np.arange(8*4).reshape((8,4))
print(a)
for i in range(a.shape[0]):
a[i] = i
print(a)
# +
a = np.arange(8*4).reshape((8,4))
print(a)
for i, _ in enumerate(a):
a[i] = i
print(a)
|
Day_33(Numpy).ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
import sys
module_path = os.path.abspath('..')
sys.path.append(module_path)
from lc.measurements import CurveMeasurements
from lc.variance import ErrorMeanVarianceEstimator
import matplotlib
import matplotlib.pyplot as plt
# Load error measurements using `CurveMeasurements`. See `notebooks/measurements.ipynb` for more about reading error measurements.
curvems = CurveMeasurements()
curvems.load_from_json('../data/no_pretr_ft.json')
print(curvems)
# `ErrorMeanVarianceEstimator` computes the mean and variance estimates needed for computing a learning curve. Two kinds of variances are computed for each train set size -
# - `variance`: unbiased sample variance of errors
# - `smoothed_variance`: variance estimated by fitting $v_0 + v_1/n$ to sample variances. Here, $n$ denotes the number of training samples. We set $v_0$, the variance at infinite training samples, to a small value such as $0.02$ and then estimate $v_1$ using a weighted least squares fit. Since variance of sample variance is inversely proportional to $N-1$, where $N$ is the number of samples used to compute the sample variance, we use `num_ms`$-1$ as weights.
err_mean_var_estimator = ErrorMeanVarianceEstimator(v_0=0.02)
err_mean_var_estimator.estimate(curvems)
print(curvems)
print('v_0:',err_mean_var_estimator.v_0)
print('v_1:',err_mean_var_estimator.v_1)
err_mean_var_estimator.visualize(curvems)
|
notebooks/variance.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#<NAME>
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Load the housing data: col 0 = flat size, col 1 = bedrooms, col 2 = price.
data=pd.read_csv('ex1data2.txt',header=None)
X=data.iloc[:,[0,1]]
Y=data.iloc[:,[2]]
data.head(3)# first column size of flat, second column no of bedrooms, third column price of the flat
X=np.array(X)
# NOTE(review): this standardises with the mean/std of the WHOLE matrix,
# not per feature; column-wise scaling would use np.mean(X, axis=0) and
# np.std(X, axis=0) -- confirm which was intended.
X=(X-np.mean(X))/np.std(X)
Y=np.array(Y)
m=len(Y)
# Prepend a column of ones for the intercept term.
ones=np.ones((m,1))
X=np.hstack((ones,X))
# Learning rate, initial parameters and number of gradient-descent steps.
alpha=0.01
theta=np.zeros((X.shape[1],1))
iteration=400
# gradient calculation
def grad_cal(theta, X, Y):
    """Return the unscaled gradient X^T (X·theta - Y) of the squared-error cost."""
    residual = X.dot(theta) - Y
    return X.T.dot(residual)
# theta update and cost calculation
cost=np.zeros((iteration,1))
for i in range(iteration):
    theta_old=grad_cal(theta,X,Y)
    # Standard gradient-descent step scaled by the learning rate alpha / m.
    theta+=-(alpha/m)*theta_old
#    print(theta)
    # Mean squared-error cost J(theta) = sum((X·theta - Y)^2) / (2m).
    z=X.dot(theta)-Y
    cost[i,0]=np.sum(np.power(z,2))/(2*m)
## Plotting the cost against each iteration
xa=np.arange(iteration)
plt.scatter(xa,cost)
# NOTE(review): `plt.show` is missing parentheses -- this references the
# function without calling it, so no figure is forced open here.
plt.show
## Plotting the predicted prices VS original prices
xa=np.arange(m)
plt.plot(xa,Y)
plt.plot(xa,X.dot(theta))
plt.show
|
Multivariate Linear Regression.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
dirname = './data/'
files = [f for f in os.listdir(dirname) if f.endswith('.csv')]
files
dfs = []
for f in files:
day = f[:-4]
df = pd.read_csv(dirname + f, sep=';')
df['customer_no'] = day + '_' + df['customer_no'].astype(str)
dfs.append(df)
df = pd.concat(dfs)
df.shape
df.head()
df['timestamp'] = pd.to_datetime(df['timestamp'])
df.set_index('timestamp', inplace=True)
df.head(2)
# Add missing timeslots for each customer & add 'prev_location'
# (per-customer 60-second resampling with forward fill).
df = df.groupby('customer_no')[['location']].resample(rule='60S').ffill()
df['prev_location'] = df['location'].shift()
df.head()
# Clean 'prev_location' column: where the previous row was 'checkout' but the
# current one is not, the shift crossed a customer boundary -- presumably the
# start of a new visit, so mark it as 'entrance' (confirm against the data).
mask1 = df['prev_location'] == 'checkout'
mask2 = df['location'] != 'checkout'
df.loc[mask1 & mask2, 'prev_location'] = np.nan
df['prev_location'].fillna('entrance', inplace=True)
df.head(10)
# Crosstab
TM = pd.crosstab(df['location'], df['prev_location'], normalize=1)
TM.sum()
# Final transition matrix
TM
# Longer version without crosstab
df['dummy'] = 1
TM_abs = df.groupby(['location', 'prev_location']).count().unstack()
TM_abs.fillna(0, inplace=True)
TM_abs
TM2 = TM_abs.apply(lambda x: x/x.sum(), axis=0)
TM2
TM2.sum()
filename = 'transition_probabilities.csv'
TM.to_csv(filename)
|
transition-probabilities.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: graco
# language: python
# name: graco
# ---
# +
from scipy.spatial.distance import squareform, pdist, cdist
from itertools import islice, combinations, product
from pyclustering.cluster.kmedoids import kmedoids
from collections import defaultdict
from sklearn.cluster import KMeans
from scipy.stats import hypergeom
from goatools import obo_parser
from functools import partial
import os
import time
import graco
import numpy as np
import pandas as pd
import seaborn as sns
import networkx as nx
import matplotlib.pyplot as plt
# +
# %matplotlib inline
sns.set()
pd.set_option("display.max_columns", 50)
DATA_DIRECTORY = "/home/clusterduck123/Desktop/git/supplements/data"
CPP_DIRECTORY = "/home/clusterduck123/Desktop/git/graco/graco/cpp"
RAW_DATA_DIRECTORY = f"{DATA_DIRECTORY}/raw_data"
PPI_DIRECTORY = f"{DATA_DIRECTORY}/PPI"
ANNOTATIONS_DIRECTORY = f"{DATA_DIRECTORY}/annotations"
MATRIX_DIRECTORY = f"{DATA_DIRECTORY}/matrix"
CLUSTERS_DIRECTORY = f"{DATA_DIRECTORY}/individual_clusters"
# Create the data directory tree. os.makedirs(..., exist_ok=True) replaces
# the original six copy-pasted, racy check-then-create pairs with one loop
# that has the same end state.
for _directory in (DATA_DIRECTORY, RAW_DATA_DIRECTORY, PPI_DIRECTORY,
                   ANNOTATIONS_DIRECTORY, MATRIX_DIRECTORY, CLUSTERS_DIRECTORY):
    os.makedirs(_directory, exist_ok=True)
# -
# # Distances
# Preparation
PPI_nx = nx.read_edgelist(f"{PPI_DIRECTORY}/BioGRID_sc.txt")
GDV = graco.orbits(PPI_nx)
GCV = graco.coefficients(GDV)
# ## TVD
# ### Classic GCV
for tvd in ['0', '1', '2', '3']:
t1 = time.time()
D = cdist(np.array(GCV['-1'][tvd]), np.array(GCV['-1'][tvd]), graco.functions.tvd)
t2 = time.time()
print(f'{tvd}: {t2-t1:.2f}sec')
np.savetxt(f"{MATRIX_DIRECTORY}/sc_BioGRID_tvd{tvd}.txt", D,
fmt='%.7f', header=' '.join(PPI_nx), comments='')
# ### Higher order GCV
# +
t1 = time.time()
for (a,b) in [(0,0), (1,1), (3,3), (1,2), (2,1)]:
D = cdist(np.array(GCV[str(a)][str(b)]), np.array(GCV[str(a)][str(b)]), graco.functions.tvd)
t2 = time.time()
print(f'{a}-{b}:{t2-t1:.2f}sec')
np.savetxt(f"{MATRIX_DIRECTORY}/sc_BioGRID_{a}tvd{b}.txt", D,
fmt='%.7f', header=' '.join(PPI_nx), comments='')
# -
# ## Hellinger
# +
# Normalisation constant so that the distance lies in [0, 1].
_SQRT2 = np.sqrt(2)

def hellinger(p, q):
    """Hellinger distance between two discrete distributions p and q."""
    diff = np.sqrt(p) - np.sqrt(q)
    return np.sqrt(np.sum(np.square(diff))) / _SQRT2
# -
# ### Classical GCV
for tvd in ['0', '1', '2', '3']:
t1 = time.time()
D = cdist(np.array(GCV['-1'][tvd]), np.array(GCV['-1'][tvd]), hellinger)
t2 = time.time()
print(f'{tvd}: {t2-t1:.2f}sec')
np.savetxt(f"{MATRIX_DIRECTORY}/sc_BioGRID_hell{tvd}.txt", D,
fmt='%.7f', header=' '.join(PPI_nx), comments='')
# ### Higher order GCV
t1 = time.time()
for (a,b) in [(0,0), (1,1), (3,3), (1,2), (2,1)]:
D = cdist(np.array(GCV[str(a)][str(b)]), np.array(GCV[str(a)][str(b)]), hellinger)
t2 = time.time()
print(f'{a}-{b}: {t2-t1:.2f}sec')
np.savetxt(f"{MATRIX_DIRECTORY}/sc_BioGRID_{a}hell{b}.txt", D,
fmt='%.7f', header=' '.join(PPI_nx), comments='')
# # Mahalanobis
# +
gcv = GCV.fillna(GCV.mean())
t1 = time.time()
D = cdist(np.array(gcv), np.array(gcv), 'mahalanobis')
t2 = time.time()
print(f'{t2-t1:.2f}sec')
np.savetxt(f"{MATRIX_DIRECTORY}/sc_BioGRID_GCV_malahanobis_mean.txt", D,
fmt='%.7f', header=' '.join(PPI_nx), comments='')
# +
gcv = GCV.T.dropna().T
t1 = time.time()
D = cdist(np.array(gcv), np.array(gcv), 'mahalanobis')
t2 = time.time()
print(f'{t2-t1:.2f}sec')
np.savetxt(f"{MATRIX_DIRECTORY}/sc_BioGRID_malahanobisNa.txt", D,
fmt='%.7f', header=' '.join(PPI_nx), comments='')
# -
# # Clustering
# +
METHOD = "kmedoids"
if not os.path.exists(f"{CLUSTERS_DIRECTORY}/{METHOD}"):
os.makedirs(f"{CLUSTERS_DIRECTORY}/{METHOD}")
# -
# ### Mahalanobis
# +
# Individual
# NOTE(review): "malahanobis" is a typo for "mahalanobis", but it is part of
# the on-disk file names, so it must stay consistent with the writing cells.
# NOTE(review): this cell pattern is copy-pasted many times below -- a shared
# helper function taking MATRIX_NAME would remove the duplication.
MATRIX_NAME = "sc_BioGRID_GCV_malahanobis_mean"
D_df = pd.read_csv(f"{MATRIX_DIRECTORY}/{MATRIX_NAME}.txt", delimiter=' ')
D_df.index = D_df.columns
# Drop genes whose distance row/column is entirely NaN, keeping D square.
d = ~D_df.isna().all()
D_df = D_df[d].T[d]
int2gene = dict(enumerate(D_df.columns))
t1 = time.time()
# k-medoids for k = 2..99; the first k points serve as initial medoids.
for n_clusters in range(2,100):
    initial_medoids = range(n_clusters)
    kmedoids_instance = kmedoids(np.array(D_df), initial_medoids, data_type='distance_matrix')
    kmedoids_instance.process()
    # One output file per k: each line lists the gene names of one cluster.
    with open(f"{CLUSTERS_DIRECTORY}/{METHOD}/{MATRIX_NAME}_{n_clusters}.txt", 'w') as f:
        for cluster in kmedoids_instance.get_clusters():
            f.write(' '.join(map(int2gene.get,cluster)) + '\n')
    t2 = time.time()
    print(f'{n_clusters}: {t2-t1:.2f}sec', end='\r')
# +
# Individual
MATRIX_NAME = "sc_BioGRID_malahanobisNa"
D_df = pd.read_csv(f"{MATRIX_DIRECTORY}/{MATRIX_NAME}.txt", delimiter=' ')
D_df.index = D_df.columns
d = ~D_df.isna().all()
D_df = D_df[d].T[d]
int2gene = dict(enumerate(D_df.columns))
t1 = time.time()
for n_clusters in range(2,100):
initial_medoids = range(n_clusters)
kmedoids_instance = kmedoids(np.array(D_df), initial_medoids, data_type='distance_matrix')
kmedoids_instance.process()
with open(f"{CLUSTERS_DIRECTORY}/{METHOD}/{MATRIX_NAME}_{n_clusters}.txt", 'w') as f:
for cluster in kmedoids_instance.get_clusters():
f.write(' '.join(map(int2gene.get,cluster)) + '\n')
t2 = time.time()
print(f'{n_clusters}: {t2-t1:.2f}sec', end='\r')
# -
# ### TVD
# +
# Individual
MATRIX_NAME = "sc_BioGRID_all_tvd"
D0_df = pd.read_csv(f"{MATRIX_DIRECTORY}/sc_BioGRID_tvd0.txt", delimiter=' ')
D1_df = pd.read_csv(f"{MATRIX_DIRECTORY}/sc_BioGRID_tvd1.txt", delimiter=' ')
D2_df = pd.read_csv(f"{MATRIX_DIRECTORY}/sc_BioGRID_tvd2.txt", delimiter=' ')
D3_df = pd.read_csv(f"{MATRIX_DIRECTORY}/sc_BioGRID_tvd3.txt", delimiter=' ')
D00_df = pd.read_csv(f"{MATRIX_DIRECTORY}/sc_BioGRID_0tvd0.txt", delimiter=' ')
D11_df = pd.read_csv(f"{MATRIX_DIRECTORY}/sc_BioGRID_1tvd1.txt", delimiter=' ')
D33_df = pd.read_csv(f"{MATRIX_DIRECTORY}/sc_BioGRID_3tvd3.txt", delimiter=' ')
D12_df = pd.read_csv(f"{MATRIX_DIRECTORY}/sc_BioGRID_1tvd2.txt", delimiter=' ')
D21_df = pd.read_csv(f"{MATRIX_DIRECTORY}/sc_BioGRID_2tvd1.txt", delimiter=' ')
D = np.nanmean([D0_df.values, D1_df.values, D2_df.values, D3_df.values,
D00_df.values, D11_df.values, D33_df.values, D12_df.values, D21_df.values], axis=0)
# +
D_df = pd.DataFrame(D, index=D0_df.columns, columns=D0_df.columns)
D_df.index = D_df.columns
d = ~D_df.isna().all()
D_df = D_df[d].T[d]
int2gene = dict(enumerate(D_df.columns))
t1 = time.time()
for n_clusters in range(2,100):
initial_medoids = range(n_clusters)
kmedoids_instance = kmedoids(np.array(D_df), initial_medoids, data_type='distance_matrix')
kmedoids_instance.process()
with open(f"{CLUSTERS_DIRECTORY}/{METHOD}/{MATRIX_NAME}_{n_clusters}.txt", 'w') as f:
for cluster in kmedoids_instance.get_clusters():
f.write(' '.join(map(int2gene.get,cluster)) + '\n')
t2 = time.time()
print(f'{n_clusters}: {t2-t1:.2f}sec', end='\r')
# +
# Individual
MATRIX_NAME = "sc_BioGRID_0tvd0"
D_df = pd.read_csv(f"{MATRIX_DIRECTORY}/{MATRIX_NAME}.txt", delimiter=' ')
D_df.index = D_df.columns
d = ~D_df.isna().all()
D_df = D_df[d].T[d]
int2gene = dict(enumerate(D_df.columns))
t1 = time.time()
for n_clusters in range(2,100):
initial_medoids = range(n_clusters)
kmedoids_instance = kmedoids(np.array(D_df), initial_medoids, data_type='distance_matrix')
kmedoids_instance.process()
with open(f"{CLUSTERS_DIRECTORY}/{METHOD}/{MATRIX_NAME}_{n_clusters}.txt", 'w') as f:
for cluster in kmedoids_instance.get_clusters():
f.write(' '.join(map(int2gene.get,cluster)) + '\n')
t2 = time.time()
print(f'{n_clusters}: {t2-t1:.2f}sec', end='\r')
# +
# Individual
MATRIX_NAME = "sc_BioGRID_1tvd1"
D_df = pd.read_csv(f"{MATRIX_DIRECTORY}/{MATRIX_NAME}.txt", delimiter=' ')
D_df.index = D_df.columns
d = ~D_df.isna().all()
D_df = D_df[d].T[d]
int2gene = dict(enumerate(D_df.columns))
t1 = time.time()
for n_clusters in range(2,100):
initial_medoids = range(n_clusters)
kmedoids_instance = kmedoids(np.array(D_df), initial_medoids, data_type='distance_matrix')
kmedoids_instance.process()
with open(f"{CLUSTERS_DIRECTORY}/{METHOD}/{MATRIX_NAME}_{n_clusters}.txt", 'w') as f:
for cluster in kmedoids_instance.get_clusters():
f.write(' '.join(map(int2gene.get,cluster)) + '\n')
t2 = time.time()
print(f'{n_clusters}: {t2-t1:.2f}sec', end='\r')
# +
# Individual
MATRIX_NAME = "sc_BioGRID_3tvd3"
D_df = pd.read_csv(f"{MATRIX_DIRECTORY}/{MATRIX_NAME}.txt", delimiter=' ')
D_df.index = D_df.columns
d = ~D_df.isna().all()
D_df = D_df[d].T[d]
int2gene = dict(enumerate(D_df.columns))
t1 = time.time()
for n_clusters in range(2,100):
initial_medoids = range(n_clusters)
kmedoids_instance = kmedoids(np.array(D_df), initial_medoids, data_type='distance_matrix')
kmedoids_instance.process()
with open(f"{CLUSTERS_DIRECTORY}/{METHOD}/{MATRIX_NAME}_{n_clusters}.txt", 'w') as f:
for cluster in kmedoids_instance.get_clusters():
f.write(' '.join(map(int2gene.get,cluster)) + '\n')
t2 = time.time()
print(f'{n_clusters}: {t2-t1:.2f}sec', end='\r')
# +
# Individual
MATRIX_NAME = "sc_BioGRID_1tvd2"
D_df = pd.read_csv(f"{MATRIX_DIRECTORY}/{MATRIX_NAME}.txt", delimiter=' ')
D_df.index = D_df.columns
d = ~D_df.isna().all()
D_df = D_df[d].T[d]
int2gene = dict(enumerate(D_df.columns))
t1 = time.time()
for n_clusters in range(2,100):
initial_medoids = range(n_clusters)
kmedoids_instance = kmedoids(np.array(D_df), initial_medoids, data_type='distance_matrix')
kmedoids_instance.process()
with open(f"{CLUSTERS_DIRECTORY}/{METHOD}/{MATRIX_NAME}_{n_clusters}.txt", 'w') as f:
for cluster in kmedoids_instance.get_clusters():
f.write(' '.join(map(int2gene.get,cluster)) + '\n')
t2 = time.time()
print(f'{n_clusters}: {t2-t1:.2f}sec', end='\r')
# +
# All of the k-medoids runs below share one load -> cluster -> write
# pipeline, so it is factored into two helpers instead of being
# copy-pasted once per distance matrix.

def _load_distance_matrix(matrix_name):
    """Read a space-delimited square distance matrix and drop genes
    whose whole row/column is NaN.

    The file's header row holds the gene names; there is no index
    column, so the columns are copied onto the index to make the
    frame symmetric before filtering.
    """
    D_df = pd.read_csv(f"{MATRIX_DIRECTORY}/{matrix_name}.txt", delimiter=' ')
    D_df.index = D_df.columns
    keep = ~D_df.isna().all()
    return D_df[keep].T[keep]


def _cluster_and_save(matrix_name, D_df, min_clusters=2, max_clusters=100):
    """Run k-medoids on a distance matrix for every k in
    [min_clusters, max_clusters) and write one cluster file per k.

    Each output line lists the gene names of one cluster, space
    separated. Elapsed time per k is printed in place.
    """
    int2gene = dict(enumerate(D_df.columns))
    t1 = time.time()
    for n_clusters in range(min_clusters, max_clusters):
        # Deterministic seeding: the first n_clusters genes start as medoids.
        initial_medoids = range(n_clusters)
        kmedoids_instance = kmedoids(np.array(D_df), initial_medoids,
                                     data_type='distance_matrix')
        kmedoids_instance.process()
        with open(f"{CLUSTERS_DIRECTORY}/{METHOD}/{matrix_name}_{n_clusters}.txt", 'w') as f:
            for cluster in kmedoids_instance.get_clusters():
                f.write(' '.join(map(int2gene.get, cluster)) + '\n')
        t2 = time.time()
        print(f'{n_clusters}: {t2-t1:.2f}sec', end='\r')


# Individual
MATRIX_NAME = "sc_BioGRID_2tvd1"
D_df = _load_distance_matrix(MATRIX_NAME)
_cluster_and_save(MATRIX_NAME, D_df)
# -
# ### Hellinger
# +
# Combined matrix: element-wise mean over all Hellinger variants,
# ignoring NaNs per cell (np.nanmean), then the same clustering pipeline.
MATRIX_NAME = "sc_BioGRID_all_hell"
part_names = ["hell0", "hell1", "hell2", "hell3",
              "0hell0", "1hell1", "3hell3", "1hell2", "2hell1"]
parts = [pd.read_csv(f"{MATRIX_DIRECTORY}/sc_BioGRID_{name}.txt", delimiter=' ')
         for name in part_names]
D = np.nanmean([p.values for p in parts], axis=0)
D_df = pd.DataFrame(D, index=parts[0].columns, columns=parts[0].columns)
keep = ~D_df.isna().all()
D_df = D_df[keep].T[keep]
_cluster_and_save(MATRIX_NAME, D_df)
# +
# Individual runs over each single Hellinger variant.
for MATRIX_NAME in ["sc_BioGRID_0hell0", "sc_BioGRID_1hell1",
                    "sc_BioGRID_3hell3", "sc_BioGRID_1hell2",
                    "sc_BioGRID_2hell1"]:
    D_df = _load_distance_matrix(MATRIX_NAME)
    _cluster_and_save(MATRIX_NAME, D_df)
# -
# # Enrichement
# ## Annotation
# +
# Load one clustering (k=2) for the chosen distance and collect the set
# of all genes that appear in any cluster (the enrichment "population").
distance = "all_hell"
METHOD = "kmedoids"
MATRIX_NAME = f"sc_BioGRID_{distance}"
with open(f"{CLUSTERS_DIRECTORY}/{METHOD}/{MATRIX_NAME}_2.txt", 'r') as f:
    clusters = list(map(str.split, f))  # one cluster (list of gene names) per line
population = {gene for cluster in clusters for gene in cluster}
# -
# ### Preparation
# +
# Load PPI network and restrict it to the genes in the clustering population.
PPI = nx.read_edgelist(f"{PPI_DIRECTORY}/BioGRID_sc.txt")
PPI_induced = PPI.subgraph(population)
# Load obo file (GO term hierarchy).
GO_FILENAME = "go-basic.obo"
GO_FILEPATH = f"{RAW_DATA_DIRECTORY}/{GO_FILENAME}"
go_dag = obo_parser.GODag(GO_FILEPATH)
# One annotation table per GO namespace (biological process / molecular
# function / cellular component).
all_BP_annotations_df = pd.read_csv(f"{ANNOTATIONS_DIRECTORY}/SGD_BP_sc.csv")
all_MF_annotations_df = pd.read_csv(f"{ANNOTATIONS_DIRECTORY}/SGD_MF_sc.csv")
all_CC_annotations_df = pd.read_csv(f"{ANNOTATIONS_DIRECTORY}/SGD_CC_sc.csv")
# -
# Keep only annotations of genes that are part of the population.
PPI_BP_annotations_df = all_BP_annotations_df[all_BP_annotations_df.Systematic_ID.isin(population)]
PPI_MF_annotations_df = all_MF_annotations_df[all_MF_annotations_df.Systematic_ID.isin(population)]
PPI_CC_annotations_df = all_CC_annotations_df[all_CC_annotations_df.Systematic_ID.isin(population)]
# ### Definition
# +
# Choose namespace (here: biological process).
annotation_df = PPI_BP_annotations_df
# Keep GO terms annotating between 5 and 500 population genes; the level
# check only excludes terms missing from the DAG ordering (level <= -1).
GO_population = {go_id for go_id in set(annotation_df.GO_ID)
                 if (5 <= len(annotation_df[annotation_df.GO_ID == go_id]) <= 500 and
                     go_dag[go_id].level > -1)}
annotation_df = annotation_df[annotation_df.GO_ID.isin(GO_population)]
# +
# Conversion dictionaries
int2GO = dict(enumerate(GO_population))
GO2int = dict(zip(int2GO.values(), int2GO.keys()))
# GO term -> set of genes it annotates, and gene -> set of its GO terms.
GO2genes = {go_id:set(annotation_df.Systematic_ID[annotation_df.GO_ID == go_id])
            for go_id in GO_population}
gene2GOs = {gene :set(annotation_df.GO_ID[annotation_df.Systematic_ID == gene])
            for gene in PPI}
# -
# ## Here we GO
# +
def gene_enriched_in_cluster(gene, cluster, enrichment):
    """True if any GO term annotating `gene` is flagged enriched for `cluster`."""
    enriched_terms = set(GO_index[enrichment[cluster]])
    return bool(gene2GOs[gene] & enriched_terms)
def get_enrichment_df(alpha, p_values):
    """Benjamini–Yekutieli-style FDR thresholding of a p-value frame.

    Finds the first (1-based) rank k whose sorted p-value exceeds
    k/(m*c) * alpha, takes the last passing p-value as the threshold,
    and returns a boolean frame marking entries strictly below it.

    Bug fix: the original returned `p_values_df < threshold`, silently
    reading the *global* p_values_df instead of the `p_values` argument.
    """
    m = p_values.size
    # c ~ harmonic number H_m: ln(m) + Euler-Mascheroni + 1/(2m); the
    # BY correction valid under arbitrary dependence of the tests.
    c = np.log(m) + np.euler_gamma + 1/(2*m)
    sorted_p_values = np.sort(p_values.values.flatten())
    for k, P_k in enumerate(sorted_p_values, 1):
        if P_k > k/(m*c) * alpha:
            break
    # NOTE(review): if even the smallest p-value fails, k == 1 makes this
    # sorted_p_values[-1] (the largest p-value) — confirm that is intended.
    threshold = sorted_p_values[k-2]
    return p_values < threshold
# +
# List of success states: for each GO term, the set of genes it annotates.
list_of_success_states = list(GO2genes.values())
# This will be our K, see below. Reshaped to fit the shape of k 'array_of_observed_successes'
array_of_total_successes = np.array(list(map(len,list_of_success_states))).reshape(-1,1)
# +
# Range of cluster counts to evaluate, and the significance levels used
# for the high-/medium-/low-confidence enrichment calls.
MIN_CLUSTERS = 2
MAX_CLUSTERS = 100
confidences = {'hc':0.01,
               'mc':0.05,
               'lc':0.1}
# Coverage curves accumulated per distance and confidence level.
cluster_coverages = defaultdict(dict)
GO_coverages = defaultdict(dict)
gene_coverages = defaultdict(dict)
# For each distance and each cluster count, compute hypergeometric
# enrichment p-values of every GO term in every cluster, threshold them,
# and record three coverage statistics.
for distance in [
    'GCV_malahanobis_mean'
]:
    print(distance)
    MATRIX_NAME = f"sc_BioGRID_{distance}"
    # One coverage value per cluster count, per confidence level.
    for confidence in confidences:
        cluster_coverages[distance][confidence] = np.zeros(MAX_CLUSTERS-MIN_CLUSTERS)
        GO_coverages[distance][confidence] = np.zeros(MAX_CLUSTERS-MIN_CLUSTERS)
        gene_coverages[distance][confidence] = np.zeros(MAX_CLUSTERS-MIN_CLUSTERS)
    t1 = time.time()
    for i, n_clusters in enumerate(range(2, MAX_CLUSTERS)):
        # Each line of a cluster file is one cluster (space-separated genes).
        with open(f"{CLUSTERS_DIRECTORY}/{METHOD}/{MATRIX_NAME}_{n_clusters}.txt", 'r') as f:
            list_of_experiments = [set(line.split()) for line in f]
        clusters = dict(enumerate(list_of_experiments))
        # For each GO term and cluster we get an experiment
        array_of_observed_successes = np.array([[len(draws & success_states) for draws in list_of_experiments]
                                                for success_states in list_of_success_states])
        K = array_of_total_successes # defined in section 'Preparation'
        n = list(map(len, list_of_experiments)) # cluster lengths
        k = array_of_observed_successes # number of annotated genes found in cluster
        N = sum(n) # PPI size, i.e. number of all genes that appear in a cluster
        # scipy's hypergeom parameter names differ from the K/N/k/n convention
        # used here; this is the upper tail P(X >= k), i.e. the enrichment p-value.
        p_values_array = 1-hypergeom.cdf(k=k-1, M=N, N=n, n=K)
        p_values_df = pd.DataFrame(p_values_array, index=GO_population)
        GO_index = p_values_df.index
        m = p_values_array.size
        enrichment_dfs = {confidence:get_enrichment_df(alpha,p_values_df)
                          for confidence, alpha in confidences.items()}
        # Coverage: fraction of clusters with >= 1 enriched term, fraction of
        # GO terms enriched in some cluster, and fraction of genes sitting in
        # a cluster enriched for one of their own annotations.
        for confidence in confidences:
            cluster_coverages[distance][confidence][i] = sum(enrichment_dfs[confidence].any())  /n_clusters
            GO_coverages[distance][confidence][i] = sum(enrichment_dfs[confidence].any(axis=1))/len(GO_population)
            gene_coverages[distance][confidence][i] = sum(1 for cluster in clusters for gene in clusters[cluster]
                                                          if gene_enriched_in_cluster(gene, cluster, enrichment_dfs[confidence]))/N
        t2 = time.time()
        print(f'{n_clusters}: {t2-t1:.2f}sec', end='\r')
    print()
# # Plots
# +
# Distances to draw, and LaTeX-ready legend labels for each distance key.
plot_distances = ["malahanobis0", "all_hell", "GDV_mahalanobis"]
name2string = {"tvd0": "TVD_{0}",
               "tvd1": "TVD_{1}",
               "tvd2": "TVD_{2}",
               "tvd3": "TVD_{3}",
               "2tvd0": "2TVD_{0}",
               "tvd0123": "TVD_{0123}",
               "all_tvd": "all \;TVDs",
               "hell0": "hellinger_{0}",
               "hell1": "hellinger_{1}",
               "hell2": "hellinger_{2}",
               "hell3": "hellinger_{3}",
               "2hell0": "2hellinger_{0}",
               "hell0123": "hellinger_{0123}",
               "all_hell": "all \;hellingers",
               "tijana": "Tijana",
               "GDV_mahalanobis":"GDV_{Mahalanobis}",
               "GCV_malahanobis_mean":"GCV_{Mahalanobis}-mean",
               "malahanobis0":"GCV_{Mahalanobis}-0",
               "malahanobisNa":"GCV_{Mahalanobis}"}
# +
# The three coverage plots differ only in the data, title and output-file
# suffix, so they share a single plotting helper.
figname = 'all'

def _plot_coverage(coverages, title, suffix, line_alpha=1.0):
    """Plot % enrichment vs number of clusters for each plotted distance.

    The solid line is the medium-confidence coverage ('mc'); the shaded
    band spans the high- ('hc') to low- ('lc') confidence curves.
    Saves the figure to {DATA_DIRECTORY}/plots/{figname}_{suffix}.png.
    """
    fig, ax = plt.subplots(figsize=(12, 9))
    fig.patch.set_alpha(0)
    fig.subplots_adjust(hspace=0.4)
    for distance in plot_distances:
        ax.plot(range(2, MAX_CLUSTERS), 100*coverages[distance]['mc'],
                label=f'${name2string[distance]}$',
                linewidth=2.5,
                alpha=line_alpha)
        ax.fill_between(range(2, MAX_CLUSTERS),
                        100*coverages[distance]['hc'],
                        100*coverages[distance]['lc'],
                        alpha=0.1)
    ax.set_title(title, fontsize=28)
    ax.patch.set_alpha(0)
    ax.set_xlabel('# clusters', fontsize=24)
    ax.set_ylabel('% enriched', fontsize=24)
    ax.tick_params(axis='both', which='major', labelsize=24)
    for side in ('left', 'bottom'):
        ax.spines[side].set_linewidth(2.5)
        ax.spines[side].set_color('black')
    ax.legend(fontsize=18, shadow=True, facecolor=[0.95, 0.95, 0.95, 0])
    fig.savefig(f"{DATA_DIRECTORY}/plots/{figname}_{suffix}.png")

# Cluster coverage (the original first plot used line alpha 0.75).
_plot_coverage(cluster_coverages, 'Clusters enriched', 'cluster', line_alpha=0.75)
# GO-term coverage.
_plot_coverage(GO_coverages, 'GO-terms enriched', 'go')
# Gene coverage.
_plot_coverage(gene_coverages, 'gene-terms enriched', 'gene')
# -
# # Test
# Inspect the last computed p-value frame and the population size.
p_values_df
len(population)
# +
# Re-run the enrichment computation once, by hand, for the 2-cluster
# all-Hellinger clustering (same steps as the loop above).
with open(f"{CLUSTERS_DIRECTORY}/{METHOD}/sc_BioGRID_all_hell_2.txt", 'r') as f:
    list_of_experiments = [set(line.split()) for line in f]
clusters = dict(enumerate(list_of_experiments))
# For each GO term and cluster we get an experiment
array_of_observed_successes = np.array([[len(draws & success_states) for draws in list_of_experiments]
                                        for success_states in list_of_success_states])
K = array_of_total_successes # defined in section 'Preparation'
n = list(map(len, list_of_experiments)) # cluster lengths
k = array_of_observed_successes # number of annotated genes found in cluster
N = sum(n) # PPI size, i.e. number of all genes that appear in a cluster
# scipy's hypergeom parameter names differ from the K/N/k/n convention used here.
p_values_array = 1-hypergeom.cdf(k=k-1, M=N, N=n, n=K)
p_values_df = pd.DataFrame(p_values_array, index=GO_population)
GO_index = p_values_df.index
m = p_values_array.size
enrichment_dfs = {confidence:get_enrichment_df(alpha,p_values_df)
                  for confidence, alpha in confidences.items()}
# -
N
|
individual PPIs.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Intro to Neural Networks
#
# ## Exercise: neurons as logic gates
# In this exercise we will experiment with neuron computations. We will show how to represent basic logic functions like AND, OR, and XOR using single neurons (or more complicated structures). Finally, at the end we will walk through how to represent neural networks as a string of matrix computations.
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# ### Sigmoid function:
#
# $$
# \sigma = \frac{1}{1 + e^{-x}}
# $$
#
# $\sigma$ ranges from (0, 1). When the input $x$ is negative, $\sigma$ is close to 0. When $x$ is positive, $\sigma$ is close to 1. At $x=0$, $\sigma=0.5$
## Quickly define the sigmoid function
def sigmoid(x):
    """Logistic sigmoid: squashes any real input into the open interval (0, 1)."""
    exp_neg = np.exp(-x)
    return 1.0 / (1.0 + exp_neg)
# Plot the sigmoid function over [-10, 10].
vals = np.linspace(-10, 10, num=100, dtype=np.float32)
activation = sigmoid(vals)
fig = plt.figure(figsize=(12,6))
plt.plot(vals, activation)
plt.grid(True, which='both')
plt.axhline(y=0, color='k')  # horizontal axis line through the origin
plt.axvline(x=0, color='k')  # vertical axis line through the origin
plt.yticks()
plt.ylim([-0.5, 1.5]);
# ### Thinking of neurons as boolean logic gates
#
# A logic gate takes in two boolean (true/false or 1/0) inputs, and returns either a 0 or 1 depending on its rule. The truth table for a logic gate shows the outputs for each combination of inputs, (0, 0), (0, 1), (1,0), and (1, 1). For example, let's look at the truth table for an "OR" gate:
#
# ### OR Gate
#
# <table>
#
# <tr>
# <th colspan="3">OR gate truth table</th>
# </tr>
#
# <tr>
# <th colspan="2">Input</th>
# <th>Output</th>
# </tr>
#
# <tr>
# <td>0</td>
# <td>0</td>
# <td>0</td>
# </tr>
#
# <tr>
# <td>0</td>
# <td>1</td>
# <td>1</td>
# </tr>
#
# <tr>
# <td>1</td>
# <td>0</td>
# <td>1</td>
# </tr>
#
# <tr>
# <td>1</td>
# <td>1</td>
# <td>1</td>
# </tr>
#
# </table>
#
# A neuron that uses the sigmoid activation function outputs a value between (0, 1). This naturally leads us to think about boolean values. Imagine a neuron that takes in two inputs, $x_1$ and $x_2$, and a bias term:
#
# 
#
# By limiting the inputs of $x_1$ and $x_2$ to be in $\left\{0, 1\right\}$, we can simulate the effect of logic gates with our neuron. The goal is to find the weights (represented by ? marks above), such that it returns an output close to 0 or 1 depending on the inputs.
#
# What numbers for the weights would we need to fill in for this gate to output OR logic? Remember: $\sigma(z)$ is close to 0 when $z$ is largely negative (around -10 or less), and is close to 1 when $z$ is largely positive (around +10 or greater).
#
# $$
# z = w_1 x_1 + w_2 x_2 + b
# $$
#
# Let's think this through:
#
# * When $x_1$ and $x_2$ are both 0, the only value affecting $z$ is $b$. Because we want the result for (0, 0) to be close to zero, $b$ should be strongly negative (-10 or lower)
# * If either $x_1$ or $x_2$ is 1, we want the output to be close to 1. That means the weights associated with $x_1$ and $x_2$ should be enough to offset $b$ to the point of causing $z$ to be at least 10.
# * Let's give $b$ a value of -10. How big do we need $w_1$ and $w_2$ to be?
# * At least +20
# * So let's try out $w_1=20$, $w_2=20$, and $b=-10$!
#
# 
# +
def logic_gate(w1, w2, b):
    # Build a 2-input neuron with fixed weights and bias; calling the
    # returned function evaluates sigmoid(w1*x1 + w2*x2 + b).
    def gate(x1, x2):
        return sigmoid(w1 * x1 + w2 * x2 + b)
    return gate
def test(gate):
# Helper function to test out our weight functions.
for a, b in (0, 0), (0, 1), (1, 0), (1, 1):
print("{}, {}: {}".format(a, b, np.round(gate(a, b))))
# -
# Weights (20, 20) with bias -10: either active input drives z to +10 (OR).
or_gate = logic_gate(20, 20, -10)
test(or_gate)
# <table>
#
# <tr>
# <th colspan="3">OR gate truth table</th>
# </tr>
#
# <tr>
# <th colspan="2">Input</th>
# <th>Output</th>
# </tr>
#
# <tr>
# <td>0</td>
# <td>0</td>
# <td>0</td>
# </tr>
#
# <tr>
# <td>0</td>
# <td>1</td>
# <td>1</td>
# </tr>
#
# <tr>
# <td>1</td>
# <td>0</td>
# <td>1</td>
# </tr>
#
# <tr>
# <td>1</td>
# <td>1</td>
# <td>1</td>
# </tr>
#
# </table>
#
# This matches! Great! Now you try finding the appropriate weight values for each truth table. Try not to guess and check- think through it logically and try to derive values that work.
#
# ### AND Gate
#
# <table>
#
# <tr>
# <th colspan="3">AND gate truth table</th>
# </tr>
#
# <tr>
# <th colspan="2">Input</th>
# <th>Output</th>
# </tr>
#
# <tr>
# <td>0</td>
# <td>0</td>
# <td>0</td>
# </tr>
#
# <tr>
# <td>0</td>
# <td>1</td>
# <td>0</td>
# </tr>
#
# <tr>
# <td>1</td>
# <td>0</td>
# <td>0</td>
# </tr>
#
# <tr>
# <td>1</td>
# <td>1</td>
# <td>1</td>
# </tr>
#
# </table>
# ## Exercise
# Try to figure out what values for the neurons would make this function as an AND gate.
# +
# TO DO: Fill in the w1, w2, and b parameters such that the truth table matches
# (the all-zero placeholder always outputs sigmoid(0) = 0.5; the commented
# line holds one working solution).
and_gate = logic_gate(0,0,0)
#and_gate = logic_gate(15,15, -20)
test(and_gate)
# -
# ## Exercise
# Do the same for the NOR gate and the NAND gate.
# ### NOR (Not Or) Gate
#
# <table>
#
# <tr>
# <th colspan="3">NOR gate truth table</th>
# </tr>
#
# <tr>
# <th colspan="2">Input</th>
# <th>Output</th>
# </tr>
#
# <tr>
# <td>0</td>
# <td>0</td>
# <td>1</td>
# </tr>
#
# <tr>
# <td>0</td>
# <td>1</td>
# <td>0</td>
# </tr>
#
# <tr>
# <td>1</td>
# <td>0</td>
# <td>0</td>
# </tr>
#
# <tr>
# <td>1</td>
# <td>1</td>
# <td>0</td>
# </tr>
#
# </table>
# +
# TO DO: Fill in the w1, w2, and b parameters such that the truth table matches
# (the all-zero placeholder always outputs sigmoid(0) = 0.5; the commented
# line holds one working solution).
nor_gate = logic_gate(0, 0, 0)
#nor_gate = logic_gate(-20, -20, 10)
test(nor_gate)
# -
# ### NAND (Not And) Gate
#
# <table>
#
# <tr>
# <th colspan="3">NAND gate truth table</th>
# </tr>
#
# <tr>
# <th colspan="2">Input</th>
# <th>Output</th>
# </tr>
#
# <tr>
# <td>0</td>
# <td>0</td>
# <td>1</td>
# </tr>
#
# <tr>
# <td>0</td>
# <td>1</td>
# <td>1</td>
# </tr>
#
# <tr>
# <td>1</td>
# <td>0</td>
# <td>1</td>
# </tr>
#
# <tr>
# <td>1</td>
# <td>1</td>
# <td>0</td>
# </tr>
#
# </table>
# +
# TO DO: Fill in the w1, w2, and b parameters such that the truth table matches
# (the all-zero placeholder always outputs sigmoid(0) = 0.5; the commented
# line holds one working solution).
nand_gate = logic_gate(0, 0, 0)
#nand_gate = logic_gate(-15, -15, 20)
test(nand_gate)
# -
# ## The limits of single neurons
#
# If you've taken computer science courses, you may know that the XOR gates are the basis of computation. They can be used as so-called "half-adders", the foundation of being able to add numbers together. Here's the truth table for XOR:
#
# ### XOR (Exclusive Or) Gate
#
# <table>
#
# <tr>
# <th colspan="3">XOR gate truth table</th>
# </tr>
#
# <tr>
# <th colspan="2">Input</th>
# <th>Output</th>
# </tr>
#
# <tr>
# <td>0</td>
# <td>0</td>
# <td>0</td>
# </tr>
#
# <tr>
# <td>0</td>
# <td>1</td>
# <td>1</td>
# </tr>
#
# <tr>
# <td>1</td>
# <td>0</td>
# <td>1</td>
# </tr>
#
# <tr>
# <td>1</td>
# <td>1</td>
# <td>0</td>
# </tr>
#
# </table>
#
# Now the question is, can you create a set of weights such that a single neuron can output this property?
#
# It turns out that you cannot: XOR is not linearly separable, so no single choice of weights and bias reproduces its truth table. So individual neurons are out. Can we still use neurons to somehow form an XOR gate?
#
# What if we tried something more complex:
#
# 
#
# Here, we've got the inputs going to two separate gates: the top neuron is an OR gate, and the bottom is a NAND gate. The output of these gates then get passed to another neuron, which is an AND gate. If you work out the outputs at each combination of input values, you'll see that this is an XOR gate!
# +
# Make sure you have or_gate, nand_gate, and and_gate working from above!
def xor_gate(a, b):
c = or_gate(a, b)
d = nand_gate(a, b)
return and_gate(c, d)
test(xor_gate)
# -
# ## Feedforward Networks as Matrix Computations
#
# We discussed previously how the feed-forward computation of a neural network can be thought of as matrix calculations and activation functions. We will do some actual computations with matrices to see this in action.
#
# 
#
#
# ## Exercise
# Provided below are the following:
#
# - Three weight matrices `W_1`, `W_2` and `W_3` representing the weights in each layer. The convention for these matrices is that each $W_{i,j}$ gives the weight from neuron $i$ in the previous (left) layer to neuron $j$ in the next (right) layer.
# - A vector `x_in` representing a single input and a matrix `x_mat_in` representing 7 different inputs.
# - Two functions: `soft_max_vec` and `soft_max_mat` which apply the soft_max function to a single vector, and row-wise to a matrix.
#
# The goals for this exercise are:
# 1. For input `x_in` calculate the inputs and outputs to each layer (assuming sigmoid activations for the middle two layers and soft_max output for the final layer).
# 2. Write a function that does the entire neural network calculation for a single input
# 3. Write a function that does the entire neural network calculation for a matrix of inputs, where each row is a single input.
# 4. Test your functions on `x_in` and `x_mat_in`.
# Layer-1 weights: 3 inputs -> 4 hidden units (shape 3x4).
W_1 = np.array([[2,-1,1,4],[-1,2,-3,1],[3,-2,-1,5]])
W_1
# Layer-2 weights: 4 -> 4 hidden units (shape 4x4).
W_2 = np.array([[3,1,-2,1],[-2,4,1,-4],[-1,-3,2,-5],[3,1,1,1]])
# Layer-3 weights: 4 -> 3 output units (shape 4x3).
W_3 = np.array([[-1,3,-2],[1,-1,-3],[3,-2,2],[1,2,1]])
# A single 3-feature input vector.
x_in = np.array([.5,.8,.2])
x_in
# Seven 3-feature inputs, one per row (shape 7x3).
x_mat_in = np.array([[.5,.8,.2],[.1,.9,.6],[.2,.2,.3],[.6,.1,.9],[.5,.5,.4],[.9,.1,.9],[.1,.8,.7]])
x_mat_in
# +
def soft_max_vec(vec):
    """Softmax of a 1-D vector.

    Subtracting the max before exponentiating is mathematically a no-op
    (the shift cancels in the ratio) but prevents np.exp overflow for
    large inputs, which in the original produced inf/nan.
    """
    shifted = np.exp(vec - np.max(vec))
    return shifted / np.sum(shifted)
def soft_max_mat(mat):
    """Row-wise softmax of a 2-D matrix.

    The per-row max is subtracted before exponentiating — mathematically
    a no-op, but it prevents np.exp overflow (inf/nan) for large inputs.
    """
    shifted = np.exp(mat - np.max(mat, axis=1).reshape(-1, 1))
    return shifted / np.sum(shifted, axis=1).reshape(-1, 1)
# -
# +
## Student to do the calculations below
# -
|
intel-DL101-Class2/.ipynb_checkpoints/02_Intro_NN_HW-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/nandini-sundar/cs230-waste-classification-and-detection/blob/master/Waste_Object_Detection_Using_Tensorflow.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="uQCnYPVDrsgx" colab_type="text"
# # Waste Object Detection (Tensorflow)
# + id="NP0hAL0XgnkE" colab_type="code" outputId="deb51a58-8a76-42f0-d1df-ff0461f5c1ef" colab={"base_uri": "https://localhost:8080/", "height": 122}
# Mount Google Drive so files can be read/written under /content/drive.
from google.colab import drive
drive.mount('/content/drive')
# + [markdown] id="yhzxsJb3dpWq" colab_type="text"
# ## Configs and Hyperparameters
#
# + id="gnNXNQCjdniL" colab_type="code" colab={}
# Repo holding the images, annotations and conversion scripts.
repo_url = 'https://github.com/nandini-sundar/cs230_waste_object_detection_data.git'
# Number of training steps.
num_steps = 5000 # 200000
# Number of evaluation steps.
num_eval_steps = 250
# Candidate detection models: pretrained checkpoint name, sample pipeline
# config, and a batch size chosen per model.
MODELS_CONFIG = {
    'ssd_mobilenet_v2': {
        'model_name': 'ssd_mobilenet_v2_coco_2018_03_29',
        'pipeline_file': 'ssd_mobilenet_v2_coco.config',
        'batch_size': 12
    },
    'faster_rcnn_inception_v2': {
        'model_name': 'faster_rcnn_inception_v2_coco_2018_01_28',
        'pipeline_file': 'faster_rcnn_inception_v2_pets.config',
        'batch_size': 12
    },
    'rfcn_resnet101': {
        'model_name': 'rfcn_resnet101_coco_2018_01_28',
        'pipeline_file': 'rfcn_resnet101_pets.config',
        'batch_size': 8
    }
}
# Pick the model you want to use
# Select a model in `MODELS_CONFIG`.
selected_model = 'faster_rcnn_inception_v2'
# Name of the object detection model to use.
MODEL = MODELS_CONFIG[selected_model]['model_name']
# Name of the pipeline file in tensorflow object detection API.
pipeline_file = MODELS_CONFIG[selected_model]['pipeline_file']
# Training batch size that fits in Colab's Tesla K80 GPU memory for the selected model.
batch_size = MODELS_CONFIG[selected_model]['batch_size']
# + [markdown] id="w4V-XE6kbkc1" colab_type="text"
# ## Clone the `object_detection_demo` repository or your fork.
# + id="dxc3DmvLQF3z" colab_type="code" outputId="f89e7ca9-918c-4087-8433-88d6bcfa3ad8" colab={"base_uri": "https://localhost:8080/", "height": 85}
import os
# !pwd
# %cd /content
# Local checkout directory: repo name derived from the URL (".git" stripped).
repo_dir_path = os.path.abspath(os.path.join('.', os.path.basename(repo_url).split('.')[0]))
# !git clone {repo_url}
# %cd {repo_dir_path}
# + [markdown] id="bI8__uNS8-ns" colab_type="text"
# ## Install required packages
# + id="ecpHEnka8Kix" colab_type="code" outputId="7d85434b-43ac-4f63-8196-a4fc487c26f0" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# %cd /content
# !rm -rf models
# !git clone --quiet https://github.com/tensorflow/models.git
# !apt-get install -qq protobuf-compiler python-pil python-lxml python-tk
# !pip install -q Cython contextlib2 pillow lxml matplotlib
# !pip install -q pycocotools
# %cd /content/models/research
# !protoc object_detection/protos/*.proto --python_out=.
import os
# Make the object-detection API and the slim package importable from anywhere.
os.environ['PYTHONPATH'] += ':/content/models/research/:/content/models/research/slim/'
# !python object_detection/builders/model_builder_test.py
# + [markdown] id="u-k7uGThXlny" colab_type="text"
# ## Prepare `tfrecord` files
#
# Use the following scripts to generate the `tfrecord` files.
# ```bash
# # Convert train folder annotation xml files to a single csv file,
# # generate the `label_map.pbtxt` file
# python xml_to_csv.py -i images/train_set -o annotations/train_labels.csv -l annotations
#
# # Convert test folder annotation xml files to a single csv.
# python xml_to_csv.py -i images/test_set -o annotations/test_labels.csv
#
# # Generate `train.record`
# python generate_tfrecord.py --csv_input=annotations/train_labels.csv --output_path=annotations/train.record --img_path=images/train_set --label_map annotations/label_map.pbtxt
#
# # Generate `test.record`
# python generate_tfrecord.py --csv_input=annotations/test_labels.csv --output_path=annotations/test.record --img_path=images/test_set --label_map annotations/label_map.pbtxt
# ```
# + id="ezGDABRXXhPP" colab_type="code" outputId="2783e14d-427a-4294-cd4d-3ffc22074464" colab={"base_uri": "https://localhost:8080/", "height": 496}
# %cd {repo_dir_path}
# !pwd
# Convert train folder annotation xml files to a single csv file,
# generate the `label_map.pbtxt` file to `data/` directory as well.
# !python ./xml_to_csv.py -i images/train_set -o annotations/train_labels.csv -l annotations
# Convert test folder annotation xml files to a single csv.
# !python ./xml_to_csv.py -i images/val_set -o annotations/val_labels.csv
# Generate `train.record`
# !python ./generate_tfrecord.py --csv_input=annotations/train_labels.csv --output_path=annotations/train.record --img_path=images/train_set --label_map annotations/label_map.pbtxt
# Generate `test.record`
# !python ./generate_tfrecord.py --csv_input=annotations/val_labels.csv --output_path=annotations/val.record --img_path=images/val_set --label_map annotations/label_map.pbtxt
# + id="tgd-fzAIkZlV" colab_type="code" colab={}
# Paths to the generated tfrecords and the label map inside the cloned repo.
val_record_fname = '/content/cs230_trash_object_detection_data/annotations/val.record'
train_record_fname = '/content/cs230_trash_object_detection_data/annotations/train.record'
label_map_pbtxt_fname = '/content/cs230_trash_object_detection_data/annotations/label_map.pbtxt'
# + [markdown] id="iCNYAaC7w6N8" colab_type="text"
# ## Download base model
# + id="orDCj6ihgUMR" colab_type="code" outputId="111cc3f5-0001-4833-f1e0-0aad64d21208" colab={"base_uri": "https://localhost:8080/", "height": 34}
# %cd /content/models/research
import os
import shutil
import glob
import urllib.request
import tarfile
MODEL_FILE = MODEL + '.tar.gz'
DOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/'
DEST_DIR = '/content/models/research/pretrained_model'
# Download the pretrained checkpoint archive (skipped if already present),
# unpack it, then delete the archive.
if not (os.path.exists(MODEL_FILE)):
    urllib.request.urlretrieve(DOWNLOAD_BASE + MODEL_FILE, MODEL_FILE)
tar = tarfile.open(MODEL_FILE)
tar.extractall()
tar.close()
os.remove(MODEL_FILE)
# Replace any previous pretrained_model directory with the fresh extraction.
if (os.path.exists(DEST_DIR)):
    shutil.rmtree(DEST_DIR)
os.rename(MODEL, DEST_DIR)
# + id="pGhvAObeiIix" colab_type="code" outputId="cb989fca-63df-44ff-b66a-0fa3cfa4216d" colab={"base_uri": "https://localhost:8080/", "height": 204}
# !echo {DEST_DIR}
# !ls -alh {DEST_DIR}
# + id="UHnxlfRznPP3" colab_type="code" outputId="92eb5b00-54b4-4ff7-d040-4e0209202983" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Checkpoint prefix inside the extracted pretrained model; used to warm-start training.
fine_tune_checkpoint = os.path.join(DEST_DIR, "model.ckpt")
fine_tune_checkpoint
# + [markdown] id="MvwtHlLOeRJD" colab_type="text"
# ## Configuring a Training Pipeline
# + id="dIhw7IdpLuiU" colab_type="code" outputId="16123c50-0462-49d7-8659-d9b28c7713da" colab={"base_uri": "https://localhost:8080/", "height": 34}
import os
# Sample config shipped with the object-detection API for the chosen model.
pipeline_fname = os.path.join('/content/models/research/object_detection/samples/configs/', pipeline_file)
print(pipeline_fname)
assert os.path.isfile(pipeline_fname), '`{}` not exist'.format(pipeline_fname)
# + id="fG1nCNpUXcRU" colab_type="code" colab={}
def get_num_classes(pbtxt_fname):
    """Return how many object classes a label-map .pbtxt file defines."""
    from object_detection.utils import label_map_util
    label_map = label_map_util.load_labelmap(pbtxt_fname)
    categories = label_map_util.convert_label_map_to_categories(
        label_map, max_num_classes=90, use_display_name=True)
    return len(label_map_util.create_category_index(categories))
# + id="YjtCbLF2i0wI" colab_type="code" outputId="4720a389-bc5e-4742-f3f7-a0e015f91f19" colab={"base_uri": "https://localhost:8080/", "height": 1000}
import re
# Class count comes from the label map generated alongside the tfrecords.
num_classes = get_num_classes(label_map_pbtxt_fname)
print(num_classes)
with open(pipeline_fname) as f:
    s = f.read()
print(s)
# Rewrite the sample config in place, pointing it at our checkpoint,
# tfrecords and label map, and patching batch size / steps / class count.
with open(pipeline_fname, 'w') as f:
    # fine_tune_checkpoint
    s = re.sub('fine_tune_checkpoint: ".*?"',
               'fine_tune_checkpoint: "{}"'.format(fine_tune_checkpoint), s)
    # tfrecord files train and test.
    s = re.sub(
        '(input_path: ".*?)(train.record)(.*?")', 'input_path: "{}"'.format(train_record_fname), s)
    s = re.sub(
        '(input_path: ".*?)(val.record)(.*?")', 'input_path: "{}"'.format(val_record_fname), s)
    # label_map_path
    s = re.sub(
        'label_map_path: ".*?"', 'label_map_path: "{}"'.format(label_map_pbtxt_fname), s)
    # Set training batch_size.
    s = re.sub('batch_size: [0-9]+',
               'batch_size: {}'.format(batch_size), s)
    # Set training steps, num_steps
    s = re.sub('num_steps: [0-9]+',
               'num_steps: {}'.format(num_steps), s)
    # Set number of classes num_classes.
    s = re.sub('num_classes: [0-9]+',
               'num_classes: {}'.format(num_classes), s)
    print("*************************")
    print(s)
    f.write(s)
# + id="GH0MEEanocn6" colab_type="code" outputId="9658e32d-e881-4df6-8523-b778e1e795d5" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# !cat {pipeline_fname}
# + id="f11w0uO3jFCB" colab_type="code" colab={}
model_dir = 'training/'
# Optionally remove content in output model directory to fresh start.
# !rm -rf {model_dir}
os.makedirs(model_dir, exist_ok=True)
# + [markdown] id="JDddx2rPfex9" colab_type="text"
# ## Train the model
# + id="CjDHjhKQofT5" colab_type="code" outputId="3f0f946f-8062-469b-ae42-1b57169b96ec" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# !python /content/models/research/object_detection/model_main.py \
# --pipeline_config_path={pipeline_fname} \
# --model_dir={model_dir} \
# --alsologtostderr \
# --num_train_steps={num_steps} \
# --num_eval_steps={num_eval_steps}
# + [markdown] id="-JaGIVXJKwvW" colab_type="text"
# **Interrupted training early as the model reached our expected baseline loss of ~1.0 : Loss = 1.0686216 at step 2000**
# + id="KP-tUdtnRybs" colab_type="code" outputId="5360fd20-cc58-4a3d-f88d-4e4e6d4cef83" colab={"base_uri": "https://localhost:8080/", "height": 340}
# !ls /content/models/research/training
# !cp -r {pipeline_fname} "/content/drive/My Drive/Waste_Object_Detection/"
# + [markdown] id="OmSESMetj1sa" colab_type="text"
# ## Exporting a Trained Inference Graph
# Once your training job is complete, you need to extract the newly trained inference graph, which will be later used to perform the object detection. This can be done as follows:
# + id="DHoP90pUyKSq" colab_type="code" outputId="597ed02d-8c92-43e4-86f8-cd7414c72522" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# Locate the most recent training checkpoint so it can be exported below.
import re
import numpy as np

output_directory = './fine_tuned_model'

# Checkpoint metadata files are named 'model.ckpt-<step>.meta'.
lst = os.listdir(model_dir)
lst = [l for l in lst if 'model.ckpt-' in l and '.meta' in l]
# r'\d+' — raw string; the original '\d+' is an invalid escape sequence
# (a SyntaxWarning on modern Python).
steps = np.array([int(re.findall(r'\d+', l)[0]) for l in lst])
# Highest step number == latest checkpoint; strip '.meta' to get the prefix.
last_model = lst[steps.argmax()].replace('.meta', '')
last_model_path = os.path.join(model_dir, last_model)
print(last_model_path)
# !python /content/models/research/object_detection/export_inference_graph.py \
# --input_type=image_tensor \
# --pipeline_config_path={pipeline_fname} \
# --output_directory={output_directory} \
# --trained_checkpoint_prefix={last_model_path}
# + id="usgBZvkz0nqD" colab_type="code" outputId="13b7c93f-8712-4ad7-e93e-cfbc663a4b27" colab={"base_uri": "https://localhost:8080/", "height": 68}
# !ls {output_directory}
# + id="CnDo1lonKgFr" colab_type="code" colab={}
import os
# Absolute path to the exported frozen graph; fail fast if the export
# step above did not produce it.
pb_fname = os.path.join(os.path.abspath(output_directory), "frozen_inference_graph.pb")
assert os.path.isfile(pb_fname), '`{}` not exist'.format(pb_fname)
# + id="lHqWkLBINYoI" colab_type="code" outputId="7943679f-5971-4d8c-e3cb-a90c9d00f4be" colab={"base_uri": "https://localhost:8080/", "height": 34}
# !ls -alh {pb_fname}
# + [markdown] id="mz1gX19GlVW7" colab_type="text"
# ## Run inference test
# Test with 3 images in repository `cs230_trash_object_detection_data/images/test_set` directory.
# + id="Pzj9A4e5mj5l" colab_type="code" outputId="f700956b-c1a9-4303-cee3-f8af5ca21de9" colab={"base_uri": "https://localhost:8080/", "height": 54}
import os
import glob
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = pb_fname
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = label_map_pbtxt_fname
# If you want to test the code with your images, just add images files to the PATH_TO_TEST_IMAGES_DIR.
PATH_TO_TEST_IMAGES_DIR = os.path.join(repo_dir_path, "images/test_set/")
assert os.path.isfile(pb_fname)
assert os.path.isfile(PATH_TO_LABELS)
# NOTE(review): only '.png' files are collected — confirm the test set does
# not contain JPEGs, which would be silently skipped.
TEST_IMAGE_PATH_ALL = [os.path.join(PATH_TO_TEST_IMAGES_DIR, file) for file in os.listdir(PATH_TO_TEST_IMAGES_DIR) if file.endswith('.png')]
# Keep the smoke test short: only the first three images are used.
TEST_IMAGE_PATHS = TEST_IMAGE_PATH_ALL[0:3]
assert len(TEST_IMAGE_PATHS) > 0, 'No image found in `{}`.'.format(PATH_TO_TEST_IMAGES_DIR)
print(TEST_IMAGE_PATHS)
# + id="CG5YUMdg1Po7" colab_type="code" outputId="1f7ed44c-77a3-4afd-ad03-d2f0517502fc" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# %cd /content/models/research/object_detection
import numpy as np
import os
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile
from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image
import imageio
# This is needed since the notebook is stored in the object_detection folder.
sys.path.append("..")
from object_detection.utils import ops as utils_ops
# This is needed to display the images.
# %matplotlib inline
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
# Load the frozen TF1 graph once; it is reused for every test image below.
detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')

# Map numeric class ids -> human-readable category names for visualization.
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(
    label_map, max_num_classes=num_classes, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
def load_image_into_numpy_array(image):
    """Return *image* as a uint8 numpy array, shape unchanged.

    Callers (the inference loop below) already pass a decoded
    ``(height, width, 3)`` numpy array, so no reshaping is needed.
    The previous implementation unpacked ``image.shape`` as
    ``(width, height, channels)`` — numpy shapes are
    ``(height, width, channels)`` — and then reshaped to the swapped
    dimensions, scrambling pixels for any non-square image.
    """
    return np.asarray(image).astype(np.uint8)


# Size, in inches, of the output images.
IMAGE_SIZE = (12, 8)
def run_inference_for_single_image(image, graph):
    """Run one forward pass of the frozen detection graph on a single image.

    Args:
        image: uint8 numpy array of shape (height, width, 3).
        graph: tf.Graph containing the imported frozen detection model.

    Returns:
        dict with 'num_detections' (int) and 'detection_boxes',
        'detection_scores', 'detection_classes' with the batch dimension
        removed; plus 'detection_masks' (reframed to image coordinates)
        when the model produces masks.
    """
    with graph.as_default():
        # NOTE(review): a new tf.Session is opened per image — fine for a
        # 3-image smoke test, but hoist it out of the loop for larger runs.
        with tf.Session() as sess:
            # Get handles to input and output tensors
            ops = tf.get_default_graph().get_operations()
            all_tensor_names = {
                output.name for op in ops for output in op.outputs}
            tensor_dict = {}
            # Only request tensors the graph actually provides.
            for key in [
                    'num_detections', 'detection_boxes', 'detection_scores',
                    'detection_classes', 'detection_masks'
            ]:
                tensor_name = key + ':0'
                if tensor_name in all_tensor_names:
                    tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(
                        tensor_name)
            if 'detection_masks' in tensor_dict:
                # The following processing is only for single image
                detection_boxes = tf.squeeze(
                    tensor_dict['detection_boxes'], [0])
                detection_masks = tf.squeeze(
                    tensor_dict['detection_masks'], [0])
                # Reframe is required to translate mask from box coordinates to image coordinates and fit the image size.
                real_num_detection = tf.cast(
                    tensor_dict['num_detections'][0], tf.int32)
                detection_boxes = tf.slice(detection_boxes, [0, 0], [
                    real_num_detection, -1])
                detection_masks = tf.slice(detection_masks, [0, 0, 0], [
                    real_num_detection, -1, -1])
                detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
                    detection_masks, detection_boxes, image.shape[0], image.shape[1])
                # Binarize the reframed masks at 0.5.
                detection_masks_reframed = tf.cast(
                    tf.greater(detection_masks_reframed, 0.5), tf.uint8)
                # Follow the convention by adding back the batch dimension
                tensor_dict['detection_masks'] = tf.expand_dims(
                    detection_masks_reframed, 0)
            image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')
            # Run inference
            output_dict = sess.run(tensor_dict,
                                   feed_dict={image_tensor: np.expand_dims(image, 0)})
            # all outputs are float32 numpy arrays, so convert types as appropriate
            output_dict['num_detections'] = int(
                output_dict['num_detections'][0])
            output_dict['detection_classes'] = output_dict[
                'detection_classes'][0].astype(np.uint8)
            output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
            output_dict['detection_scores'] = output_dict['detection_scores'][0]
            if 'detection_masks' in output_dict:
                output_dict['detection_masks'] = output_dict['detection_masks'][0]
    return output_dict
# Run the detector on each test image and display the annotated result.
for image_path in TEST_IMAGE_PATHS:
    #image = Image.open(image_path)
    image_4 = imageio.imread(image_path)
    print(image_4.shape)
    # Drop the alpha channel — the model expects exactly three channels.
    image = image_4[:,:,:3]
    print(image.shape)
    # the array based representation of the image will be used later in order to prepare the
    # result image with boxes and labels on it.
    image_np = load_image_into_numpy_array(image)
    # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
    image_np_expanded = np.expand_dims(image_np, axis=0)
    # Actual detection.
    output_dict = run_inference_for_single_image(image_np, detection_graph)
    # Visualization of the results of a detection.
    vis_util.visualize_boxes_and_labels_on_image_array(
        image_np,
        output_dict['detection_boxes'],
        output_dict['detection_classes'],
        output_dict['detection_scores'],
        category_index,
        instance_masks=output_dict.get('detection_masks'),
        use_normalized_coordinates=True,
        line_thickness=8)
    plt.figure(figsize=IMAGE_SIZE)
    plt.imshow(image_np)
|
Object_Detection/Waste_Object_Detection_(Milestone_1).ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import torch
import torch.nn.functional as F
from torch_geometric.data import Data
from torch_geometric.nn import GCNConv, GNNExplainer
import numpy as np
import pandas as pd
import more_itertools as mit
from typing import List
from torch_geometric.data import InMemoryDataset
import matplotlib.pyplot as plt
# -
def reasonable_notebook_defaults():
    r"""Apply notebook-wide plotting defaults (seaborn 'paper' context, large figures)."""
    import seaborn as sns
    import matplotlib.pyplot as plt
    sns.set_context("paper", font_scale=1.5)
    plt.rcParams['figure.figsize'] = [20, 10]


reasonable_notebook_defaults()
# Two toy documents and a shared vocabulary used to demonstrate tokenization.
doc0 = ["The", "quick", "brown", "fox", "jumped", "over", "the", "lazy", "dog"]
doc1 = ["Welcome", "to", "the", "dog", "days", "of", "summer"]
docs = [doc0, doc1]
vocab = set(doc0 + doc1)
# term -> integer token, and the inverse mapping.
vocab_to_int = {term: token for token, term in enumerate(vocab)}
int_to_vocab = {token: term for term, token in vocab_to_int.items()}
# +
def tokenize(sent):
    """Convert a sentence (list of terms) into integer tokens."""
    return [vocab_to_int[term] for term in sent]
def untokenize(sent):
    """Convert integer tokens back into their vocabulary terms."""
    return [int_to_vocab[tok] for tok in sent]
def one_hot_encode(sent):
    """Stack the one-hot vectors of every token in *sent* into a 2-D array."""
    rows = [token_to_one_hot(tok) for tok in sent]
    return np.stack(rows)
def decode_one_hot(encoding) -> List[str]:
    """Map each one-hot row in *encoding* back to its vocabulary term."""
    return [one_hot_word_to_token(row) for row in encoding]
def token_to_one_hot(token):
    """Return a length-|vocab| int vector with a 1 at position *token*."""
    vec = np.zeros(len(vocab), dtype=int)
    vec[token] = 1
    return vec
def one_hot_word_to_token(one_hot_word):
    """Return the vocabulary term for a one-hot encoded word.

    Note: despite the name, this returns the *term* (via int_to_vocab),
    not the integer token.
    """
    hot_index = np.argmax(one_hot_word)
    return int_to_vocab[hot_index]
# -
# Round-trip the string documents through tokenize/untokenize.
tokenized = [tokenize(x) for x in docs]
sentences = [untokenize(x) for x in tokenized]
sentences
# One-hot round trip for the first document.
onehot_encoded = one_hot_encode(tokenized[0])
one_hot_token = token_to_one_hot(tokenized[0][0])
one_hot_token
one_hot_word_to_token(one_hot_token)
onehot_encoded
decode_one_hot(onehot_encoded)
# ### Integer Tokens:
# Repeat the same demo with integer "words" instead of strings.
doc0 = [x for x in range(10)]
doc1 = [x for x in range(5, 15)]
docs = [doc0, doc1]
vocab = set(doc0 + doc1)
vocab_to_int = {term: token for token, term in enumerate(vocab)}
int_to_vocab = {token: term for term, token in vocab_to_int.items()}
vocab
docs
tokenized = [tokenize(x) for x in docs]
sentences = [untokenize(x) for x in tokenized]
onehot_encoded = one_hot_encode(tokenized[0])
one_hot_token = token_to_one_hot(tokenized[0][0])
one_hot_word_to_token(one_hot_token)
onehot_encoded
# ## Edge List
# Rebuild the string vocabulary for the edge-list / graph example.
doc0 = ["The", "quick", "brown", "fox", "jumped", "over", "the", "lazy", "dog"]
doc1 = ["Welcome", "to", "the", "dog", "days", "of", "summer"]
docs = [doc0, doc1]
vocab = set(doc0 + doc1)
doc = ["The", "quick", "brown", "fox", "jumped", "over", "the", "lazy", "dog"]
# Graph nodes are word *positions* in the document, not vocabulary ids.
doc_idxs = [x for x in range(len(doc))]
vocab_to_int = {term: token for token, term in enumerate(vocab)}
int_to_vocab = {token: term for term, token in vocab_to_int.items()}
def create_edge_list(doc_idxs, num_neighbors=1):
    """Create an edge list for a document.

    Connects each word position to the positions up to ``num_neighbors``
    steps to its left and right.

    Args:
        doc_idxs: list of document indices.
        num_neighbors: number of neighbors to connect to each node.

    Returns:
        Sorted list of ``[src, dst]`` pairs suitable for a
        PyTorch Geometric graph.
    """
    pairs = []
    for distance in range(1, num_neighbors + 1):
        pairs.extend(get_neighbors(doc_idxs, distance))
    # PyTorch Geometric expects both directions of every edge.
    pairs.extend(get_reverse_pairs(pairs))
    pairs.sort()
    return pairs
def get_neighbors(doc_idxs, neighbor_distance):
    """Pair every index with the index *neighbor_distance* positions later.

    Args:
        doc_idxs: list of document indices.
        neighbor_distance: gap between the paired positions.

    Returns:
        List of ``[left, right]`` pairs.  Documents shorter than
        ``neighbor_distance + 1`` yield an empty list — the previous
        ``more_itertools.windowed`` implementation padded short documents
        with ``None``, producing invalid ``[idx, None]`` edges.
    """
    # zip truncates at the shorter sequence, so no padding ever appears;
    # this also drops the more_itertools dependency for this helper.
    return [[left, right]
            for left, right in zip(doc_idxs, doc_idxs[neighbor_distance:])]
def get_reverse_pairs(edge_list):
    """Return each ``[a, b]`` pair flipped to ``[b, a]``.

    PyTorch Geometric datasets require both edge directions.
    """
    return [list(reversed(pair)) for pair in edge_list]
# Build the graph: nodes are word positions, features are token ids,
# labels are random ints in [0, 6) just to have something to train on.
edge_list = create_edge_list(doc_idxs, num_neighbors=1)
edge_list
edge_index = torch.tensor(edge_list, dtype=torch.long)
tokens = tokenize(doc)
# Each node needs a feature *vector*, so wrap every token id in a list.
tokens = [[x] for x in tokens]
x = torch.tensor(tokens, dtype=torch.float)
y = torch.randint(0, 6, (9, ), dtype=torch.long)
# edge_index must have shape [2, num_edges], hence the transpose.
data = Data(x=x, y=y, edge_index=edge_index.t().contiguous())
data
data.num_nodes
# ## Model Explainer
class Net(torch.nn.Module):
    """Two-layer graph convolutional network for node classification."""

    def __init__(self):
        super(Net, self).__init__()
        # 1 input feature (the token id) -> 10 hidden units -> 10 classes.
        self.conv1 = GCNConv(1, 10)
        self.conv2 = GCNConv(10, 10)

    def forward(self, x, edge_index):
        hidden = F.relu(self.conv1(x, edge_index))
        hidden = F.dropout(hidden, training=self.training)
        logits = self.conv2(hidden, edge_index)
        return F.log_softmax(logits, dim=1)
# +
# Train the GCN for 200 epochs with Adam, then explain one node's prediction.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = Net().to(device)
data = data.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4)
x, edge_index = data.x, data.edge_index
for epoch in range(1, 201):
    model.train()
    optimizer.zero_grad()
    log_logits = model(x, edge_index)
    loss = F.nll_loss(log_logits, data.y)
    loss.backward()
    optimizer.step()
# +
# GNNExplainer learns edge/feature masks that preserve the prediction for
# the chosen node (here node 8, the last word position).
explainer = GNNExplainer(model, epochs=200)
node_idx = 8
node_feat_mask, edge_mask = explainer.explain_node(node_idx, x, edge_index)
ax, G = explainer.visualize_subgraph(node_idx, edge_index, edge_mask, y=data.y)
plt.show()
# -
|
examples/notebooks/data.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import json
# +
# Load the two SQuAD-format datasets to be merged.
train1 = json.load(open('data/dureader_squad_train.json'))
val1 = json.load(open('data/dureader_squad_val.json'))
train2 = json.load(open('data/webqa_squad_train.json'))
val2 = json.load(open('data/webqa_squad_eval.json'))
# -
len(train1['data']), len(train2['data']), len(val1['data']), len(val2['data'])
# Concatenate webqa into dureader in place, then write the merged files.
train1['data'].extend(train2['data'])
len(train1['data'])
val1['data'].extend(val2['data'])
len(val1['data'])
json.dump(train1, open('data/webqa_dureader_squad_train.json', 'w'))
json.dump(val1, open('data/webqa_dureader_squad_eval.json', 'w'))
|
merge.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
import sys, collections
# +
#load mouse transcript
# tr2g maps transcript id -> gene id; trlist keeps file order so that
# equivalence-class transcript indices can be resolved back to ids.
tr2g = {}
trlist = []
with open('../index/tr2g_mouse') as f:
    for line in f:
        l = line.split()
        tr2g[l[0]] = l[1]
        trlist.append(l[0])

genes = list(set(tr2g[t] for t in tr2g))

# load equivalence classes
# Each line: ec id, then comma-separated transcript indices (into trlist).
ecs = {}
with open('../kallisto_mccarroll_out/matrix.ec') as f:
    for line in f:
        l = line.split()
        ec = int(l[0])
        trs = [int(x) for x in l[1].split(',')]
        ecs[ec] = trs
def ec2g(ec):
    """Map an equivalence-class id to the list of gene names it covers.

    Unknown equivalence classes yield an empty list.
    """
    transcripts = ecs.get(ec)
    if transcripts is None:
        return []
    return list({tr2g[trlist[t]] for t in transcripts})
# +
# load test dataset
# Stream sorted BUS records (barcode, UMI, ec, count) and accumulate
# cell_gene[barcode][gene].  Gene lists for one barcode+UMI are pooled;
# the UMI is then split fractionally among the gene(s) with the highest
# multiplicity in the pooled list.
cell_gene = collections.defaultdict(lambda: collections.defaultdict(float))
pbar=None
pumi=None
with open('../kallisto_mccarroll_out/output.sort.txt') as f:
    gs = set()
    gl_sum=[]
    for line in f:
        # NOTE(review): `l` and `count` are parsed but never used.
        l = line.split()
        barcode,umi,ec,count = line.split()
        ec = int(ec)
        if barcode == pbar:
            # same barcode
            if umi == pumi:
                # same UMI, let's add new genelist of existing genelist
                gl = ec2g(ec)
                gl_sum = gl_sum + gl
            else:
                # new UMI, process the previous gene set
                gs = set(gl_sum)
                g_max = max(gl_sum, key=gl_sum.count)
                # Keep only genes tied for the maximum multiplicity.
                for i in set(gl_sum):
                    if (gl_sum.count(i) < gl_sum.count(g_max)):
                        gs.remove(i)
                for g in gs:
                    cell_gene[barcode][g] += 1.0/len(gs)
                # record new umi, reset gene set
                pumi = umi
                gl_sum = ec2g(ec)
        else:
            # bypass the first bus record
            if (gl_sum == []):
                pbar = barcode
                pumi = umi
                gl_sum = ec2g(ec)
            else:
                # work with previous gene list
                gs = set(gl_sum)
                g_max = max(gl_sum, key=gl_sum.count)
                for i in set(gl_sum):
                    if (gl_sum.count(i) < gl_sum.count(g_max)):
                        gs.remove(i)
                for g in gs:
                    cell_gene[pbar][g] += 1.0/len(gs)
                # Drop barcodes with fewer than 10 total counts (noise).
                if sum(cell_gene[pbar][g] for g in cell_gene[pbar]) < 10:
                    del cell_gene[pbar]
                pbar = barcode
                pumi = umi
                gl_sum = ec2g(ec)

#remember the last gene
# Flush the final barcode/UMI that the loop above never processed.
gs = set(gl_sum)
g_max = max(gl_sum, key=gl_sum.count)
for i in set(gl_sum):
    if (gl_sum.count(i) < gl_sum.count(g_max)):
        gs.remove(i)
for g in gs:
    cell_gene[pbar][g] += 1.0/len(gs)
if sum(cell_gene[pbar][g] for g in cell_gene[pbar]) < 10:
    del cell_gene[pbar]
# -
# Total UMI count per barcode.
barcode_hist = collections.defaultdict(int)
for barcode in cell_gene:
    cg = list(cell_gene[barcode])
    s = 0
    for g in cg:
        s = s + cell_gene[barcode][g]
    barcode_hist[barcode] += s

#calculate number of UMI detected per cell
# Histogram of per-cell UMI totals, restricted to the 1000..5000 range.
bcv = [x for b,x in barcode_hist.items() if x > 1000 and x < 5000]
_ = plt.hist(bcv,bins=100)
print(len(bcv))
# +
# Write the count matrix in MatrixMarket format (genes x cells).
outfile = '../kallisto_mccarroll_out/v2/matrix.mtx'
# Gene ids are 1-based row indices in the matrix.
gene_to_id = dict((g,i+1) for i,g in enumerate(genes))
# Keep only barcodes with more than 3200 total UMIs (likely real cells).
barcodes_to_use = [b for b,x in barcode_hist.items() if x > 3200]
num_entries = 0
for barcode in barcodes_to_use:
    num_entries += len([x for x in cell_gene[barcode].values() if round(x)>0])
# -
with open(outfile, 'w') as of:
    of.write('%%MatrixMarket matrix coordinate real general\n%\n')
    #number of genes
    of.write("%d %d %d\n"%(len(genes), len(barcodes_to_use), num_entries))
    bcid = 0
    for barcode in barcodes_to_use:
        bcid += 1
        cg = cell_gene[barcode]
        # Fractional counts are rounded; zero-rounded entries are dropped.
        gl = [(gene_to_id[g],round(cg[g])) for g in cg if round(cg[g]) > 0]
        gl.sort()
        for x in gl:
            of.write("%d %d %d\n"%(x[0],bcid,x[1]))
# Ensembl gene id -> gene symbol lookup (first line is a header).
gene_names = {}
with open('../index/mart_export_mouse.txt') as f:
    f.readline()
    for line in f:
        g,t,gn = line.split()
        gene_names[g] = gn

# +
# Emit genes.tsv (id + symbol) in the same order as the matrix rows.
id_to_genes = dict((i,g) for (g,i) in gene_to_id.items())
gl = []
for i in range(1,len(genes)+1):
    g = id_to_genes[i]
    # Strip the version suffix (e.g. '.3') before the symbol lookup.
    gid = g[:g.find('.')]
    if gid in gene_names:
        gn = gene_names[gid]
    else:
        gn = ''
    gl.append((g,gn))

with open('../kallisto_mccarroll_out/v2/genes.tsv','w') as of:
    for g,gn in gl:
        of.write("%s\t%s\n"%(g,gn))

with open('../kallisto_mccarroll_out/v2/barcodes.tsv','w') as of:
    of.write('\n'.join(x + '' for x in barcodes_to_use))
    of.write('\n')
# +
import numpy as np
import pandas as pd
import scanpy.api as sc

sc.settings.verbosity = 3             # verbosity: errors (0), warnings (1), info (2), hints (3)
sc.logging.print_versions()
results_file = '../kallisto_mccarroll_out/v2/kallisto_mccarroll.h5ad'
sc.settings.set_figure_params(dpi=80)
adata = sc.read_10x_mtx('../kallisto_mccarroll_out/v2/', var_names='gene_symbols', cache=False)
# -
adata.var_names_make_unique()
# Basic QC filtering: keep cells with >=100 genes and genes in >=2 cells.
sc.pp.filter_cells(adata, min_genes=100)
sc.pp.filter_genes(adata, min_cells=2)
mito_genes = [name for name in adata.var_names if name.startswith('mt-')]
# for each cell compute fraction of counts in mito genes vs. all genes
# the `.A1` is only necessary as X is sparse to transform to a dense array after summing
adata.obs['percent_mito'] = np.sum(adata[:, mito_genes].X, axis=1) / np.sum(adata.X, axis=1)
# add the total counts per cell as observations-annotation to adata
adata.obs['n_counts'] = adata.X.sum(axis=1)
# Drop probable doublets (too many genes) and high-mito (stressed) cells.
adata = adata[adata.obs['n_genes'] < 2500, :]
adata
adata = adata[adata.obs['percent_mito'] < 0.2, :]
adata
sc.pp.log1p(adata)
# PCA -> neighbor graph -> UMAP / t-SNE embeddings -> Louvain clustering.
sc.tl.pca(adata, svd_solver='arpack')
sc.pl.pca_variance_ratio(adata, log=True)
sc.pp.neighbors(adata, n_neighbors=10, n_pcs=30)
sc.tl.umap(adata)
sc.tl.louvain(adata, resolution=0.6)
sc.pl.umap(adata, color=['louvain'])
sc.tl.tsne(adata, n_pcs=30)
sc.pl.tsne(adata, color=['louvain'])
sc.tl.rank_genes_groups(adata, 'louvain', method='t-test', corr_method='benjamini-hochberg')
result = adata.uns['rank_genes_groups']
groups = result['names'].dtype.names
#pd.DataFrame({group + '_' + key: result[key][group]
#    for group in groups for key in ['names', 'scores','logfoldchanges','pvals','pvals_adj']}).head(5)
# Marker genes, visualized per Louvain cluster.
marker_genes = ['Pax6', 'Gad1', 'Slc6a9', 'Opn1mw', 'Vsx2', 'Rlbp1']
sc.pl.dotplot(adata, marker_genes, groupby='louvain')
sc.pl.stacked_violin(adata, marker_genes, groupby='louvain', rotation=90)
adata
# Re-read the raw matrix and pivot it into a dense gene x cell table.
cellID = pd.read_table('../kallisto_mccarroll_out/v2/barcodes.tsv',sep="\t",header=None)
geneID = pd.read_table('../kallisto_mccarroll_out/v2/genes.tsv',sep="\t",header=None)
data = pd.read_table('../kallisto_mccarroll_out/v2/matrix.mtx',sep=" ",skiprows=3,header=None)
data.columns = ["GENE","CELL","COUNT"]
matrix = data.pivot(index='GENE', columns='CELL', values='COUNT')
matrix_out = matrix.fillna(0)
matrix_out.index = geneID.iloc[matrix.index-1,1]
matrix_out.columns = cellID.iloc[matrix.columns-1,0]
matrix_out.index.name = 'GENE'
#Take average values for duplicate gene records
# NOTE(review): groupby(...).mean() and set_index(...) return new frames
# that are discarded here — assign the results back if they are meant to
# take effect before to_csv.
matrix_out.groupby('GENE', as_index=False).mean()
matrix_out.set_index(matrix_out.columns[0])
matrix_out.to_csv("../kallisto_mccarroll_out/v2/gene_matrix.txt",sep="\t")
len(matrix)
|
notebooks/mccarroll/kallisto_mccarroll_v2.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
from fastparquet import ParquetFile
import os
# from ctakes_xml import CtakesXmlParser
# from sklearn.feature_extraction.text import TfidfVectorizer
from stringdist import levenshtein_norm as lev_norm
# import matplotlib
# # %matplotlib inline
# -
# ### Read the parquet file into a pandas dataframe. Using fastparquet here because pyarrow couldn't read in a file of this size for some reason
# Parquet file of note events; fastparquet handles this file size where
# pyarrow could not (see markdown above).
notes_file = 'synthnotes/data/note-events.parquet'
pq_root_path = 'synthnotes/data/xml_extracted'
pf = ParquetFile(notes_file)
df = pf.to_pandas()
# ### Get the list of ids from the processed xml files so we can select a subset of the mimic notes
xml_dir = 'synthnotes/data/xml_files'
xml_files = os.listdir(xml_dir)
# cTAKES output files are named '<ROW_ID>.txt.xmi'.
ids = [int(f.split('.txt.xmi')[0]) for f in xml_files]
# ### Select the subset of notes that we have xml output from ctakes for. Reset the index and drop some unnecessary columns
notes = df[df.ROW_ID.isin(ids)]
notes = notes.reset_index(drop=True)
notes = notes.drop(['CHARTDATE','CHARTTIME','STORETIME','CGID','ISERROR'],axis=1)
# +
def get_notes_sample(df, n=100, category='Nursing'):
    """Return a random sample of *n* non-error 'Generic Note' notes.

    Fixes two defects in the original: it filtered a global ``notes_df``
    instead of the ``df`` argument, and ignored the ``category``
    parameter (hard-coding 'Nursing').

    Args:
        df: notes DataFrame with CATEGORY, ISERROR and DESCRIPTION columns.
        n: number of rows to sample.
        category: note category to keep.

    Returns:
        Sampled DataFrame with a fresh 0..n-1 index.
    """
    notes = df[df['CATEGORY'] == category]
    # Exclude notes flagged as errors.
    notes = notes[notes['ISERROR'].isnull()]
    notes = notes[notes['DESCRIPTION'] == 'Generic Note']
    notes = notes.sample(n=n)
    notes = notes.reset_index(drop=True)
    return notes
# -
# ### process the xml files and store in parquet locally
# #### TODO: switch this to use columnar format: need to change how we extract different types of elements
# +
# parser = CtakesXmlParser()
# schemas = list()
# for file in xml_files:
# xml_out = parser.parse(f'{xml_dir}/{file}')
# for k, v in xml_out.items():
# feature_df = pd.DataFrame(list(v))
# if feature_df.shape[0] > 0:
# table = pa.Table.from_pandas(feature_df)
# pq.write_to_dataset(table, f'{pq_root_path}/{k}')
# else:
# print(f"{k} was empty for {file}")
# -
# # Creating templates
# The plan:<br>
# For each sentences in all documents:
# 1. Get the predicates for the sentence
# 2. Get the entities for the sentence
# 3. For each entity:
# - append the cui code from umls concept to the end
# 4. Combine predicates and entities and sort based on their begin position
# 5. Save to a dataframe
#
#
# ### Some helper functions:
# +
def get_df_from_pq(root, name):
    """Load the parquet dataset ``<root>/<name>`` as a pandas DataFrame."""
    table = pq.read_table(f'{root}/{name}')
    return table.to_pandas()
def transform_preds(df):
    """Trim predicate framesets to their base name (drop everything after the first '.')."""
    df['frameset'] = df['frameset'].str.split('.').str[0]
    return df
def transform_mentions(mentions):
# Don't want this to fail if these have already been removed
try:
mentions = mentions.drop(
['conditional', 'history_of', 'generic', 'polarity', 'discovery_technique', 'subject'],
axis=1)
except:
pass
sorted_df = mentions.groupby(['sent_id', 'begin']) \
.apply(lambda x: x.sort_values(['begin', 'end']))
# Drop the mentions that are parts of a larger span. Only keep the containing span that holds multiple
# mentions
deduped = sorted_df.drop_duplicates(['sent_id', 'begin'], keep='last')
deduped = deduped.drop_duplicates(['sent_id', 'end'], keep='first')
return deduped.reset_index(drop=True)
def set_template_token(df, column):
    """Copy *column* into the 'template_token' slot used downstream (mutates *df*)."""
    source = df[column]
    df['template_token'] = source
    return df
def get_template_tokens(row):
    """Project a mention/predicate row onto the unified template-token schema."""
    fields = {
        'doc_id': row['doc_id'],
        'sent_id': row['sent_id'],
        'token': row['template_token'],
        'begin': row['begin'],
        'end': row['end'],
    }
    return pd.Series(fields)
# def merge_mentions_umls(mentions, umls):
# umls['umls_xmi_id'] = umls['xmi_id']
# mentions = mentions.merge(umls[['umls_xmi_id', 'cui']], on='umls_xmi_id')
# return mentions
# def umls_dedup(umls):
# return umls.drop_duplicates(subset=['cui'])
# def set_umls_join_key(umls):
# umls['umls_xmi_id'] = umls['xmi_id']
# return umls
def set_sentence_pos(df):
df = df.groupby(["doc_id"]).apply(lambda x: x.sort_values(["begin"])).reset_index(drop=True)
df['sentence_number'] = df.groupby("doc_id").cumcount()
return df
def get_root_verb(row):
    """Placeholder: intended to find a sentence's root verb (not implemented)."""
    return None
def extract_sent(row):
    """Replace the full note text with just this sentence's character span (mutates *row*)."""
    span = slice(row['begin'], row['end'])
    row['TEXT'] = row['TEXT'][span]
    return row
def write_notes(row):
    """Dump one note's raw text to ``raw_notes/<ROW_ID>``."""
    out_path = f'raw_notes/{row["ROW_ID"]}'
    with open(out_path, 'w') as out_file:
        out_file.write(row['TEXT'])
def get_text_from_sentence(row, notes):
    """Slice the raw note text for the character span described by *row*."""
    doc = notes[notes['ROW_ID'] == row['doc_id']]
    full_text = doc['TEXT'].iloc[0]
    return full_text[row['begin']:row['end']]
def edit_dist(row, term2):
    """Normalized Levenshtein distance between a concept's preferred text and *term2*."""
    preferred = row.loc['preferred_text']
    return lev_norm(preferred, term2)
def get_cui(mention, umls_df):
    """Pick the UMLS concept whose preferred text best matches a mention.

    Candidates are the concepts referenced by the mention's ontology
    array; the one with the smallest normalized edit distance to the
    mention's text wins.

    Returns:
        (cui, xmi_id, preferred_text) of the closest concept.
    """
    ont_arr = list(map(int, mention['ontology_arr'].split())) or None
    span_text = mention['text']
    candidates = umls_df[umls_df['xmi_id'].isin(ont_arr)].loc[:, ['cui', 'preferred_text', 'xmi_id']]
    candidates['dist'] = candidates.apply(edit_dist, args=(span_text,), axis=1)
    ranked = candidates.sort_values(by='dist', ascending=True).reset_index(drop=True)
    best = ranked.iloc[0]
    return best['cui'], best['xmi_id'], best['preferred_text']
# -
# ## Pull in the dataframes for elements we need for processing
# Load the cTAKES-extracted element tables from the parquet store.
preds = get_df_from_pq(pq_root_path, 'predicates')
mentions = get_df_from_pq(pq_root_path, 'mentions')
umls = get_df_from_pq(pq_root_path, 'umls_concepts')
sents = get_df_from_pq(pq_root_path, 'sentences')
tokens = get_df_from_pq(pq_root_path, 'tokens')
# +
sents = sents.rename({'id': 'sent_id'}, axis=1)
sents.head()
# -
# ## Prep sentences DF
# ### Add raw text from notes to sentences
# +
# NOTE(review): this rename was already applied in the previous cell;
# renaming 'id' again is a harmless no-op but redundant.
sents = sents.rename({'id': 'sent_id'}, axis=1)
# Attach the raw note text to each sentence, then slice out the
# sentence's own character span.
sents = sents.merge(notes[['ROW_ID', 'TEXT']],
                    left_on='doc_id', right_on='ROW_ID').drop('ROW_ID', axis=1)
sents = sents.apply(extract_sent, axis=1)
sents = sents.rename({'TEXT': 'text'}, axis=1)
# -
# ### Add position of sentence in document to sentences df
sents = set_sentence_pos(sents)
# ### remove sentences without entities
sents_with_mentions = sents[
    sents['sent_id'].isin(
        mentions.drop_duplicates(subset='sent_id')['sent_id']
    )
]
# # Prep UMLS DF
# ### Remove umls concepts which don't have a preferred text field
umls = umls[~umls['preferred_text'].isna()]
# # Prep Mentions DF
# ### Transform mentions
# 1. Drop some unused fields
# 2. Only keep the first umls code from ontology array ( **no longer doing this as it limits the cui codes we can choose
# from in the umls concepts table**)
# 3. Sort by begin and end offsets. Remove mentions that end on the same offset. Only want to keep the full span and not split entities up. This should give us better semantic meaning
# 4. Add raw text to mentions
# 5. Add in umls concept information (CUI) to mentions
# a. There are many possible cuis for a the text span of an entity. Here, we're going to use
# the edit distance from the original span and the umls preferred text. For now, just choose the first
# umls concept with the best score (lowest)
# Reload mentions fresh so the transform starts from the raw table.
mentions = get_df_from_pq(pq_root_path, 'mentions')
mentions = transform_mentions(mentions)
mentions.head()
# ### Add original text to mentions
mentions['text'] = mentions.apply(get_text_from_sentence, args=(notes,), axis=1)
mentions.head()
# ### Add sentence position to mentions
mentions = mentions.merge(sents_with_mentions[['sent_id', 'sentence_number']],
                          on='sent_id')
mentions.head()
# # Prep Predicates DF
# ### Transform predicates
# Simple transformation. Just modify the frameset string to remove everything after the '.'
preds = transform_preds(preds)
# ### Remove predicates not in sentences with mentions
print(preds.shape)
preds = preds[
    preds['sent_id'].isin( sents_with_mentions['sent_id'] )
]
print(preds.shape)
# ### Add original text to predicates
preds['text'] = preds.apply(get_text_from_sentence, args=(notes,), axis=1)
# # Linking CUI codes to entities (mentions)
# ### Assign cui codes to mentions (entities)
#
# cTAKES over-generates cui and tui codes for text spans in a clinical note. There can be multiple coding schemes that have a code for a term and a cui could apply to the original text span specifically or be a generalization or abstraction over the meaning of the span. For generating text we want the cui that most closely matches the original text span. Future work could look at these generalizations to get a better sense of semantic meaning. However, this will require a deep understanding of the UMLS ontology an how to work with it to extract this kind of information.
#
# For each mention:
# 1. Collect all the umls concept rows (based on xmi_id) that are in the mention's ontology array
# 2. Compute edit distance between the above umls rows' preferred text column and the mention's original text
# 3. Sort edit distances in ascending order
# 4. Choose the first umls concept row (a lower edit distance means the two texts are more similar)
# Assign each mention the closest-matching CUI (see get_cui: minimum
# normalized edit distance between mention text and UMLS preferred text).
mentions[['cui', 'umls_xmi_id', 'preferred_text']] = mentions. \
    apply(get_cui, args=(umls,), axis=1, result_type='expand')
mentions.head()
# # Set the template tokens we're going to use
# For mentions this is either: the type of mention, the CUI code, or the two concatenated together
#
# For predicates it is the frameset trimmed of everything after the '.'
# +
mentions['template_token'] = mentions['mention_type']
preds['template_token'] = preds['frameset']
preds_toks = preds.apply(get_template_tokens, axis=1)
mentions_toks = mentions.apply(get_template_tokens, axis=1)
mentions_toks.groupby(['sent_id', 'end']).head()
preds_toks.groupby(['sent_id', 'end']).head()
# -
# #### Append the two template tokens dataframes
# Stack predicate and mention tokens into one frame.
# NOTE: DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
# pd.concat is the supported equivalent and produces the same result.
template_tokens = pd.concat([preds_toks, mentions_toks])
# Within each sentence, order tokens by their character offset.
temp_tokens = template_tokens.groupby(['sent_id']).apply(lambda x: x.sort_values(['begin']))
temp_tokens.head()
# ### Get the semantic templates
# Group the rows of the above template tokens dataframe by sentence id and join them together into a single string. Must sort by begin offset.
# Join each sentence's tokens (in offset order) into a single template string.
sem_templates = template_tokens.sort_values('begin').groupby('sent_id')['token'].apply(' '.join)
sem_templates.head()
# Number of distinct template tokens observed.
temp_tokens.token.unique().shape
# +
# Promote the per-sentence template Series to a DataFrame keyed by sent_id.
sem_df = pd.DataFrame(sem_templates) # one row per sentence, column 'token'
sem_df.head()
sem_df.reset_index(level=0, inplace=True)
sem_df = sem_df.rename(columns={'token': 'sem_template'})
# Attach sentence position and document metadata for the statistics below.
sem_df = sem_df.merge(sents[['sent_id', 'sentence_number', 'doc_id', 'begin', 'end']],
                      left_on='sent_id', right_on='sent_id' )#.drop('id', axis=1)
# -
sem_df.head()
# # Gather corpus statistics
# ### Average sentences per doc
avg_sents_per_doc = sents.groupby('doc_id').size().mean()
print(avg_sents_per_doc)
# ### Average sentences w/ entities per doc
avg_sents_with_ents_per_doc = sents_with_mentions.groupby('doc_id').size().mean()
print(avg_sents_with_ents_per_doc)
# ### Count of unique cuis (When removing overlapping text spans)
print(mentions['cui'].nunique())
# ### Average # of cuis per doc
mentions.groupby('doc_id').size().mean()
# ### Average # of cuis per sentence
mentions.groupby('sent_id').size().mean()
# ### Average # of words per doc (excluding newline tokens and symbols)
# Drop tokens with no sentence and newline tokens before word counting.
tokens = tokens[(~tokens['sent_id'].isnull()) & (tokens['token_type'] != 'NewlineToken')]
wc_by_doc = tokens.groupby('doc_id').count()['id'].reset_index(name='count')
wc_by_doc['count'].mean()
# ### Average # of words per sentence
wc_by_sentence = tokens.groupby('sent_id')['id'].count().reset_index(name='count')
wc_by_sentence['count'].mean()
# ### Get frequency of mentions
mention_counts = mentions.groupby('mention_type').size().reset_index(name='count')
mention_counts
# Normalize counts into relative frequencies.
mention_counts['frequency'] = mention_counts['count'] / mention_counts['count'].sum()
mention_counts
# ### Frequency of mentions by sentence position
# Column-normalized crosstab: each sentence position sums to 1.
mentions_by_pos = pd.crosstab(
    mentions['mention_type'],
    mentions['sentence_number']).apply(lambda x: x / x.sum(), axis=0)
mentions_by_pos
# ### Frequency of CUIs
cui_counts = mentions.groupby('cui').size().reset_index(name='count')
cui_counts = cui_counts.sort_values('count', ascending=False).reset_index(drop=True)
cui_counts.head(10)
cui_counts['frequency'] = cui_counts['count'] / cui_counts['count'].sum()
cui_counts.head(10)
# ### Frequency with preferred text
# +
# Join the human-readable preferred text onto the cui counts; a cui can map
# to many mention rows, so de-duplicate after the merge.
cui_counts_with_text = cui_counts.merge(mentions[['cui', 'preferred_text']], on='cui') \
    .drop_duplicates('cui') \
    .reset_index(drop=True)
cui_counts_with_text.head(10)
# -
# ### Frequency of CUIs by sentence position
cui_by_pos = pd.crosstab(mentions['cui'], mentions['sentence_number']).apply(lambda x: x / x.sum(), axis=0)
cui_by_pos.head()
# Top-10 most frequent CUIs in the first sentence of a note.
cui_by_pos.loc[:, 0].sort_values(ascending=False)[:10]
# ### Number of unique templates
sem_df.head()
sem_df['sem_template'].nunique()
# ### Frequency of templates (identified by sentence number)
count_temps = sem_df.groupby('sem_template').size().reset_index(name='count')
count_temps = count_temps.sort_values('count', ascending=False).reset_index(drop=True)
count_temps.head(10)
# Relative frequency of each template over all sentences.
count_temps['frequency'] = count_temps['count'] / count_temps['count'].sum()
count_temps.head(10)
# ### Frequency of templates by sentence position
sem_df.head()
sem_df['sentence_number'].shape
# Column-normalized: per sentence position, the distribution over templates.
temp_by_pos = pd.crosstab(sem_df['sem_template'], sem_df['sentence_number']).apply(lambda x: x / x.sum(), axis=0)
temp_by_pos.head()
# # Write dataframes to parquet
# We want to write these to a parquet file so that they can be used by a separate notebook to do clustering and note generation. This is just prep-work for those processes.
# Output directory; assumed to already exist — TODO confirm, otherwise
# to_parquet will raise.
df_dir = 'data/processed_dfs'
# Write sentences, mentions, predicates, and umls concepts to parquet, sem_df
# +
sents_with_mentions.to_parquet(f'{df_dir}/sentences.parquet')
mentions.to_parquet(f'{df_dir}/mentions.parquet')
preds.to_parquet(f'{df_dir}/predicates.parquet')
umls.to_parquet(f'{df_dir}/umls.parquet')
sem_df.to_parquet(f'{df_dir}/templates.parquet')
temp_by_pos.to_parquet(f'{df_dir}/templates_by_pos.parquet')
# -
|
extra/preprocessing.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:lasio_torch]
# language: python
# name: conda-env-lasio_torch-py
# ---
# # Introduction to PyTorch
#
# In this tutorial, we will use PyTorch. A few words of introduction to what PyTorch does. We will dive into relevant portions of PyTorch in detail throughout this course.
#
# ## What is PyTorch?
#
# PyTorch is a scientific computing (it is much more general than a toolbox for machine learning!) package developed primarily by Facebook.
#
# In this tutorial, let us look at the building blocks of PyTorch.
#
# This notebook is just a simple introduction. We'll delve deep into it as we go along..
#
# ## Tensors
#
# Tensors are the core building blocks of PyTorch. Think of them like turbocharged `numpy.ndarray`. They run on GPUs and they come with more bells and whistles that a normal `numpy.ndarray` cannot do.
#
# Oh and they can track gradients and other things.
from __future__ import print_function
import torch
# Let us initialize an empty uninitialized tensor.
# Note: torch.empty does NOT zero the memory — the printed values are
# whatever happened to be in the allocated buffer.
x = torch.empty(3, 4)
print(x)
# Let us construct a tensor with random values in it.
# torch.rand samples uniformly from [0, 1).
x = torch.rand(3, 4)
print(x)
# Let us look at the size of the tensor that we defined in the previous cell.
# Returns a torch.Size, which behaves like a tuple: here (3, 4).
x.size()
# ## Operations on tensors
# +
# Element-wise arithmetic on tensors works exactly like NumPy broadcasting.
a = torch.ones(2, 3)
b = torch.ones(2, 3)

s = a + b * 2   # every entry: 1 + 2*1 = 3
print(s)

d = a - b * 3   # every entry: 1 - 3*1 = -2
print(d)
# -
# Tensors also support the full NumPy-style indexing syntax.
print(d[:, 1])  # second column
print(d[1, :])  # second row
# ## Converting to NumPy arrays
# Tensor -> ndarray. NOTE: .numpy() shares memory with the tensor (CPU only);
# mutating one mutates the other.
b_np = b.numpy()
print(b_np)
print('Type of b', type(b))
print('Type of b_np', type(b_np))
# ## Converting to torch tensors
import numpy as np
a = np.ones(5)
# ndarray -> tensor; from_numpy also shares memory and preserves dtype
# (float64 here, so b is a DoubleTensor).
b = torch.from_numpy(a)
print(a)
print(b)
# Till now, it should not be a surprise for anyone used to `numpy`. Mostly, it has been functioning like `numpy`. Let us look at how they are different now.
#
# I mentioned earlier that they work on GPUs as well.
#
# Let us see how easy it is!
# ## CUDA Tensors
# First, let us check if your machine has GPU and CUDA is enabled. It is easily done using `torch.cuda.is_available()`.
#
# While we are at it, let us also look at how many GPUs we have. This is done using `torch.cuda.device_count()`.
# True iff a CUDA-capable GPU and driver are visible to this process.
torch.cuda.is_available()
# Number of visible CUDA devices (0 on a CPU-only machine).
torch.cuda.device_count()
# A device is chosen using `torch.device("cuda")`.
# let us run this cell only if CUDA is available
# We will use ``torch.device`` objects to move tensors in and out of GPU
if torch.cuda.is_available():
    device = torch.device("cuda")          # a CUDA device object
    y = torch.ones_like(x, device=device)  # directly create a tensor on GPU
    x = x.to(device)                       # or just use strings ``.to("cuda")``
    z = x + y
    print(z)
    print(z.to("cpu", torch.double))       # ``.to`` can also change dtype together!
# There is another useful shortcut worth knowing.
#
# Calling `.cuda()` on a tensor moves the tensors to the GPUs. For a lot of image classification exercises, computing on CPUs is extremely slow.
#
# `.cuda()` works on a lot of things - models, tensors etc.
x = torch.ones([2, 2])
# `.cuda()` raises a RuntimeError on CPU-only machines, so only request the
# GPU copy when a device exists. Note `.cuda()` returns a *copy*; `x` itself
# stays on the CPU either way. The bare expression displays the result in
# the notebook, as before.
x.cuda() if torch.cuda.is_available() else x
|
Lesson1-IntroPyTorch.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (myenv)
# language: python
# name: myenv
# ---
# ## Image Clustering
from keras.preprocessing import image
from keras.applications.vgg16 import VGG16
from keras.applications.vgg16 import preprocess_input
import numpy as np
from sklearn.cluster import KMeans
import glob
# Load VGG16 pre-trained on ImageNet, without the classification head
# (include_top=False), so the network outputs convolutional feature maps.
model = VGG16(weights='imagenet', include_top=False)
model.summary()
# +
# Extract a VGG16 feature map for a single image, step by step.
img_path = 'images/image_1.jpg'
img = image.load_img(img_path, target_size=(224, 224))   # VGG16's input size
img_data = image.img_to_array(img)
img_data = np.expand_dims(img_data, axis=0)              # add batch dimension
img_data = preprocess_input(img_data)                    # VGG-specific normalization
vgg16_feature = model.predict(img_data)
print(vgg16_feature.shape)
# +
vgg16_feature_list = []

path = 'images/cl2/*'
for im in glob.glob(path):
    # Extract one VGG16 feature vector per image under images/cl2.
    img = image.load_img(im, target_size=(224, 224))
    img_data = image.img_to_array(img)
    img_data = np.expand_dims(img_data, axis=0)     # batch dimension
    img_data = preprocess_input(img_data)

    vgg16_feature = model.predict(img_data)
    # model.predict already returns an ndarray, so flatten it directly
    # instead of making an extra copy through np.array first.
    vgg16_feature_list.append(vgg16_feature.flatten())
# -
# Stack the per-image feature vectors into a (n_images, n_features) matrix.
vgg16_feature_list_np = np.array(vgg16_feature_list)
# Cluster the images into 2 groups; random_state fixed for reproducibility.
kmeans = KMeans(n_clusters=2, random_state=0).fit(vgg16_feature_list_np)
print(kmeans.labels_)
# List the files again to match labels to filenames (same glob order).
glob.glob(path)
|
.ipynb_checkpoints/Image Clustering_1-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Chebychev and cubic spline derivative approximation errors
#
# **<NAME>, PhD**
#
# This demo is based on the original Matlab demo accompanying the <a href="https://mitpress.mit.edu/books/applied-computational-economics-and-finance">Computational Economics and Finance</a> 2001 textbook by <NAME> and <NAME>.
#
# Original (Matlab) CompEcon file: **demapp06.m**
#
# Running this file requires the Python version of CompEcon. This can be installed with pip by running
#
# # !pip install compecon --upgrade
#
# <i>Last updated: 2021-Oct-01</i>
# <hr>
# ## Initial tasks
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from compecon import BasisChebyshev, BasisSpline, nodeunif, demo
# ## Function to be approximated
def f(x):
    """Target function to approximate: exp(-x)."""
    return np.exp(-x)


def df(x):
    """Analytic first derivative of f: -exp(-x)."""
    return -np.exp(-x)


def d2f(x):
    """Analytic second derivative of f: exp(-x)."""
    return np.exp(-x)
# Set degree of approximation and endpoints of approximation interval
a = -1  # left endpoint
b = 1  # right endpoint
n = 10  # order of interpolation
# Construct refined uniform grid for error plotting (1001 evaluation points)
x = np.linspace(a,b, 1001)
# Construct Chebychev interpolant of f on [a, b] with n nodes
C = BasisChebyshev(n, a, b, f=f)
# Construct cubic spline interpolant of f on the same interval
S = BasisSpline(n, a, b, f=f)
# ### Plot function approximation error
# +
# Approximation error of the function itself: exact value minus interpolant.
y = f(x)
fig1, axs = plt.subplots(2,1,sharex=True)
fig1.suptitle('Function Approximation Error')
(pd.DataFrame({
    'Chebychev': y - C(x),
    'Cubic Spline': y - S(x)},
    index=x)
 .plot(subplots=True, ax=axs)
);
# -
# ### Plot first derivative approximation error
# +
# C(x, 1) / S(x, 1) evaluate the interpolants' first derivatives.
dy = df(x)
fig1, axs = plt.subplots(2,1,sharex=True)
fig1.suptitle('First Derivative Approximation Error')
(pd.DataFrame({
    'Chebychev': dy - C(x, 1),
    'Cubic Spline': dy - S(x, 1)},
    index=x)
 .plot(subplots=True, ax=axs)
);
# -
# ### Plot second derivative approximation error
# +
# C(x, 2) / S(x, 2) evaluate the interpolants' second derivatives.
d2y = d2f(x)
fig1, axs = plt.subplots(2,1,sharex=True)
fig1.suptitle('Second Derivative Approximation Error')
(pd.DataFrame({
    'Chebychev': d2y - C(x, 2),
    'Cubic Spline': d2y - S(x, 2)},
    index=x)
 .plot(subplots=True, ax=axs)
);
|
_build/html/_sources/notebooks/app/06 Chebychev and cubic spline derivative approximation errors.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.5 64-bit
# language: python
# name: python385jvsc74a57bd0762b0bf26a87919d6f1db8421403575d096055d091f3057ceb697d4bd11e8916
# ---
import json
import codecs
# + tags=[]
# Use a context manager so the file handle is closed deterministically
# instead of leaking until interpreter shutdown.
with open("result.json", 'r') as f:
    data = json.load(f)
# -
# Build the header row: three id columns followed by the first five titles.
value = ['id1', 'id2', 'id3']
for row in data['head']['title']['content'][:5]:
    value.append(row[0])
value = [value]
# Append one row per result: its ids plus the raw CPA values.
# NOTE(review): results[2] is assumed to be the CPA block — confirm against
# the producer of result.json.
for index in range(len(data['rows'])):
    tmpRow = (data['rows'])[index]
    tmpHead = tmpRow['id']
    cpaResult = (tmpRow['results'])[2]
    # print(tmpHead + [item['raw'] for item in cpaResult['values']])
    value.append(tmpHead + [item['raw'] for item in cpaResult['values']])
import openpyxl
# Output workbook path and the worksheet name used by the helpers below.
book_name_xlsx = 'Result.xlsx'
sheet_name_xlsx = 'result'
def write_excel_xlsx(path, sheet_name, value):
    """Write the 2-D list ``value`` into a new .xlsx workbook at ``path``.

    Every cell is stringified before writing; the workbook's default sheet
    is renamed to ``sheet_name`` and the file is saved to ``path``.
    """
    workbook = openpyxl.Workbook()
    sheet = workbook.active
    sheet.title = sheet_name
    # openpyxl cells are 1-indexed, hence enumerate(..., start=1).
    for row_no, row in enumerate(value, start=1):
        for col_no, cell_value in enumerate(row, start=1):
            sheet.cell(row=row_no, column=col_no, value=str(cell_value))
    workbook.save(path)
    print("xlsx格式表格写入数据成功!")
def read_excel_xlsx(path, sheet_name):
    """Print every cell of worksheet ``sheet_name`` in the workbook at ``path``.

    Cells in a row are separated by a space+tab and each row ends with a
    newline, giving a simple tabular dump on stdout.
    """
    workbook = openpyxl.load_workbook(path)
    # Indexing the workbook by name replaces the deprecated
    # get_sheet_by_name() accessor.
    sheet = workbook[sheet_name]
    for cells in sheet.rows:
        for cell in cells:
            print(f"{cell.value} \t", end="")
        print()
# Dump the collected rows to Result.xlsx, then echo the sheet back to stdout.
write_excel_xlsx(book_name_xlsx, sheet_name_xlsx, value)
# + tags=["outputPrepend"]
read_excel_xlsx(book_name_xlsx, sheet_name_xlsx)
|
sv_comp21_result_parse/parseHtml.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#
# # Lab Assignment 11:
# # Write a python program to implement LDA as classifier
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# Load the Iris data set with pandas.
data = pd.read_csv('Iris.csv')
data
data.describe()
# Loading features as column vectors. NOTE: data.values on a mixed-type
# frame yields an object-dtype array, so x1/x2/Y are object arrays here.
newdata = data.values
x1 = newdata[:, 1]   # presumably SepalLengthCm — TODO confirm column order
n = len(x1)
x2 = newdata[:, 2]   # presumably SepalWidthCm
Y = newdata[:, 4]    # presumably the species label column
# +
# 3-D scatter plot of the raw features against the label, to eyeball the
# distribution (roughly Gaussian) and look for outliers before modelling.
# %matplotlib notebook
fig = plt.figure(figsize=(8,8))
ax = fig.add_subplot(111, projection='3d')
ax.scatter(x1, Y, color='blue')    # sepal length vs label
ax.scatter(x2, Y, color='green')   # sepal width vs label
ax.set_xlabel('SepalLengthCm')
ax.set_ylabel('SepalWidthCm')
plt.title('Scatter plot')
plt.show()
# -
# We do not see any obvious outliers in the raw-data visualisation above.
# +
# Standardizing (z-scoring) each feature to mean 0 and variance 1.
# Vectorized rewrite of the original per-element loop: np.mean/np.std were
# recomputed on every iteration (O(n^2) overall); computing them once and
# broadcasting yields identical values in O(n). astype(float) reproduces the
# float64 arrays the loop built via np.append.
n = len(x1)
x1_norm = ((x1 - np.mean(x1)) / np.std(x1)).astype(float).reshape(n, 1)
x2_norm = ((x2 - np.mean(x2)) / np.std(x2)).astype(float).reshape(n, 1)
# -
# calculating number of class for data which is
# 1. class-1 - Iris-setosa
# 2. class-2 -Iris-virginica
# covarience of our p dimensional data is equal to covarience of each class so
# Stack the standardized features into an (n, 2) design matrix.
X = np.hstack((x1_norm, x2_norm))
# Since the columns are zero-mean, X'X / n is the (biased) covariance matrix.
cov_matrix = (X.T).dot(X)/n
print(cov_matrix)
# We can see no linear dependence of our dimensions
# now we will make matrix for this class to calculate linear classifier
# Class 1 = first 50 rows (Iris-setosa); "class 2" = the remaining rows.
# NOTE(review): with the standard 150-row Iris file, F2 pools the other two
# species (100 rows) — confirm this two-class split is intended.
F1 = np.hstack((x1_norm[:50], x2_norm[:50]))
print(F1.shape)
F2 = np.hstack((x1_norm[50:], x2_norm[50:]))
print(F2.shape)
# Finding attributes of maximum a posterior function to find linear classifier
# Class priors pi_k: fraction of samples belonging to each class.
pi_1 = F1.shape[0]/len(x1)
pi_2 = F2.shape[0]/len(x1)
# Per-class mean vectors (column-wise means of the standardized features).
F1_mean = np.mean(F1, axis = 0)
F2_mean = np.mean(F2, axis = 0)
print("----------------------Mean Vector of F1 class--------------------")
print(F1_mean)
print()
print("----------------------Mean Vector of F2 class--------------------")
print(F2_mean)
print()
# Pooled within-class covariance: per-class covariances weighted by class size.
size = F1.shape[0]   # kept for backward compatibility: rows in class 1
n1 = F1.shape[0]
n2 = F2.shape[0]
cov_F1 = (F1.T).dot(F1)/n1
# BUG FIX: cov_F2 was previously divided by class 1's row count (50) instead
# of class 2's own (100), doubling its covariance; the pooled weights were
# likewise both `size`. Use each class's true size.
cov_F2 = (F2.T).dot(F2)/n2
cov = (n1*cov_F1 + n2*cov_F2)/n
# delta(x) = x.T*cov_matrix*mu_k - (1/2)mu_k.T*cov_matrix^-1*mu_k + log(pi_k)
#
# g_pred(X) = arg max (delta(x))
# lets find decision boundary between classes
# w0 =log(pi_k/pi_l)- (1/2)*(mu_k-mu_l)*cov_matrix*(mu_k+mu_l).T
#w = cov_matrix^-1*(mu_k-mu_l)
# 1st between class1 and class2
# Per the formula stated above:
#   w0 = ln(pi_k/pi_l) - (1/2)(mu_k - mu_l)' Sigma^-1 (mu_k + mu_l)
#   w  = Sigma^-1 (mu_k - mu_l)
inv_cov = np.linalg.inv(cov_matrix)
# BUG FIX: LDA's discriminant uses the natural log (np.log), not log10, and
# the quadratic term must use the *inverse* covariance (matching w1 and the
# formula above); the original multiplied by cov_matrix itself.
w0 = np.log(pi_1/pi_2) - (1/2)*(F1_mean - F2_mean).dot(inv_cov).dot(F1_mean + F2_mean)
w1 = inv_cov.dot(F1_mean - F2_mean)
print("For linear saperable b/w class 1 and 2 :\nw0 : ", w0,"\nw1 :" , w1)
# Reshape the raw features and labels to column vectors for plotting.
x1 = x1.reshape(n, 1)
x2 = x2.reshape(n, 1)
Y = Y.reshape(n, 1)
# %matplotlib notebook
X_plot = np.hstack((x1, x2))
X_plot_min = np.min(X_plot)
X_plot_max = np.max(X_plot)
# NOTE(review): F1 is (50, 2) and F2 is (100, 2) with the standard Iris
# split, so this sum broadcasts incompatible shapes — x1_norm/x2_norm were
# probably intended here (as used in the prediction cell below). Confirm.
Y_saparable = w0 + w1[0]*F1 + w1[1]*F2
min_Y_saparable = np.min(Y_saparable)
max_Y_saparable = np.max(Y_saparable)
plt.scatter(x1, Y, color = 'r',label = "Class-1")
plt.scatter(x2, Y, color = 'g',label = "Class-2")
plt.plot([X_plot_min, X_plot_max], [min_Y_saparable, max_Y_saparable], color = 'black')
plt.title('LDA-Linear Class separable plot')
plt.legend()
plt.show()
# +
# calculating number of data points after prediction for class 1 and 2
# Discriminant score per sample; positive => class 1, else class 2.
Y_saparable = w0 + w1[0]*x1_norm + w1[1]*x2_norm
count_1 = 0
count_2 = 0
for i in range(X.shape[0]):
    if(Y_saparable[i]>0):
        count_1 += 1
    else:
        count_2 += 1
print("data points in class 1: ", count_1, "\ndata points in class 2: ", count_2)
# NOTE(review): the confusion matrix is hard-coded from an earlier run, not
# computed from Y_saparable — recompute if the model above changes.
confusion_matrix = np.array([49, 0, 1,50]).reshape(2, 2)
print("-----------------The confusion matrix for data set-------------------")
print(confusion_matrix)
# -
# Classification metrics, read off the (hard-coded) confusion matrix above.
population = X.shape[0]
true_positive = 49
true_neg = 50
false_positive = 1  # class-2 sample predicted as class 1 (off-diagonal above)
accurancy = ((true_positive+true_neg)/population)*100
print("Accuracy :", accurancy)
# BUG FIX: precision is TP / (TP + FP); the original divided by the true
# negatives (numerically 0.98 here by coincidence, but the wrong formula).
precision = true_positive/(true_positive + false_positive)
print("precision : ", precision)
|
Lab-Assignment-11th/Lab-Assignment-11th_with_resposeVariable_numeric.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# <img src="data/photutils_banner.svg" width=500 alt="Photutils logo">
# + [markdown] slideshow={"slide_type": "slide"}
# # Photutils
#
# - Code: https://github.com/astropy/photutils
# - Documentation: http://photutils.readthedocs.org/en/stable/
# - Issue Tracker: https://github.com/astropy/photutils/issues
#
# ## Photutils capabilities:
#
# - Background and background noise estimation
# - Source Detection and Extraction
# - DAOFIND and IRAF's starfind
# - Image segmentation
# - local peak finder
# - Aperture photometry
# - PSF photometry
# - PSF matching
# - Centroids
# - Morphological properties
# - Elliptical isophote analysis
#
#
# ## In this additional notebook, we will review:
#
# - Aperture photometry (repeat of photutils overview material)
# - Using image segmentation to generate elliptical apertures of extended sources
# -
# ## Preliminaries
# + slideshow={"slide_type": "slide"}
# initial imports
import numpy as np
import matplotlib.pyplot as plt
# change some default plotting parameters
import matplotlib as mpl
mpl.rcParams['image.origin'] = 'lower'
mpl.rcParams['image.interpolation'] = 'nearest'
mpl.rcParams['image.cmap'] = 'viridis'
# Run the %matplotlib magic command to enable inline plotting
# in the current notebook. Choose one of these:
# %matplotlib inline
# # %matplotlib notebook
# -
# ### Load the data
# + [markdown] slideshow={"slide_type": "-"}
# We'll start by reading data and error arrays from FITS files. These are cutouts from the HST Extreme-Deep Field (XDF) taken with WFC3/IR in the F160W filter.
# + slideshow={"slide_type": "-"}
from astropy.io import fits
# XDF WFC3/IR F160W cutouts: science image and its RMS (noise) map.
sci_fn = 'data/xdf_hst_wfc3ir_60mas_f160w_sci.fits'
rms_fn = 'data/xdf_hst_wfc3ir_60mas_f160w_rms.fits'
sci_hdulist = fits.open(sci_fn)
rms_hdulist = fits.open(rms_fn)
# Record the brightness unit in the primary header (used by photutils below).
sci_hdulist[0].header['BUNIT'] = 'electron/s'
# -
# Print some info about the data.
sci_hdulist.info()
# Define the data and error arrays.
# + slideshow={"slide_type": "fragment"}
# BUG FIX: the `np.float` alias was deprecated in NumPy 1.20 and removed in
# 1.24; the builtin `float` (float64) is the documented replacement and
# produces an identical array dtype.
data = sci_hdulist[0].data.astype(float)
error = rms_hdulist[0].data.astype(float)
# -
# Extract the data header and create a WCS object.
# + slideshow={"slide_type": "fragment"}
from astropy.wcs import WCS
# Build a WCS object from the science header for pixel <-> sky conversions.
hdr = sci_hdulist[0].header
wcs = WCS(hdr)
# -
# Display the data.
from astropy.visualization import ImageNormalize, LogStretch
# + slideshow={"slide_type": "fragment"}
# Log-stretch normalization for display of the faint XDF field.
norm = ImageNormalize(vmin=1e-4, vmax=5e-2, stretch=LogStretch())
# FIX: the identical imshow call was accidentally duplicated; one render
# of the image is sufficient.
plt.imshow(data, norm=norm)
plt.title('XDF F160W Cutout')
# + [markdown] slideshow={"slide_type": "slide"}
# ---
# # Part 1: Aperture Photometry
# + [markdown] slideshow={"slide_type": "slide"}
# ## Performing aperture photometry at multiple positions
# + slideshow={"slide_type": "-"}
import astropy.units as u
from photutils.utils import calc_total_error
from photutils import CircularAperture, aperture_photometry

# Three source positions (x, y) in pixel coordinates and a 5-pixel aperture.
positions = [(90.73, 59.43), (73.63, 139.41), (43.62, 61.63)]
radius = 5.
apertures = CircularAperture(positions, r=radius)

# Total error = background error (RMS map) plus source Poisson error,
# scaled by the effective gain (total exposure time here).
eff_gain = hdr['TEXPTIME']
tot_error = calc_total_error(data, error, eff_gain)

unit = u.electron / u.s
phot = aperture_photometry(data, apertures, error=tot_error, unit=unit)
phot
# + [markdown] slideshow={"slide_type": "slide"}
# ## Bad pixel masking
# +
# create a bad pixel inside the first aperture to demonstrate masking
data2 = data.copy()
y, x = 59, 91
data2[y, x] = 100.

# Without a mask the bad pixel inflates the first aperture_sum.
aperture_photometry(data2, apertures, error=tot_error)
# -
# Note the large `aperture_sum` in the first source due to the bad pixel. Now mask the bad pixel so that it does not contribute to the photometry.
# + slideshow={"slide_type": "fragment"}
# Boolean mask the same shape as the data; True marks pixels to exclude.
mask = np.zeros_like(data2, dtype=bool)
mask[y, x] = True
aperture_photometry(data2, apertures, error=tot_error, mask=mask)
# -
# ## Performing aperture photometry at multiple positions using multiple apertures
# First define three different aperture shapes (different radii), but with the same positions.
# Same three positions, four concentric aperture sizes.
positions = [(90.73, 59.43), (73.63, 139.41), (43.62, 61.63)]
radii = [5., 7.5, 9., 11.]
apertures = [CircularAperture(positions, r=r) for r in radii]
# Output table gains aperture_sum_N / aperture_sum_err_N per radius index N.
phot = aperture_photometry(data, apertures, error=tot_error, unit=unit)
phot
# The output table above now contains columns for the `aperture_sum` and `aperture_sum_err` for each aperture. The column names are appended with `_N`, where N is running index of the apertures in the input `apertures` list, i.e. the first aperture is `_0`, the second is `_1`, etc.
# We can add columns to the table indicating the aperture radii.
# Record each aperture's radius as a table column. A loop replaces the four
# copy-pasted assignments and stays correct if `radii` changes length.
for i, r in enumerate(radii):
    phot['aperture_radius_{}'.format(i)] = np.ones(len(phot)) * r * u.pix
phot
# or put them in the table metadata.
# Alternatively, record the aperture definitions in the table metadata.
for i in range(len(radii)):
    phot.meta['aperture_{}'.format(i)] = 'Circular aperture with r={} pix'.format(radii[i])
phot.meta
# + [markdown] slideshow={"slide_type": "slide"}
# ## Aperture photometry using Sky apertures
# -
# First, let's define the sky coordinates by converting our pixel coordinates.
# +
from astropy.wcs.utils import pixel_to_skycoord

# Convert the pixel positions to sky coordinates using the image WCS.
positions = [(90.73, 59.43), (73.63, 139.41), (43.62, 61.63)]
x, y = np.transpose(positions)
coord = pixel_to_skycoord(x, y, wcs)
coord
# -
# Now define a circular aperture in sky coordinates.
#
# For sky apertures in angular units, the aperture radius must be a `Quantity`, in either pixel or angular units.
# + slideshow={"slide_type": "fragment"}
from photutils import SkyCircularAperture

# Sky-aperture radii must be Quantities — either in pixels...
radius = 5. * u.pix
sky_apers = SkyCircularAperture(coord, r=radius)
sky_apers.r
# -
# ...or in angular units on the sky.
radius = 0.5 * u.arcsec
sky_apers = SkyCircularAperture(coord, r=radius)
sky_apers.r
# When using a sky aperture, `aperture_photometry` needs the WCS transformation.
# + slideshow={"slide_type": "fragment"}
# Sky apertures need the WCS transformation — via the wcs keyword...
phot = aperture_photometry(data, sky_apers, wcs=wcs)
phot

# + slideshow={"slide_type": "fragment"}
# ...or via a FITS hdu (i.e. header and data), from which the WCS is derived.
phot = aperture_photometry(sci_hdulist[0], sky_apers)
phot
# + [markdown] slideshow={"slide_type": "slide"}
# ## Encircled flux
# -
# Here we want to perform aperture photometry at a single position with *many* apertures.
#
# Instead of generating a big table, we'll simply loop over the apertures and extract the fluxes from individual tables.
# Curve of growth: flux within 100 circular apertures of increasing radius
# around the second source.
radii = np.linspace(0.1, 20, 100)  # 100 apertures
flux = []
for r in radii:
    ap = CircularAperture(positions[1], r=r)  # single position
    phot = aperture_photometry(data, ap)
    flux.append(phot['aperture_sum'][0])

# + slideshow={"slide_type": "fragment"}
plt.plot(radii, flux, '+-')
plt.title('Encircled Flux')
plt.xlabel('Radius (pixels)')
plt.ylabel('Aperture Sum ($e^{-1}/s$)')
# -
# ## More about apertures: Advanced usage
# ### Aperture masks
positions = [(90.73, 59.43), (73.63, 139.41), (43.62, 61.63)]
radius = 5.
apertures = CircularAperture(positions, r=radius)
# Create a list of `ApertureMask` objects using the `to_mask()` method.
# method='exact' computes exact fractional pixel overlap with the circle.
masks = apertures.to_mask(method='exact')
# Let's plot the first one.
mask = masks[0]  # the first one
plt.imshow(mask)
plt.colorbar()
# The above image is a cutout of the aperture mask.
#
# We can create an image with the aperture mask at its position.
img = mask.to_image(shape=((200, 200)))
plt.imshow(img)
plt.colorbar()
# We can also create a cutout from a data image over the mask domain.
data_cutout = mask.cutout(data)
plt.imshow(data_cutout)
plt.colorbar()
# We can also create a mask-weighted cutout from the data. Here the circular aperture mask has been applied to the data.
data_cutout_aper = mask.multiply(data)
plt.imshow(data_cutout_aper)
plt.colorbar()
# + [markdown] slideshow={"slide_type": "slide"}
# ---
# # Part 2: Image Segmentation (extended version)
# -
# Image segmentation is the process where sources are identified and labeled in an image.
#
# The sources are detected by using a S/N threshold level and defining the minimum number of pixels required within a source.
#
# First, let's define a threshold image at 2$\sigma$ (per pixel) above the background.
# Per-pixel detection threshold: background + 2 sigma of the noise map.
bkg = 0.  # background level in this image (already subtracted)
nsigma = 2.
threshold = bkg + (nsigma * error)  # this should be background-only error
# Now let's detect "8-connected" sources of minimum size 5 pixels where each pixel is 2$\sigma$ above the background.
#
# "8-connected" pixels touch along their edges or corners. "4-connected" pixels touch along their edges. For reference, SExtractor uses "8-connected" pixels.
#
# The result is a segmentation image (`SegmentationImage` object). The segmentation image is the isophotal footprint of each source above the threshold.
# + slideshow={"slide_type": "fragment"}
from photutils import detect_sources

# A detection requires at least 5 connected pixels above the threshold.
npixels = 5
segm = detect_sources(data, threshold, npixels)

print('Found {0} sources'.format(segm.nlabels))
# -
# Display the segmentation image.
# +
# NOTE(review): random_cmap is imported but not used below — segm.cmap()
# already supplies a random colormap. Left in place deliberately.
from photutils.utils import random_cmap

# Side-by-side: the data and its segmentation image.
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 8))
ax1.imshow(data, norm=norm)
lbl1 = ax1.set_title('Data')
ax2.imshow(segm, cmap=segm.cmap())
lbl2 = ax2.set_title('Segmentation Image')
# + [markdown] slideshow={"slide_type": "slide"}
# It is better to filter (smooth) the data prior to source detection.
#
# Let's use a 5x5 Gaussian kernel with a FWHM of 2 pixels.
# +
from astropy.convolution import Gaussian2DKernel
from astropy.stats import gaussian_fwhm_to_sigma

# 5x5 Gaussian smoothing kernel with FWHM = 2 pixels, normalized to unit sum.
sigma = 2.0 * gaussian_fwhm_to_sigma  # FWHM = 2 pixels
kernel = Gaussian2DKernel(sigma, x_size=5, y_size=5)
kernel.normalize()
# Re-detect on the smoothed data (detection only; photometry uses raw data).
ssegm = detect_sources(data, threshold, npixels, filter_kernel=kernel)
# -
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 8))
ax1.imshow(segm, cmap=segm.cmap())
lbl1 = ax1.set_title('Original Data')
ax2.imshow(ssegm, cmap=ssegm.cmap())
lbl2 = ax2.set_title('Smoothed Data')
# ### Source deblending
# + [markdown] slideshow={"slide_type": "fragment"}
# Note above that some of our detected sources were blended. We can deblend them using the `deblend_sources()` function, which uses a combination of multi-thresholding and watershed segmentation.
# +
from photutils import deblend_sources
segm2 = deblend_sources(data, ssegm, npixels, filter_kernel=kernel,
contrast=0.001, nlevels=32)
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(15, 8))
ax1.imshow(data, norm=norm)
ax1.set_title('Data')
ax2.imshow(ssegm, cmap=ssegm.cmap())
ax2.set_title('Original Segmentation Image')
ax3.imshow(segm2, cmap=segm2.cmap())
ax3.set_title('Deblended Segmentation Image')
print('Found {0} sources'.format(segm2.max))
# + [markdown] slideshow={"slide_type": "slide"}
# ## Measure the photometry and morphological properties of detected sources
# -
from photutils import source_properties

# Measure photometry and morphology for every deblended segment.
catalog = source_properties(data, segm2, error=error, wcs=wcs)
# `catalog` is a `SourceCatalog` object. It behaves like a list of `SourceProperties` objects, one for each source.
catalog
catalog[0]  # the first source
catalog[0].xcentroid  # the xcentroid of the first source
# Please go [here](http://photutils.readthedocs.org/en/latest/api/photutils.segmentation.SourceProperties.html#photutils.segmentation.SourceProperties) to see the complete list of available source properties.
# We can create a Table of isophotal photometry and morphological properties using the ``to_table()`` method of `SourceCatalog`:
tbl = catalog.to_table()
tbl
# A subset of source can be specified, defined by the their labels in the segmentation image.
# + slideshow={"slide_type": "fragment"}
# Restrict measurements to four segments, selected by label number.
# NOTE(review): this uses the *undeblended* `segm` while `catalog` above used
# `segm2`, so the label numbers refer to different segmentations — confirm
# this is intentional.
labels = [1, 5, 7, 12]
cat2 = source_properties(data, segm, error=error, wcs=wcs, labels=labels)
tbl2 = cat2.to_table()
tbl2
# -
# A subset of property columns can also be specified.
# + slideshow={"slide_type": "fragment"}
# Only export a chosen subset of the available property columns.
columns = ['id', 'xcentroid', 'ycentroid', 'source_sum', 'area']
tbl3 = catalog.to_table(columns=columns)
tbl3
# -
# or a subset of sources with a subset of properties:
# Subset of sources with a subset of properties.
tbl4 = cat2.to_table(columns=columns)
tbl4
# Additional properties (not stored in the table) can be accessed directly via the `SourceCatalog` object.
# get a single object (id=12); catalog indexing is 0-based, ids are 1-based
obj = catalog[11]
obj.id
obj
# Cutouts of the segment footprint, masked data, and masked error for obj.
fig, ax = plt.subplots(figsize=(12, 8), ncols=3)
ax[0].imshow(obj.make_cutout(segm2.data))
ax[0].set_title('Source id={} Segment'.format(obj.id))
ax[1].imshow(obj.data_cutout_ma)
ax[1].set_title('Source id={} Data'.format(obj.id))
ax[2].imshow(obj.error_cutout_ma)
ax[2].set_title('Source id={} Error'.format(obj.id))
# + [markdown] slideshow={"slide_type": "slide"}
# ## Define the approximate isophotal ellipses for each object
# -
# Create elliptical apertures for each object using the measured morphological parameters.
# + slideshow={"slide_type": "-"}
from photutils import EllipticalAperture

# Build one elliptical aperture per source from its measured shape, scaling
# the 1-sigma axes by r=3 to approximate the isophotal extent.
r = 3.  # approximate isophotal extent
apertures = []
for obj in catalog:
    position = (obj.xcentroid.value, obj.ycentroid.value)
    a = obj.semimajor_axis_sigma.value * r
    b = obj.semiminor_axis_sigma.value * r
    theta = obj.orientation.value
    apertures.append(EllipticalAperture(position, a, b, theta=theta))
# -
# Now plot the elliptical apertures on the data and the segmentation image.
# + slideshow={"slide_type": "fragment"}
# Overlay the elliptical apertures on both the data and the segmentation map.
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 8))
ax1.imshow(data, norm=norm)
ax1.set_title('Data')
ax2.imshow(segm2, cmap=segm2.cmap())
ax2.set_title('Segmentation Image')
for aperture in apertures:
    aperture.plot(color='white', lw=1.5, alpha=0.5, ax=ax1)
    aperture.plot(color='white', lw=1.5, alpha=1.0, ax=ax2)
# + [markdown] slideshow={"slide_type": "slide"}
# _Note that the segmentation image can be reused on other registered data (e.g. multiple filters) to generate a multiband catalog. One does not need to regenerate it each time -- simply apply it to other bands and/or data._
# + [markdown] slideshow={"slide_type": "slide"}
# The segmentation image can also be modified before measuring source photometry/properties, e.g.:
#
# - remove source segments (artifacts, diffraction spikes, etc.)
# - combine segments
# - mask regions of a segmentation image (e.g. near image borders)
#
# See [modifying segmentation images](https://photutils.readthedocs.io/en/stable/segmentation.html#modifying-a-segmentation-image) for further information.
# + [markdown] slideshow={"slide_type": "slide"}
# If desired, a `SExtractor` segmentation image can even be input to Photutils `source_properties()`.
#
# To generate a `SExtractor` segmentation image, set the following in the SExtractor config:
# ```
# CHECKIMAGE_TYPE SEGMENTATION
# CHECKIMAGE_NAME segmentation.fits
# ```
# -
# Once `segmentation.fits` exists, one could do the following:
#
# `>>> from photutils import SegmentationImage`
#
# `>>> se_segm_data = fits.getdata('segmentation_fits')`
#
# `>>> se_segm = SegmentationImage(se_segm_data)`
#
# `>>> se_cat = source_properties(data, se_segm, error=error, wcs=wcs, labels=labels)`
#
# Note that `data` and `se_segm_data` must have the same shape and be registered.
|
aas_233_workshop/09-Photutils/photutils_extended.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:geo_env]
# language: python
# name: conda-env-geo_env-py
# ---
# #### 1. Importing libraries, datasets, cleaning and preparing the datasets to be merged
# importing libraries
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib
import os
import geopandas as gpd
import geoplot
# +
# This command prompts matplotlib visuals to appear in the notebook.
# %matplotlib inline
# -
path = r'C:\Users\
import shapefile as shp
import matplotlib.pyplot as plt
# Import the ".geojson" file for the Berlin district shape outline using the GeoPandas `read_file` command.
berlin = gpd.read_file(os.path.join(path, '2 - AirBnB - Data', 'Original Data', 'berlin_districts.geojson'))
# #### using a geojson file as not suitable shape file has been found for the city of Berlin (data source: https://opendata-esri-de.opendata.arcgis.com/)
berlin
# import the Airbnb data set (already prepared/cleaned in a previous notebook)
df_airbnb = pd.read_csv(os.path.join(path, '2 - AirBnB - Data', 'Prepared Data', 'df_airbnb_v2_rev.csv'))
df_airbnb.head()
# Dropping unnamed column
# ('Unnamed: 0' / 'Unnamed: 0.1' are leftover index columns from earlier CSV exports)
df_airbnb1 = df_airbnb.drop(columns = ['Unnamed: 0','Unnamed: 0.1'])
df_airbnb1
# Create a subset with only the columns needed for mapping.
# .copy() makes an explicit copy so the rename below does not operate on a
# view of `berlin` (avoids pandas' SettingWithCopyWarning).
berlin1 = berlin[['Gemeinde_n', 'Gemeinde_s', 'geometry']].copy()
berlin1
# Rename the district-name column to 'area' so it matches the Airbnb
# dataframe's merge key.
berlin1 = berlin1.rename(columns={'Gemeinde_n': 'area'})
berlin1
# #### 2. Data wrangling and merging
# #### in the previous notebook I removed some outliers from the 'price' column, now before moving in further analysis, similar steps will be applied to the 'reviews_per_month' column
df_airbnb1.describe()
df_airbnb1.shape
# ##### 'reviews_per_month' values this high are quite implausible, so deleting all rows with 30 or more reviews per month
# +
# Clean extreme values.
df_airbnb2 = df_airbnb1[df_airbnb1['reviews_per_month'] < 30]
# -
df_airbnb2.shape
# +
# Merge both dataframes on the "area" column.
# NB: You need to merge the recipes dataframe INTO the GeoDataFrame; otherwise, the output will be a pandas dataframe rather
# than a Geopandas dataframe.
berlin_m = berlin1.merge(df_airbnb2, on = 'area', how = 'outer', indicator = True)
# -
berlin_m
type(berlin_m)
berlin_m.shape
berlin_m['_merge'].value_counts()
# +
# Check for missing values.
berlin_m.isnull().sum()
# +
# Impute missing values with the median.
# Impute missing numeric values with each column's median. Assigning the
# result back (instead of calling fillna(inplace=True) on a column view)
# avoids pandas chained-assignment warnings and works under copy-on-write.
for col in ['price', 'min_nights', 'nr_reviews', 'reviews_per_month',
            'calculated_host_listings_count', 'availability_365']:
    berlin_m[col] = berlin_m[col].fillna(berlin_m[col].median())
# -
berlin_m.isnull().sum()
# Remove the 5 observations that don't have a room_type or price_category
# Keep only rows that have a room_type (drops the unmatched observations).
# .notna() is the idiomatic form of the original `isnull() == False`.
berlin_clean = berlin_m[berlin_m['room_type'].notna()]
berlin_clean.isnull().sum()
# Check for duplicate rows.
dups = berlin_clean.duplicated()
dups.shape
berlin_clean.dtypes
# Drop rows without a district key ('Gemeinde_s'); they cannot be matched
# to map geometry.
berlin_clean1 = berlin_clean[berlin_clean['Gemeinde_s'].notna()]
berlin_clean1
berlin_clean1['_merge'].value_counts(dropna=False)
# #### 3. Visualization
# +
# Check the rating variable.
sns.distplot(berlin_clean['price'], bins=20)
# -
berlin_clean.dtypes
berlin_clean.drop(columns ="_merge", inplace = True)
# It's necessary to drop the categorical column; otherwise, the `to_file` function breaks.
out = r'C:\Users\laune\Desktop\6\<NAME>ara - AirBnB Analysis - 2021-07\4 - AirBnB - Analysis\prices.shp'
berlin_clean.to_file(out)
# ### Plotting a choropleth
# ##### using choropleth
# # #### ! by using berlin_clean1 I do not get any Berlin map
gplt = geoplot.choropleth(
berlin_clean1, hue = 'price', cmap='Blues', figsize=(15, 15)
)
# #### ! 4 areas are missing!
#
# ##### using matplotlib
# Create a subplot with `fig` and `ax`.
fig, ax = plt.subplots(1, figsize=(15, 15))
plt.title('best airbnb prices Berlin', size=30)
tl = berlin_clean1.plot(column='price',
cmap='Blues', # Color map for the states
linewidth=0.6, # Line width for state borders
ax=ax, # Plotting the map on `ax`
edgecolor='black'); # Area border colors
# +
# Build a ScalarMappable so the colorbar spans the plotted price range.
# Fix: use berlin_clean1 (the dataframe actually plotted) for BOTH bounds —
# the original mixed berlin_clean1 (vmin) with berlin_clean (vmax).
c_bar_colors = plt.cm.ScalarMappable(
    cmap='Blues',
    norm=plt.Normalize(vmin=berlin_clean1['price'].min(),
                       vmax=berlin_clean1['price'].max()))
# Plot the colorbar.
color_bar = fig.colorbar(c_bar_colors)
# -
tl.figure
|
3 - Airbnb - Scripts/3. Airbnb Geographic Visualization_V3.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import io
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import os
import os.path
import pandas as pd
import pickle
import PIL
import torch
import torchvision
import urllib
import warnings
import sys
# NOTE: '__file__' is passed as a *string literal* because __file__ is not
# defined inside notebooks; os.path.realpath('__file__') therefore resolves
# relative to the current working directory, so this effectively appends the
# notebook's parent directory to the import path.
sys.path.append(os.path.join(os.path.dirname(os.path.realpath('__file__')), '..'))
# Project-local attack/evaluation modules.
from attacks import eot_attack_evaluator
from attacks import eot_attacks
from attacks import utils
from utils import labels_util
# Disable annoying UserWarning caused by using nn.Upsample
# in the relighting model.
warnings.filterwarnings("ignore", category=UserWarning)
# %load_ext autoreload
# %autoreload 2
# -
# # I. PubFig10 + FaceNet + [multiilum | DPR] evaluation
# ## I.a. Pretrained classifier
# +
# Attack configurations for the pretrained FaceNet classifier on PubFig10:
# first with the multi-illumination relighter, then with the DPR relighter.
# (Key meanings mirror the commented indoor_configs below.)
pubfig_configs = [
    {
        'dataset': 'pubfig10',
        'dataset_mode': 'test',
        'classif_model_name': 'pubfig_facenet',
        'classif_mode': 'normal_pretrained',
        'relight_model_name': 'multi_illumination_murmann',
        'relight_checkpoint_path': '../relighters/multi_illumination/'\
            'checkpoints/relight/epoch_13.pth',
        # Relighter-specific constant.
        'relighter_eps': 1e-4,
        # Learning rate for attack gradient descent.
        'learning_rate': 0.02,
        # Number of gradient descent iterations in the attack.
        'num_iterations': 5,
        # Gamma correction constant for the multi_illumination relighter.
        'gamma': 1.4,
        # Radius of ball of inf-norm of allowed perturbations.
        'epses': [0.1],
        # Constrain the perturbed image to stay in the original class.
        'attack_type': 'class_constrained_eot',
        'targets': [1, 3], # eg: [0, 8] Target: 'Aaron Eeckhart' for everyone besides himself, otherwise '<NAME>'
        'debugging': False,
    },
    # Same attack, but using the DPR relighter (no relighter_eps/gamma needed).
    {
        'dataset': 'pubfig10',
        'dataset_mode': 'test',
        'classif_model_name': 'pubfig_facenet',
        'classif_mode': 'normal_pretrained',
        'relight_model_name': 'dpr',
        'relight_checkpoint_path': '../relighters/DPR/trained_model/trained_model_03.t7',
        # Learning rate for attack gradient descent.
        'learning_rate': 0.02,
        # Number of gradient descent iterations in the attack.
        'num_iterations': 5,
        # Radius of ball of inf-norm of allowed perturbations.
        'epses': [0.1],
        'attack_type': 'class_constrained_eot',
        'targets': [1, 3],
        'debugging': False,
    },
]
# -
# Run the EOT attack evaluation for each configuration; results are
# written to '_results_last.csv'.
for config in pubfig_configs:
    ev = eot_attack_evaluator.AttackEvaluator(config, '_results_last.csv')
    ev.evaluate()
# ## I. b. Adversarially trained classifier
# +
# Same DPR attack configuration as above, but the classifier under attack
# is adversarially trained on the spot rather than the normal pretrained one.
adv_pubfig_config = {
    'dataset': 'pubfig10',
    'dataset_mode': 'test',
    'classif_model_name': 'pubfig_facenet',
    # Difference! This time we evaluate a classifier that is adversarially trained on the spot.
    'classif_mode': 'adversarial_train',
    'relight_model_name': 'dpr',
    'relight_checkpoint_path': '../relighters/DPR/trained_model/trained_model_03.t7',
    # Learning rate for attack gradient descent.
    'learning_rate': 0.02,
    # Number of gradient descent iterations in the attack.
    'num_iterations': 5,
    # Radius of ball of inf-norm of allowed perturbations.
    'epses': [0.1],
    'attack_type': 'class_constrained_eot',
    'targets': [1, 3],
    'debugging': False,
}
# Run the evaluation; results are written to '_results_adv.csv'.
ev = eot_attack_evaluator.AttackEvaluator(adv_pubfig_config, '_results_adv.csv')
ev.evaluate()
# -
# # II. ResNet + Indoor Scenes + multiilum (TBD)
# Attack configuration for the ResNet classifier on the Indoor Scenes
# dataset, using the multi-illumination relighter.
indoor_configs = [
    {
        'dataset': 'indoor_scenes',
        'dataset_mode': 'test',
        'classif_model_name': 'resnet_indoor',
        'classif_mode': 'normal_pretrained',
        'relight_model_name': 'multi_illumination_murmann',
        'relight_checkpoint_path': '../relighters/multi_illumination/'\
            'checkpoints/relight/epoch_13.pth',
        # Relighter-specific constant.
        'relighter_eps': 1e-4,
        # Learning rate for attack gradient descent.
        'learning_rate': 0.05,
        # Number of gradient descent iterations in the attack.
        'num_iterations': 5,
        # Gamma correction constant for the multi_illumination relighter.
        'gamma': 1.3,
        # Radius of ball of inf-norm of allowed perturbations.
        # (Three epsilons are evaluated here, unlike the single 0.1 above.)
        'epses': [0.001, 0.005, 0.01],
        # Constrain the perturbed image to be in the same class as
        # the original image.
        'attack_type': 'class_constrained_eot',
        # Target label is 9 (warehouse) for all non-warehouse images, otherwise 0 (airport)
        'targets': [9, 0],
        'debugging': False,
    },
]
|
experiments/eot_evaluation.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="ehvmN7Yq2_j2" colab_type="text"
# # **Control Flow:**
# Welcome to this lesson on Control Flow! Control flow is the sequence in which your code is run helping you in decision making. Here, we'll learn about several tools in Python we can use to affect our code's control flow:
#
# * Conditional Statements
# * Boolean Expressions
# * For and While Loops
# * Break and Continue
# * Zip and Enumerate
# * List Comprehensions
# + [markdown] id="Ih5lvgbH7_bF" colab_type="text"
# ## **Conditional If Statements:**
#
# Sometimes we need to run a line of code only if a particular condition is true.
# For example suppose that you have subscribed to a sharebike rental service.
#
# Lets look at its pay as you use billing system. The app contains a credit balance which then the user can use to rent the bikes.
#
# The customer can then set up a link to their bank account so that if their app credit balance goes below a particular threshold amount, in this case Rs.100, then Rs.500 in credit is added from their bank account.
#
# this can be represented as in below:
#
# ```
# if bikeapp_balance < 100:
# bikeapp_balance += 500
# bank_balance -= 500
# ```
# + id="ynX7uFME4aq_" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="c97cb77e-0bc3-49f5-b70a-ed697396b587"
# Pay-as-you-go top-up demo: print both balances, top up the app credit
# if it is below the threshold, then print the balances again.
bikeapp_balance = 99
bank_balance = 1000
print(bikeapp_balance, bank_balance)

# Balance is below Rs.100, so move Rs.500 of credit over from the bank.
if bikeapp_balance < 100:
    bikeapp_balance += 500
    bank_balance -= 500
print(bikeapp_balance, bank_balance)
# + [markdown] id="DP_isfUUtHw3" colab_type="text"
# - An *if* statement starts with the *if* keyword, followed by the condition to be checked, in this case *bikeapp_balance* < 100, and then a colon. The condition is specified in a boolean expression that evaluates to either *True* or *False*.
#
# - After this line is an indented block of code to be executed if that condition is true. Here, the lines that increment *bikeapp_balance* and decrement *bank_balance* only execute if it is true that *bikeapp_balance* is less than 100. If not, the code in this if block is simply skipped.
# + [markdown] id="8SoDevOntowm" colab_type="text"
# > You have learned about Python's comparison operators (e.g. == and !=) and how they are different from assignment operators (e.g. =). In conditional statements, you want to use comparison operators. For example, you'd want to use if x == 5 rather than if x = 5. If your conditional statement is causing a syntax error or doing something unexpected, check whether you have written == or =!
# + [markdown] id="N7kP8ymfuZs0" colab_type="text"
# ## **Conditional If Else Statements:**
#
# Else helps us to execute a diffrent block of code when a particular condition is not true or even for multiple other conditions diffrent from the *if()* condition.
#
# Lets see it in action using an example:
# + id="S-CVWAwWtAvB" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="a96af217-0f30-49cc-a626-11c2bfeee982"
# Report whether n is even or odd using the remainder of division by 2.
n = 4
if n % 2 != 0:
    print("Number " + str(n) + " is odd.")
else:
    print("Number " + str(n) + " is even.")
# + [markdown] id="X6edLDaNzBuz" colab_type="text"
# > if you have more than 2 possible cases then you can also use *elif()*. Lets look at an example:
#
# + id="sMucuIQdv8YW" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="d518a2a3-46b3-4f5a-e992-3542c2d55de9"
# Demonstrate an if/elif/else chain: each season is checked in turn,
# and the else clause handles any unrecognized value.
season = 'fall'
if season == 'spring':
    print('plant the garden!')
elif season == 'summer':
    print('water the garden!')
elif season == 'fall':
    print('harvest the garden!')
elif season == 'winter':
    print('stay indoors!')
else:
    print('unrecognized season')
# + [markdown] id="vgGEoCCIzesU" colab_type="text"
# ### **Summarize If, Elif, Else:**
#
# 1. **if:** An *if* statement must always start with an *if* clause, which contains the first condition that is checked. If this evaluates to *True*, Python runs the code indented in this *if* block and then skips to the rest of the code after the *if* statement.
#
# 2. **elif:** *elif* is short for "else if." An *elif* clause is used to check for an additional condition if the conditions in the previous clauses in the *if* statement evaluate to *False*. As you can see in the example, you can have multiple *elif* blocks to handle different situations.
#
# 3. **else:** Last is the *else* clause, which must come at the end of an *if* statement if used. This clause doesn't require a condition. The code in an else block is run if all conditions above that in the *if* statement evaluate to *False*.
# + [markdown] id="Z1pisLR01GNq" colab_type="text"
# ## **Ouestion**:
#
# Write an if statement that lets a student know which of these grades they got based on the number they got in exams, which is stored in the integer variable *marks*.
#
# Marks -> Grade
#
# 100-90 -> A+
#
# 90-80 -> A
#
# 80-70 -> B
#
# 70-60 -> C
#
# 60-50 -> D
#
# 50-40 -> E
#
# <40 -> F
#
# > All of the lower and upper bounds here are inclusive, and marks can only take on positive integer values up to 100.
#
# In your if statement, assign the result variable to a string holding the appropriate message based on the value of marks.
#
# "you have scored *[Grade]* Grade."
# + id="2wdJN8S3zVMh" colab_type="code" colab={}
marks = 60 # use this input to make your submission

# Reference solution: map the score to a letter grade. Boundary values
# (e.g. exactly 90) fall into the higher grade, since the prompt's bounds
# overlap at the edges.
if marks >= 90:
    grade = "A+"
elif marks >= 80:
    grade = "A"
elif marks >= 70:
    grade = "B"
elif marks >= 60:
    grade = "C"
elif marks >= 50:
    grade = "D"
elif marks >= 40:
    grade = "E"
else:
    grade = "F"
result = "you have scored " + grade + " Grade."

print(result)
# + [markdown] id="ugIlhL7m3zXD" colab_type="text"
# ## **Question:**
#
# Depending on where an individual is from we need to tax them appropriately. The states of CA, MN, and NY have taxes of 7.5%, 9.5%, and 8.9% respectively. Use this information to take the amount of a purchase and the corresponding state to assure that they are taxed by the right amount.
# + id="1mGkeGw536hm" colab_type="code" colab={}
# Reference solution: the original cell contained placeholder comments in
# place of values/conditions and did not run. Sample inputs filled in below.
state = 'CA'  # Either CA, MN, or NY
purchase_amount = 100.0  # amount of purchase

if state == 'CA':
    # CA sales tax is 7.5%.
    tax_amount = .075
    total_cost = purchase_amount*(1+tax_amount)
    result = "Since you're from {}, your total cost is {}.".format(state, total_cost)
elif state == 'MN':
    # MN sales tax is 9.5%.
    tax_amount = .095
    total_cost = purchase_amount*(1+tax_amount)
    result = "Since you're from {}, your total cost is {}.".format(state, total_cost)
elif state == 'NY':
    # NY sales tax is 8.9%.
    tax_amount = .089
    total_cost = purchase_amount*(1+tax_amount)
    result = "Since you're from {}, your total cost is {}.".format(state, total_cost)
print(result)
# + [markdown] id="x6l-cZP84fJq" colab_type="text"
# ## **Complex Boolean Expressions:**
#
# *If* statements sometimes use more complicated boolean expressions for their conditions. They may contain multiple comparisons operators, logical operators, and even calculations. Examples:
#
# ```
# if 18.5 <= (weight / height**2) < 25:
# print("BMI is considered 'normal'")
#
# if is_raining and is_sunny:
# print("Is there a rainbow?")
#
# if (not unsubscribed) and (location == "USA" or location == "IND"):
# print("send email")
# ```
#
# For really complicated conditions you might need to combine some *ands*, *ors* and *nots* together. Use parentheses if you need to make the combinations clear.
#
# However simple or complex, the condition in an *if* statement must be a boolean expression that evaluates to either *True* or *False* and it is this value that decides whether the indented block in an *if* statement executes or not.
# + [markdown] id="x0jp6rHk6tAd" colab_type="text"
# ## **Good and Bad Examples:**
#
# Here are some things to keep in mind while writing boolean expressions for your *if* statements.
#
# ### **1. Don't use True or False as conditions:**
# While "*True*" is a valid boolean expression, it's not useful as a condition since it always evaluates to *True*, so the indented code will always get run. Similarly, *if False* is not a condition you should use either - the statement following this *if* statement would never be executed.
#
# ```
# # Bad example
# if True:
# print("This indented code will always get run.")
# ```
#
# Similarly, it's useless to use any condition that you know will always evaluate to *True*, like this example above. A boolean variable can only be *True* or *False*, so either *is_cold* or *not is_cold* is always *True*, and the indented code will always be run.
#
# ```
# # Another bad example
# if is_cold or not is_cold:
# print("This indented code will always get run.")
# ```
# + [markdown] id="jLPGtgTs9Eh2" colab_type="text"
# ### **2. Be careful writing expressions that use logical operators:**
#
# Logical operators *and*, *or* and *not* have specific meanings that aren't quite the same as their meanings in plain English. Make sure your boolean expressions are being evaluated the way you expect them to.
# ```
# # Bad example
# if weather == "snow" or "rain":
# print("Wear boots!")
# ```
# This code is valid in Python, but it is not a boolean expression, although it reads like one. The reason is that the expression to the right of the or operator, "rain", is not a boolean expression - it's a string! Later we'll discuss what happens when you use non-boolean-type objects in place of booleans.
# + [markdown] id="GvEplz5N9Zdb" colab_type="text"
# ### **3. Don't compare a boolean variable with == True or == False:**
# This comparison isn’t necessary, since the boolean variable itself is a boolean expression.
# ```
# # Bad example
# if is_cold == True:
# print("The weather is cold!")
# ```
# This is a valid condition, but we can make the code more readable by using the variable itself as the condition instead, as below.
# ```
# # Good example
# if is_cold:
# print("The weather is cold!")
# ```
# > If you want to check whether a boolean is False, you can use the not operator.
# + [markdown] id="f-e4vBjswgri" colab_type="text"
# ## **Truth Value Testing:**
#
# If we use a non-boolean object as a condition in an *if* statement in place of the boolean expression, Like in the 2nd example above, Python will check for its truth value and use that to decide whether or not to run the indented code. By default, the truth value of an object in Python is considered True unless specified as False in the documentation.
#
# Here are most of the built-in objects that are considered False in Python:
#
# - constants defined to be false: *None* and *False*
# - zero of any numeric type: *0, 0.0, 0j, Decimal(0), Fraction(0, 1)*
# - empty sequences and collections: *'' , "", (), [], {}, set(), range(0)*
# + id="UXoUXQw85N0_" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="979c593f-d14a-482f-c7cc-2195ad7324ab"
# Truth value testing: `errors` is a non-zero number, so it is truthy and
# the error message is selected.
errors = 3
message = "You have {} errors to fix!".format(errors) if errors else "No errors to fix!"
print(message)
# + [markdown] id="KUrZxBi8xMNQ" colab_type="text"
# > In this code, errors has the truth value True because it's a non-zero number, so the error message is printed. This is a nice, succinct way of writing an if statement.
# + [markdown] id="CialzRUUycYa" colab_type="text"
# ### **Ouestion:**
# Imagine an air traffic control program that tracks three variables, *altitude*, *speed*, and *propulsion* which for a particular airplane have the values specified below.
# ```
# altitude = 10000
# speed = 250
# propulsion = "Propeller"
# ```
# For each of the following boolean expressions, work out whether it evaluates to True or False and match the correct value.
#
# - altitude < 1000 and speed > 100
# - (propulsion == "Jet" or propulsion == "Turboprop") and speed < 300 and altitude > 20000
# - not (speed > 400 and propulsion == "Propeller")
# - (altitude > 500 and speed > 100) or not propulsion == "Propeller"
# + id="PnyTpu2VxGRo" colab_type="code" colab={}
altitude = 10000
speed = 250
propulsion = "Propeller"

# Reference solution: evaluate each boolean expression from the prompt.
print(altitude < 1000 and speed > 100)  # False: altitude is not < 1000
print((propulsion == "Jet" or propulsion == "Turboprop")
      and speed < 300 and altitude > 20000)  # False: propulsion check fails
print(not (speed > 400 and propulsion == "Propeller"))  # True: speed <= 400
print((altitude > 500 and speed > 100)
      or not propulsion == "Propeller")  # True: first operand is True
# + [markdown] id="qYBnISVTzrcg" colab_type="text"
# ## **For Loops:**
#
# Python has two kinds of loops - for loops and while loops. A for loop is used to "iterate", or do something repeatedly, over an iterable.
#
# An iterable is an object that can return one of its elements at a time. This can include sequence types, such as strings, lists, and tuples, as well as non-sequence types, such as dictionaries and files.
#
# Example:
# + id="Uklx-pvdG90s" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 102} outputId="34869d0c-e095-48e7-827e-3c77eada7cc2"
# Iterate over the list: `city` takes each element of `cities` in turn.
cities = ['new york city', 'mountain view', 'chicago', 'los angeles']
for city in cities:
    print(city)
# Runs once, after the loop finishes (note the unindented line).
print("Done!")
# + [markdown] id="quErSmRaHEol" colab_type="text"
# ### **Components of a for Loop:**
# 1. The first line of the loop starts with the *for* keyword, which signals that this is a *for* loop
# 2. Following that is *city in cities*, indicating *city* is the iteration variable, and *cities* is the iterable being looped over. In the first iteration of the loop, *city* gets the value of the first element in *cities*, which is “new york city”.
# 3. The *for* loop heading line always ends with a colon *:*
# 4. Following the *for* loop heading is an indented block of code, the body of the loop, to be executed in each iteration of this loop. There is only one line in the body of this loop - *print(city)*.
# 5. After the body of the loop has executed, we don't move on to the next line yet; we go back to the for heading line, where the iteration variable takes the value of the next element of the iterable. In the second iteration of the loop above, *city* takes the value of the next element in *cities*, which is "mountain view".
# 6. This process repeats until the loop has iterated through all the elements of the iterable. Then, we move on to the line that follows the body of the loop - in this case, *print("Done!")*. We can tell what the next line after the body of the loop is because it is unindented. Here is another reason why paying attention to your indentation is very important in Python!
# + [markdown] id="LkPeOdqhHt-S" colab_type="text"
# > You can name iteration variables however you like. A common pattern is to give the iteration variable and iterable the same names, except the singular and plural versions respectively (e.g., 'city' and 'cities').
# + [markdown] id="EzjgTy3oJo9U" colab_type="text"
# ## **Using the *range()* Function with for Loops:**
# *range()* is a built-in function used to create an iterable sequence of numbers. You will frequently use *range()* with a for loop to repeat an action a certain number of times, as in this example:
#
#
# + id="ZufzUKnLG_49" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="20eff6c2-c891-4538-f26f-cfab5835800b"
for i in range(3):
print("Hello!")
# + [markdown] id="bAycHJdLM2fZ" colab_type="text"
# ## **range(start=0, stop, step=1)**
# The range() function takes three integer arguments, the first and third of which are optional:
#
# - The 'start' argument is the first number of the sequence. If unspecified, 'start' defaults to 0.
# - The 'stop' argument is 1 more than the last number of the sequence. This argument must be specified.
# - The 'step' argument is the difference between each number in the sequence. If unspecified, 'step' defaults to 1.
#
#
# Notes on using range():
#
# - If you specify one integer inside the parentheses with range(), it's used as the value for 'stop,' and the defaults are used for the other two. Example-
# + id="bCyOEET0LKEV" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 85} outputId="82bfe7c1-03ec-4463-8659-a7a85e1f9caa"
for i in range(4):
print(i)
# + [markdown] id="seR1RsifQoiu" colab_type="text"
# - If you specify two integers inside the parentheses with range(), they're used for 'start' and 'stop,' and the default is used for 'step.' Example-
# + id="MNzooFSsQVMb" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 85} outputId="42aca6cc-acea-4bcb-80c9-6439337cad4c"
for i in range(2, 6):
print(i)
# + [markdown] id="kEKyJFNZQtxM" colab_type="text"
#
# - Or you can specify all three integers for 'start', 'stop', and 'step.' Example-
# + id="l2Aztu8AQcue" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 102} outputId="19fc3eab-52ce-4c59-916a-de56b387cf9b"
for i in range(1, 10, 2):
print(i)
# + [markdown] id="8zOw8WP4RC7B" colab_type="text"
# ## **Creating and Modifying Lists:**
# In addition to extracting information from lists, as we did in the first example above, you can also create and modify lists with *for* loops. You can **create** a list by appending to a new list at each iteration of the for loop like this:
# + id="p2zsBeOwQidy" colab_type="code" colab={}
# Creating a new list
cities = ['new york city', 'mountain view', 'chicago', 'los angeles']
capitalized_cities = []
# Append the title-cased form of each city name to the new list.
for city in cities:
    capitalized_cities.append(city.title())
# + id="M0m3k0-HRwAT" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 85} outputId="806c0da9-3767-472f-e8f2-165aa3097c3d"
for cap_city in capitalized_cities:
print(cap_city)
# + [markdown] id="dw6_HNA_Sfc2" colab_type="text"
# **Modifying** a list is a bit more involved, and requires the use of the *range()* function.
#
# We can use the *range()* function to generate the indices for each value in the cities list. This lets us access the elements of the list with *cities[index]* so that we can modify the values in the cities list in place.
# + id="aVjmwdt_R54p" colab_type="code" colab={}
cities = ['new york city', 'mountain view', 'chicago', 'los angeles']
for index in range(len(cities)):
cities[index] = cities[index].title()
# + id="Kwh9PYVsSxIJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 85} outputId="309bbc2f-d27a-4536-90bc-b4e9d5de2158"
for city in cities:
print(city)
# + [markdown] id="Cux22zs5VB60" colab_type="text"
# ### **Question:**
# Write a for loop below that will print out every whole number that is a multiple of 5 and less than or equal to 30.
# + id="tsrVr6kKTMuD" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 119} outputId="24f47265-5bce-489f-c601-73407b7971f8"
# Write a for loop using range() to print out multiples of 5 up to 30 inclusive
for i in range(5, 35, 5):
print(i)
# + [markdown] id="grakoICBGtDU" colab_type="text"
# ### **Question:**
# Write a for loop that iterates over the names list to create a usernames list. To create a username for each name, make everything lowercase and replace spaces with underscores. Running your for loop over the list:
# ```
# names = ["<NAME>", "<NAME>", "<NAME>", "<NAME>"]
# ```
# should create the list:
#
# ```
# usernames = ["joey_tribbiani", "monica_geller", "chandler_bing", "phoebe_buffay"]
# ```
#
# > HINT: Use the *.replace()* method to replace the spaces with underscores. Check out how to use this method in this [Stack Overflow answer](https://stackoverflow.com/questions/12723751/replacing-instances-of-a-character-in-a-string/12723785#12723785).
# + id="zkGdiF3AVNKf" colab_type="code" colab={}
names = ["<NAME>", "<NAME>", "<NAME>", "<NAME>"]
usernames = []

# Reference solution: lowercase each name and replace spaces with
# underscores to form the username.
for name in names:
    usernames.append(name.lower().replace(" ", "_"))

print(usernames)
# + [markdown] id="8gQCnzE_HUBx" colab_type="text"
# ### **Question:**
# Write a for loop that iterates over a list of strings, tokens, and counts how many of them are XML tags.
#
# > XML is a data language similar to HTML. You can tell if a string is an XML tag if it begins with a left angle bracket "<" and ends with a right angle bracket ">".
#
# Keep track of the number of tags using the variable count.
#
# You can assume that the list of strings will not contain empty strings.
# + id="ukZhXoydHnkw" colab_type="code" colab={}
tokens = ['<greeting>', 'Hello World!', '</greeting>']
count = 0

# Reference solution: a token is an XML tag when it starts with '<' and
# ends with '>'.
for token in tokens:
    if token.startswith('<') and token.endswith('>'):
        count += 1

print(count)
# + [markdown] id="hts0b-6HHsap" colab_type="text"
# ### **Question:**
# Write some code, including a for loop, that iterates over a list of strings and creates a single string, html_str, which is an HTML list. For example, if the list is items = ['first string', 'second string'], printing html_str should output:
# ```
# <ul>
# <li>first string</li>
# <li>second string</li>
# </ul>
# ```
# That is, the string's first line should be the opening tag ```<ul>```. Following that is one line per element in the source list, surrounded by ```<li>``` and ```</li>``` tags. The final line of the string should be the closing tag ```</ul>```.
# + id="AOKRbOG1H_JP" colab_type="code" colab={}
items = ['first string', 'second string']
html_str = "<ul>\n"  # "\n" is the character that marks the end of the line, so
                     # the characters that come after it are on the next line

# Reference solution: wrap each item in <li>...</li> on its own line,
# then close the list.
for item in items:
    html_str += "<li>" + item + "</li>\n"
html_str += "</ul>"

print(html_str)
# + [markdown] id="y46BP9WUIOMg" colab_type="text"
# ## **Iterating Through Dictionaries with For Loops:**
# When you iterate through a dictionary using a for loop, doing it the normal way (for n in some_dict) will only give you access to the keys in the dictionary - which is what you'd want in some situations.
#
# In other cases, you'd want to iterate through both the keys and values in the dictionary. Let's see how this is done in an example. Consider this dictionary that uses names of actors as keys and their characters as values.
#
# + id="QYBoBC2fIYtt" colab_type="code" colab={}
cast = {
"<NAME>": "<NAME>",
"<NAME>": "<NAME>",
"<NAME>": "<NAME>",
"<NAME>": "<NAME>"
}
# + [markdown] id="4d97pJSCJGDt" colab_type="text"
# Iterating through it in the usual way with a for loop would give you just the keys, as shown below:
# + id="qGashuUvJHdu" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 85} outputId="be7fdeb5-cd7d-47da-f235-bfe648efa81a"
for key in cast:
print(key)
# + [markdown] id="Y6NPJ50UJO0I" colab_type="text"
# If you wish to iterate through both keys and values, you can use the built-in method items like this:
#
#
# + id="ovRnghC3JNwQ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 85} outputId="6283877e-7f74-4263-cd6f-4cf27c12f257"
for key, value in cast.items():
print("Partner 1: {} Partner 2: {}".format(key, value))
# + [markdown] id="8T7sbVuHJbRx" colab_type="text"
# > items is an awesome method that returns tuples of key, value pairs, which you can use to iterate over dictionaries in for loops.
#
#
# + [markdown] id="bMwu5B1RJrof" colab_type="text"
# ### **Ouestion:**
# You would like to count the number of fruits in your basket. In order to do this, you have the following dictionary and list of fruits. Use the dictionary and list to count the total number of fruits, but you do not want to count the other items in your basket.
# + id="P54P9OkiJW2v" colab_type="code" colab={}
result = 0
basket_items = {'apples': 4, 'oranges': 19, 'kites': 3, 'sandwiches': 8}
fruits = ['apples', 'oranges', 'pears', 'peaches', 'grapes', 'bananas']

# Reference solution: iterate through the dictionary; if the key is in the
# list of fruits, add the value (number of fruits) to result.
for item, quantity in basket_items.items():
    if item in fruits:
        result += quantity

print(result)
# + [markdown] id="L77tOmpwKFc9" colab_type="text"
# ## **While Loops:**
# For loops are an example of definite iteration, meaning that the loop's body is run a predefined number of times. A for loop over a list executes the body for each element in the list; a for loop using the range function will execute the number of times specified by the range function.
# This differs from indefinite iteration, i.e. when a loop repeats an unknown number of times and ends when some condition is met.
# While loop is an example of indefinite iteration. Here is an example:
#
# + id="It81d8ZoLSni" colab_type="code" colab={}
card_deck = [4, 11, 8, 5, 13, 2, 8, 10]
hand = []
# adds the last element of the card_deck list to the hand list
# until the values in hand add up to 17 or more
# (pop removes and returns the last card, so card_deck shrinks as hand grows;
# the loop stops as soon as the running total reaches 17)
while sum(hand) < 17:
    hand.append(card_deck.pop())
# + id="9KkqkudbLU6g" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="185d97e2-3fbc-4ea5-89ca-3fb4ca63be56"
for card in hand:
print(card)
# + [markdown] id="JwnTD36ZLqMs" colab_type="text"
# This example features two new functions. sum returns the sum of the elements in a list, and pop is a list method that removes the last element from a list and returns it.
# + [markdown] id="4jDhRwT2Lr-l" colab_type="text"
# ### **Components of a While Loop:**
# 1. The first line starts with the while keyword, indicating this is a while loop.
# 2. Following that is a condition to be checked. In this example, that's sum(hand) < 17.
# 3. The while loop heading always ends with a colon :.
# 4. Indented after this heading is the body of the while loop. If the condition for the while loop is true, the code lines in the loop's body will be executed.
# 5. We then go back to the while heading line, and the condition is evaluated again. This process of checking the condition and then executing the loop repeats until the condition becomes false.
# 6. When the condition becomes false, we move on to the line following the body of the loop, which will be unindented.
#
# The indented body of the loop should modify at least one variable in the test condition. If the value of the test condition never changes, the result is an infinite loop!
# + [markdown] id="r_a96I8tMgJV" colab_type="text"
# ### **Question:**
# Find the factorial of a number using a while loop.
#
# A factorial of a whole number is that number multiplied by every whole number between itself and 1. For example, 6 factorial (written "6!") equals 6 x 5 x 4 x 3 x 2 x 1 = 720. So 6! = 720.
#
# We can write a while loop to take any given number and figure out what its factorial is.
#
# Example: If number is 6, your code should compute and print the product, 720.
# + id="r5ngmqZ0LjKu" colab_type="code" colab={}
# number to find the factorial of
number = 6
# start with our product equal to one
product = 1
# track the current number being multiplied
current = 1
# multiply the product so far by the current number,
# incrementing current each iteration until it passes number
while current <= number:
    product *= current
    current += 1
# print the factorial of number
print(product)
# + [markdown] id="0ovry-I1Mw8u" colab_type="text"
# ### **Question:**
# Now use a for loop to find the factorial!
#
# It will now be great practice for you to try to revise the code you wrote above to find the factorial of a number, but this time, using a for loop. Try it in the code editor below!
# + id="Su8VaSb5M1pH" colab_type="code" colab={}
# number to find the factorial of
number = 6
# start with our product equal to one
product = 1
# multiply product by every whole number from 1 through number
for factor in range(1, number + 1):
    product *= factor
# print the factorial of number
print(product)
# + [markdown] id="mMEMlamnODYu" colab_type="text"
# ## **Question:**
# Suppose you want to count from some number start_num by another number count_by until you hit a final number end_num. Use break_num as the variable that you'll change each time through the loop. For simplicity, assume that end_num is always larger than start_num and count_by is always positive.
# <br>
# <br>
#
# Before the loop, what do you want to set break_num equal to? How do you want to change break_num each time through the loop? What condition will you use to see when it's time to stop looping?
# <br>
# <br>
# After the loop is done, print out break_num, showing the value that indicated it was time to stop looping. It is the case that break_num should be a number that is the first number larger than end_num.
# <br>
# <br>
# + id="SIB04lNqOKUy" colab_type="code" colab={}
start_num = 5
end_num = 30
count_by = 3
# start counting from start_num and step by count_by until we pass end_num
break_num = start_num
while break_num <= end_num:
    break_num += count_by
# break_num is now the first counted value larger than end_num
print(break_num)
# + [markdown] id="Xn6OcbHqOsG2" colab_type="text"
# ## **Break and Continue:**
# For loops iterate over every element in a sequence, while while loops keep iterating until a stopping condition is met.
#
# This is sufficient for most purposes, but sometimes we need more precise control over when we need to end a loop.
#
# In these cases we use the *break* keyword.
#
# + id="YgKrCyZmRzee" colab_type="code" colab={}
manifest = [("bananas", 15), ("mattresses", 24), ("dog kennels", 42),
("machine", 120), ("cheeses", 5)]
weight = 0
items = []
# + id="Sj-BgklORzHd" colab_type="code" colab={}
manifest = [("bananas", 15), ("mattresses", 24), ("dog kennels", 42),
("machine", 120), ("cheeses", 5)]
weight = 0
items = []
# + [markdown] id="JD4oemELR1jK" colab_type="text"
# This brings us to our next statement sometimes there will be conditions where instead of breaking the loop we will like to skip one iteration of the loop. In this case we will use the continue keyword.
#
# > break and continue keywords, which can be used in both for and while loops.
# - break terminates a loop
# - continue skips one iteration of a loop
# + id="2ibOEVxLSwjo" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 102} outputId="67c9a24a-f049-4d26-c792-b224bd9b3492"
fruits= ["orange", "strawberry", "apple"]
foods = ["apple", "apple", "burger", "toast"]
fruit_count = 0
# Count how many entries in foods are fruits; `continue` skips the rest of
# the loop body for non-fruits, so fruit_count is only incremented for fruits.
for food in foods:
    if food not in fruits:
        print("Not a fruit")
        continue
    fruit_count += 1
    print("Found a fruit!!")
print("Total fruits: ", fruit_count)
# + id="ERamEo0rQcbX" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 510} outputId="fd21e714-a889-4bc8-d3dc-9f7e35700b83"
manifest = [("bananas", 15), ("mattresses", 24), ("dog kennels", 42),
("machine", 120), ("cheeses", 5)]
# skips an iteration when adding an item would exceed the limit
# breaks the loop if weight is exactly the value of the limit
weight = 0
items = []
# + [markdown] id="otmYN5IOTwgU" colab_type="text"
# ### **Question:**
#
# Write a loop with a break statement to create a string, news_ticker, that is exactly 140 characters long. You should create the news ticker by adding headlines from the headlines list, inserting a space in between each headline. If necessary, truncate the last headline in the middle so that news_ticker is exactly 140 characters long.
#
# > Remember that break works in both for and while loops. Use whichever loop seems most appropriate. Consider adding print statements to your code to help you resolve bugs.
# + id="9e-iNtDjQkX4" colab_type="code" colab={}
headlines = ["Local Bear Eaten by Man",
            "Legislature Announces New Laws",
            "Peasant Discovers Violence Inherent in System",
            "Cat Rescues Fireman Stuck in Tree",
            "Brave Knight Runs Away",
            "Papperbok Review: Totally Triffic"]
news_ticker = ""
# Append headlines separated by a space until the ticker reaches 140
# characters, truncating the last headline if it overshoots the limit.
for headline in headlines:
    news_ticker += headline + " "
    if len(news_ticker) >= 140:
        news_ticker = news_ticker[:140]
        break
print(news_ticker)
# + [markdown] id="baBSZNdeUIkk" colab_type="text"
# ## **Zip and Enumerate:**
# zip and enumerate are useful built-in functions that can come in handy when dealing with loops.
#
# ## **Zip:**
# zip returns an iterator that combines multiple iterables into one sequence of tuples. Each tuple contains the elements in that position from all the iterables. For example, printing
#
# ```
# manifest = [("bananas", 15), ("mattresses", 24), ("dog kennels", 42),
# ("machine", 120), ("cheeses", 5)]
# ```
# + id="CvI2ARw7UZ3Q" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="8cd91920-df3f-4a3b-f86b-195e13eef8fa"
items = ['bananas', 'mattresses', 'dog kennels', 'machine', 'cheeses']
weights = [15, 24, 42, 120, 5]
print(list(zip(items, weights)))
# + id="PLhgh95XWHCQ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 102} outputId="8a55051e-d8ca-4cfd-9e5e-251e34fce4ee"
for item, weight in zip(items, weights):
print("{}: {}".format(item, weight))
# + [markdown] id="4uGD7MD7VI-L" colab_type="text"
# In addition to zipping two lists together, you can also unzip a list into tuples using an asterisk.
# + id="dHg_F7rTU-Eo" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="213e156b-5add-4606-baaa-bb3b7af91bb8"
manifest = [("bananas", 15), ("mattresses", 24), ("dog kennels", 42),
("machine", 120), ("cheeses", 5)]
items, weights = zip(*manifest)
print(items)
print(weights)
# + [markdown] id="YleWcdd3VizQ" colab_type="text"
# This would create the same items and weights tuples we saw earlier.
# + [markdown] id="JaA0PEZHWVFh" colab_type="text"
# ## **Enumerate:**
# enumerate is a built in function that returns an iterator of tuples containing indices and values of a list. You'll often use this when you want the index along with each element of an iterable in a loop.
# + id="NHUepFAuVibt" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 102} outputId="0fb0c71a-1381-4647-a856-1698a2ac441c"
items = ['bananas', 'mattresses', 'dog kennels', 'machine', 'cheeses']
# Manual index tracking with zip(range(len(...)), ...) — shown for contrast;
# enumerate (demonstrated in the next cell) is the idiomatic way to do this.
for i, item in zip(range(len(items)), items):
    print(i, item)
# + id="4DJATkVnVYSO" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 102} outputId="7e396dbe-5de7-485d-a593-b722feca0ea7"
items = ['bananas', 'mattresses', 'dog kennels', 'machine', 'cheeses']
for i, item in enumerate(items):
print(i, item)
# + [markdown] id="92PJy7_rXGYV" colab_type="text"
# ### **Question:**
#
# Use zip to write a for loop that creates a string specifying the label and coordinates of each point and appends it to the list points. Each string should be formatted as ``` label: x, y, z ``` . For example, the string for the first coordinate should be ```F: 23, 677, 4```.
# + id="DI28k3bcXAS1" colab_type="code" colab={}
x_coord = [23, 53, 2, -12, 95, 103, 14, -5]
y_coord = [677, 233, 405, 433, 905, 376, 432, 445]
z_coord = [4, 16, -6, -42, 3, -6, 23, -1]
labels = ["F", "J", "A", "Q", "Y", "B", "W", "X"]
points = []
# zip the parallel lists so each iteration yields one point's label and coordinates
for label, x, y, z in zip(labels, x_coord, y_coord, z_coord):
    points.append("{}: {}, {}, {}".format(label, x, y, z))
for point in points:
    print(point)
# + [markdown] id="tVYaOI-PYXIi" colab_type="text"
# ## **Question:**
# Use zip to create a dictionary cast that uses names as keys and heights as values.
#
#
# + id="X8VB9r6lYjgn" colab_type="code" colab={}
cast_names = ["<NAME>", "<NAME>", "<NAME>",
              "<NAME>", "<NAME>"]
cast_heights = [172, 168, 172, 166, 170]
# zip pairs each name with its height; dict() turns the pairs into a mapping
# (the original had a stray leading comma on the second list line — a SyntaxError)
cast = dict(zip(cast_names, cast_heights))
print(cast)
# + [markdown] id="4nXbwD4bY0qn" colab_type="text"
# ## **Question:**
# Use zip to transpose data from a 4-by-3 matrix to a 3-by-4 matrix.
# + id="TNOuyi3eY6jh" colab_type="code" colab={}
data = ((0, 1, 2), (3, 4, 5), (6, 7, 8), (9, 10, 11))
# zip(*data) unpacks the rows so zip regroups them column-by-column
data_transpose = tuple(zip(*data))
print(data_transpose)
# + [markdown] id="k3LD-7ReY_hc" colab_type="text"
# ## **Question:**
# Use enumerate to modify the cast list so that each element contains the name followed by the character's corresponding income in $ 1000. For example, the first element of cast should change from "<NAME>" to "<NAME> 150".
# + id="Yz5KAaVxZO4y" colab_type="code" colab={}
cast = ["<NAME>", "<NAME>", "<NAME>",
        "<NAME>", "<NAME>"]
heights = [150, 100, 40, 0, 80]
# replace each name in-place with "name height" using its index into heights
for i, name in enumerate(cast):
    cast[i] = "{} {}".format(name, heights[i])
print(cast)
# + [markdown] id="QDgp2aaFZalY" colab_type="text"
# ## **List Comprehensions:**
# In Python you can create lists very quickly and concisely with a cool tool called list comprehension. For example, from earlier:
# + id="ih37N8jGa_Nu" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="92b2ca7e-c6a5-44a1-f81b-0b0724ad1cfb"
cities = ['new york city', 'mountain view',
'chicago', 'los angeles']
capitalized_cities = []
for city in cities:
capitalized_cities.append(city.title())
print(capitalized_cities)
# + [markdown] id="i7ek39QWbbTl" colab_type="text"
# With the help of list comprehension this can be reduced to:
#
# + id="6dNrPANObBU-" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="742a3c44-76a5-4551-cc9f-b8ac864dcff3"
capitalized_cities = [city.title() for city in cities]
print(capitalized_cities)
# + [markdown] id="PFYkA6q-bpFJ" colab_type="text"
# List comprehensions allow us to create a list using a for loop in one step.
#
# You create a list comprehension with brackets [], including an expression to evaluate for each element in an iterable. This list comprehension above calls city.title() for each element city in cities, to create each element in the new list, capitalized_cities.
# + [markdown] id="GHwo95c_cQTq" colab_type="text"
# ## **Conditionals in List Comprehensions:**
# You can also add conditionals to list comprehensions (listcomps).
#
# Lets look at an example:
# + id="3dBmqynjbhMw" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="3daeccb2-91fb-4104-f82a-930804ca7614"
squares = []
for x in range(9):
squares.append(x**2)
print(squares)
# + [markdown] id="moGS7Ruacp5K" colab_type="text"
# writing the same as a list comprehension will be like:
# + id="4mKNK5MtclQK" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="feac3c8e-50bb-49a6-fbe9-1675a146f713"
squares = [x**2 for x in range(9)]
print(squares)
# + [markdown] id="lYwMMvDIdX54" colab_type="text"
# Now if we want only those numbers that are multiple of 2:
# + id="Ly-zmKbadb8h" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="26fc5436-6e24-4f28-9408-84f19c5f6524"
squares = [x**2 for x in range(9) if x % 2 == 0]
print(squares)
# + [markdown] id="GYfWe8kydlxg" colab_type="text"
# The code above sets squares equal to the list [0, 4, 16, 36, 64], as x to the power of 2 is only evaluated if x is even. If you want to add an else, you will get a syntax error doing this.
# + id="SCKNfhaEdeFf" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 130} outputId="9e1b0744-2c02-4bd7-8ddb-38d50b6bf5b5"
squares = [x**2 for x in range(9) if x % 2 == 0 else x + 3]  # deliberately invalid: an `else` after the filtering `if` raises a SyntaxError
# + [markdown] id="p_a9CKS_duRo" colab_type="text"
# If you would like to add else, you have to move the conditionals to the beginning of the listcomp, right after the expression, like this.
# + id="srvbtvFVdrG4" colab_type="code" colab={}
squares = [x**2 if x % 2 == 0 else x + 3 for x in range(9)]
# + [markdown] id="8MfbXBqHdzld" colab_type="text"
# > List comprehensions are not found in other languages, but are very common in Python.
# + [markdown] id="Jc3k9Rbbd5kM" colab_type="text"
# ### **Question:**
# Use a list comprehension to create a new list first_names containing just the first names in names in lowercase.
# + id="OL7SIKyFdxPP" colab_type="code" colab={}
names = ["<NAME>", "<NAME>", "<NAME>",
         "<NAME>", "<NAME>"]
# take the first word of each full name and lowercase it
first_names = [name.split()[0].lower() for name in names]
print(first_names)
# + [markdown] id="qQ84Hi0peE4U" colab_type="text"
# ### **Question:**
# Use a list comprehension to create a list multiples_3 containing the first 20 multiples of 3.
#
#
# + id="VS0_4QnteNt7" colab_type="code" colab={}
# the first 20 multiples of 3: 3, 6, ..., 60
multiples_3 = [3 * n for n in range(1, 21)]
print(multiples_3)
# + [markdown] id="3euEzM19eQbe" colab_type="text"
# ### **Question:**
# Use a list comprehension to create a list of names passed that only include those that scored at least 65.
# + id="GP9dY8jLeWJD" colab_type="code" colab={}
scores = {
    "Deku": 70,
    "Bakugo": 35,
    "Tenya": 82,
    "Shoto": 23,
    "All Might": 98
}
# keep only the names whose score is at least 65
passed = [name for name, score in scores.items() if score >= 65]
print(passed)
|
Day_2_Control_Flow.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Shape Detection
# ### Shape Detection
# ### Installation of pip and imutils
conda install pip
pip install imutils
# ### Importing the necessary libraries
#Import the necessary libraries
import cv2
import imutils
# ### Defining the ShapeDetector Class
#Define the ShapeDetector class
class ShapeDetector:
    """Classify a contour as a Triangle, Square, Rectangle, Pentagon or Circle."""

    def __init__(self):
        # No state is needed; detection is purely a function of the contour.
        pass

    def detect(self , c):
        """Return the name of the shape approximated by contour `c`.

        The contour is approximated by a polygon; the vertex count of that
        polygon decides the shape, with anything above five vertices treated
        as a circle.
        """
        # Perimeter of the closed contour, used to scale the approximation tolerance.
        perimeter = cv2.arcLength(c , True)
        # Approximate the contour; 4% of the perimeter is the usual tolerance.
        vertices = cv2.approxPolyDP(c , 0.04 * perimeter , True)
        corner_count = len(vertices)
        if corner_count == 3:
            return "Triangle"
        if corner_count == 4:
            # Distinguish squares from rectangles via the bounding-box aspect ratio.
            (x , y , w , h) = cv2.boundingRect(vertices)
            aspect = w / float(h)
            # Within 5% of a 1:1 ratio counts as a square.
            return "Square" if aspect >= 0.95 and aspect <= 1.05 else "Rectangle"
        if corner_count == 5:
            return "Pentagon"
        # Anything else (many vertices) is treated as a circle.
        return "Circle"
# ### Loading and processing the image to make it easier to detect shapes, finding the contours
#Load the image to a variable
image = cv2.imread('shapes_and_colors.jpg')
#Resize the image to get better approximation
resized = imutils.resize(image , width=300)
#Obtain factor by which the image is resized
#(used later to map contour co-ordinates back onto the original image)
ratio = image.shape[0] / float(resized.shape[0])
#Convert the resized image into Grayscale image
gray = cv2.cvtColor(resized , cv2.COLOR_BGR2GRAY)
#Apply Gaussian blur to the Grayscaled image
#(the 5x5 blur suppresses noise before thresholding)
blurred = cv2.GaussianBlur(gray , (5, 5) , 0)
#Threshold the blurred image
#(pixels brighter than 60 become 255; [1] keeps the image, dropping the threshold value)
thresh = cv2.threshold(blurred , 60 , 255 , cv2.THRESH_BINARY)[1]
#Find the contours of the copy of the threshold image
#(RETR_EXTERNAL keeps only outermost contours; CHAIN_APPROX_SIMPLE compresses points)
cnts = cv2.findContours(thresh.copy() , cv2.RETR_EXTERNAL , cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
#Create a ShapeDetector object
sd = ShapeDetector()
# ### Iterating over the contours to detect shapes and displaying the final output
#Iterate over every contour that was found
for c in cnts:
    #Find the moments of the contour
    M = cv2.moments(c)
    #Skip degenerate contours whose area (m00) is zero, which would
    #otherwise raise a ZeroDivisionError below
    if M["m00"] == 0:
        continue
    #Obtain the x and y co-ordinates of the centre of the contour,
    #scaled back to the original (un-resized) image
    cX = int((M["m10"] / M["m00"]) * ratio)
    cY = int((M["m01"] / M["m00"]) * ratio)
    #Detect the shape using the contour
    shape = sd.detect(c)
    #Convert c to float
    c = c.astype("float")
    #Multiply the contour by the ratio to get the co-ordinates of the original image
    c *= ratio
    #Convert c to int
    c = c.astype("int")
    #Draw the contour
    cv2.drawContours(image , [c] , -1 , (0 , 255 , 0) , 2)
    #Put the name of the shape as text
    cv2.putText(image , shape , (cX , cY) , cv2.FONT_HERSHEY_SIMPLEX , 0.5 , (255 , 255 , 255) , 2)
# Display the output image that has both the contours and the name of the shape
cv2.imshow("Image", image)
cv2.waitKey(0)
|
Object Detection and Tracking/Sathyashree/Sathyashree_OpenCV_Shape_Detection.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from sklearn import tree
from typing import Dict, Tuple, Set, List
from tqdm import tqdm
import matplotlib.pyplot as plt
from random import randint
from collections import Counter
# +
class Dataset(object):
    """Bundle of a labelled dataset: feature rows `X`, labels `Y`, and the
    number of classes and features they contain."""

    def __init__(self, clazzes_num, features_num, X, Y):
        # Counts describing the dataset's dimensions.
        self.clazzes_num = clazzes_num
        self.features_num = features_num
        # Feature matrix and the class label of each row.
        self.X, self.Y = X, Y
def read_file(filename: str):
    """Parse a dataset file into a Dataset.

    File format: the first line holds "<features_num> <clazzes_num>", the
    second the number of objects, then one line per object containing its
    integer features followed by its class label.
    """
    X: List[List[int]] = []
    Y: List[int] = []
    # `with` guarantees the file handle is closed (the original leaked it).
    with open(filename, "r") as file:
        features_num, clazzes_num = [int(x) for x in file.readline().split()]
        num_objects = int(file.readline())
        for _ in range(num_objects):
            features = [int(x) for x in file.readline().split()]
            # The last value on each line is the class label.
            clazz = features.pop()
            X.append(features)
            Y.append(clazz)
    return Dataset(clazzes_num, features_num, X, Y)
# -
FILES_NUM = 21
FOLDER = "DT_txt"
FILE_TRAIN_SUFFIX = "_train.txt"
FILE_TEST_SUFFIX = "_test.txt"
# +
train_datasets: List[Dataset] = []
test_datasets: List[Dataset] = []
for i in range(1, FILES_NUM + 1):
file_index = f'0{i}' if i < 10 else f'{i}'
train_file = FOLDER + "/" + file_index + FILE_TRAIN_SUFFIX
test_file = FOLDER + "/" + file_index + FILE_TEST_SUFFIX
train_datasets.append(read_file(train_file))
test_datasets.append(read_file(test_file))
print(len(train_datasets))
print(len(test_datasets))
# -
heights = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
criterions = ["gini", "entropy"]
splitters = ["best", "random"]
def count_for_datasets(train_dataset: Dataset, test_dataset: Dataset):
    """Grid-search tree depth, split criterion and splitter for one dataset.

    Trains a DecisionTreeClassifier for every combination of the module-level
    `heights`, `criterions` and `splitters`, evaluates each on the test set,
    and returns (best_height, best_criterion, best_splitter, best_accuracy).
    """
    best_accuracy = 0
    best_height = -1
    best_criterion = ""
    best_splitter = ""
    total = len(test_dataset.Y)
    for height in heights:
        for criterion in criterions:
            for splitter in splitters:
                clf = tree.DecisionTreeClassifier(criterion=criterion, splitter=splitter, max_depth=height)
                clf.fit(train_dataset.X, train_dataset.Y)
                # Predict the whole test set in one call instead of one
                # predict() call per sample (same result, far less overhead).
                predictions = clf.predict(test_dataset.X)
                correct = sum(1 for pred, actual in zip(predictions, test_dataset.Y) if pred == actual)
                accuracy = correct / total
                if accuracy > best_accuracy:
                    best_accuracy = accuracy
                    best_height = height
                    best_criterion = criterion
                    best_splitter = splitter
    return best_height, best_criterion, best_splitter, best_accuracy
for i in range(FILES_NUM):
best_height, best_criterion, best_splitter, best_accuracy = count_for_datasets(train_datasets[i], test_datasets[i])
print(f'Dataset #{i + 1}: accuracy={best_accuracy}, height={best_height}, criterion={best_criterion}, splitter={best_splitter}')
# +
# max height=11 for dataset #21
# min height=1 for dataset #3
min_num = "03"
min_index = 2
min_height = 1
criterion_for_min = "gini"
splitter_for_min = "best"
max_num = "21"
max_index = 20
max_height = 11
criterion_for_max = "entropy"
splitter_for_max = "best"
# -
# min
train_accuracies_min = []
test_accuracies_min = []
for height in heights:
clf = tree.DecisionTreeClassifier(criterion=criterion_for_min,splitter=splitter_for_min,max_depth=height)
clf.fit(train_datasets[min_index].X, train_datasets[min_index].Y)
####### train accuracy
train_accuracy = 0
for i in range(len(train_datasets[min_index].X)):
pred = clf.predict([train_datasets[min_index].X[i]])
if pred == train_datasets[min_index].Y[i]:
train_accuracy += 1
train_accuracy /= len(train_datasets[min_index].Y)
train_accuracies_min.append(train_accuracy)
######################
####### test accuracy
test_accuracy = 0
for i in range(len(test_datasets[min_index].X)):
pred = clf.predict([test_datasets[min_index].X[i]])
if pred == test_datasets[min_index].Y[i]:
test_accuracy += 1
test_accuracy /= len(test_datasets[min_index].Y)
test_accuracies_min.append(test_accuracy)
# max
train_accuracies_max = []
test_accuracies_max = []
for height in heights:
clf = tree.DecisionTreeClassifier(criterion=criterion_for_max,splitter=splitter_for_max,max_depth=height)
clf.fit(train_datasets[max_index].X, train_datasets[max_index].Y)
####### train accuracy
train_accuracy = 0
for i in range(len(train_datasets[max_index].X)):
pred = clf.predict([train_datasets[max_index].X[i]])
if pred == train_datasets[max_index].Y[i]:
train_accuracy += 1
train_accuracy /= len(train_datasets[max_index].Y)
train_accuracies_max.append(train_accuracy)
######################
####### test accuracy
test_accuracy = 0
for i in range(len(test_datasets[max_index].X)):
pred = clf.predict([test_datasets[max_index].X[i]])
if pred == test_datasets[max_index].Y[i]:
test_accuracy += 1
test_accuracy /= len(test_datasets[max_index].Y)
test_accuracies_max.append(test_accuracy)
def show_plot_heights_accuracies(x, y, title):
    """Line-plot accuracy (`y`) against tree height (`x`) under `title`."""
    plt.title(title)
    plt.plot(x, y)
    plt.xlabel('Height')
    plt.ylabel('Accuracy')
    plt.show()
show_plot_heights_accuracies(heights, train_accuracies_min, "Minimum Optimal Height (train)")
show_plot_heights_accuracies(heights, test_accuracies_min, "Minimum Optimal Height (test)")
show_plot_heights_accuracies(heights, train_accuracies_max, "Maximum Optimal Height (train)")
show_plot_heights_accuracies(heights, test_accuracies_max, "Maximum Optimal Height (test)")
def generate_random_sample(dataset: 'Dataset'):
    """Draw a bootstrap sample (with replacement) the same size as `dataset`.

    Returns a pair (random_X, random_Y) of resampled feature rows and their
    matching labels.
    """
    size = len(dataset.X)
    random_X: List[List[int]] = []
    random_Y: List[int] = []
    for _ in range(size):
        # Pick a random row. The original appended row `i` instead of
        # row `index`, so the "bootstrap" returned the dataset unchanged
        # and every bagged tree was trained on identical data.
        index = randint(0, size - 1)
        random_X.append(dataset.X[index])
        random_Y.append(dataset.Y[index])
    return random_X, random_Y
# +
# random forest
def random_forest_for_dataset(train_dataset, trees_num=101):
    """Train `trees_num` unpruned decision trees, each on its own bootstrap
    sample of `train_dataset`, and return them as a list."""
    forest = []
    for _ in range(trees_num):
        # Each tree sees a fresh random resample of the training data.
        sample_X, sample_y = generate_random_sample(train_dataset)
        estimator = tree.DecisionTreeClassifier()
        estimator.fit(sample_X, sample_y)
        forest.append(estimator)
    return forest
def forest_predict(trees, x):
    """Predict the class of sample `x` by majority vote over `trees`."""
    votes = [estimator.predict([x])[0] for estimator in trees]
    # most_common(1) yields [(winner, count)]; keep only the winning label.
    winner, _count = Counter(votes).most_common(1)[0]
    return winner
# -
def handle_dataset_forest(train_dataset: Dataset, test_dataset: Dataset):
    """Train a random forest on `train_dataset` and return the pair
    (train_accuracy, test_accuracy) of majority-vote accuracies."""
    forest = random_forest_for_dataset(train_dataset)

    def _accuracy(X, Y):
        # Fraction of samples whose majority-vote prediction matches the label.
        hits = sum(1 for features, label in zip(X, Y)
                   if forest_predict(forest, features) == label)
        return hits / len(X)

    train_accuracy = _accuracy(train_dataset.X, train_dataset.Y)
    test_accuracy = _accuracy(test_dataset.X, test_dataset.Y)
    return train_accuracy, test_accuracy
for i in range(FILES_NUM):
train_accuracy, test_accuracy = handle_dataset_forest(train_datasets[i], test_datasets[i])
print(f'Dataset #{i + 1}: train accuracy={train_accuracy}, test accuracy={test_accuracy}')
|
labs/dt/decision_tree.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: dev
# language: python
# name: dev
# ---
# Dependencies
import numpy as np
import pandas as pd
from scipy import stats
# Read data
# (header=None: the CSV has no header row; column 13 holds the value tested below)
housing_data = pd.read_csv("../Resources/housing_data.csv", header=None)
# Shuffle all rows so the two samples below are randomly drawn
housing_data = housing_data.sample(frac=1).reset_index(drop=True)
# Create two samples
# NOTE(review): iloc[0:19] yields rows 0-18 (19 rows) while iloc[20:40] yields
# 20 rows, and row 19 belongs to neither sample — confirm this is intended.
s1 = housing_data.iloc[0:19, 13]
s2 = housing_data.iloc[20:40, 13]
# Run T test
# equal_var=False selects Welch's t-test (does not assume equal variances)
(t_stat, p) = stats.ttest_ind(s1, s2, equal_var=False)
print("t-statistics is {}.".format(t_stat))
print("p-value is {}.".format(p))
|
05-Matplotlib/3/Activities/07-Ins_Students_t_test/Solved/ttest.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Watershed Distance Transform for 3D Data
# ---
# Implementation of papers:
#
# [Deep Watershed Transform for Instance Segmentation](http://openaccess.thecvf.com/content_cvpr_2017/papers/Bai_Deep_Watershed_Transform_CVPR_2017_paper.pdf)
#
# [Learn to segment single cells with deep distance estimator and deep cell detector](https://arxiv.org/abs/1803.10829)
# +
import os
import errno
import datetime
import numpy as np
import deepcell
# -
# ### Load the Training Data
# +
# Download the data (saves to ~/.keras/datasets)
filename = 'mousebrain.npz'
(X_train, y_train), (X_test, y_test) = deepcell.datasets.mousebrain.load_data(filename)
print('X.shape: {}\ny.shape: {}'.format(X_train.shape, y_train.shape))
# -
# ### Set up filepath constants
# +
# the path to the data file is currently required for `train_model_()` functions
# change DATA_DIR if you are not using `deepcell.datasets`
DATA_DIR = os.path.expanduser(os.path.join('~', '.keras', 'datasets'))
# DATA_FILE should be a npz file, preferably from `make_training_data`
DATA_FILE = os.path.join(DATA_DIR, filename)
# confirm the data file is available
assert os.path.isfile(DATA_FILE)
# +
# Set up other required filepaths
# If the data file is in a subdirectory, mirror it in MODEL_DIR and LOG_DIR
PREFIX = os.path.relpath(os.path.dirname(DATA_FILE), DATA_DIR)
ROOT_DIR = '/data' # TODO: Change this! Usually a mounted volume
MODEL_DIR = os.path.abspath(os.path.join(ROOT_DIR, 'models', PREFIX))
LOG_DIR = os.path.abspath(os.path.join(ROOT_DIR, 'logs', PREFIX))
# create directories if they do not exist
for d in (MODEL_DIR, LOG_DIR):
    # exist_ok=True makes this idempotent and race-free, replacing the manual
    # EEXIST guard (which also wrongly swallowed the error when the path
    # existed but was a regular file; this version raises in that case).
    os.makedirs(d, exist_ok=True)
# -
# ### Set up training parameters
# +
from tensorflow.keras.optimizers import SGD
from deepcell.utils.train_utils import rate_scheduler
fgbg_model_name = 'conv_fgbg_3d_model'
conv_model_name = 'conv_watershed_3d_model'
n_epoch = 10 # Number of training epochs
test_size = .10 # % of data saved as test
norm_method = 'whole_image' # data normalization - `whole_image` for 3d conv
receptive_field = 61 # should be adjusted for the scale of the data
optimizer = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
lr_sched = rate_scheduler(lr=0.01, decay=0.99)
# FC training settings
n_skips = 3 # number of skip-connections (only for FC training)
batch_size = 1 # FC training uses 1 image per batch
# Transformation settings
transform = 'watershed'
distance_bins = 4 # number of distance classes
erosion_width = 0 # erode edges
# 3D Settings
frames_per_batch = 3
# -
# ### First, create a foreground/background separation model
#
# #### Instantiate the fgbg model
# +
from deepcell import model_zoo
fgbg_model = model_zoo.bn_feature_net_skip_3D(
receptive_field=receptive_field,
n_features=2, # segmentation mask (is_cell, is_not_cell)
n_frames=frames_per_batch,
n_skips=n_skips,
n_conv_filters=32,
n_dense_filters=128,
input_shape=tuple([frames_per_batch] + list(X_train.shape[2:])),
multires=False,
last_only=False,
norm_method='whole_image')
# -
# #### Train the fgbg model
# +
from deepcell.training import train_model_conv
fgbg_model = train_model_conv(
model=fgbg_model,
dataset=DATA_FILE, # full path to npz file
model_name=fgbg_model_name,
transform='fgbg',
optimizer=optimizer,
batch_size=batch_size,
frames_per_batch=frames_per_batch,
n_epoch=n_epoch,
model_dir=MODEL_DIR,
lr_sched=rate_scheduler(lr=0.01, decay=0.95),
rotation_range=180,
flip=True,
shear=False,
zoom_range=(0.8, 1.2))
# -
# ### Next, Create a model for the watershed energy transform
#
# #### Instantiate the distance transform model
# +
from deepcell import model_zoo
watershed_model = model_zoo.bn_feature_net_skip_3D(
fgbg_model=fgbg_model,
receptive_field=receptive_field,
n_skips=n_skips,
n_features=distance_bins,
n_frames=frames_per_batch,
n_conv_filters=32,
n_dense_filters=128,
multires=False,
last_only=False,
input_shape=tuple([frames_per_batch] + list(X_train.shape[2:])),
norm_method='whole_image')
# -
# #### Train the model
# +
from deepcell.training import train_model_conv
watershed_model = train_model_conv(
model=watershed_model,
dataset=DATA_FILE, # full path to npz file
model_name=conv_model_name,
transform=transform,
distance_bins=distance_bins,
erosion_width=erosion_width,
optimizer=optimizer,
batch_size=batch_size,
n_epoch=n_epoch,
frames_per_batch=frames_per_batch,
model_dir=MODEL_DIR,
lr_sched=lr_sched,
rotation_range=180,
flip=True,
shear=False,
zoom_range=(0.8, 1.2))
# -
# ### Run the model
#
# The model was trained on only a `frames_per_batch` frames at a time. In order to run this data on a full set of frames, a new model must be instantiated, which will load the trained weights.
#
# #### Save weights of trained models
# +
fgbg_weights_file = os.path.join(MODEL_DIR, '{}.h5'.format(fgbg_model_name))
fgbg_model.save_weights(fgbg_weights_file)
watershed_weights_file = os.path.join(MODEL_DIR, '{}.h5'.format(conv_model_name))
watershed_model.save_weights(watershed_weights_file)
# -
# #### Initialize the new models
# +
from deepcell import model_zoo

# All training parameters should match except for the `input_shape`:
# the inference models accept whole movies (X_test.shape[1:]) instead of
# frames_per_batch-frame clips, then load the trained weights.
run_fgbg_model = model_zoo.bn_feature_net_skip_3D(
    receptive_field=receptive_field,
    n_features=2,  # fgbg is a two-class (cell / background) problem
    n_frames=frames_per_batch,
    n_skips=n_skips,
    n_conv_filters=32,
    n_dense_filters=128,
    input_shape=tuple(X_test.shape[1:]),
    multires=False,
    last_only=False,
    norm_method=norm_method)
run_fgbg_model.load_weights(fgbg_weights_file)

run_watershed_model = model_zoo.bn_feature_net_skip_3D(
    fgbg_model=run_fgbg_model,
    receptive_field=receptive_field,
    n_skips=n_skips,
    n_features=distance_bins,
    n_frames=frames_per_batch,
    n_conv_filters=32,
    n_dense_filters=128,
    multires=False,
    last_only=False,
    input_shape=tuple(X_test.shape[1:]),
    norm_method=norm_method)
run_watershed_model.load_weights(watershed_weights_file)
# -

# too many batches at once causes OOM
X_test, y_test = X_test[:4], y_test[:4]
print(X_test.shape)

# #### Make predictions on test data
# +
# [-1] keeps only the final (deepest) output head of the skip network.
test_images = run_watershed_model.predict(X_test)[-1]
test_images_fgbg = run_fgbg_model.predict(X_test)[-1]

print('watershed transform shape:', test_images.shape)
print('segmentation mask shape:', test_images_fgbg.shape)
# -
# #### Watershed post-processing
# +
# Collapse each batch item's per-class distance scores to a single
# per-pixel class index (argmax over the channel axis).
argmax_images = np.array(
    [np.argmax(batch_item, axis=-1) for batch_item in test_images])
argmax_images = np.expand_dims(argmax_images, axis=-1)

print('watershed argmax shape:', argmax_images.shape)
# +
# threshold the foreground/background
# and remove back ground from watershed transform
threshold = 0.5

fg_thresh = np.expand_dims(test_images_fgbg[..., 1] > threshold, axis=-1)
argmax_images_post_fgbg = argmax_images * fg_thresh
# +
# Apply watershed method with the distance transform as seed
# NOTE(review): skimage.morphology.watershed was deprecated in favor of
# skimage.segmentation.watershed — confirm the installed skimage version.
from skimage.measure import label
from skimage.morphology import watershed
from skimage.feature import peak_local_max

watershed_images = []
for i in range(argmax_images_post_fgbg.shape[0]):
    image = fg_thresh[i, ..., 0]
    distance = argmax_images_post_fgbg[i, ..., 0]

    # Seeds: local maxima of the last (largest-distance-bin) class score,
    # restricted to thresholded foreground pixels.
    local_maxi = peak_local_max(
        test_images[i, ..., -1],
        min_distance=10,
        threshold_abs=0.05,
        indices=False,
        labels=image,
        exclude_border=False)

    # Flood from the labeled seeds over the negated distance transform,
    # masked to the foreground.
    markers = label(local_maxi)
    segments = watershed(-distance, markers, mask=image)
    watershed_images.append(segments)

watershed_images = np.array(watershed_images)
watershed_images = np.expand_dims(watershed_images, axis=-1)
# -

# ### Plot the results
# +
import matplotlib.pyplot as plt
import matplotlib.animation as animation

# Pick a random movie and frame to visualize every stage of the pipeline.
index = np.random.randint(low=0, high=watershed_images.shape[0])
frame = np.random.randint(low=0, high=watershed_images.shape[1])

print('Image:', index)
print('Frame:', frame)

fig, axes = plt.subplots(ncols=3, nrows=2, figsize=(15, 15), sharex=True, sharey=True)
ax = axes.ravel()

ax[0].imshow(X_test[index, frame, ..., 0])
ax[0].set_title('Source Image')

ax[1].imshow(test_images_fgbg[index, frame, ..., 1])
ax[1].set_title('FGBG Prediction')

ax[2].imshow(fg_thresh[index, frame, ..., 0], cmap='jet')
ax[2].set_title('FGBG {}% Threshold'.format(int(threshold * 100)))

ax[3].imshow(argmax_images[index, frame, ..., 0], cmap='jet')
ax[3].set_title('Distance Transform')

ax[4].imshow(argmax_images_post_fgbg[index, frame, ..., 0], cmap='jet')
ax[4].set_title('Distance Transform w/o Background')

ax[5].imshow(watershed_images[index, frame, ..., 0], cmap='jet')
ax[5].set_title('Watershed Segmentation')

fig.tight_layout()
plt.show()
# +
# Can also export as a video
# But this does not render well on GitHub
from IPython.display import HTML
from deepcell.utils.plot_utils import get_js_video

HTML(get_js_video(watershed_images[..., [-1]], batch=index))
# -
|
scripts/watershed/Watershed Transform 3D Fully Convolutional.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (Higgs inference)
# language: python
# name: python3
# ---
# # Appendix 3: reweighting existing samples
#
# <NAME>, <NAME>, <NAME>, and <NAME> 2018-2019
# What if you already have a bunch of generated events, either simulated with MadMiner with a different benchmark setup, or with stand-alone MadGraph? MadMiner now lets you add the missing event weights to these files.
# ## 0. Preparations
# +
import logging
import numpy as np
import matplotlib
from matplotlib import pyplot as plt
# %matplotlib inline
from madminer.core import MadMiner
# -
# MadMiner uses the Python `logging` module to provide additional information and debugging output. You can choose how much of this output you want to see by switching the level in the following lines to `logging.DEBUG` or `logging.WARNING`.
# +
# MadMiner output
logging.basicConfig(
    format="%(asctime)-5.5s %(name)-20.20s %(levelname)-7.7s %(message)s",
    datefmt="%H:%M",
    level=logging.INFO,
)

# Quiet every already-registered logger that is not part of MadMiner
# (e.g. matplotlib) down to WARNING so INFO output stays readable.
for logger_name in list(logging.Logger.manager.loggerDict):
    if "madminer" not in logger_name:
        logging.getLogger(logger_name).setLevel(logging.WARNING)
# -
# ## 1. Original event sample
# We'll start with the event sample from the main tutorial, and assume we forgot to add one benchmark we *really* care about before starting the event generation. Let's try to add this benchmark (and the corresponding weights) after generating the events!
#
# We will assume that you have generated events according to the following settings, please adapt them if necessary:

mg_process_dir = "mg_processes/signal1"
run_name = "run_01"
sampling_benchmark = "sm"

# ## 2. Load setup and add new benchmark
# Load the existing MadMiner setup, register the forgotten benchmark (a
# point in (CWL2, CPWL2) parameter space), and save under a new filename so
# the original setup file is left untouched.
miner = MadMiner()
miner.load("data/setup.h5")
miner.add_benchmark({"CWL2": 10.0, "CPWL2": 8.0}, "oh_shit_we_forgot_this")
miner.save("data/setup_with_extra_benchmark.h5")

# This doesn't change the morphing setup (which always uses the first benchmarks, i.e. the ones that were already defined in the MadMiner file).
# ## 3. Prepare reweighting
# Here's the crucial step. `reweight_benchmarks` is a list of all the benchmarks that the sample will be reweighted to -- this should *not* include the benchmarks for which the sample already contains weights.
# Runs MadGraph's reweighting for the new benchmark only; existing
# per-event weights in the sample are kept as-is.
miner.reweight_existing_sample(
    mg_process_directory=mg_process_dir,
    run_name=run_name,
    param_card_template_file="Cards/param_card_template.dat",
    sample_benchmark=sampling_benchmark,
    reweight_benchmarks=["oh_shit_we_forgot_this"],
)
|
examples/tutorial_particle_physics/A3_reweighting_existing_samples.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:ptrch]
# language: python
# name: conda-env-ptrch-py
# ---
#download and unzip data
'''
!wget -P ./data http://www.statmt.org/europarl/v7/europarl.tgz
!tar -xvzf ./data/europarl.tgz -C ./data/
!pip install pandas
'''
import pandas as pd
import os
import re
import sys
# +
# Build an index DataFrame with one row per document: its language label
# (the per-language directory name) and the path to the text file.
cols = ['lang', 'path']
df = pd.DataFrame(columns=cols)

# NOTE(review): listing is done by shelling out to `ls`, which assumes a
# Unix-like environment — os.listdir would be the portable equivalent.
languages = os.popen('ls ./data/txt/').read().split('\n')[:-1]

for lang in languages:
    print('adding language {}...'.format(lang))
    lang_frame = pd.DataFrame(columns=cols)
    txts = os.popen('ls ./data/txt/{}/'.format(lang)).read().split('\n')[:-1]
    paths = ['./data/txt/{}/{}'.format(lang, txt) for txt in txts]
    lang_frame['path'] = paths
    lang_frame['lang'] = lang
    # Keep the global index contiguous across languages.
    lang_frame.index = range(len(df), len(df)+len(lang_frame))
    df = pd.concat([df, lang_frame], axis=0)
# +
def update_progress(progress):
    """Draw or refresh a one-line console progress bar.

    Accepts a float between 0 and 1 (ints are coerced to float). Values
    below 0 render a 'Halt' status, values at or above 1 render 'Done',
    and non-numeric input renders an error status at 0%.
    """
    bar_width = 25  # number of characters in the bar itself
    status = ""
    if isinstance(progress, int):
        progress = float(progress)
    if not isinstance(progress, float):
        progress = 0
        status = "error: progress var must be float\r\n"
    if progress < 0:
        progress = 0
        status = "Halt...\r\n"
    if progress >= 1:
        progress = 1
        status = "Done...\r\n"
    filled = int(round(bar_width * progress))
    bar = "#" * filled + "-" * (bar_width - filled)
    text = "\rProgress: [{0}] {1}% {2}".format(bar, round(progress * 100, 3), status)
    sys.stdout.write(text)
    sys.stdout.flush()
def get_text(idx, df=df):
    """Return the raw contents of the document at row *idx* of *df*.

    The DataFrame must carry a 'path' column pointing at the text file.
    """
    with open(df.loc[idx].path, 'r') as fh:
        return fh.read()
def remove_tags(input_string):
result = input_string
tag = re.compile(r'<[^<]*>')
while tag.search(result):
match = tag.search(result)
strt = match.span()[0]
stp = match.span()[1]
result = result[:strt] + result[stp:]
return result
# +
# withold some fraction from every class for validation.
# Sampling is stratified: the same fraction is drawn from every language
# so the validation set mirrors the class balance of the corpus.
test_prop = .05

test = pd.DataFrame(columns=cols)
for lang in df.lang.unique():
    samp = df[df.lang == lang].sample(frac=test_prop)
    test = pd.concat([test, samp])
train = df[~df.index.isin(test.index)]

print('documents in training set: ', len(train))
print('documents in validation set: ', len(test))
def concat_to_file(idx, df, path):
    """Append the tag-stripped text of document *idx* in *df* to *path*."""
    cleaned = remove_tags(get_text(idx, df=df))
    with open(path, 'a+') as out:
        out.write(cleaned)
train_path = './data/train.txt'
val_path = './data/val.txt'

# Start from empty output files (errors ignored if they don't exist yet).
os.system('rm {} {}'.format(train_path, val_path))

# Concatenate the shuffled training documents into one large text file,
# skipping files that fail to decode, with a console progress bar.
cnt = 0
print('creating train file...')
shuffle = train.sample(frac=1)  # frac=1 => full random shuffle
for i, row in shuffle.iterrows():
    try:
        concat_to_file(i, train, train_path)
    except UnicodeDecodeError:
        # Some Europarl files contain invalid byte sequences; drop them.
        pass
    cnt += 1
    prog = cnt/len(train)
    update_progress(prog)
print('train file saved at: ', train_path)

# Same procedure for the validation split.
cnt = 0
print('creating validation file...')
shuffle = test.sample(frac=1)
for i, row in shuffle.iterrows():
    try:
        concat_to_file(i, test, val_path)
    except UnicodeDecodeError:
        pass
    cnt += 1
    prog = cnt/len(test)
    update_progress(prog)
print('validation file saved at: ', val_path)
|
prepare_data.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 2.1 Imports
import numpy as np
import pandas as pd
import pymongo
from pymongo import MongoClient
# +
# connection string in uri format
# # !pip install pymongo[srv]
# since the password is included, the actual string is hidden
## connection_string =
## "mongodb+srv://ORGANIZATION:PASSWORD@cluster-0.lmfnt.mongodb.net/DATABASE_NAME?retryWrites=true&w=majority"
# -

# connect to MongoDB Atlas (MongoDB cloud server)
# NOTE(review): `connection_string` is commented out above, so this cell
# raises NameError unless the real credentialed string is defined first.
client = MongoClient(connection_string)
db = client.test

# access to database
database = client['airbnb']

# access to collection(table)
collection = database['hongkong']
# # 2.2 Queries about '<NAME>'
# +
# total number of listings
x = collection.count_documents({'neighbourhood_cleansed':'Yau Tsim Mong'})
print('total number of listings:', x)
# +
# top 5 listings by review no
# Projection keeps only name and number_of_reviews; sort is descending.
x = collection.find({'neighbourhood_cleansed':'<NAME>'},
                    {'_id':0,
                     'name':1,
                     'number_of_reviews':1}).sort('number_of_reviews',-1).limit(5)

for cursor in x:
    print('listing name:', cursor['name'])
    print('number of reviews:', cursor['number_of_reviews'])
    print('\n')
# +
# next top 5 (5-10th) listings by review no — skip(5) pages past the first five
x = collection.find({'neighbourhood_cleansed':'<NAME>'},
                    {'_id':0,
                     'name':1,
                     'number_of_reviews':1}).sort('number_of_reviews',-1).skip(5).limit(5)

for cursor in x:
    print('listing name:', cursor['name'])
    print('number of reviews:', cursor['number_of_reviews'])
    print('\n')
# +
# top 5 listings by review rating with at least 50 reviews
# ($gt:50 means strictly more than 50 reviews)
x = collection.find({'neighbourhood_cleansed':'<NAME>',
                     'number_of_reviews':{'$gt':50}},
                    {'_id':0,
                     'name':1,
                     'number_of_reviews':1,
                     'review_scores_rating':1}).sort('review_scores_rating',-1).limit(5)

for cursor in x:
    print('listing name:', cursor['name'])
    print('review_scores_rating:', cursor['review_scores_rating'])
    print('number of reviews:', cursor['number_of_reviews'])
    print('\n')
# +
# next top 5 (5-10)listings by review rating with at least 50 reviews
x = collection.find({'neighbourhood_cleansed':'<NAME>',
                     'number_of_reviews':{'$gt':50}},
                    {'_id':0,
                     'name':1,
                     'number_of_reviews':1,
                     'review_scores_rating':1}).sort('review_scores_rating',-1).skip(5).limit(5)

for cursor in x:
    print('listing name:', cursor['name'])
    print('review_scores_rating:', cursor['review_scores_rating'])
    print('number of reviews:', cursor['number_of_reviews'])
    print('\n')
# +
# all listings with review rating score below 30
# NOTE(review): the original comment said "below 50" but the query uses
# $lt:30 — the query is taken as authoritative here.
x = collection.find({'neighbourhood_cleansed':'<NAME>',
                     'review_scores_rating':{'$lt':30}},
                    {'_id':0,
                     'name':1,
                     'review_scores_rating':1})

for cursor in x:
    try:
        print('listing name:', cursor['name'])
    except:
        # some documents lack a 'name' field; skip printing it
        pass
    print('review_scores_rating:', cursor['review_scores_rating'])
    print('\n')
# +
# observations show that the lowest review rating score is 20
# 20 may be the minimum possible score
# +
# average price
x = collection.find({'neighbourhood_cleansed':'<NAME>'},
                    {'_id':0,
                     'name':1,
                     'price':1})

total = 0
count = 0
for cursor in x:
    # BUG FIX: the original incremented `count` before parsing the price,
    # so listings whose price failed to parse still inflated the divisor
    # and dragged the average down. Count only successfully parsed prices.
    try:
        # Prices look like "$1,234.00" — strip currency symbol and commas.
        price = float(cursor['price'].replace('$', '').replace(',', ''))
    except (KeyError, ValueError, AttributeError):
        continue
    total += price
    count += 1

# Guard against an empty result set instead of raising ZeroDivisionError.
average_price = total / count if count else 0.0
print('average price:', round(average_price,2))
# +
# listings with 'bedrooms': 2 or 'beds': 3
# BUG FIX: the original put both conditions inside a single document,
# which makes $or a one-element list whose sole clause ANDs the two
# fields (2 bedrooms AND 3 beds). Each condition must be its own
# document for a true OR.
x = collection.count_documents({'neighbourhood_cleansed':'<NAME>',
                                '$or':
                                [{'bedrooms': 2},
                                 {'beds': 3}]})

print('number of listings that have 2 bedrooms or 3 beds:', x)
# -

# +
# END
|
2_yau_tsim_mong.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import tarfile
from six.moves import urllib
import pandas as pd
# BUG FIX: host was misspelled "rwa.githubusercontent.com"; any download
# built from this root would fail to resolve.
# NOTE(review): DOWNLOAD_ROOT is not used in this chunk — presumably a
# fetch_housing_data helper elsewhere consumes it; verify.
DOWNLOAD_ROOT = "https://raw.githubusercontent.com/ageron/handson-ml/master/"
HOUSING_PATH = os.path.join("datasets", "housing")

def load_housing_data(housing_path=HOUSING_PATH):
    """Read ``housing.csv`` from *housing_path* into a pandas DataFrame."""
    csv_path = os.path.join(housing_path, "housing.csv")
    return pd.read_csv(csv_path)
# -

housingdata = load_housing_data()
## housingdata.head()
### housingdata.info()
## housingdata.describe()
## housingdata["ocean_proximity"].value_counts()

# %matplotlib inline
import matplotlib.pyplot as plt

# Quick look at the distribution of every numeric attribute.
housingdata.hist(bins=50,figsize=(20,15))
plt.show()
# +
### creating test dat set
import numpy as np
def split_train_test(data, test_ratio=0.2):
    """Randomly partition *data* into (train, test) DataFrames.

    Uses a single call to np.random.permutation, so the split is
    reproducible when the NumPy global seed is fixed beforehand but
    changes on every run otherwise.
    """
    permuted = np.random.permutation(len(data))
    n_test = int(len(data) * test_ratio)
    test_positions = permuted[:n_test]
    train_positions = permuted[n_test:]
    return data.iloc[train_positions], data.iloc[test_positions]
# Naive random 80/20 split; not stable across runs unless the NumPy seed
# is fixed first (each run picks a different test set).
train_set,test_set=split_train_test(housingdata)
print(len(train_set),"train+",len(test_set),"test")
# -
### avoiding using data from training set into test set
import hashlib
def test_set_check(identifier,test_ratio,hash):
return hash(np.int64(identifier)).digest()[-1]<256 * test_ratio
def split_train_test_by_id(data,test_ratio,id_column,hash=hashlib.md5):
ids=data[id_column]
in_test_set = ids.apply(lambda id_: test_set_check(id_test_ratio,hash))
return data.loc[_in_test_set],data.loc[in_test_set]
# Scikit-Learn equivalent of the hand-rolled split, with a fixed seed.
from sklearn.model_selection import train_test_split
train_set,test_set = train_test_split(housingdata,test_size=0.2,random_state=42)
print(len(train_set),"train+",len(test_set),"test")

### to make sure the test data is representative of the various categories of incomes in the whole dataset
housingdata["income_cat"]=np.ceil(housingdata["median_income"]/1.5)
# Cap the category at 5 (everything >= 5 is merged into 5.0).
housingdata["income_cat"].where(housingdata["income_cat"]<5,5.0,inplace=True) ### pandas.dataFrame.where
housingdata["income_cat"].hist(bins=20,figsize=(10,8))
plt.show()

# +
# Stratified split on the income category so train and test share the
# same income distribution as the full dataset.
from sklearn.model_selection import StratifiedShuffleSplit
split=StratifiedShuffleSplit(n_splits=1,test_size=0.2,random_state=42)
for train_index,test_index in split.split(housingdata,housingdata["income_cat"]):
    print("train:",train_index,"test:",test_index)
    strat_train_set=housingdata.loc[train_index]
    strat_test_set=housingdata.loc[test_index]

# Compare income-category proportions in the stratified test set vs. overall.
test_set_var= strat_test_set["income_cat"].value_counts()/len(strat_test_set)
overall_set_var= housingdata["income_cat"].value_counts()/len(housingdata)
print(overall_set_var,test_set_var,(test_set_var-overall_set_var)/overall_set_var * 100)
# -
# -
### remove the income_cat attribute so the data is back to it's original state
for set_ in (strat_train_set,strat_test_set):
set_.drop("income_cat",axis=1,inplace=True) ## pandas.datafram.drop
housing=strat_train_set.copy()
housing.plot(kind="scatter",x="longitude",y="latitude",alpha=0.4,
s=housing["population"]/100,label="population",
figsize=(10,7),c="median_house_value",cmap=plt.get_cmap("jet"),
colorbar=True,)
plt.legend()
## compute the standard correlation coefficient ( pearson's r) between every pair of attributes
## ALERT: this is only works with linear correlation
corr_matrix = housing.corr()
corr_matrix["median_house_value"].sort_values(ascending=False)
# +
## another way to check for correlation
from pandas.plotting import scatter_matrix
attributes=["median_house_value","median_income","total_rooms","housing_median_age"]
scatter_matrix(housing[attributes],figsize=(12,8)) ## pandas function
# -
housing.plot(kind="scatter",x="median_income",y="median_house_value",alpha=0.1)
## found some data quirks around 50000 and 45000, 350000 we may need to clear up
### experimenting twith attribute combinations
housing["rooms_per_household"]=housing["total_rooms"]/housing["households"]
housing["bedrooms_per_room"]=housing["total_bedrooms"]/housing["total_rooms"]
housing["population_per_household"]=housing["population"]/housing["households"]
corr_matrix = housing.corr()
corr_matrix["median_house_value"].sort_values(ascending=False)
### prepare the data for ML algorithms
# step 1: clearning training set
housing=strat_train_set.drop("median_house_value",axis=1) ## drop is just copy the data from set to housing
housing_labels=strat_train_set["median_house_value"].copy()
## three options
housing.dropna(subset=["total_bedrooms"]) #option 1
housing.drop("total_bedrooms",axis=1) #option 2
median=housing["total_bedrooms"].median() ## option 3
housing["total_bedrooms"].fillna(median,inplace=True)
## Scikit_Learn imputer instance provides a lots functions
from sklearn.preprocessing import Imputer
imputer = Imputer(strategy="median")
housing_num=housing.drop("ocean_proximity",axis=1)
imputer.fit(housing_num)
imputer.statistics_
housing_num.median().values
## use this "trained" imputer to transfor the training set
X=imputer.transform(housing_num)
## put it back into a Pandas DataFrame
housing_tr=pd.DataFrame(X,columns=housing_num.columns)
print(housing_tr)
# +
### https://arxiv.org/pdf/1309.0238v1.pdf
###
# -
## handling Text and Categorical Attributes
## covert these categories from text to numbers
housing_cat=housing["ocean_proximity"]
housing_cat_encoded,housing_categories = housing_cat.factorize()
housing_cat_encoded[:10]
###housing_categories
## one issue with this representation is that ML algorithms will assume that two nearby values
## are more similar than two distant values. To fix this issue, a common solution is to create one binary
## attribute per category. It's called one-hot encoding
from sklearn.preprocessing import OneHotEncoder
encoder = OneHotEncoder()
housing_cat_1hot = encoder.fit_transform(housing_cat_encoded.reshape(-1,1)) ## SciPy sparse matrix, numpy reshape
housing_cat_1hot.toarray()
# +
## Custom Transformers
## all you need is to create a class and implement three methods: fit()(returning self)
## transform() and fit_transform(). You can get the last on for free by simply adding TransformerMixin as
## a base class and add BaseEstimator as a base class to get two extra methods( get_params() and set_params())
from sklearn.base import BaseEstimator,TransformerMixin
# Column positions of the raw attributes this transformer combines.
rooms_ix, bedrooms_ix, population_ix, household_ix = 3, 4, 5, 6

class CombinedAttributesAdder(BaseEstimator, TransformerMixin):
    """Append engineered ratio features to the housing feature matrix.

    Adds rooms-per-household and population-per-household columns, plus
    (optionally) bedrooms-per-room, in that order, after the original
    columns.
    """

    def __init__(self, add_bedrooms_per_room=True):
        self.add_bedrooms_per_room = add_bedrooms_per_room

    def fit(self, X, y=None):
        # Stateless transformer: nothing to learn.
        return self

    def transform(self, X, y=None):
        per_household_rooms = X[:, rooms_ix] / X[:, household_ix]
        per_household_population = X[:, population_ix] / X[:, household_ix]
        if self.add_bedrooms_per_room:
            bedrooms_ratio = X[:, bedrooms_ix] / X[:, rooms_ix]
            return np.c_[X, per_household_rooms, per_household_population,
                         bedrooms_ratio]
        return np.c_[X, per_household_rooms, per_household_population]
attr_adder=CombinedAttributesAdder(add_bedrooms_per_room=False)
housing_extra_attribs=attr_adder.transform(housing.values)
# -

### feature scaling, two common ways: min-max scaling and standardization
# min-max scaling (normalization): values are shifted and rescaled to range from 0 to 1
# transformer: MinMaxScaler; use feature_range if you don't want 0-1
# standardization is quite different: it subtracts the mean value and then divides by
# the standard deviation so the resulting distribution has unit variance. Its values are
# not bounded to a fixed range (which some neural networks prefer),
# but it's much less affected by outliers.
## StandardScaler
## WARNING: fit the scalers to the training data only!!! and then use them to transform
## the training set and the test set

# +
## Transformation Pipelines
# the sequence of steps. All but the last estimator must be transformers.
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

num_pipeline = Pipeline([
    ('imputer',Imputer(strategy="median")),
    ('attribs_adder',CombinedAttributesAdder()),
    ('std_scaler',StandardScaler()),
])

housing_num_tr=num_pipeline.fit_transform(housing_num)
# +
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils import check_array
from sklearn.preprocessing import LabelEncoder
from scipy import sparse
# NOTE(review): this class appears to be the CategoricalEncoder backport
# circulated with Hands-On ML before scikit-learn 0.20 shipped
# OneHotEncoder/OrdinalEncoder with equivalent functionality — verify and
# prefer the built-ins on modern sklearn.
class CategoricalEncoder(BaseEstimator, TransformerMixin):
    """Encode categorical features as a numeric array.
    The input to this transformer should be a matrix of integers or strings,
    denoting the values taken on by categorical (discrete) features.
    The features can be encoded using a one-hot aka one-of-K scheme
    (``encoding='onehot'``, the default) or converted to ordinal integers
    (``encoding='ordinal'``).
    This encoding is needed for feeding categorical data to many scikit-learn
    estimators, notably linear models and SVMs with the standard kernels.
    Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
    Parameters
    ----------
    encoding : str, 'onehot', 'onehot-dense' or 'ordinal'
        The type of encoding to use (default is 'onehot'):
        - 'onehot': encode the features using a one-hot aka one-of-K scheme
          (or also called 'dummy' encoding). This creates a binary column for
          each category and returns a sparse matrix.
        - 'onehot-dense': the same as 'onehot' but returns a dense array
          instead of a sparse matrix.
        - 'ordinal': encode the features as ordinal integers. This results in
          a single column of integers (0 to n_categories - 1) per feature.
    categories : 'auto' or a list of lists/arrays of values.
        Categories (unique values) per feature:
        - 'auto' : Determine categories automatically from the training data.
        - list : ``categories[i]`` holds the categories expected in the ith
          column. The passed categories are sorted before encoding the data
          (used categories can be found in the ``categories_`` attribute).
    dtype : number type, default np.float64
        Desired dtype of output.
    handle_unknown : 'error' (default) or 'ignore'
        Whether to raise an error or ignore if a unknown categorical feature is
        present during transform (default is to raise). When this is parameter
        is set to 'ignore' and an unknown category is encountered during
        transform, the resulting one-hot encoded columns for this feature
        will be all zeros.
        Ignoring unknown categories is not supported for
        ``encoding='ordinal'``.
    Attributes
    ----------
    categories_ : list of arrays
        The categories of each feature determined during fitting. When
        categories were specified manually, this holds the sorted categories
        (in order corresponding with output of `transform`).
    Examples
    --------
    Given a dataset with three features and two samples, we let the encoder
    find the maximum value per feature and transform the data to a binary
    one-hot encoding.
    >>> from sklearn.preprocessing import CategoricalEncoder
    >>> enc = CategoricalEncoder(handle_unknown='ignore')
    >>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], [1, 0, 2]])
    ... # doctest: +ELLIPSIS
    CategoricalEncoder(categories='auto', dtype=<... 'numpy.float64'>,
              encoding='onehot', handle_unknown='ignore')
    >>> enc.transform([[0, 1, 1], [1, 0, 4]]).toarray()
    array([[ 1.,  0.,  0.,  1.,  0.,  0.,  1.,  0.,  0.],
           [ 0.,  1.,  1.,  0.,  0.,  0.,  0.,  0.,  0.]])
    See also
    --------
    sklearn.preprocessing.OneHotEncoder : performs a one-hot encoding of
      integer ordinal features. The ``OneHotEncoder assumes`` that input
      features take on values in the range ``[0, max(feature)]`` instead of
      using the unique values.
    sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
      dictionary items (also handles string-valued features).
    sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
      encoding of dictionary items or strings.
    """

    def __init__(self, encoding='onehot', categories='auto', dtype=np.float64,
                 handle_unknown='error'):
        self.encoding = encoding
        self.categories = categories
        self.dtype = dtype
        self.handle_unknown = handle_unknown

    def fit(self, X, y=None):
        """Fit the CategoricalEncoder to X.
        Parameters
        ----------
        X : array-like, shape [n_samples, n_feature]
            The data to determine the categories of each feature.
        Returns
        -------
        self
        """
        if self.encoding not in ['onehot', 'onehot-dense', 'ordinal']:
            template = ("encoding should be either 'onehot', 'onehot-dense' "
                        "or 'ordinal', got %s")
            # BUG FIX: the original interpolated self.handle_unknown here,
            # reporting the wrong parameter in the error message.
            raise ValueError(template % self.encoding)
        if self.handle_unknown not in ['error', 'ignore']:
            template = ("handle_unknown should be either 'error' or "
                        "'ignore', got %s")
            raise ValueError(template % self.handle_unknown)
        if self.encoding == 'ordinal' and self.handle_unknown == 'ignore':
            raise ValueError("handle_unknown='ignore' is not supported for"
                             " encoding='ordinal'")
        X = check_array(X, dtype=np.object, accept_sparse='csc', copy=True)
        n_samples, n_features = X.shape
        # One LabelEncoder per column learns that column's category set.
        self._label_encoders_ = [LabelEncoder() for _ in range(n_features)]
        for i in range(n_features):
            le = self._label_encoders_[i]
            Xi = X[:, i]
            if self.categories == 'auto':
                le.fit(Xi)
            else:
                valid_mask = np.in1d(Xi, self.categories[i])
                if not np.all(valid_mask):
                    if self.handle_unknown == 'error':
                        diff = np.unique(Xi[~valid_mask])
                        msg = ("Found unknown categories {0} in column {1}"
                               " during fit".format(diff, i))
                        raise ValueError(msg)
                le.classes_ = np.array(np.sort(self.categories[i]))
        self.categories_ = [le.classes_ for le in self._label_encoders_]
        return self

    def transform(self, X):
        """Transform X using one-hot encoding.
        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            The data to encode.
        Returns
        -------
        X_out : sparse matrix or a 2-d array
            Transformed input.
        """
        X = check_array(X, accept_sparse='csc', dtype=np.object, copy=True)
        n_samples, n_features = X.shape
        X_int = np.zeros_like(X, dtype=np.int)
        X_mask = np.ones_like(X, dtype=np.bool)
        for i in range(n_features):
            valid_mask = np.in1d(X[:, i], self.categories_[i])
            if not np.all(valid_mask):
                if self.handle_unknown == 'error':
                    diff = np.unique(X[~valid_mask, i])
                    msg = ("Found unknown categories {0} in column {1}"
                           " during transform".format(diff, i))
                    raise ValueError(msg)
                else:
                    # Set the problematic rows to an acceptable placeholder
                    # value and continue. The rows are recorded in `X_mask`
                    # and their one-hot entries are dropped below.
                    X_mask[:, i] = valid_mask
                    X[:, i][~valid_mask] = self.categories_[i][0]
            X_int[:, i] = self._label_encoders_[i].transform(X[:, i])
        if self.encoding == 'ordinal':
            return X_int.astype(self.dtype, copy=False)
        # Build a sparse one-hot matrix: each feature's codes are offset by
        # the cumulative category counts of the preceding features.
        mask = X_mask.ravel()
        n_values = [cats.shape[0] for cats in self.categories_]
        n_values = np.array([0] + n_values)
        indices = np.cumsum(n_values)
        column_indices = (X_int + indices[:-1]).ravel()[mask]
        row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
                                n_features)[mask]
        data = np.ones(n_samples * n_features)[mask]
        out = sparse.csc_matrix((data, (row_indices, column_indices)),
                                shape=(n_samples, indices[-1]),
                                dtype=self.dtype).tocsr()
        if self.encoding == 'onehot-dense':
            return out.toarray()
        else:
            return out
# +
## feed a Pandas DataFrame containing non-numerical columns directly into pipeline,
# instead of having to first manully extract the numerical columns into a NumPy array
from sklearn.base import BaseEstimator,TransformerMixin
from sklearn_features.transformers import DataFrameSelector
class DataFrameSelector(BaseEstimator, TransformerMixin):
    """Select a fixed list of DataFrame columns and return them as a
    NumPy array, so a DataFrame can feed directly into an sklearn
    Pipeline of array-based transformers.
    """

    def __init__(self, attribute_names):
        self.attribute_names = attribute_names

    def fit(self, X, y=None):
        # Nothing to learn — the selection is fixed at construction time.
        return self

    def transform(self, X):
        selected = X[self.attribute_names]
        return selected.values
num_attribs = list(housing_num)
cat_attribs=["ocean_proximity"]

# Numeric branch: select numeric columns, impute medians, add engineered
# ratio features, then standardize.
num_pipeline = Pipeline([
    ('selector',DataFrameSelector(num_attribs)),
    ('imputer',Imputer(strategy="median")),
    ('attribs_adder',CombinedAttributesAdder()),
    ('std_scaler',StandardScaler()),
])

# Categorical branch: select the text column and one-hot encode it
# (dense output so it concatenates cleanly with the numeric branch).
cat_pipeline = Pipeline([
    ("selector",DataFrameSelector(cat_attribs)),
    ("cat_encoder",CategoricalEncoder(encoding="onehot-dense"))
])

# join two pipelines into a single pipeline using Scikit-Learn's FeatureUnion class
from sklearn.pipeline import FeatureUnion
full_pipeline = FeatureUnion(transformer_list=[
    ("num_pipleline",num_pipeline),
    ("cat_pipeline",cat_pipeline)
])

housing_prepared=full_pipeline.fit_transform(housing)
housing_prepared
# -
# +
## select and train a Model
## train a Linear Regression model
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(housing_prepared,housing_labels)

# Sanity-check predictions on a handful of training rows.
some_data=housing.iloc[:5]
some_labels=housing_labels.iloc[:5]
some_data_prepared = full_pipeline.transform(some_data)
print("predictions:",lin_reg.predict(some_data_prepared))

# measure the errors using Scikit-Learn's mean_squared_error function
from sklearn.metrics import mean_squared_error
housing_predictions = lin_reg.predict(housing_prepared)
lin_mse= mean_squared_error(housing_labels,housing_predictions)
line_rmse=np.sqrt(lin_mse)
line_rmse
# +
## Three options to solve underfitting: a more powerful model, better
## features, or reducing the constraints on the model
from sklearn.tree import DecisionTreeRegressor
tree_reg=DecisionTreeRegressor()
tree_reg.fit(housing_prepared,housing_labels)

# NOTE(review): evaluated on the training set — a tree can memorize it,
# so an RMSE near 0 here signals overfitting, not a good model.
housing_predictions = tree_reg.predict(housing_prepared)
tree_mse= mean_squared_error(housing_labels,housing_predictions)
tree_rmse=np.sqrt(tree_mse)
tree_rmse
# +
# Better Evaluation using cross-validation
from sklearn.model_selection import cross_val_score
scores = cross_val_score(tree_reg,housing_prepared,housing_labels,
    scoring="neg_mean_squared_error",cv=10)
## it randomly splits the training set into 10 distinct subsets called folds
# then it trains and evaluates the DT model 10 times, picking a different fold
# for evaluation each time. sklearn's CV expects a utility (higher is better),
# hence the negated MSE, which we flip back before taking the square root.
tree_rmse_scores=np.sqrt(-scores)
# -
## Display cross-validation results
def display_scores(scores):
    """Print a CV score array together with its mean and std deviation."""
    summary = (
        ("Scores:", scores),
        ("Mean:", scores.mean()),
        ("Standard deviation:", scores.std()),
    )
    for label, value in summary:
        print(label, value)
display_scores(tree_rmse_scores)

# Cross-validate the linear model the same way for comparison.
linear_scores = cross_val_score(lin_reg,housing_prepared,housing_labels,
    scoring="neg_mean_squared_error",cv=10)
lin_rmse_scores=np.sqrt(-linear_scores)
display_scores(lin_rmse_scores)
# +
## randomeForestRegressor , building a model on top of many other models is called Ensemble learning
from sklearn.ensemble import RandomForestRegressor
import os.path
from sklearn.externals import joblib
# make sure we save the model and then load it later on
modelfilepath="housing_RandomForest.pkl"
# BUG FIX: the original tested os.path.exists("") which is always False,
# so the cached model on disk was never reloaded and the forest was
# retrained on every run.
# NOTE(review): sklearn.externals.joblib is deprecated in modern sklearn;
# `import joblib` is the current form — confirm the installed version.
if os.path.exists(modelfilepath):
    forest_reg = joblib.load(modelfilepath)
else:
    forest_reg=RandomForestRegressor()
    forest_reg.fit(housing_prepared,housing_labels)
    joblib.dump(forest_reg,modelfilepath)
# -
from sklearn.metrics import mean_squared_error
housing_predictions = forest_reg.predict(housing_prepared)
forest_mse= mean_squared_error(housing_labels,housing_predictions)
forest_rmse=np.sqrt(forest_mse)
forest_rmse
print(forest_rmse)
# 10-fold cross-validation gives a more honest estimate of the forest's error.
forest_scores = cross_val_score(forest_reg,housing_prepared,housing_labels,
scoring="neg_mean_squared_error",cv=10)
forest_rmse_scores=np.sqrt(-forest_scores)
display_scores(forest_rmse_scores)
# Because the score on the training set is still much lower than on the
# validation sets, the model is still overfitting the training set. Before
# spending too much time tweaking the hyperparameters, try other models.
# +
# Fine-tune the model.
# Grid search (GridSearchCV): tell it which hyperparameters you want it to
# experiment with and what values to try out, and it will evaluate all the
# possible combinations of hyperparameter values, using cross-validation.
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.ensemble import RandomForestRegressor
from scipy.stats import randint as sp_randint
# Explicit grid for GridSearchCV (kept for reference; the randomized search
# below is what actually runs).
param_grid=[
    {'n_estimators':[3,10,30],'max_features':[2,4,6,8]},
    {'bootstrap':[False],'n_estimators':[3,10],'max_features':[2,3,4]},
]
# Distributions for RandomizedSearchCV: each draw samples an integer uniformly
# from the given half-open range.
param_dist = {"n_estimators": sp_randint(1, 200),
              "max_features": sp_randint(1, 8),
}
forest_reg = RandomForestRegressor(random_state=42)
# grid_search=GridSearchCV(forest_reg,param_grid,cv=5,scoring="neg_mean_squared_error")
# Randomized search is preferable when the hyperparameter search space is
# large: it evaluates n_iter random combinations instead of the full grid.
n_iter_search = 10
grid_search=RandomizedSearchCV(forest_reg, param_distributions=param_dist,
n_iter=n_iter_search, cv=5,scoring="neg_mean_squared_error", random_state=42)
grid_search.fit(housing_prepared,housing_labels)
# -
grid_search.best_params_
# Print the RMSE (sign-flipped from negative MSE) for every combination tried.
cvres=grid_search.cv_results_
for mean_score,params in zip(cvres["mean_test_score"],cvres["params"]):
    print(np.sqrt(-mean_score),params)
# Randomized search (RandomizedSearchCV) is preferable when the hyperparameter
# search space is large.
# Ensemble methods are another avenue worth exploring.
# Analyze the best models and their errors.
feature_importances = grid_search.best_estimator_.feature_importances_
feature_importances
# Pair each importance with its feature name. NOTE(review): num_attribs and
# cat_pipeline are defined in earlier notebook cells not shown here.
extra_attribs=["rooms_per_hhold","pop_per_hhold","bedrooms_per_room"]
cat_encoder=cat_pipeline.named_steps["cat_encoder"]
cat_one_hot_attribs = list(cat_encoder.categories_[0])
attributes = num_attribs + extra_attribs + cat_one_hot_attribs
sorted(zip(feature_importances,attributes),reverse=True)
# With this information, you may want to try dropping some of the less useful
# features; you should also look at the specific errors that your system makes.
# +
# Evaluate the final system on the held-out test set (transform only -- never
# fit the preprocessing pipeline on the test data).
final_model = grid_search.best_estimator_
X_test =strat_test_set.drop("median_house_value",axis=1)
y_test =strat_test_set["median_house_value"].copy()
X_test_prepared=full_pipeline.transform(X_test)
final_predications = final_model.predict(X_test_prepared)
final_mse=mean_squared_error(y_test,final_predications)
final_rmse=np.sqrt(final_mse)
final_rmse
# +
# Project pre-launch phase:
# highlight what you have learned, what worked and what did not,
# what assumptions were made, and what your system's limitations are;
# document everything and create nice presentations with clear visualizations
# and easy-to-remember statements.
# +
# After launch: monitor for sudden breakage, but also for performance degradation.
|
SKlearn.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ___
#
# <a href='https://www.udemy.com/user/joseportilla/'><img src='../Pierian_Data_Logo.png'/></a>
# ___
# <center><em>Content Copyright by <NAME></em></center>
# # Comparison Operators
#
# In this lecture we will be learning about Comparison Operators in Python. These operators will allow us to compare variables and output a Boolean value (True or False).
#
# If you have any sort of background in Math, these operators should be very straight forward.
#
# First we'll present a table of the comparison operators and then work through some examples:
#
# <h2> Table of Comparison Operators </h2><p> In the table below, a=3 and b=4.</p>
#
# <table class="table table-bordered">
# <tr>
# <th style="width:10%">Operator</th><th style="width:45%">Description</th><th>Example</th>
# </tr>
# <tr>
# <td>==</td>
# <td>If the values of two operands are equal, then the condition becomes true.</td>
# <td> (a == b) is not true.</td>
# </tr>
# <tr>
# <td>!=</td>
# <td>If values of two operands are not equal, then condition becomes true.</td>
# <td>(a != b) is true</td>
# </tr>
# <tr>
# <td>></td>
# <td>If the value of left operand is greater than the value of right operand, then condition becomes true.</td>
# <td> (a > b) is not true.</td>
# </tr>
# <tr>
# <td><</td>
# <td>If the value of left operand is less than the value of right operand, then condition becomes true.</td>
# <td> (a < b) is true.</td>
# </tr>
# <tr>
# <td>>=</td>
# <td>If the value of left operand is greater than or equal to the value of right operand, then condition becomes true.</td>
# <td> (a >= b) is not true. </td>
# </tr>
# <tr>
# <td><=</td>
# <td>If the value of left operand is less than or equal to the value of right operand, then condition becomes true.</td>
# <td> (a <= b) is true. </td>
# </tr>
# </table>
# Let's now work through quick examples of each of these.
#
# #### Equal
2 == 2  # True
1 == 0  # False
# Note that <code>==</code> is a <em>comparison</em> operator, while <code>=</code> is an <em>assignment</em> operator.
# #### Not Equal
2 != 1  # True
2 != 2  # False
# #### Greater Than
2 > 1  # True
2 > 4  # False
# #### Less Than
2 < 4  # True
2 < 1  # False
# #### Greater Than or Equal to
2 >= 2  # True
2 >= 1  # True
# #### Less than or Equal to
2 <= 2  # True
2 <= 4  # True
# **Great! Go over each comparison operator to make sure you understand what each one is saying. But hopefully this was straightforward for you.**
#
# Next we will cover chained comparison operators
|
Complete Python 3/01-Python Comparison Operators/01-Comparison Operators.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import cv2
import tensorflow as tf
import numpy as np
# TensorFlow 1.x session setup: allow_growth makes the GPU allocator claim
# memory incrementally instead of reserving the whole GPU up front.
# NOTE(review): tf.ConfigProto/tf.Session are TF1 APIs (tf.compat.v1 in TF2).
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
def prepare(image):
    """Load an image from disk and shape it for the classifier.

    Parameters
    ----------
    image : str
        Path to the image file on disk.

    Returns
    -------
    numpy.ndarray
        Array of shape (1, IMG_SIZE, IMG_SIZE, channels) with pixel
        values scaled to [0, 1].

    Raises
    ------
    FileNotFoundError
        If the file cannot be read as an image.
    """
    IMG_SIZE = 64  # must match the input size the model was trained on
    data = cv2.imread(image)
    # cv2.imread returns None (instead of raising) when the path is bad or the
    # file is not an image; fail fast with a clear error rather than crashing
    # inside cv2.resize with an opaque message.
    if data is None:
        raise FileNotFoundError(f"could not read image: {image}")
    resized = cv2.resize(data, (IMG_SIZE, IMG_SIZE))
    batch = np.expand_dims(resized, axis=0)  # add a leading batch dimension
    return np.array(batch) / 255
# Load the trained classifier saved under this relative path.
model = tf.keras.models.load_model('CatsVsDogs_CV_model')
# +
# Predict
# Enter the file name of a picture of a dog or cat.
picture_name = 'picture3.jpg'
# The model outputs a value in [0, 1]; np.round maps it to index 0 ("Dog")
# or index 1 ("Cat").
CATEGORIES = ["Dog","Cat"]
prediction=model.predict([prepare(picture_name)])
print(prediction)
print(CATEGORIES[int(np.round(prediction[0][0], decimals=0))])
|
Cats_vs_Dogs/Predict_Dog_vs_Cat.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Bootcamp
# name: bootcamp_venv
# ---
# # NumPy
#
# **NumPy** é uma biblioteca de algebra linear.
# Ela é importante para _Data Science_ em **Python** pois as próximas bibliotecas a utilizam como um de seus blocos principais.
#
# Outro motivo é porque também é muito rápida em execução e permite a criação de _arrays_ homogêneos.
# Comumente importada com essa abreviação
import numpy as np
# ## NumPy _Arrays_
#
# NumPy _arrays_ serão altamente vistos e trabalhados
# +
# np.array?
# -
py_list = [1, 2, 3]
py_list, type(py_list)
np_arr = np.array(py_list)
np_arr, type(np_arr)
# ### outros métodos
#
# NumPy possui outros métodos capazes de gerar arrays.
# Similar to Python's built-in range(): integers from 0 (inclusive) to 10 (exclusive)
np.arange(10)
# Create arrays of the given shape filled with zeros / ones
print(np.zeros((2, 3)))
print('\n')
print(np.ones((3, 2)))
# Return 'num' evenly spaced numbers between 'start' and 'stop' (inclusive)
np.linspace(0, 9, 10)
# Identity matrix: ones on the diagonal, zeros everywhere else
np.eye(5)
|
python/02-data-analysis/1-numpy/008-arrays.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Comparaison de lexiques créoles
#
# Vous disposez de trois lexiques de langues créoles (haïtien, mauricien et guadeloupéen) au format TSV qui listent un ensemble de verbes avec leurs transcriptions phonologiques et leur correspondance en français.
#
# Les trois lexiques partagent la même structure en trois colonnes :
# - le verbe en créole ;
# - la transcription phonologique ;
# - la correspondance en français.
#
# ## Lister les transcriptions
#
# Vous direz tout d’abord quels sont les verbes qui n’appartiennent qu’à un seul lexique puis quels sont ceux en commun aux trois. Pour ceux-là, vous finirez en comparant leurs prononciations. Au final, on voudrait pour chaque transcription française la liste des prononciations avec, dans l’ordre : le guadeloupéen, le haïtien puis le mauricien.
#
# ```txt
# adorer: adoʀe, adoɣe, adore
# ```
#
# ## Identifier les phonèmes divergents
#
# Dans un second temps, vous identifierez les phonèmes qui divergent entre les différentes transcriptions phonologiques et trouverez un moyen de les mettre en valeur dans la sortie de votre programme. Par exemple :
#
# ```txt
# adorer: ado[ʀ]e, ado[ɣ]e, ado[r]e
# ```
#
# ## Programme
#
# Vous partirez du code ci-dessous qui génère un dictionnaire de dictionnaires. Les dictionnaires imbriqués ont pour clé un étymon français auquel est attaché un tuple comprenant la forme en créole et sa transcription phonologique :
#
# ```python
# {'V-guadeloupean.tsv': {
# 'adorer': ('adoré', 'adoʀe'),
# …
# }
# }
# ```
# +
import csv
# One dict per lexicon file, keyed by the French correspondence and mapping to
# a (creole form, phonological transcription) tuple.
lexicons = {
    'V-guadeloupean.tsv': dict(),
    'V-haitian.tsv': dict(),
    'V-mauritian.tsv': dict(),
}
for lexicon in lexicons.keys():
    with open(f"../data/{lexicon}") as csvfile:
        # Each TSV row is: creole verb, phonological transcription, French gloss.
        reader = csv.reader(csvfile, delimiter='\t')
        for form, phon, fr in reader:
            # A later row with the same French gloss overwrites the earlier one.
            lexicons[lexicon].update({fr: (form, phon)})
# -
# ### Ensembles de verbes
#
# **Première étape :** constituez un ensemble de verbes pour chaque langue créole.
# Your code here
# Build one set of French glosses (the dict keys) per creole lexicon.
v_guadeloupean = set(lexicons['V-guadeloupean.tsv'])
v_haitian = set(lexicons['V-haitian.tsv'])
v_mauritian = set(lexicons['V-mauritian.tsv'])
# ### Verbes uniques
#
# **Deuxième étape :** pour chaque lexique, déterminez quels sont les verbes qui n’apparaissent dans aucun autre.
# Your code here
# Verbs found in exactly one lexicon: subtract the other two languages' sets.
v_p_guadeloupean = v_guadeloupean - v_haitian - v_mauritian
v_p_haitian = v_haitian - v_guadeloupean - v_mauritian
v_p_mauritian = v_mauritian - v_haitian - v_guadeloupean
# **Troisième étape :** déterminez à présent quels sont les verbes communs aux trois langues.
# Your code here
# Verbs present in all three lexicons.
v_commons = v_guadeloupean & v_haitian & v_mauritian
# **Quatrième étape :** à partir de la liste des verbes en commun aux trois langues, construisez un dictionnaire qui liste pour chacun les transcriptions phonologiques dans toutes les langues créoles.
# +
# Your code here
# Map each shared verb to its three phonological transcriptions, in the order
# Guadeloupean, Haitian, Mauritian (index [1] of the (form, phon) tuple).
transcriptions = dict()
for verb in v_commons:
    transcriptions.update({
        verb: (
            lexicons['V-guadeloupean.tsv'][verb][1],
            lexicons['V-haitian.tsv'][verb][1],
            lexicons['V-mauritian.tsv'][verb][1]
        )
    })
# -
# ### Identifier les phonèmes divergents
#
# Il vous reste, pour chaque transcription, à identifier les phonèmes qui divergent des autres !
# Your code here
for verb, (guadeloupean, haitian, mauritian) in transcriptions.items():
    # Character set of each transcription; assumes every phoneme is a single
    # Unicode character -- TODO confirm for multi-character phonemes.
    d = {
        'guadeloupean': set(guadeloupean),
        'haitian': set(haitian),
        'mauritian': set(mauritian)
    }
    # Union of the two symmetric differences = every character NOT shared by
    # all three transcriptions, i.e. the divergent phonemes.
    phonems = d['guadeloupean'] ^ d['haitian'] | d['haitian'] ^ d['mauritian']
    for phonem in phonems:
        # Highlight each divergent phoneme in square brackets.
        guadeloupean = guadeloupean.replace(phonem, f"[{phonem}]")
        haitian = haitian.replace(phonem, f"[{phonem}]")
        mauritian = mauritian.replace(phonem, f"[{phonem}]")
    print(f"{verb} : {guadeloupean}, {haitian}, {mauritian}")
|
2.data-structures/answers/8.creole-lexicons.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os,glob,re
from Bio import SeqIO
import baltic as bt
# Root of the SARS-CoV-2 analysis data (trees, alignments).
base_path='/Users/evogytis/Documents/manuscripts/SARS-CoV-2_kitenis/data/'
# GISAID accessions (EPI_ISL_xxxxx) of every sequence used in any tree/alignment.
used_seqs=set()
# Accession embedded between pipes in tip names, e.g. "...|EPI_ISL_12345|...".
# NOTE(review): prefer a raw string r'\|(EPI_ISL_[0-9]+)\|' to avoid
# invalid-escape-sequence warnings on newer Python versions.
accession=re.compile('\|(EPI_ISL_[0-9]+)\|')
# Collect accessions from the tips of every MCC tree.
for tpath in glob.glob(os.path.join(base_path,'trees/*mcc.tre')):
    ll=bt.loadNexus(tpath)
    ll.treeStats()
    for k in ll.getExternal():
        c=accession.search(k.name)
        if c:
            used_seqs.add(c.group(1))
# used_seqs=used_seqs.union(set([k.name.split('|')[1] if k.name.split('|')[1].startswith('EPI') else k.name.split('|')[2] for k in ll.getExternal()]))
# Same harvest for every Newick tree.
for tpath in glob.glob(os.path.join(base_path,'trees/*.newick')):
    ll=bt.loadNewick(tpath)
    ll.treeStats()
    for k in ll.getExternal():
        c=accession.search(k.name)
        if c:
            used_seqs.add(c.group(1))
# print([k.name for k in ll.getExternal()])
# used_seqs=used_seqs.union(set([k.name.split('|')[1] if k.name.split('|')[1].startswith('EPI') else k.name.split('|')[2] for k in ll.getExternal() if 'NC_' not in k.name]))
# ll=bt.loadNexus(os.path.join(base_path,'trees/core_continent.mcc.tre'))
# ll.treeStats()
# used_seqs=used_seqs.union(set([k.name.split('|')[1] for k in ll.getExternal()]))
# for seq in SeqIO.parse(os.path.join(base_path,'alignments/B.1.620_repr_Cameroon_wRef.fasta'),'fasta'):
# fields=seq.id.split('|')
# if len(fields)>1:
# used_seqs.add(fields[-2])
# else:
# print('not used',seq.id)
# Accessions from the core alignment; FASTA headers appear to be pipe-delimited
# with the accession in the second-to-last field -- TODO confirm header layout.
for seq in SeqIO.parse(os.path.join(base_path,'alignments/core.fasta'),'fasta'):
    fields=seq.id.split('|')
    if len(fields)>1:
        used_seqs.add(fields[-2])
    else:
        print('not used',seq.id)
# -
# Emit the final acknowledgment list: total count, then sorted accessions.
print(len(used_seqs))
for seq in sorted(used_seqs):
    print(seq)
|
scripts/acknowledgment-table.ipynb
|