code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.5.2
# language: julia
# name: julia-1.5
# ---
# # Single node modeling example
# We set up a 2D isotropic acoustic modeling operator for a single shot with the source located in the center of the model, and receivers located in a fixed array across the top of the model.
using PyPlot, WaveFD, JetPackWaveFD, Random
# ## Define the model
# Even though this is a 2D example, the model passed to the nonlinear operator below is 3D, with size [nz,nx,1] for the velocity only case. In the case of variable density acoustics, the model would be of size [nz,nx,2].
# Load the Marmousi P-wave velocity model (Float32, 176 z-samples x 851 x-samples)
# from a flat binary file.
# NOTE(review): path is relative to the notebook's working directory — assumes the
# model-setup notebook has been run first; confirm the file exists.
v = read!("../20_marmousi_model_setup/marmousi_vp_20m_176x851.bin", Array{Float32}(undef,176,851));
# Grid spacing (meters) in depth (z) and lateral (x) directions.
dz,dx = 20.0,20.0
nz,nx = size(v)
# The nonlinear operator expects a 3D model; for the velocity-only isotropic
# case the third dimension has size 1 (see the markdown note above).
m = reshape(v, (nz,nx,1))
@show dz,dx
@show size(v)
@show size(m);
# ## Note on scratch space for temporary files
# When dealing with serialized nonlinear wavefields as in this example, we need to specify the location where scratch files will be written.
#
# You may need to change this to point to a temporary directory available on your system.
# Directory where the serialized source wavefield scratch files are written.
scratch = "/mnt/scratch"
# Fail fast with a clear message if the scratch directory is missing.
# (`@assert` is meant for internal invariants and may be disabled at higher
# optimization levels, so it is not a reliable environment check.)
isdir(scratch) || error("scratch directory $scratch does not exist; edit `scratch` to point at a writable temporary directory")
# Construct the 2D isotropic acoustic nonlinear modeling operator.
# Keyword arguments (see JetPackWaveFD docs):
#   b            - buoyancy (1/density) model; all ones => constant density
#   nthreads     - worker threads for the finite-difference propagator
#   isinterior   - NOTE(review): presumably restricts wavefield serialization
#                  to the interior (non-padded) domain — confirm in package docs
#   ntrec/dtrec  - number of recorded time samples and recording interval (s)
#   dtmod        - modeling (propagation) time step (s)
#   sx, sz       - source at the lateral center of the model, one cell deep
#   rx, rz       - receiver array across the top of the model at half-cell
#                  spacing in x, fixed depth 2*dz
#   srcfieldfile - randomized scratch-file name for the serialized wavefield
F = JopNlProp2DAcoIsoDenQ_DEO2_FDTD(;
b = ones(Float32,size(v)),
nthreads = Sys.CPU_THREADS,
isinterior = true,
ntrec = 1601,
dtrec = 0.004,
dtmod = 0.002,
dz = dz,
dx = dx,
wavelet = WaveletCausalRicker(f=5.0),
sx = dx*(nx/2),
sz = dz,
rx = dx*[0:0.5:nx-1;],
rz = 2*dz*ones(length(0:0.5:nx-1)),
srcfieldfile = joinpath(scratch, "field-$(randstring()).bin"),
reportinterval=1000)
# Apply the nonlinear operator: model the shot, producing the recorded data d.
d = F*m;
# Close the modeling operator to remove the serialized wavefield scratch files.
close(F)
# Display the modeled shot gather (receivers across the top, time increasing down).
figure(figsize=(16,9)); clf()
imshow(d,aspect="auto", cmap="gray")
# Clip the color scale to 2.5% of the peak amplitude so weaker arrivals
# remain visible next to the strong direct wave.
dmax = maximum(abs, d)
clim(0.025 .* [-dmax,+dmax])
colorbar();
| 30_forward_modeling/03_SingleNode.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib notebook
import matplotlib.pyplot as plt
import numpy as np
# Cities to chart and the corresponding car counts (units not stated in the
# exercise — presumably thousands of cars; confirm against the lesson data).
cities = ["San Francisco", "Omaha", "New Orleans", "Cincinnati", "Pittsburgh"]
cars_in_cities = [214.7, 564.4, 416.5, 466.7, 350.6]
# One x position per city: [0, 1, 2, 3, 4].
x_axis = np.arange(len(cars_in_cities))
# +
# Create a bar chart based upon the above data
# +
# Create the ticks for our bar chart's x axis
# +
# Set the limits of the x axis
# +
# Set the limits of the y axis
# +
# Give the chart a title, x label, and y label
# +
# Save an image of the chart and print it to the screen
# -
| 01-Lesson-Plans/05-Matplotlib/1/Activities/08-Stu_PyBars/Unsolved/py_bars.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# **This notebook is an exercise in the [Intermediate Machine Learning](https://www.kaggle.com/learn/intermediate-machine-learning) course. You can reference the tutorial at [this link](https://www.kaggle.com/alexisbcook/pipelines).**
#
# ---
#
# In this exercise, you will use **pipelines** to improve the efficiency of your machine learning code.
#
# # Setup
#
# The questions below will give you feedback on your work. Run the following cell to set up the feedback system.
# Set up code checking
import os
# When the course data is not already at the expected ../input/ paths
# (i.e. when run outside the Kaggle competition kernel), symlink the
# home-data-for-ml-course files into place so later cells can use fixed names.
if not os.path.exists("../input/train.csv"):
os.symlink("../input/home-data-for-ml-course/train.csv", "../input/train.csv")
os.symlink("../input/home-data-for-ml-course/test.csv", "../input/test.csv")
# Bind the learntools feedback system to this notebook's globals so the
# step_* checkers below can inspect the variables we define.
from learntools.core import binder
binder.bind(globals())
from learntools.ml_intermediate.ex4 import *
print("Setup Complete")
# You will work with data from the [Housing Prices Competition for Kaggle Learn Users](https://www.kaggle.com/c/home-data-for-ml-course).
#
# 
#
# Run the next code cell without changes to load the training and validation sets in `X_train`, `X_valid`, `y_train`, and `y_valid`. The test set is loaded in `X_test`.
# +
import pandas as pd
from sklearn.model_selection import train_test_split
# Read the data
X_full = pd.read_csv('../input/train.csv', index_col='Id')
X_test_full = pd.read_csv('../input/test.csv', index_col='Id')
# Remove rows with missing target, separate target from predictors
X_full.dropna(axis=0, subset=['SalePrice'], inplace=True)
y = X_full.SalePrice
X_full.drop(['SalePrice'], axis=1, inplace=True)
# Break off validation set from training data (80/20, fixed seed for
# reproducible splits across notebook runs)
X_train_full, X_valid_full, y_train, y_valid = train_test_split(X_full, y,
train_size=0.8, test_size=0.2,
random_state=0)
# "Cardinality" means the number of unique values in a column
# Select categorical columns with relatively low cardinality (convenient but arbitrary;
# keeps the one-hot encoded feature count manageable)
categorical_cols = [cname for cname in X_train_full.columns if
X_train_full[cname].nunique() < 10 and
X_train_full[cname].dtype == "object"]
# Select numerical columns
numerical_cols = [cname for cname in X_train_full.columns if
X_train_full[cname].dtype in ['int64', 'float64']]
# Keep selected columns only; .copy() avoids chained-assignment warnings
# when these frames are modified later
my_cols = categorical_cols + numerical_cols
X_train = X_train_full[my_cols].copy()
X_valid = X_valid_full[my_cols].copy()
X_test = X_test_full[my_cols].copy()
# -
X_train.head()
# The next code cell uses code from the tutorial to preprocess the data and train a model. Run this code without changes.
# +
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OneHotEncoder
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
# Preprocessing for numerical data: fill missing values with the constant
# default (0 for numeric columns)
numerical_transformer = SimpleImputer(strategy='constant')
# Preprocessing for categorical data: impute with the most frequent category,
# then one-hot encode; handle_unknown='ignore' encodes unseen categories as
# all-zeros instead of raising at predict time
categorical_transformer = Pipeline(steps=[
('imputer', SimpleImputer(strategy='most_frequent')),
('onehot', OneHotEncoder(handle_unknown='ignore'))
])
# Bundle preprocessing for numerical and categorical data; each transformer
# is applied only to its own column list
preprocessor = ColumnTransformer(
transformers=[
('num', numerical_transformer, numerical_cols),
('cat', categorical_transformer, categorical_cols)
])
# Define model
model = RandomForestRegressor(n_estimators=100, random_state=0)
# Bundle preprocessing and modeling code in a pipeline
clf = Pipeline(steps=[('preprocessor', preprocessor),
('model', model)
])
# Preprocessing of training data, fit model
clf.fit(X_train, y_train)
# Preprocessing of validation data, get predictions
preds = clf.predict(X_valid)
print('MAE:', mean_absolute_error(y_valid, preds))
# -
# The code yields a value around 17862 for the mean absolute error (MAE). In the next step, you will amend the code to do better.
#
# # Step 1: Improve the performance
#
# ### Part A
#
# Now, it's your turn! In the code cell below, define your own preprocessing steps and random forest model. Fill in values for the following variables:
# - `numerical_transformer`
# - `categorical_transformer`
# - `model`
#
# To pass this part of the exercise, you need only define valid preprocessing steps and a random forest model.
# +
# Your code here
# Preprocessing for numerical data
numerical_transformer = SimpleImputer(strategy = 'constant') # with strategy "constant", missing values are replaced by fill_value
# default fill_value is 0 when imputing numerical data,
# and "missing_value" for strings or object columns
# Preprocessing for categorical data
categorical_transformer = Pipeline(steps=[
('imputer', SimpleImputer(strategy='most_frequent')), # fill missing categories with the most frequent value
('onehot', OneHotEncoder(handle_unknown='ignore')) # categories unseen during fit are encoded as all-zeros instead of raising
])
# Bundle preprocessing for numerical and categorical data
preprocessor = ColumnTransformer(
transformers=[
('num', numerical_transformer, numerical_cols),
('cat', categorical_transformer, categorical_cols)
])
# Define model
model = RandomForestRegressor(n_estimators=100, random_state=0)
# Check your answer
step_1.a.check()
# +
# Lines below will give you a hint or solution code
#step_1.a.hint()
#step_1.a.solution()
# -
# ### Part B
#
# Run the code cell below without changes.
#
# To pass this step, you need to have defined a pipeline in **Part A** that achieves lower MAE than the code above. You're encouraged to take your time here and try out many different approaches, to see how low you can get the MAE! (_If your code does not pass, please amend the preprocessing steps and model in Part A._)
# +
# Bundle preprocessing and modeling code in a pipeline
my_pipeline = Pipeline(steps=[('preprocessor', preprocessor),
('model', model)
])# single estimator combining the preprocessor and the model
# Preprocessing of training data, fit model
my_pipeline.fit(X_train, y_train) # fits the preprocessor and model together on the training data
# Preprocessing of validation data, get predictions
preds = my_pipeline.predict(X_valid) # same preprocessing is applied automatically before predicting
# Evaluate the model
score = mean_absolute_error(y_valid, preds)
print('MAE:', score)
# Check your answer
step_1.b.check()
# -
# Line below will give you a hint
# (kept commented out, consistent with the other hint/solution cells above)
#step_1.b.hint()
# +
# Improved Part-A attempt: mean imputation for numeric columns and
# constant-fill for categoricals, aiming for a lower validation MAE.
# Preprocessing for numerical data
numerical_transformer = SimpleImputer(strategy='mean') # with strategy "mean", missing values are replaced by the column mean
# Preprocessing for categorical data
categorical_transformer = Pipeline(steps=[
('imputer', SimpleImputer(strategy='constant')),
('onehot', OneHotEncoder(handle_unknown='ignore'))
])
# Bundle preprocessing for numerical and categorical data
preprocessor = ColumnTransformer(
transformers=[
('num', numerical_transformer, numerical_cols),
('cat', categorical_transformer, categorical_cols)
])
# Define model
model = RandomForestRegressor(n_estimators=100, random_state=0)
# Bundle preprocessing and modeling code in a pipeline
my_pipeline = Pipeline(steps=[('preprocessor', preprocessor),
('model', model)
])
# Preprocessing of training data, fit model
my_pipeline.fit(X_train, y_train)
# Preprocessing of validation data, get predictions
preds = my_pipeline.predict(X_valid)
# Evaluate the model
score = mean_absolute_error(y_valid, preds)
print('MAE:', score)
# Check your answer
step_1.b.check()
# -
# # Step 2: Generate test predictions
#
# Now, you'll use your trained model to generate predictions with the test data.
# +
# Preprocessing of test data, get predictions (the already-fitted pipeline
# applies the same preprocessing to X_test before predicting)
preds_test = my_pipeline.predict(X_test) # Your code here
# Check your answer
step_2.check()
# +
# Lines below will give you a hint or solution code
#step_2.hint()
#step_2.solution()
# -
# Run the next code cell without changes to save your results to a CSV file that can be submitted directly to the competition.
# Write the test-set predictions to submission.csv in the format the
# competition expects: an Id column plus the predicted SalePrice, no
# DataFrame index column.
output = pd.DataFrame({'Id': X_test.index, 'SalePrice': preds_test})
output.to_csv('submission.csv', index=False)
print("done")
# # Submit your results
#
# Once you have successfully completed Step 2, you're ready to submit your results to the leaderboard! If you choose to do so, make sure that you have already joined the competition by clicking on the **Join Competition** button at [this link](https://www.kaggle.com/c/home-data-for-ml-course).
# 1. Begin by clicking on the **Save Version** button in the top right corner of the window. This will generate a pop-up window.
# 2. Ensure that the **Save and Run All** option is selected, and then click on the **Save** button.
# 3. This generates a window in the bottom left corner of the notebook. After it has finished running, click on the number to the right of the **Save Version** button. This pulls up a list of versions on the right of the screen. Click on the ellipsis **(...)** to the right of the most recent version, and select **Open in Viewer**. This brings you into view mode of the same page. You will need to scroll down to get back to these instructions.
# 4. Click on the **Output** tab on the right of the screen. Then, click on the file you would like to submit, and click on the **Submit** button to submit your results to the leaderboard.
#
# You have now successfully submitted to the competition!
#
# If you want to keep working to improve your performance, select the **Edit** button in the top right of the screen. Then you can change your code and repeat the process. There's a lot of room to improve, and you will climb up the leaderboard as you work.
#
#
# # Keep going
#
# Move on to learn about [**cross-validation**](https://www.kaggle.com/alexisbcook/cross-validation), a technique you can use to obtain more accurate estimates of model performance!
# ---
#
#
#
#
# *Have questions or comments? Visit the [course discussion forum](https://www.kaggle.com/learn/intermediate-machine-learning/discussion) to chat with other learners.*
| exercise-pipelines.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ###### List of Modules
# +
## pandas used to transform given tsv to csv file
import pandas as pd
## CountVectorizer tokenizes the collection of text documents and builds a vocabulary of known words; it returns integer counts
## TfidfVectorizer is the same as CountVectorizer but returns float tf-idf weights. Below I compare the two outputs; the
## comparison is False because the float and integer values are not equal
##
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
## from sklearn.linear_model. I'd imported "PassiveAggressiveClassifier, SGDClassifier" for classification
from sklearn.linear_model import PassiveAggressiveClassifier, SGDClassifier
## from sklearn.svm. I'd imported "LinearSVC" for classication
from sklearn.svm import LinearSVC
## from sklearn.naive_bayes. I'd imported "MultinomialNB" is another kind of classifier most of the Data Scienctist use this
## one for better accuracy
from sklearn.naive_bayes import MultinomialNB
## metrics for predictions
from sklearn import metrics
## pyplot for visualizing graphs
import matplotlib.pyplot as plt
## shuffle for shuffling the data randomly
from sklearn.utils import shuffle
## classification_report which gives f1-score, precision, recall, support
from sklearn.metrics import classification_report
## seaborn is used for Data Visualizarion library
## I made confusion matrix for every classifier. So, for visualizing we need seaborn
import seaborn as sns
# -
# The given data has a .tsv extension. I converted it into a .csv file for training and testing using pandas
# Convert the tab-separated training file to CSV so later cells can use read_csv.
train2_tsv = 'train2.tsv'
train2_csv = pd.read_table(train2_tsv,sep = '\t')
# NOTE(review): index=True here but index=False for the test file below —
# this asymmetry looks unintentional; the downstream column references
# depend on the current behavior, so confirm before changing.
train2_csv.to_csv('train2.csv',index = True)
# Let's see some train data
train = pd.read_csv('train2.csv')
train.head()
# Now the data has been converted to a csv file, but it doesn't have column names. So I grab the columns I need for training and then convert the data into a DataFrame using pandas
# The TSV had no header row, so pandas used the first data row as column
# names; select the needed columns by those (data-valued) names and rebuild
# a DataFrame with sensible column labels.
train_dict = {'ID':train['0'],'train_statement':train['Says the Annies List political group supports third-trimester abortions on demand.'],
'Justification':train["That's a premise that he fails to back up. Annie's List makes no bones about being comfortable with candidates who oppose further restrictions on late-term abortions. Then again, this year its backing two House candidates who voted for more limits."],
'train_label':train['false']}
train_df = pd.DataFrame(train_dict)
train_df.count()
# ## We need to classify only true and false
# I kept only the training data labeled true or false.
# The code below first separates the true- and false-labeled data, and then concatenates them
# Keep only the binary labels: split out the 'false' and 'true' rows.
false_set = train_df.loc[train_df['train_label'] == 'false']
true_set = train_df.loc[train_df['train_label'] == 'true']
print(false_set.count())
print(true_set.count())
# Now concatenating the true and false labeled data
## concatenate the true- and false-labeled subsets into one training frame
real_train_data = pd.concat([false_set,true_set])
## shuffle so the two label blocks are interleaved (non-deterministic order)
real_train_data = shuffle(real_train_data)
real_train_data.columns
# As with the training data, I converted the test data from a .tsv file into a csv file
# Convert the tab-separated test file to CSV (index=False, unlike the train file).
test2_tsv = 'test2.tsv'
test2_csv = pd.read_table(test2_tsv,sep = '\t')
test2_csv.to_csv('test2.csv',index = False)
# Now the data has been converted to a csv file, but it doesn't have column names. So I grab the columns I need for testing and then convert the data into a DataFrame using pandas
# As with the training data, the header row is a data row; select columns
# by those data-valued names and rebuild with sensible labels.
test = pd.read_csv('test2.csv')
test_dict = {'ID':test['0'],'test_statement':test['Building a wall on the U.S.-Mexico border will take literally years.'],
'Justification':test['Meantime, engineering experts agree the wall would most likely take years to complete. Keep in mind, too, it took more than six years to build roughly 700 miles of fence and barriers along the roughly 2,000-mile U. S. -Mexico border.'],
'test_label':test['true']}
test_df = pd.DataFrame(test_dict)
# In the test data I likewise separate the true and false labels and finally concatenate them
# Keep only the binary labels in the test set, then recombine and shuffle.
test_false_set = test_df.loc[test_df['test_label'] == 'false']
test_true_set = test_df.loc[test_df['test_label'] == 'true']
print(test_false_set.count())
print(test_true_set.count())
# +
## concatenate the true- and false-labeled test subsets
real_test_data = pd.concat([test_false_set,test_true_set])
## shuffle real_test_data (non-deterministic order)
real_test_data = shuffle(real_test_data)
# +
## CountVectorizer tokenizes the text, builds a vocabulary of known words
## (English stop words removed), and returns integer token counts
count_vectorizer = CountVectorizer(stop_words = 'english')
## fit the vocabulary on the training statements and transform them to counts
count_train = count_vectorizer.fit_transform(real_train_data['train_statement'])
## transform the test statements to a document-term matrix using the same vocabulary
count_test = count_vectorizer.transform(real_test_data['test_statement'])
# +
## TfidfVectorizer is like CountVectorizer but returns float tf-idf weights
## instead of raw integer counts (max_df=0.7 drops very common terms).
tfidf_vectorizer = TfidfVectorizer(stop_words='english', max_df=0.7)
## Fit/transform on the same filtered (true/false only) data that the
## CountVectorizer uses above, so the count-vs-tfidf comparison below really
## compares the two vectorizers on the same dataset. (Previously this fit on
## the unfiltered train_df/test_df, making the comparison inconsistent.)
tfidf_train = tfidf_vectorizer.fit_transform(real_train_data['train_statement'])
tfidf_test = tfidf_vectorizer.transform(real_test_data['test_statement'])
# -
# Let's see the feature names and their matrix values
# Peek at the first few vocabulary terms and dense rows of the tf-idf matrix.
# NOTE(review): get_feature_names() was removed in scikit-learn 1.2 in favor
# of get_feature_names_out() — this cell assumes an older sklearn; confirm.
print(tfidf_vectorizer.get_feature_names()[:10])
print(tfidf_train.A[:5])
tfidf_train.shape
# The code below shows that CountVectorizer returns integer counts while TfidfVectorizer returns float values for the same dataset. Finally, I compare the two
# Densify both matrices into DataFrames (columns = vocabulary terms) to
# compare the two vectorizers' outputs side by side.
count_df = pd.DataFrame(count_train.A, columns=count_vectorizer.get_feature_names())
tfidf_df = pd.DataFrame(tfidf_train.A, columns=tfidf_vectorizer.get_feature_names())
print(count_df.head())
print(tfidf_df.head())
# Vocabulary terms present in the count features but not the tf-idf features.
difference = set(count_df.columns) - set(tfidf_df.columns)
print(difference)
# False: integer counts and float tf-idf weights differ element-wise.
print(count_df.equals(tfidf_df))
# ## Multinomial Naive Bayes Classifier
# +
## Instantiate a Multinomial Naive Bayes classifier: nb_classifier
nb_classifier = MultinomialNB()
## Fit the classifier to the training data (count features)
nb_classifier.fit(count_train, real_train_data['train_label'])
## Create the predicted tags: mnb_pred
mnb_pred = nb_classifier.predict(count_test)
## Calculate the accuracy score: mnb_score
mnb_score = metrics.accuracy_score(real_test_data['test_label'], mnb_pred)
## Calculate the confusion matrix (rows/cols ordered true, false): mnb_cm
mnb_cm = metrics.confusion_matrix(real_test_data['test_label'], mnb_pred, labels=['true', 'false'])
print('Confusion Matrix')
print(mnb_cm)
print("Multinomial Naive Bayes accuracy: %0.3f" % mnb_score)
# +
# Recompute the confusion matrix (default label ordering) and plot it as a
# heatmap, followed by the per-class precision/recall/f1 report.
mnb_cm = metrics.confusion_matrix(real_test_data['test_label'],mnb_pred)
plt.figure(figsize=(5,5))
sns.heatmap(mnb_cm, annot=True, linewidth=.5, square = True, cmap = 'Blues_r',fmt='f');
plt.ylabel('Actual label');
plt.xlabel('Predicted label');
plt.title('Confusion Matrix for MultinomialNB', size = 10);
# show() must come after the figure is drawn (it was previously called
# before plt.figure(), so this plot was never displayed by it).
plt.show()
report = classification_report(real_test_data['test_label'],mnb_pred)
print(report)
# -
# ## Passive Aggressive Classifier
# +
## Instantiate a Passive Aggressive classifier: pa_tfidf_clf
## NOTE(review): despite the "tfidf" name it is trained on the count features
pa_tfidf_clf = PassiveAggressiveClassifier()
## Fit the classifier to the training data
pa_tfidf_clf.fit(count_train, real_train_data['train_label'])
## Create the predicted tags: pac_pred
pac_pred = pa_tfidf_clf.predict(count_test)
## Calculate the accuracy score: pac_score
pac_score = metrics.accuracy_score(real_test_data['test_label'], pac_pred)
## Calculate the confusion matrix (rows/cols ordered true, false): pac_cm
pac_cm = metrics.confusion_matrix(real_test_data['test_label'], pac_pred, labels=['true', 'false'])
print('Confusion Matrix --- PassiveAggressiveClassifier')
print(pac_cm)
print("accuracy: %0.3f" % pac_score)
# -
# Below code shows the confusion matrix in a graphical form for PassiveAggressiveClassifier and classification reports
# +
# Heatmap of the PassiveAggressiveClassifier confusion matrix plus the
# per-class precision/recall/f1 report.
pac_cm = metrics.confusion_matrix(real_test_data['test_label'],pac_pred)
plt.figure(figsize=(5,5))
sns.heatmap(pac_cm, annot=True, linewidth=.5, square = True, cmap = 'Blues_r',fmt='f');
plt.ylabel('Actual label');
plt.xlabel('Predicted label');
# Title previously said "MultinomialNB" — copy-paste error; this plot is for
# the PassiveAggressiveClassifier.
plt.title('Confusion Matrix --- PassiveAggressiveClassifier', size = 10);
# show() moved after the figure is drawn (it was called before plt.figure()).
plt.show()
report = classification_report(real_test_data['test_label'],pac_pred)
print(report)
# -
# ## Support Vector Classifier
# +
## Instantiate a linear Support Vector classifier: svc_tfidf_clf
## NOTE(review): despite the "tfidf" name it is trained on the count features
svc_tfidf_clf = LinearSVC()
## Fit the classifier to the training data
svc_tfidf_clf.fit(count_train, real_train_data['train_label'])
## Create the predicted tags: svc_pred
svc_pred = svc_tfidf_clf.predict(count_test)
## Calculate the accuracy score: svc_score
svc_score = metrics.accuracy_score(real_test_data['test_label'], svc_pred)
## Calculate the confusion matrix (rows/cols ordered true, false): svc_cm
svc_cm = metrics.confusion_matrix(real_test_data['test_label'], svc_pred, labels=['true', 'false'])
print('Confusion Matrix --- LinearSVC')
print(svc_cm)
print("accuracy: %0.3f" % svc_score)
# -
# Below code shows the confusion matrix in a graphical form for SVCClassifier and classification reports
# +
# Heatmap of the LinearSVC confusion matrix plus the per-class
# precision/recall/f1 report.
svc_cm = metrics.confusion_matrix(real_test_data['test_label'],svc_pred)
plt.figure(figsize=(5,5))
sns.heatmap(svc_cm, annot=True, linewidth=.5, square = True, cmap = 'Blues_r',fmt='f');
plt.ylabel('Actual label');
plt.xlabel('Predicted label');
# Title previously said "MultinomialNB" — copy-paste error; this plot is for
# the LinearSVC classifier.
plt.title('Confusion Matrix --- LinearSVC', size = 10);
# show() moved after the figure is drawn (it was called before plt.figure()).
plt.show()
report = classification_report(real_test_data['test_label'],svc_pred)
print(report)
# -
# ## Stochastic Gradient Descent Classifier
# +
## Instantiate a Stochastic Gradient Descent classifier: sgd_tfidf_clf
## (original comment said "Multinomial Naive Bayes" — copy-paste error)
## NOTE(review): despite the "tfidf" name it is trained on the count features
sgd_tfidf_clf = SGDClassifier()
## Fit the classifier to the training data
sgd_tfidf_clf.fit(count_train, real_train_data['train_label'])
## Create the predicted tags: sgd_pred
sgd_pred = sgd_tfidf_clf.predict(count_test)
## Calculate the accuracy score: sgd_score
sgd_score = metrics.accuracy_score(real_test_data['test_label'], sgd_pred)
## Calculate the confusion matrix (rows/cols ordered true, false): sgd_cm
sgd_cm = metrics.confusion_matrix(real_test_data['test_label'], sgd_pred, labels=['true', 'false'])
print('Confusion Matrix --- SGD Classifier')
print(sgd_cm)
print("accuracy: %0.3f" % sgd_score)
# +
# Heatmap of the SGDClassifier confusion matrix plus the per-class
# precision/recall/f1 report.
sgd_cm = metrics.confusion_matrix(real_test_data['test_label'],sgd_pred)
plt.figure(figsize=(5,5))
sns.heatmap(sgd_cm, annot=True, linewidth=.5, square = True, cmap = 'Blues_r',fmt='f');
plt.ylabel('Actual label');
plt.xlabel('Predicted label');
# Title previously said "MultinomialNB" — copy-paste error; this plot is for
# the SGDClassifier.
plt.title('Confusion Matrix --- SGDClassifier', size = 10);
# show() moved after the figure is drawn (it was called before plt.figure()).
plt.show()
report = classification_report(real_test_data['test_label'],sgd_pred)
print(report)
# -
| Mallikarjuna/Fake News Detection/binary_classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Task #1
#
# A template code for training an RBM on H$_2$ data for $r = 1.2$ is shown here. Modify this!
#
# Imports and loading in data:
# +
import numpy as np
import torch
import matplotlib.pyplot as plt
import os
from RBM_helper import RBM
import H2_energy_calculator
import typing
import collections
# Load the binary measurement samples for bond radius r = 1.2.
training_data = torch.from_numpy(np.loadtxt("H2_data/R_1.2_samples.txt"))
# NOTE(review): row 20 of the coefficients file presumably corresponds to
# r = 1.2 — confirm against the file's row ordering.
coeff = np.loadtxt("H2_data/H2_coefficients.txt")[20,:]
true_energy = H2_energy_calculator.energy_from_freq(training_data, coeff)
print("H2 energy for r = 1.2: ",true_energy)
# -
# Define the RBM:
# +
# RBM architecture: 2 visible units (one per qubit measurement) and 10 hidden units.
n_vis = 2
n_hin = 10
rbm = RBM(n_vis, n_hin)
# -
# Train the RBM:
# +
def train_RBM(training_data: torch.Tensor) -> float:
    """
    Train the module-level restricted Boltzmann machine on one dataset.

    Args:
        training_data: Tensor of binary measurement samples for one bond radius.

    Returns:
        The H2 energy estimated from samples drawn from the trained RBM
        (the estimate computed at the final sampling checkpoint).

    Note:
        Relies on the module-level globals ``rbm``, ``coeff`` and ``n_vis``;
        the same RBM instance keeps training across successive calls.
    """
    epochs = 500  # number of training steps
    num_samples = 1000  # samples drawn from the RBM to estimate the H2 energy

    true_energy = H2_energy_calculator.energy_from_freq(training_data, coeff)
    print("True energy: ", true_energy)

    for e in range(1, epochs + 1):
        # do one epoch of training
        rbm.train(training_data)

        # every 100 epochs, sample the RBM and report the current energy estimate
        if e % 100 == 0:
            # Initialize the Gibbs sampling chain from the all-zeros state.
            init_state = torch.zeros(num_samples, n_vis)
            RBM_samples = rbm.draw_samples(15, init_state)
            energies = H2_energy_calculator.energy(RBM_samples, coeff, rbm.wavefunction)
            print("Energy from RBM samples: ", energies.item())

    return energies.item()
# +
#
# load R_<r_value_samples.txt from H2_data folder
#
# evaluate true_energy on each training data, use it to plot E_bond(r)
# Train the RBM on every R_<r>_samples.txt dataset in H2_data, recording the
# estimated bond energy per radius so E_bond(r) can be plotted below.
Ebond = {}
directory = 'H2_data'
for entry in os.scandir(directory):
    if entry.path.endswith("samples.txt"):
        training_data = torch.from_numpy(np.loadtxt(entry))
        # filenames look like R_<radius>_samples.txt — the radius is token 1
        radius = entry.name.split("_")[1]
        Ebond[radius] = train_RBM(training_data)
# print(Ebond)
# +
# Sort the (radius, energy) pairs numerically — the dict keys are strings,
# and a plain lexicographic sort would mis-order values (e.g. "10.0" would
# sort before "2.0").
pairs = sorted(Ebond.items(), key=lambda kv: float(kv[0]))
x_values = [float(r) for r, _ in pairs]  # radii
y_values = [e for _, e in pairs]         # estimated bond energies
# plot Ebond(r)
plt.plot(x_values, y_values)
plt.xlabel('r')
plt.ylabel('Ebond')
plt.title('Ebond(r)')
plt.show()
# -
| Project_1_RBM_and_Tomography/.ipynb_checkpoints/Task1-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Relation extraction using distant supervision: Task definition
__author__ = "<NAME>"
__version__ = "CS224U, Stanford, Spring 2019"
# ## Contents
#
# 1. [Overview](#Overview)
# 1. [The task of relation extraction](#The-task-of-relation-extraction)
# 1. [Hand-built patterns](#Hand-built-patterns)
# 1. [Supervised learning](#Supervised-learning)
# 1. [Distant supervision](#Distant-supervision)
# 1. [Set-up](#Set-up)
# 1. [The corpus](#The-corpus)
# 1. [The knowledge base](#The-knowledge-base)
# 1. [Problem formulation](#Problem-formulation)
# 1. [Joining the corpus and the KB](#Joining-the-corpus-and-the-KB)
# 1. [Negative instances](#Negative-instances)
# 1. [Multi-label classification](#Multi-label-classification)
# 1. [Building datasets](#Building-datasets)
# 1. [Evaluation](#Evaluation)
# 1. [Splitting the data](#Splitting-the-data)
# 1. [Choosing evaluation metrics](#Choosing-evaluation-metrics)
# 1. [Running evaluations](#Running-evaluations)
# 1. [Evaluating a random-guessing strategy](#Evaluating-a-random-guessing-strategy)
# 1. [A simple baseline model](#A-simple-baseline-model)
# ## Overview
#
# This notebook illustrates an approach to [relation extraction](http://deepdive.stanford.edu/relation_extraction) using [distant supervision](http://deepdive.stanford.edu/distant_supervision). It uses a simplified version of the approach taken by Mintz et al. in their 2009 paper, [Distant supervision for relation extraction without labeled data](https://www.aclweb.org/anthology/P09-1113). If you haven't yet read that paper, read it now! The rest of the notebook will make a lot more sense after you're familiar with it.
#
# ### The task of relation extraction
#
# Relation extraction is the task of extracting from natural language text relational triples such as:
#
# ```
# (founders, SpaceX, Elon_Musk)
# (has_spouse, Elon_Musk, Talulah_Riley)
# (worked_at, Elon_Musk, Tesla_Motors)
# ```
#
# If we can accumulate a large knowledge base (KB) of relational triples, we can use it to power question answering and other applications. Building a KB manually is slow and expensive, but much of the knowledge we'd like to capture is already expressed in abundant text on the web. The aim of relation extraction, therefore, is to accelerate the construction of new KBs — and facilitate the ongoing curation of existing KBs — by extracting relational triples from natural language text.
#
# ### Hand-built patterns
#
# An obvious way to start is to write down a few patterns which express each relation. For example, we can use the pattern "X is the founder of Y" to find new instances of the `founders` relation. If we search a large corpus, we may find the phrase "<NAME> is the founder of SpaceX", which we can use as evidence for the relational triple `(founders, SpaceX, Elon_Musk)`.
#
# Unfortunately, this approach doesn't get us very far. The central challenge of relation extraction is the fantastic diversity of language, the multitude of possible ways to express a given relation. For example, each of the following sentences expressed the relational triple `(founders, SpaceX, Elon_Musk)`:
#
# - "You may also be thinking of _<NAME>_ (founder of _SpaceX_), who started PayPal."
# - "Interesting Fact: _<NAME>_, co-founder of PayPal, went on to establish _SpaceX_, one of the most promising space travel startups in the world."
# - "If Space Exploration (_SpaceX_), founded by Paypal pioneer _<NAME>_ succeeds, commercial advocates will gain credibility and more support in Congress."
#
# The patterns which connect "<NAME>" with "SpaceX" in these examples are not ones we could have easily anticipated. To do relation extraction effectively, we need to go beyond hand-built patterns.
#
# ### Supervised learning
#
# Effective relation extraction will require applying machine learning methods. The natural place to start is with supervised learning. This means training an extraction model from a dataset of examples which have been labeled with the target output. Sentences like the three examples above would be annotated with the `founders` relation, but we'd also have sentences which include "<NAME>" and "SpaceX" but do not express the `founders` relation, such as:
#
# - "Billionaire entrepreneur _<NAME>_ announced the latest addition to the _SpaceX_ arsenal: the 'Big F---ing Rocket' (BFR)".
#
# Such "negative examples" would be labeled as such, and the fully-supervised model would then be able to learn from both positive and negative examples the linguistic patterns that indicate each relation.
#
# The difficulty with the fully-supervised approach is the cost of generating training data. Because of the great diversity of linguistic expression, our model will need lots and lots of training data: at least tens of thousands of examples, although hundreds of thousands or millions would be much better. But labeling the examples is just as slow and expensive as building the KB by hand would be.
#
# ### Distant supervision
#
# The goal of distant supervision is to capture the benefits of supervised learning without paying the cost of labeling training data. Instead of labeling extraction examples by hand, we use existing relational triples to automatically identify extraction examples in a large corpus. For example, if we already have in our KB the relational triple `(founders, SpaceX, Elon_Musk)`, we can search a large corpus for sentences in which "SpaceX" and "<NAME>" co-occur, make the (unreliable!) assumption that all the sentences express the `founder` relation, and then use them as training data for a learned model to identify new instances of the `founder` relation — all without doing any manual labeling.
#
# This is a powerful idea, but it has two limitations. The first is that, inevitably, some of the sentences in which "SpaceX" and "<NAME>" co-occur will not express the `founder` relation — like the BFR example above. By making the blind assumption that all such sentences do express the `founder` relation, we are essentially injecting noise into our training data, and making it harder for our learning algorithms to learn good models. Distant supervision is effective in spite of this problem because it makes it possible to leverage vastly greater quantities of training data, and the benefit of more data outweighs the harm of noisier data.
#
# The second limitation is that we need an existing KB to start from. We can only train a model to extract new instances of the `founders` relation if we already have many instances of the `founders` relation. Thus, while distant supervision is a great way to extend an existing KB, it's not useful for creating a KB containing new relations from scratch.
#
# \[ [top](#Relation-extraction-using-distant-supervision) \]
# ## Set-up
#
# * Make sure your environment includes all the requirements for [the cs224u repository](https://github.com/cgpotts/cs224u).
#
# * If you haven't already, download [the course data](http://web.stanford.edu/class/cs224u/data/data.zip), unpack it, and place it in the directory containing the course repository – the same directory as this notebook. (If you want to put it somewhere else, change `rel_ext_data_home` below.)
# +
import gzip
import numpy as np
import random
import os
from collections import Counter, defaultdict, namedtuple
from sklearn.feature_extraction import DictVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import precision_recall_fscore_support
from sklearn.model_selection import train_test_split
import rel_ext
# -
rel_ext_data_home = os.path.join('data', 'rel_ext_data')
# \[ [top](#Relation-extraction-using-distant-supervision) \]
# ## The corpus
# As usual when we're doing NLP, we need to start with a _corpus_ — a large sample of natural language text. And because our goal is to do relation extraction with distant supervision, we need to be able to identify entities in the text and connect them to a knowledge base of relations between entities. So, we need a corpus in which entity mentions are annotated with _entity resolutions_ which map them to unique, unambiguous identifiers. Entity resolution serves two purposes:
#
# 1. It ensures that if an entity mention could refer to two different entities, it is properly disambiguated. For example, "New York" could refer to the city or the state.
# 1. It ensures that if two different entity mentions refer to the same entity, they are properly identified. For example, both "New York City" and "The Big Apple" refer to New York City.
#
# The corpus we'll use for this project is derived from the [Wikilinks dataset](https://code.google.com/archive/p/wiki-links/) [announced by Google in 2013](https://research.googleblog.com/2013/03/learning-from-big-data-40-million.html). This dataset contains over 40M mentions of 3M distinct entities spanning 10M webpages. It provides entity resolutions by mapping each entity mention to a Wikipedia URL.
#
# Now, in order to do relation extraction, we actually need _pairs_ of entity mentions, and it's important to have the context around and between the two mentions. Fortunately, UMass has provided an [expanded version of Wikilinks](http://www.iesl.cs.umass.edu/data/data-wiki-links) which includes the context around each entity mention. We've written code to stitch together pairs of entity mentions along with their contexts, and we've filtered the examples extensively. The result is a compact corpus suitable for our purposes.
#
# Because we're frequently going to want to retrieve corpus examples containing specific entities, it will be convenient to create a `Corpus` class which holds not only the examples themselves, but also a precomputed index. Let's take a closer look.
# +
corpus = rel_ext.Corpus(os.path.join(rel_ext_data_home, 'corpus.tsv.gz'))
print('Read {0:,} examples'.format(len(corpus)))
# -
# Great, that's a lot of examples! Let's take a closer look at one.
print(corpus.examples[1])
# Every example represents a fragment of webpage text containing two entity mentions. The first two fields, `entity_1` and `entity_2`, contain unique identifiers for the two entities mentioned. We name entities using Wiki IDs, which you can think of as the last portion of a Wikipedia URL. Thus the Wiki ID `Barack_Obama` designates the entity described by [https://en.wikipedia.org/wiki/Barack_Obama](https://en.wikipedia.org/wiki/Barack_Obama).
#
# The next five fields represent the text surrounding the two mentions, divided into five chunks: `left` contains the text before the first mention, `mention_1` is the first mention itself, `middle` contains the text between the two mentions, `mention_2` is the second mention, and the field `right` contains the text after the second mention. Thus, we can reconstruct the context as a single string like this:
# +
ex = corpus.examples[1]
' '.join((ex.left, ex.mention_1, ex.middle, ex.mention_2, ex.right))
# -
type(ex)
# The last five fields contain the same five chunks of text, but this time annotated with part-of-speech (POS) tags, which may turn out to be useful when we start building models for relation extraction.
#
# Let's look at the distribution of entities over the corpus. How many entities are there, and what are the most common ones?
# Tally how many corpus examples mention each entity (in either position).
counter = Counter()
for example in corpus.examples:
    counter.update([example.entity_1, example.entity_2])
print('The corpus contains {} entities'.format(len(counter)))
counts = sorted(((count, key) for key, count in counter.items()), reverse=True)
print('The most common entities are:')
for count, key in counts[:20]:
    print('{:10d} {}'.format(count, key))
# The main benefit we gain from the `Corpus` class is the ability to retrieve examples containing specific entities. Let's find examples containing `Elon_Musk` and `Tesla_Motors`.
corpus.show_examples_for_pair('Elon_Musk', 'Tesla_Motors')
# Actually, this might not be all of the examples containing `Elon_Musk` and `Tesla_Motors`. It's only the examples where `Elon_Musk` was mentioned first and `Tesla_Motors` second. There may be additional examples that have them in the reverse order. Let's check.
corpus.show_examples_for_pair('Tesla_Motors', 'Elon_Musk')
# Sure enough. Going forward, we'll have to **remember to check both "directions"** when we're looking for examples containing a specific pair of entities.
#
# This corpus is not without flaws. As you get more familiar with it, you will likely discover that it contains many examples that are nearly — but not exactly — duplicates. This seems to be a consequence of the web document sampling methodology that was used in the construction of the Wikilinks dataset. However, despite a few warts, it will serve our purposes.
#
# One thing this corpus does _not_ include is any annotation about relations. Thus, it could not be used for the fully-supervised approach to relation extraction, because the fully-supervised approach requires that each pair of entity mentions be annotated with the relation (if any) that holds between the two entities. In order to make any headway, we'll need to connect the corpus with an external source of knowledge about relations. We need a knowledge base.
#
# \[ [top](#Relation-extraction-using-distant-supervision) \]
# ## The knowledge base
# The data distribution for this unit includes a _knowledge base_ (KB) ultimately derived from [Freebase](https://en.wikipedia.org/wiki/Freebase). Unfortunately, Freebase was shut down in 2016, but the Freebase data is still available from various sources and in various forms. The KB included here was extracted from the [Freebase Easy data dump](http://freebase-easy.cs.uni-freiburg.de/dump/).
#
# The KB is a collection of _relational triples_, each consisting of a _relation_, a _subject_, and an _object_. For example, here are three triples from the KB:
#
# ```
# (place_of_birth, Barack_Obama, Honolulu)
# (has_spouse, Barack_Obama, Michelle_Obama)
# (author, The_Audacity_of_Hope, Barack_Obama)
# ```
#
# As you might guess:
#
# - The relation is one of a handful of predefined constants, such as `place_of_birth` or `has_spouse`.
# - The subject and object are entities represented by Wiki IDs (that is, suffixes of Wikipedia URLs).
#
# Let's write some code to read the KB so that we can take a closer look.
#
# Now, just as we did for the corpus, we'll create a `KB` class to store the KB triples and some associated indexes. We'll want to be able to look up KB triples both by relation and by entities, so we'll create indexes for both of those access patterns.
# +
kb = rel_ext.KB(os.path.join(rel_ext_data_home, 'kb.tsv.gz'))
print('Read {0:,} KB triples'.format(len(kb)))
# -
# Let's get a sense of the high-level characteristics of this KB. Some questions we'd like to answer:
#
# - How many relations are there?
# - How big is each relation?
# - Examples of each relation.
# - How many unique entities does the KB include?
len(kb.all_relations)
# How big is each relation? That is, how many triples does each relation contain?
for rel in kb.all_relations:
print('{:12d} {}'.format(len(kb.get_triples_for_relation(rel)), rel))
# Let's look at one example from each relation, so that we can get a sense of what they mean.
for rel in kb.all_relations:
print(tuple(kb.get_triples_for_relation(rel)[0]))
# The `kb.get_triples_for_entities()` method allows us to look up triples by the entities they contain. Let's use it to see what relation(s) hold between `France` and `Germany`.
kb.get_triples_for_entities('France', 'Germany')
# Relations like `adjoins` and `has_sibling` are intuitively symmetric — if the relation holds between _X_ and _Y_, then we expect it to hold between _Y_ and _X_ as well.
kb.get_triples_for_entities('Germany', 'France')
# However, there's no guarantee that all such inverse triples actually appear in the KB. (You could write some code to check.)
#
# Most relations, however, are intuitively asymmetric. Let's see what relation holds between `Tesla_Motors` and `Elon_Musk`.
kb.get_triples_for_entities('Tesla_Motors', 'Elon_Musk')
# It's a bit arbitrary that the KB includes a given asymmetric relation rather than its inverse. For example, instead of the `founders` relation with triple `(founders, Tesla_Motors, Elon_Musk)`, we might have had a `founder_of` relation with triple `(founder_of, Elon_Musk, Tesla_Motors)`. It doesn't really matter.
#
# Although we don't have a `founder_of` relation, there might still be a relation between `Elon_Musk` and `Tesla_Motors`. Let's check.
kb.get_triples_for_entities('Elon_Musk', 'Tesla_Motors')
# Aha, yes, that makes sense. So it can be the case that one relation holds between _X_ and _Y_, and a different relation holds between _Y_ and _X_.
#
# One more observation: there may be more than one relation that holds between a given pair of entities, even in one direction.
kb.get_triples_for_entities('Cleopatra', 'Ptolemy_XIII_Theos_Philopator')
# No! What? Yup, it's true — [Cleopatra](https://en.wikipedia.org/wiki/Cleopatra) married her younger brother, [Ptolemy XIII](https://en.wikipedia.org/wiki/Ptolemy_XIII_Theos_Philopator). Wait, it gets worse — she also married her _even younger_ brother, [Ptolemy XIV](https://en.wikipedia.org/wiki/Ptolemy_XIV_of_Egypt). Apparently this was normal behavior in ancient Egypt.
#
# Moving on ...
#
# Let's look at the distribution of entities in the KB. How many entities are there, and what are the most common ones?
# Tally how many KB triples mention each entity, whether as subject or object.
counter = Counter()
for kbt in kb.kb_triples:
    counter.update([kbt.sbj, kbt.obj])
print('The KB contains {} entities'.format(len(counter)))
counts = sorted(((count, key) for key, count in counter.items()), reverse=True)
print('The most common entities are:')
for count, key in counts[:20]:
    print('{:10d} {}'.format(count, key))
# The number of entities in the KB is less than half the number of entities in the corpus! Evidently the corpus has much broader coverage than the KB.
#
# Note that there is no promise or expectation that this KB is _complete_. Not only does the KB contain no mention of many entities from the corpus — even for the entities it does include, there may be possible triples which are true in the world but are missing from the KB. As an example, these triples are in the KB:
#
# ```
# (founders, SpaceX, Elon_Musk)
# (founders, Tesla_Motors, Elon_Musk)
# (worked_at, Elon_Musk, Tesla_Motors)
# ```
#
# but this one is not:
#
# ```
# (worked_at, Elon_Musk, SpaceX)
# ```
#
# In fact, the whole point of developing methods for automatic relation extraction is to extend existing KBs (and build new ones) by identifying new relational triples from natural language text. If our KBs were complete, we wouldn't have anything to do.
#
# \[ [top](#Relation-extraction-using-distant-supervision) \]
# ## Problem formulation
#
# With our data assets in hand, it's time to provide a precise formulation of the prediction problem we aim to solve. We need to specify:
#
# - What is the input to the prediction?
# - Is it a specific pair of entity _mentions_ in a specific context?
# - Or is it a pair of _entities_, apart from any specific mentions?
# - What is the output of the prediction?
# - Do we need to predict at most one relation label? (This is [multi-class classification](https://en.wikipedia.org/wiki/Multiclass_classification).)
# - Or can we predict multiple relation labels? (This is [multi-label classification](https://en.wikipedia.org/wiki/Multi-label_classification).)
#
# ### Joining the corpus and the KB
#
# In order to leverage the distant supervision paradigm, we'll need to connect information in the corpus with information in the KB. There are two possibilities, depending on how we formulate our prediction problem:
#
# - __Use the KB to generate labels for the corpus.__ If our problem is to classify a pair of entity _mentions_ in a specific example in the corpus, then we can use the KB to provide labels for training examples. Labeling specific examples is how the fully supervised paradigm works, so it's the obvious way to think about leveraging distant supervision as well. Although it can be made to work, it's not actually the preferred approach.
# - __Use the corpus to generate features for entity pairs.__ If instead our problem is to classify a pair of _entities_, then we can use all the examples from the corpus where those two entities co-occur to generate a feature representation describing the entity pair. This is the approach taken by [Mintz et al. 2009](https://www.aclweb.org/anthology/P09-1113), and it's the approach we'll pursue here.
#
# So we'll formulate our prediction problem such that the input is a pair of entities, and the goal is to predict what relation(s) the pair belongs to. The KB will provide the labels, and the corpus will provide the features.
dataset = rel_ext.Dataset(corpus, kb)
# Let's determine how many examples we have for each triple in the KB. We'll compute averages per relation.
dataset.count_examples()
# For most relations, the total number of examples is fairly large, so we can be optimistic about learning what linguistic patterns express a given relation. However, for individual entity pairs, the number of examples is often quite low. Of course, more data would be better — much better! But more data could quickly become unwieldy to work with in a notebook like this.
# ### Negative instances
#
# By joining the corpus to the KB, we can obtain abundant positive instances for each relation. But a classifier cannot be trained on positive instances alone. In order to apply the distant supervision paradigm, we will also need some negative instances — that is, entity pairs which do not belong to any known relation. If you like, you can think of these entity pairs as being assigned to a special relation called `NO_RELATION`. We can find plenty of such pairs by searching for examples in the corpus which contain two entities which do not belong to any relation in the KB.
unrelated_pairs = dataset.find_unrelated_pairs()
print('Found {0:,} unrelated pairs, including:'.format(len(unrelated_pairs)))
for pair in list(unrelated_pairs)[:10]:
print(' ', pair)
# That's a lot of negative instances! In fact, because these negative instances far outnumber our positive instances (that is, the triples in our KB), when we train models we'll wind up downsampling the negative instances substantially.
#
# Remember, though, that some of these supposedly negative instances may be false negatives. Our KB is not complete. A pair of entities might be related in real life even if they don't appear together in the KB.
# ### Multi-label classification
#
# A given pair of entities can belong to more than one relation. In fact, this is quite common in our KB.
dataset.count_relation_combinations()
# While a few of those combinations look like data errors, most look natural and intuitive. Multiple relations per entity pair is a commonplace phenomenon.
#
# This observation strongly suggests formulating our prediction problem as [multi-label classification](https://en.wikipedia.org/wiki/Multi-label_classification). We could instead treat it as [multi-class classification](https://en.wikipedia.org/wiki/Multiclass_classification) — and indeed, [Mintz et al. 2009](https://www.aclweb.org/anthology/P09-1113) did so — but if we do, we'll be faced with the problem of assigning a single relation label to entity pairs which actually belong to multiple relations. It's not obvious how best to do this (and Mintz et al. 2009 did not make their method clear).
#
# There are a number of ways to approach multi-label classification, but the most obvious is the [binary relevance method](https://en.wikipedia.org/wiki/Multi-label_classification#Problem_transformation_methods), which just factors multi-label classification over _n_ labels into _n_ independent binary classification problems, one for each label. A disadvantage of this approach is that, by treating the binary classification problems as independent, it fails to exploit correlations between labels. But it has the great virtue of simplicity, and it will suffice for our purposes.
#
# So our problem will be to take as input an entity pair and a candidate relation (label), and to return a binary prediction as to whether the entity pair belongs to the relation. Since a KB triple is precisely a relation and a pair of entities, we could say equivalently that our prediction problem amounts to binary classification of KB triples. Given a candidate KB triple, do we predict that it is valid?
# ### Building datasets
#
# We're now in a position to write a function to build datasets suitable for training and evaluating predictive models. These datasets will have the following characteristics:
#
# - Because we've formulated our problem as multi-label classification, and we'll be training separate models for each relation, we won't build a single dataset. Instead, we'll build a dataset for each relation, and our return value will be a map from relation names to datasets.
# - The dataset for each relation will consist of two parallel lists:
# - A list of candidate `KBTriples` which combine the given relation with a pair of entities.
# - A corresponding list of boolean labels indicating whether the given `KBTriple` belongs to the KB.
# - The dataset for each relation will include `KBTriples` derived from two sources:
# - Positive instances will be drawn from the KB.
# - Negative instances will be sampled from unrelated entity pairs, as described above.
kbts_by_rel, labels_by_rel = dataset.build_dataset(
include_positive=True, sampling_rate=0.1, seed=1)
print(kbts_by_rel['adjoins'][0], labels_by_rel['adjoins'][0])
print(kbts_by_rel['capital'][637], labels_by_rel['capital'][637])
# \[ [top](#Relation-extraction-using-distant-supervision) \]
# ## Evaluation
#
# Before we start building models, let's set up a test harness that allows us to measure a model's performance. This may seem backwards, but it's analogous to the software engineering paradigm of [test-driven development](https://en.wikipedia.org/wiki/Test-driven_development): first, define success; then, pursue it.
# ### Splitting the data
#
# Whenever building a model from data, it's good practice to partition the data into multiple _splits_ — minimally, a training split on which to train the model, and a test split on which to evaluate it. In fact, we'll go a bit further, and define three splits:
#
# - __The `tiny` split (1%).__ It's often useful to carve out a tiny chunk of data to use in place of training or test data during development. Of course, any quantitative results obtained by evaluating on the `tiny` split are nearly meaningless, but because evaluations run extremely fast, using this split is a good way to flush out bugs during iterative cycles of code development.
# - __The `train` split (74%).__ We'll use the majority of our data for training models, both during development and at final evaluation. Experiments with the `train` split may take longer to run, but they'll have much greater statistical power.
# - __The `dev` split (25%).__ We'll use the `dev` split as test data for intermediate (formative) evaluations during development. During routine experiments, all evaluations should use the `dev` split.
#
# You could also carve out a `test` split for a final (summative) evaluation at the conclusion of your work. The bake-off will have its own test set, so you needn't do this, but this is an important step for projects without pre-defined test splits.
#
# Splitting our data assets is somewhat more complicated than in many other NLP problems, because we have both a corpus and KB. In order to minimize leakage of information from training data into test data, we'd like to split both the corpus and the KB. And in order to maximize the value of a finite quantity of data, we'd like to align the corpus splits and KB splits as closely as possible. In an ideal world, each split would have its own hermetically-sealed universe of entities, the corpus for that split would contain only examples mentioning those entities, and the KB for that split would contain only triples involving those entities. However, that ideal is not quite achievable in practice. In order to get as close as possible, we'll follow this plan:
#
# - First, we'll split the set of entities which appear as the subject in some KB triple.
# - Then, we'll split the set of KB triples based on their subject entity.
# - Finally, we'll split the set of corpus examples.
# - If the first entity in the example has already been assigned to a split, we'll assign the example to the same split.
# - Alternatively, if the second entity has already been assigned to a split, we'll assign the example to the same split.
# - Otherwise, we'll assign the example to a split randomly.
#
# <!-- \[ TODO: figure out whether we actually need to split the _corpus_ -- any lift from testing on train corpus? \] -->
#
# The `Dataset` method `build_splits` handles all of this:
# +
# Build aligned tiny/train/dev splits of both the corpus and the KB,
# following the entity-based plan described above; seed=1 makes the
# random assignment reproducible.
splits = dataset.build_splits(
    split_names=['tiny', 'train', 'dev'],
    split_fracs=[0.01, 0.74, 0.25],
    seed=1)
splits
# -
# So now we can use `splits['train'].corpus` to refer to the training corpus, or `splits['dev'].kb` to refer to the dev KB.
# ### Choosing evaluation metrics
#
# Because we've formulated our prediction problem as a family of binary classification problems, one for each relation (label), choosing evaluation metrics is pretty straightforward. The standard metrics for evaluating binary classification are [precision and recall](https://en.wikipedia.org/wiki/Precision_and_recall), which are more meaningful than simple accuracy, particularly in problems with a highly biased label distribution (like ours). We'll compute and report precision and recall separately for each relation (label). There are only two wrinkles:
#
# 1. __How best to combine precision and recall into a single metric.__ Having two evaluation metrics is often inconvenient. If we're considering a change to our model which improves precision but degrades recall, should we take it? To drive an iterative development process, it's useful to have a single metric on which to hill-climb. For binary classification, the standard answer is the [F<sub>1</sub>-score](https://en.wikipedia.org/wiki/F1_score), which is the harmonic mean of precision and recall. However, the F<sub>1</sub>-score gives equal weight to precision and recall. For our purposes, precision is probably more important than recall. If we're extracting new relation triples from (massively abundant) text on the web in order to augment a knowledge base, it's probably more important that the triples we extract are correct (precision) than that we extract all the triples we could (recall). Accordingly, instead of the F<sub>1</sub>-score, we'll use the F<sub>0.5</sub>-score, which gives precision twice as much weight as recall.
#
# 1. __How to aggregate metrics across relations (labels).__ Reporting metrics separately for each relation is great, but in order to drive iterative development, we'd also like to have summary metrics which aggregate across all relations. There are two possible ways to do it: _micro-averaging_ will give equal weight to all problem instances, and thus give greater weight to relations with more instances, while _macro-averaging_ will give equal weight to all relations, and thus give lesser weight to problem instances in relations with more instances. Because the number of problem instances per relation is, to some degree, an accident of our data collection methodology, we'll choose macro-averaging.
#
# Thus, while every evaluation will report lots of metrics, when we need a single metric on which to hill-climb, it will be the macro-averaged F<sub>0.5</sub>-score.
# ### Running evaluations
#
# It's time to write some code to run evaluations and report results. This is now straightforward. The `rel_ext.evaluate()` function takes as inputs:
#
# - `splits`: a `dict` mapping split names to `Dataset` instances
# - `classifier`, which is just a function that takes a list of `KBTriples` and returns a list of boolean predictions
# - `test_split`, the split on which to evaluate the classifier, `dev` by default
# - `verbose`, a boolean indicating whether to print output
# ### Evaluating a random-guessing strategy
#
# In order to validate our evaluation framework, and to set a floor under expected results for future evaluations, let's implement and evaluate a random-guessing strategy. The random guesser is a classifier which completely ignores its input, and simply flips a coin.
# +
def lift(f):
    """Turn a per-item function into one that maps over a list of items."""
    def mapped(xs):
        return list(map(f, xs))
    return mapped

def make_random_classifier(p=0.50):
    """Return a classifier that ignores its input and guesses True with probability p."""
    return lift(lambda kb_triple: random.random() < p)
# -
rel_ext.evaluate(splits, make_random_classifier())
# The results are not too surprising. Recall is generally around 0.50, which makes sense: on any given example with label `True`, we are 50% likely to guess the right label. But precision is very poor, because most labels are not `True`, and because our classifier is completely ignorant of the features of specific problem instances. Accordingly, the F<sub>0.5</sub>-score is also very poor — first because even the equally-weighted F<sub>1</sub>-score is always closer to the lesser of precision and recall, and second because the F<sub>0.5</sub>-score weights precision twice as much as recall.
#
# Actually, the most remarkable result in this table is the comparatively good performance for the `contains` relation! What does this result tell us about the data?
#
# \[ [top](#Relation-extraction-using-distant-supervision) \]
# ## A simple baseline model
#
# It shouldn't be too hard to do better than random guessing. But for now, let's aim low — let's use the data we have in the easiest and most obvious way, and see how far that gets us.
#
# We start from the intuition that the words between two entity mentions frequently tell us how they're related. For example, in the phrase "SpaceX was founded by Elon Musk", the words "was founded by" indicate that the `founders` relation holds between the first entity mentioned and the second. Likewise, in the phrase "Elon Musk established SpaceX", the word "established" indicates the `founders` relation holds between the second entity mentioned and the first.
#
# So let's write some code to find the most common phrases that appear between the two entity mentions for each relation. As the examples illustrate, we need to make sure to consider both directions: that is, where the subject of the relation appears as the first mention, and where it appears as the second.
# +
def find_common_middles(split, top_k=3, show_output=False):
    """Find the top_k most frequent middle strings per relation and direction.

    For every KB triple in `split.kb`, tallies the `middle` text of corpus
    examples where the subject is mentioned first ('fwd') and where the
    object is mentioned first ('rev'). Returns a dict mapping direction
    ('fwd'/'rev') -> relation -> set of that relation's top_k middles.
    """
    corpus = split.corpus
    kb = split.kb
    # direction -> relation -> Counter of middle strings
    mids_by_rel = {direction: defaultdict(Counter) for direction in ('fwd', 'rev')}
    for rel in kb.all_relations:
        for kbt in kb.get_triples_for_relation(rel):
            fwd_examples = corpus.get_examples_for_entities(kbt.sbj, kbt.obj)
            mids_by_rel['fwd'][rel].update(ex.middle for ex in fwd_examples)
            rev_examples = corpus.get_examples_for_entities(kbt.obj, kbt.sbj)
            mids_by_rel['rev'][rel].update(ex.middle for ex in rev_examples)
    def most_frequent(mid_counter):
        # Rank by (count, middle) descending so ties break deterministically.
        ranked = sorted(((cnt, mid) for mid, cnt in mid_counter.items()), reverse=True)
        return ranked[:top_k]
    for rel in kb.all_relations:
        for direction in ('fwd', 'rev'):
            top = most_frequent(mids_by_rel[direction][rel])
            if show_output:
                for cnt, mid in top:
                    print('{:20s} {:5s} {:10d} {:s}'.format(rel, direction, cnt, mid))
            # Replace the counter with just the set of winning middles.
            mids_by_rel[direction][rel] = set(mid for cnt, mid in top)
    return mids_by_rel
_ = find_common_middles(splits['train'], show_output=True)
# -
# A few observations here:
#
# - Some of the most frequent middles are natural and intuitive. For example, ", son of" indicates a forward `parents` relation, while "and his son" indicates a reverse `parents` relation.
# - Punctuation and stop words such as "and" and "of" are extremely common. Unlike some other NLP applications, it's probably a bad idea to throw these away — they carry lots of useful information.
# - However, punctuation and stop words tend to be highly ambiguous. For example, a bare comma is a likely middle for almost every relation in at least one direction.
# - A few of the results reflect quirks of the dataset. For example, the appearance of the phrase "in 1994 , he became a central figure in the" as a common middle for the `genre` relation reflects both the relative scarcity of examples for that relation, and an unfortunate tendency of the Wikilinks dataset to include duplicate or near-duplicate source documents. (That middle connects the entities [Ready to Die](https://en.wikipedia.org/wiki/Ready_to_Die) — the first studio album by the Notorious B.I.G. — and [East Coast hip hop](https://en.wikipedia.org/wiki/East_Coast_hip_hop).)
def train_top_k_middles_classifier(top_k=3):
    """Train the simple baseline classifier.

    Predicts True for a candidate KB triple iff some corpus example for its
    entity pair — in either mention order — has a middle among the top_k most
    frequent middles for that relation on the 'train' split.
    """
    split = splits['train']
    corpus = split.corpus
    top_k_mids_by_rel = find_common_middles(split=split, top_k=top_k)
    def classify(kb_triple):
        fwd_mids = top_k_mids_by_rel['fwd'][kb_triple.rel]
        rev_mids = top_k_mids_by_rel['rev'][kb_triple.rel]
        # Check subject-first mentions; only if none hit, check object-first.
        if any(ex.middle in fwd_mids
               for ex in corpus.get_examples_for_entities(kb_triple.sbj, kb_triple.obj)):
            return True
        return any(ex.middle in rev_mids
                   for ex in corpus.get_examples_for_entities(kb_triple.obj, kb_triple.sbj))
    return lift(classify)
rel_ext.evaluate(splits, train_top_k_middles_classifier())
# Not surprisingly, the performance of even this extremely simplistic model is noticeably better than random guessing. Of course, recall is much worse across the board, but precision and F<sub>0.5</sub>-score are sometimes much better. We observe big gains especially on `adjoins`, `author`, `has_sibling`, and `has_spouse`. Then again, at least one relation actually got worse. (Can you offer any explanation for that?)
#
# Admittedly, performance is still not great in absolute terms. However, we should have modest expectations for performance on this task — we are unlikely ever to get anywhere near perfect precision with perfect recall. Why?
#
# - High precision will be hard to achieve because the KB is incomplete: some entity pairs that are related in the world — and in the corpus — may simply be missing from the KB.
# - High recall will be hard to achieve because the corpus is finite: some entity pairs that are related in the KB may not have any examples in the corpus.
#
# Because of these unavoidable obstacles, what matters is not so much absolute performance, but relative performance of different approaches.
#
# __Question:__ What's the optimal value for `top_k`, the number of most frequent middles to consider? What choice maximizes our chosen figure of merit, the macro-averaged F<sub>0.5</sub>-score?
#
# \[ [top](#Relation-extraction-using-distant-supervision) \]
| rel_ext_01_task.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.1 64-bit (''pyUdemy'': conda)'
# name: python38164bitpyudemyconda8c705f49a8e643418ce4b1ca64c8ab63
# ---
def my_function(a, b, c=10, d=12, e=13):
    """Demo: positional arguments plus keyword arguments with defaults."""
    values = (a, b, c, d, e)
    print('a: {}, b: {}, c: {}, d: {}, e:{}'.format(*values))

my_function(1, 2)
# +
# *args: variable number of pos. arguments
# +
def my_function2(a, b, *args):
    """Show how surplus positional arguments are packed into the tuple ``args``."""
    print(*args, type(args))
    print(f'a: {a}, b: {b}, args: {args}')

my_function2(1, 2, 3, 4)
# +
def my_function3(*args):
    """Print the unpacked positional arguments, their container type, and the packed tuple.

    Args:
        *args: any number of positional arguments, collected into a tuple.
    """
    print(*args, type(args))
    print('args: {}'.format(args))

# Bug fix: this demo cell previously called my_function2 (copy-paste error),
# so my_function3 was never exercised.
my_function3(1, 2, 3, 4)
# +
# NORMAL ARGS; *ARGS; DEFAULT ARGS
def my_function4(a, *args, b=None):
print(*args, type(args))
print('a: {}, b: {}, args: {}'.format(a, b, args))
my_function4(1, b=2, 3, 4)
| Chapter5_Functions/Functions/args.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from datahandler import DataHandler
from models.unet import *
from models.unet_se import *
from generator import *
from params import *
from callbacks import getCallbacks
from tqdm import tqdm
import os
import skimage.io as io
from keras.models import *
from keras import backend as K
# %matplotlib inline
import matplotlib.pyplot as plt
import argparse
import sys
# +
#set common variables
epochs = 25
batch_size = 32
verbose = 1
resetSeed()
# -
#Get data and generators
dh = DataHandler()
# NOTE(review): assumes getData() returns (train_images, train_masks,
# test_images, test_masks) — confirm against the project's DataHandler.
tr_images, tr_masks, te_images, te_masks = dh.getData()
# +
from models.resnet_fcn import *
# Build the SE-U-Net and restore weights from a previous K-fold training run.
model = getSEUnet()
model.load_weights('logs/unet_se/kfold_unet_se/kfold_unet_se_dice_DA_K2/kfold_unet_se_dice_DA_K2_weights.h5')
# +
# Visual sanity check: overlay sample 104 with its ground-truth mask.
plt.gray()
plt.imshow(np.squeeze(tr_images[104] + tr_masks[104]))
# -
# Predict on the same sample (batch dimension added) and binarize the
# probability map at threshold 0.7 (255 = foreground, 0 = background).
r = model.predict(np.expand_dims(tr_images[104], axis=0))
print(r.shape)
r[r>=0.7] = 255
r[r<0.7] = 0
plt.imshow(np.squeeze(tr_images[104] * 0.5 + r))
| .ipynb_checkpoints/TrainExp-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: aiscope-analytics-py38
# language: python
# name: aiscope-analytics-py38
# ---
# # Tutorial 1: Plot data in Watson studio
#
# In this tutorial you will:
# 1. Upload some external data into IBM Cloud Object Store
# 2. Load this data into the Watson Studio environment
# 3. Create an interactive time series plot of the data using plotly
# # Preparatory steps
# ### Toggle here to run on Watson Studio or locally
running_watson_studio=True
# ### First, upload sample data to IBM Cloud
# 1. Locate the data file "sample_climate_data.csv" in the repository folder: c2ma-tutorials/sample-data/sample_climate_data.csv.
#
# 2. Download this file to your local machine.
#
# 3. Upload to your Watson Studio project assets: From the main project page, navigate to Assets -> Data assets. Click "New Data asset +". Drag and drop the file and wait for the upload to complete.
#
# ### Set up Watson studio project token - replace project ids and tokens for your Watson Studio project as described in workshop setup instructions [here](https://github.com/C2MA-workshop/c2ma-docs)
# @hidden_cell
# The project token is an authorization token that is used to access project resources like data sources, connections, and used by platform APIs.
if running_watson_studio:
from project_lib import Project
project = Project(project_id='XXXX', project_access_token='XXXX')
pc = project.project_context
# # Load and plot the sample data
# ### Load the required libraries
import numpy as np
import pandas as pd
import math
import plotly.graph_objects as go
from plotly.subplots import make_subplots
# ### Load the data from the project assets
if running_watson_studio:
my_file = project.get_file("sample_climate_data.csv")
my_file.seek(0)
df = pd.read_csv(my_file)
# ### (Alternative version to load data from local storage)
if not running_watson_studio:
df = pd.read_csv("./sample-data/sample_climate_data.csv")
df.head()
df['timestamp'] = pd.to_datetime(df['datetime'])
df.head()
# ### There are three locations contained in the file
locations = df[['latitude','longitude']].drop_duplicates()
locations
# ### Select the location and variable to be plotted
# +
lat = locations.loc[0, 'latitude']
lon = locations.loc[0, 'longitude']
print(lat, lon)
# Pick the variable/unit pair to plot.
# NOTE(review): the rainfall pair below is immediately overwritten by the
# temperature pair — comment out whichever pair you do not want plotted.
var = "rainfall"
unit = "mm"
var = "temperature"
unit = "degC"
# +
dfplot = df.loc[(df['latitude']==lat) & (df['longitude']==lon)].sort_values('timestamp')
infostr = ' for location: ' + str(lat) + ' N, ' + str(lon) + ' E'
fig = make_subplots(rows=1, cols=1, shared_xaxes=True, \
subplot_titles = [var + infostr],
vertical_spacing = 0.05)
fig.add_trace(
go.Scatter(x=dfplot['timestamp'], y = dfplot[var], showlegend=False),
row=1, col=1)
fig.update_layout(
autosize=False,
width=800,
height=900)
fig.update_yaxes(title_text= var + " " + unit, row=1, col=1)
fig.for_each_yaxis(lambda axis: axis.title.update(font=dict(size=12)))
fig.show()
# -
# ### Author and license
#
# <NAME> is a Research Staff Member at IBM Research, specialising in AI for Climate Risk and Impacts.
#
# Copyright © 2021 IBM. This notebook and its source code are released under the terms of the MIT License.
| Tutorial_1_Plot_External_Data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import math, random
import gym
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.autograd as autograd
import torch.nn.functional as F
from torch.distributions import Categorical
torch.manual_seed(51)
# %matplotlib inline
import matplotlib.pyplot as plt
# # Retrieve data from Quandl
#
# Don't bother with this step if the data's already saved from the Pickle below.
import quandl
#quandl.ApiConfig.api_key = "xxx"
apl_stock=quandl.get('WIKI/AAPL', start_date="2014-01-01", end_date="2018-08-20", )
msf_stock=quandl.get('WIKI/MSFT', start_date="2014-01-01", end_date="2018-08-20")
# Extract the raw open/close price series as numpy arrays.
apl_open = apl_stock["Open"].values
apl_close = apl_stock["Close"].values
msf_open = msf_stock["Open"].values
msf_close = msf_stock["Close"].values
# Bug fix: the original `msf.head()` raised NameError (`msf` was never
# defined); the intent was to preview the Microsoft dataframe fetched above.
msf_stock.head()
# ## Examine the data
plt.plot(range(0, len(msf_open)), msf_open)
plt.plot(range(0, len(apl_open)), apl_open)
# ## Oof! What's happened?
# Turns out that on 9 June 2014, AAPL stock was split in the ratio of 1:7. So, I'll divide everything before element 108 in the stock array by 7 to correct for this.
apl_open[:108] /= 7
apl_close[:108] /= 7
plt.plot(range(0, len(apl_open)), apl_open)
# ## Looks better, now remove the general upwards trend.
#
# Now I'm going to de-trend the data. Otherwise MSFT and AAPL are on completely different scales and we could hardly expect the AI to learn how to trade apples and oranges together. Additionally, we want the AI to learn the fundamentals of the stock signal - buy if it's going to rise. If we didn't remove the trend, maybe it'd only learn to buy at the start and hold til the end since a general upwards trend happens.
#
# SciPy's signal processing module has a function that fits a linear least-squares model to the data and subtracts it for us.
from scipy import signal
# Remove the linear least-squares trend from each series so AAPL and MSFT
# are stationary and on comparable scales for the agent.
msf_open = signal.detrend(msf_open)
msf_close = signal.detrend(msf_close)
plt.plot(range(0,len(msf_open)), msf_open)
apl_open = signal.detrend(apl_open)
apl_close = signal.detrend(apl_close)
plt.plot(range(0, len(apl_open)), apl_open)
# ## Negative values?
#
# It doesn't make sense to buy a share at a negative value. But since the model will learn to maximize reward, we can just shift it up by a constant number so it's always positive.
print(apl_open.min())
print(apl_close.min())
print(msf_open.min())
print(msf_close.min())
# Choose 35
apl_open += 35.
apl_close += 35.
msf_open += 35.
msf_close += 35.
# ## Save the transformed data
import pickle
with open("aplmsfopenclose.pkl", "wb+") as f:
pickle.dump({"ao":apl_open, "ac": apl_close, "mo": msf_open, "mc": msf_close}, f)
# ## If the data's already saved, skip the above and load it here instead
import pickle
with open("aplmsfopenclose.pkl", "rb") as f:
d = pickle.load(f)
apl_open = d["ao"]
apl_close = d["ac"]
msf_open = d["mo"]
msf_close = d["mc"]
plt.plot(range(0,len(apl_open)), apl_open)
# # Build the environment
#
# * For starting cash, we can't use a dollar value because of the transformed data. After shifting earlier, we know the mean of each opening price should be 35, so I'm starting the agent off with enough cash to buy ~2.5 shares.
# * This starting cash is the mean starting cash; it can be randomized by altering the std parameter
# * action space = 0 buy apple, 1 sell apple, 2 do nothing, 3 buy msft, 4 sell msft, quantity. eg. [0, 100]
# * obs space: apl shares, msft shares, cash in bank [2], today apl open [3], today msf open [4], portfolio value [5], 5 day window [6apl, 7msf] = 8
# * If bot gets to the end with more than one of each share, we give it a bonus for having a diversified portfolio!
# * Buys and sells attract a 10% brokerage fee
class TradingEnvironment():
    """Minimal two-stock (AAPL/MSFT) trading environment with a gym-like API.

    State vector (8 floats, held on a CUDA tensor between episodes):
        [0] AAPL shares held    [1] MSFT shares held    [2] cash in bank
        [3] today's AAPL open   [4] today's MSFT open   [5] portfolio value
        [6] AAPL 5-day mean open                        [7] MSFT 5-day mean open

    Actions: 0 = buy AAPL, 1 = sell AAPL, 2 = do nothing, 3 = buy MSFT,
    4 = sell MSFT; the trade quantity is fixed at 1 share inside step().
    Buys cost 110% of the open price and sells return 90% (10% brokerage).
    Finishing the series holding both stocks earns a diversification bonus.

    Depends on the module-level price arrays apl_open / apl_close /
    msf_open / msf_close defined earlier in the notebook.
    """

    def __init__(self, starting_cash_mean=200., max_stride=5, series_length=208, starting_point=1, randomize_cash_std=0,
                 starting_shares_mean=0., randomize_shares_std=0., inaction_penalty=0.):
        self.starting_shares_mean = starting_shares_mean
        self.randomize_shares_std = randomize_shares_std
        self.starting_cash_mean = starting_cash_mean
        self.randomize_cash_std = randomize_cash_std
        self.state = torch.FloatTensor(torch.zeros(8)).cuda()
        self.starting_cash = max(int(np.random.normal(self.starting_cash_mean, self.randomize_cash_std)), 0.)
        self.series_length = series_length
        self.starting_point = starting_point
        self.cur_timestep = self.starting_point
        self.state[0] = max(int(np.random.normal(self.starting_shares_mean, self.randomize_shares_std)), 0.)
        self.state[1] = max(int(np.random.normal(self.starting_shares_mean, self.randomize_shares_std)), 0.)
        # Bug fix: cash must be placed in state[2] *before* computing the
        # starting portfolio value, otherwise the initial value omits cash.
        # reset() already used this (correct) ordering; __init__ now matches.
        self.state[2] = self.starting_cash
        self.starting_portfolio_value = self.portfolio_value()
        self.state[3] = apl_open[self.cur_timestep]
        self.state[4] = msf_open[self.cur_timestep]
        self.state[5] = self.starting_portfolio_value
        self.state[6] = self.five_day_window()[0]
        self.state[7] = self.five_day_window()[1]
        self.max_stride = max_stride
        self.stride = self.max_stride  # no longer varying it
        self.done = False
        self.diversification_bonus = 1.
        self.inaction_penalty = inaction_penalty

    def portfolio_value(self):
        """Total value: holdings at today's close plus cash on hand."""
        return (self.state[0] * apl_close[self.cur_timestep]) + (self.state[1] * msf_close[self.cur_timestep]) + self.state[2]

    def next_opening_price(self):
        """Opening prices [AAPL, MSFT] one stride ahead of the current step."""
        step = self.cur_timestep + self.stride
        return [apl_open[step], msf_open[step]]

    def five_day_window(self):
        """Trailing five-day mean opening price [AAPL, MSFT]."""
        step = self.cur_timestep
        if step < 5:
            # Not enough history yet: fall back to the first day's opens.
            return [apl_open[0], msf_open[0]]
        apl5 = apl_open[step-5:step].mean()
        msf5 = msf_open[step-5:step].mean()
        return [apl5, msf5]

    def step(self, action):
        """Advance one stride. Returns (new_state, reward, done, info_dict).

        Rewards mix the portfolio gain since the episode start with a
        time-remaining penalty; illegal trades (overspending cash or selling
        shares not held) terminate the episode.
        """
        action = [action, 1.]  # fixed trade size of one share
        cur_timestep = self.cur_timestep
        ts_left = self.series_length - (cur_timestep - self.starting_point)
        retval = None
        cur_value = self.portfolio_value()
        gain = cur_value - self.starting_portfolio_value
        # Episode complete: pay out final value, gain, and any bonus.
        if cur_timestep >= self.starting_point + (self.series_length * self.stride):
            new_state = [self.state[0], self.state[1], self.state[2], *self.next_opening_price(),
                         cur_value, *self.five_day_window()]
            self.state = new_state
            bonus = 0.
            if self.state[0] > 0 and self.state[1] > 0:
                bonus = self.diversification_bonus
            return new_state, cur_value + bonus + gain, True, {"msg": "done"}
        # Action 2: hold — small inaction penalty.
        if action[0] == 2:
            new_state = [self.state[0], self.state[1], self.state[2], *self.next_opening_price(),
                         cur_value, *self.five_day_window()]
            self.state = new_state
            retval = new_state, -self.inaction_penalty - ts_left + gain, False, {"msg": "nothing"}
        # Action 0: buy AAPL (fails the episode if cash is insufficient).
        if action[0] == 0:
            if action[1] * apl_open[cur_timestep] > self.state[2]:
                new_state = [self.state[0], self.state[1], self.state[2], *self.next_opening_price(),
                             cur_value, *self.five_day_window()]
                self.state = new_state
                retval = new_state, -ts_left + gain / 2, True, {"msg": "bankrupted self"}
            else:
                apl_shares = self.state[0] + action[1]
                cash_spent = action[1] * apl_open[cur_timestep] * 1.1  # 10% brokerage fee
                new_state = [apl_shares, self.state[1], self.state[2] - cash_spent, *self.next_opening_price(),
                             cur_value, *self.five_day_window()]
                self.state = new_state
                retval = new_state, self.inaction_penalty - ts_left + gain, False, {"msg": "bought AAPL"}
        # Action 3: buy MSFT.
        if action[0] == 3:
            if action[1] * msf_open[cur_timestep] > self.state[2]:
                new_state = [self.state[0], self.state[1], self.state[2], *self.next_opening_price(),
                             cur_value, *self.five_day_window()]
                self.state = new_state
                retval = new_state, -ts_left + gain / 2, True, {"msg": "bankrupted self"}
            else:
                msf_shares = self.state[1] + action[1]
                cash_spent = action[1] * msf_open[cur_timestep] * 1.1  # 10% brokerage fee
                new_state = [self.state[0], msf_shares, self.state[2] - cash_spent, *self.next_opening_price(),
                             cur_value, *self.five_day_window()]
                self.state = new_state
                retval = new_state, self.inaction_penalty - ts_left + gain, False, {"msg": "bought MSFT"}
        # Action 1: sell AAPL (fails the episode if selling more than held).
        if action[0] == 1:
            if action[1] > self.state[0]:
                new_state = [self.state[0], self.state[1], self.state[2], *self.next_opening_price(),
                             cur_value, *self.five_day_window()]
                self.state = new_state
                retval = new_state, -ts_left + gain / 2, True, {"msg": "sold more than have"}
            else:
                apl_shares = self.state[0] - action[1]
                cash_gained = action[1] * apl_open[cur_timestep] * 0.9  # 10% brokerage fee
                new_state = [apl_shares, self.state[1], self.state[2] + cash_gained, *self.next_opening_price(),
                             cur_value, *self.five_day_window()]
                self.state = new_state
                retval = new_state, self.inaction_penalty - ts_left + gain, False, {"msg": "sold AAPL"}
        # Action 4: sell MSFT.
        if action[0] == 4:
            if action[1] > self.state[1]:
                new_state = [self.state[0], self.state[1], self.state[2], *self.next_opening_price(),
                             cur_value, *self.five_day_window()]
                self.state = new_state
                retval = new_state, -ts_left + gain / 2, True, {"msg": "sold more than have"}
            else:
                msf_shares = self.state[1] - action[1]
                cash_gained = action[1] * msf_open[cur_timestep] * 0.9  # 10% brokerage fee
                new_state = [self.state[0], msf_shares, self.state[2] + cash_gained, *self.next_opening_price(),
                             cur_value, *self.five_day_window()]
                self.state = new_state
                retval = new_state, self.inaction_penalty - ts_left + gain, False, {"msg": "sold MSFT"}
        self.cur_timestep += self.stride
        return retval

    def reset(self):
        """Re-randomize holdings and cash and return the fresh state tensor."""
        self.state = torch.FloatTensor(torch.zeros(8)).cuda()
        self.starting_cash = max(int(np.random.normal(self.starting_cash_mean, self.randomize_cash_std)), 0.)
        self.cur_timestep = self.starting_point
        self.state[0] = max(int(np.random.normal(self.starting_shares_mean, self.randomize_shares_std)), 0.)
        self.state[1] = max(int(np.random.normal(self.starting_shares_mean, self.randomize_shares_std)), 0.)
        self.state[2] = self.starting_cash
        self.state[3] = apl_open[self.cur_timestep]
        self.state[4] = msf_open[self.cur_timestep]
        self.starting_portfolio_value = self.portfolio_value()
        self.state[5] = self.starting_portfolio_value
        self.state[6] = self.five_day_window()[0]
        self.state[7] = self.five_day_window()[1]
        self.done = False
        return self.state
class Policy(nn.Module):
    """Actor-critic policy: shared GRU trunk with an action head and a value head.

    Input is the 8-element environment state; output is a softmax over the
    5 trading actions plus a scalar state-value estimate. The GRU hidden
    state persists across forward() calls, so the policy is stateful
    between timesteps within an episode.
    """
    def __init__(self):
        super(Policy, self).__init__()
        self.input_layer = nn.Linear(8, 128)
        self.hidden_1 = nn.Linear(128, 128)
        self.hidden_2 = nn.Linear(32,31)
        # Persistent GRU hidden state: (num_layers=2, batch=1, hidden=32).
        self.hidden_state = torch.tensor(torch.zeros(2,1,32)).cuda()
        self.rnn = nn.GRU(128, 32, 2)
        self.action_head = nn.Linear(31, 5)
        self.value_head = nn.Linear(31, 1)
        # Per-episode buffers consumed and cleared by finish_episode().
        self.saved_actions = []
        self.rewards = []
    def reset_hidden(self):
        # Zero the recurrent state (deliberately not called during training; see
        # the comment in the training loop).
        self.hidden_state = torch.tensor(torch.zeros(2,1,32)).cuda()
    def forward(self, x):
        """Return (action_probabilities, state_value) for state x."""
        x = torch.tensor(x).cuda()
        x = torch.sigmoid(self.input_layer(x))
        x = torch.tanh(self.hidden_1(x))
        # .data detaches the carried hidden state so gradients do not flow
        # backwards across timesteps.
        x, self.hidden_state = self.rnn(x.view(1,-1,128), self.hidden_state.data)
        x = F.relu(self.hidden_2(x.squeeze()))
        action_scores = self.action_head(x)
        state_values = self.value_head(x)
        return F.softmax(action_scores, dim=-1), state_values
    def act(self, state):
        """Sample an action; invalid sells (no shares held) are remapped to 'do nothing' (2).

        NOTE(review): reads the module-level ``env`` rather than taking the
        environment as a parameter — couples the policy to one global env.
        """
        probs, state_value = self.forward(state)
        m = Categorical(probs)
        action = m.sample()
        if action == 1 and env.state[0] < 1: action = torch.LongTensor([2]).squeeze().cuda()
        if action == 4 and env.state[1] < 1: action = torch.LongTensor([2]).squeeze().cuda()
        self.saved_actions.append((m.log_prob(action), state_value))
        return action.item()
env = TradingEnvironment(max_stride=4, series_length=250, starting_cash_mean=1000, randomize_cash_std=100, starting_shares_mean=100, randomize_shares_std=10)
model = Policy().cuda()
optimizer = optim.Adam(model.parameters(), lr=3e-4)
# +
env.reset()
# In case you're running this a second time with the same model, delete the gradients
del model.rewards[:]
del model.saved_actions[:]
gamma = 0.9
log_interval = 60
def finish_episode():
    """Apply one actor-critic update to the global ``model`` from its episode buffers.

    Computes discounted returns from model.rewards, forms the policy loss
    -log_prob * advantage and a smooth-L1 value loss per step, takes a single
    ``optimizer`` step, and clears the buffers.
    """
    R = 0
    saved_actions = model.saved_actions
    policy_losses = []
    value_losses = []
    rewards = []
    # Discounted returns, accumulated back-to-front.
    for r in model.rewards[::-1]:
        R = r + (gamma * R)
        rewards.insert(0, R)
    rewards = torch.tensor(rewards)
    # Tiny random jitter, used below instead of standardization.
    epsilon = (torch.rand(1) / 1e4) - 5e-5
    # With different architectures, I found the following standardization step sometimes
    # helpful, sometimes unhelpful.
    # rewards = (rewards - rewards.mean()) / (rewards.std(unbiased=False) + epsilon)
    # Alternatively, comment it out and use the following line instead:
    rewards += epsilon
    for (log_prob, value), r in zip(saved_actions, rewards):
        reward = torch.tensor(r - value.item()).cuda()  # advantage estimate
        policy_losses.append(-log_prob * reward)
        value_losses.append(F.smooth_l1_loss(value, torch.tensor([r]).cuda()))
    optimizer.zero_grad()
    loss = torch.stack(policy_losses).sum() + torch.stack(value_losses).sum()
    # NOTE(review): the clamp bounds are asymmetric (-1e-5 vs 1e5); the lower
    # bound looks like a typo for -1e5 — confirm intent before changing.
    loss = torch.clamp(loss, -1e-5, 1e5)
    loss.backward()
    optimizer.step()
    del model.rewards[:]
    del model.saved_actions[:]
running_reward = 0
# Train for up to 4000 episodes. running_reward is an exponential moving
# average (effective window ~log_interval) of each episode's final reward.
for episode in range(0, 4000):
    state = env.reset()
    reward = 0
    done = False
    msg = None
    while not done:
        action = model.act(state)
        state, reward, done, msg = env.step(action)
        model.rewards.append(reward)
        if done:
            break
    running_reward = running_reward * (1 - 1/log_interval) + reward * (1/log_interval)
    finish_episode()
    # Resetting the hidden state seems unnecessary - it's effectively random from the previous
    # episode anyway, more random than a bunch of zeros.
    # model.reset_hidden()
    # Early stop once an episode runs to completion with >10% portfolio
    # growth and the running reward is high enough.
    if msg["msg"] == "done" and env.portfolio_value() > env.starting_portfolio_value * 1.1 and running_reward > 500:
        print("Early Stopping: " + str(int(reward)))
        break
    if episode % log_interval == 0:
        print("""Episode {}: started at {:.1f}, finished at {:.1f} because {} @ t={}, \
last reward {:.1f}, running reward {:.1f}""".format(episode, env.starting_portfolio_value, \
            env.portfolio_value(), msg["msg"], env.cur_timestep, reward, running_reward))
# -
# ## Let's see how it does in practice, on the training data
# +
env = TradingEnvironment(max_stride=4, series_length=250, starting_cash_mean=1000, randomize_cash_std=100, starting_shares_mean=100, randomize_shares_std=10)
total_rewards = 0
total_profits = 0
failed_goes = 0
num_goes = 50
for j in range(num_goes):
env.reset()
reward_this_go = -1e8
for i in range(0,env.series_length + 1):
action = model.act(env.state)
next_state, reward, done, msg = env.step(action)
if msg["msg"] == "done":
reward_this_go = env.portfolio_value()
break
if done:
break
total_profits += (env.portfolio_value() - env.starting_portfolio_value) / env.starting_portfolio_value
if reward_this_go == -1e8:
failed_goes += 1
else:
total_rewards += reward_this_go
if failed_goes == num_goes:
print("Failed all")
else:
print("Failed goes: {} / {}, Avg Rewards per successful game: {}".format(failed_goes, num_goes, total_rewards / (num_goes - failed_goes)))
print("Avg % profit per game: {}".format(total_profits / num_goes))
print("Avg % profit per finished game: {}".format(total_profits / (num_goes - failed_goes)))
# -
# ## And here's how a sample trading run might look
env = TradingEnvironment(max_stride=4, series_length=250, starting_cash_mean=1000, randomize_cash_std=100, starting_shares_mean=100, randomize_shares_std=10)
env.reset()
print("starting portfolio value {}".format(env.portfolio_value()))
for i in range(0,env.series_length + 1):
action = model.act(env.state)
next_state, reward, done, msg = env.step(action)
if msg["msg"] == 'bankrupted self':
print('bankrupted self by 1')
break
if msg["msg"] == 'sold more than have':
print('sold more than have by 1')
break
print("{}, have {} aapl and {} msft and {} cash".format(msg["msg"], next_state[0], next_state[1], next_state[2]))
if msg["msg"] == "done":
print(next_state, reward)
print("total portfolio value {}".format(env.portfolio_value()))
break
apl_open_orig = apl_stock["Open"].values
apl_close_orig = apl_stock["Close"].values
msf_open_orig = msf_stock["Open"].values
msf_close_orig = msf_stock["Close"].values
apl_open_orig[:108] /= 7
apl_close_orig[:108] /= 7
env = TradingEnvironment(max_stride=4, series_length=250, starting_cash_mean=1000, randomize_cash_std=100, starting_shares_mean=100, randomize_shares_std=10)
env.reset()
complete_game = False
while not complete_game:
bought_apl_at = []
bought_msf_at = []
sold_apl_at = []
sold_msf_at = []
bought_apl_at_orig = []
bought_msf_at_orig = []
sold_apl_at_orig = []
sold_msf_at_orig = []
nothing_at = []
ba_action_times = []
bm_action_times = []
sa_action_times = []
sm_action_times = []
n_action_times = []
starting_val = env.starting_portfolio_value
print("Starting portfolio value: {}".format(starting_val))
for i in range(0,env.series_length + 1):
action = model.act(env.state)
if action == 0:
bought_apl_at.append(apl_open[env.cur_timestep])
bought_apl_at_orig.append(apl_open_orig[env.cur_timestep])
ba_action_times.append(env.cur_timestep)
if action == 1:
sold_apl_at.append(apl_close[env.cur_timestep])
sold_apl_at_orig.append(apl_close_orig[env.cur_timestep])
sa_action_times.append(env.cur_timestep)
if action == 2:
nothing_at.append(35)
n_action_times.append(env.cur_timestep)
if action == 3:
bought_msf_at.append(msf_open[env.cur_timestep])
bought_msf_at_orig.append(msf_open_orig[env.cur_timestep])
bm_action_times.append(env.cur_timestep)
if action == 4:
sold_msf_at.append(msf_close[env.cur_timestep])
sold_msf_at_orig.append(msf_close_orig[env.cur_timestep])
sm_action_times.append(env.cur_timestep)
next_state, reward, done, msg = env.step(action)
if msg["msg"] == 'bankrupted self':
env.reset()
break
if msg["msg"] == 'sold more than have':
env.reset()
break
if msg["msg"] == "done":
print("{}, have {} aapl and {} msft and {} cash".format(msg["msg"], next_state[0], next_state[1], next_state[2]))
val = env.portfolio_value()
print("Finished portfolio value {}".format(val))
if val > starting_val * 1.1: complete_game = True
env.reset()
break
plt.figure(1, figsize=(14,5))
apl = plt.subplot(121)
msf = plt.subplot(122)
apl.plot(range(0, len(apl_open)), apl_open)
msf.plot(range(0, len(msf_open)), msf_open)
apl.plot(ba_action_times, bought_apl_at, "ro")
apl.plot(sa_action_times, sold_apl_at, "go")
apl.plot(n_action_times, nothing_at, "yx")
msf.plot(n_action_times, nothing_at, "yx")
msf.plot(bm_action_times, bought_msf_at, "ro")
msf.plot(sm_action_times, sold_msf_at, "go")
plt.figure(1, figsize=(14,5))
apl = plt.subplot(121)
msf = plt.subplot(122)
apl.plot(range(0, len(apl_open_orig)), apl_open_orig)
msf.plot(range(0, len(msf_open_orig)), msf_open_orig)
apl.plot(ba_action_times, bought_apl_at_orig, "ro")
apl.plot(sa_action_times, sold_apl_at_orig, "go")
apl.plot(n_action_times, nothing_at, "yx")
msf.plot(n_action_times, nothing_at, "yx")
msf.plot(bm_action_times, bought_msf_at_orig, "ro")
msf.plot(sm_action_times, sold_msf_at_orig, "go")
| src/reference/Finance final.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Projeto Ciência de Dados - Previsão de Vendas
#
# - Nosso desafio é conseguir prever as vendas que vamos ter em determinado período com base nos gastos em anúncios nas 3 grandes redes que a empresa Hashtag investe: TV, Jornal e Rádio
#
# - Base de Dados: https://drive.google.com/drive/folders/1o2lpxoi9heyQV1hIlsHXWSfDkBPtze-V?usp=sharing
# ### Passo a Passo de um Projeto de Ciência de Dados
#
# - Passo 1: Entendimento do Desafio
# - Passo 2: Entendimento da Área/Empresa
# - Passo 3: Extração/Obtenção de Dados
# - Passo 4: Ajuste de Dados (Tratamento/Limpeza)
# - Passo 5: Análise Exploratória
# - Passo 6: Modelagem + Algoritmos (Aqui que entra a Inteligência Artificial, se necessário)
# - Passo 7: Interpretação de Resultados
# # Projeto Ciência de Dados - Previsão de Vendas
#
# - Nosso desafio é conseguir prever as vendas que vamos ter em determinado período com base nos gastos em anúncios nas 3 grandes redes que a empresa Hashtag investe: TV, Jornal e Rádio
# #### Importar a Base de dados
# !pip install matplotlib
# !pip install seaborn
# !pip install scikit-learn
# +
import pandas as pd
tabela = pd.read_csv("advertising.csv")
display(tabela)
# -
# #### Análise Exploratória
# - Vamos tentar visualizar como as informações de cada item estão distribuídas
# - Vamos ver a correlação entre cada um dos itens
# +
import seaborn as sns
import matplotlib.pyplot as plt
sns.heatmap(tabela.corr(), annot=True, cmap="Wistia")
plt.show()
sns.pairplot(tabela)
plt.show()
# -
# #### Com isso, podemos partir para a preparação dos dados para treinarmos o Modelo de Machine Learning
#
# - Separando em dados de treino e dados de teste
# +
from sklearn.model_selection import train_test_split
y = tabela["Vendas"]
x = tabela.drop("Vendas", axis=1)
x_treino, x_teste, y_treino, y_teste = train_test_split(x, y, test_size=0.3, random_state=1)
# -
# #### Temos um problema de regressão - Vamos escolher os modelos que vamos usar:
#
# - Regressão Linear
# - RandomForest (Árvore de Decisão)
# +
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor
# create the machine learning models (linear regression and random forest)
modelo_regressaolinear = LinearRegression()
modelo_arvoredecisao = RandomForestRegressor()
# train both models on the training split
modelo_regressaolinear.fit(x_treino, y_treino)
modelo_arvoredecisao.fit(x_treino, y_treino)
# -
# #### Teste da AI e Avaliação do Melhor Modelo
#
# - Vamos usar o R² -> diz o % que o nosso modelo consegue explicar o que acontece
# +
from sklearn import metrics
# generate predictions on the held-out test split
previsao_regressaolinear = modelo_regressaolinear.predict(x_teste)
previsao_arvoredecisao = modelo_arvoredecisao.predict(x_teste)
# compare the models by their R² scores
print(metrics.r2_score(y_teste, previsao_regressaolinear))
print(metrics.r2_score(y_teste, previsao_arvoredecisao))
# -
# #### Visualização Gráfica das Previsões
# +
tabela_auxiliar = pd.DataFrame()
tabela_auxiliar["y_teste"] = y_teste
tabela_auxiliar["Previsoes ArvoreDecisao"] = previsao_arvoredecisao
tabela_auxiliar["Previsoes Regressao Linear"] = previsao_regressaolinear
plt.figure(figsize=(15,6))
sns.lineplot(data=tabela_auxiliar)
plt.show()
# -
# #### Qual a importância de cada variável para as vendas?
# +
sns.barplot(x=x_treino.columns, y=modelo_arvoredecisao.feature_importances_)
plt.show()
# Caso queira comparar Radio com Jornal
# print(df[["Radio", "Jornal"]].sum())
# -
# Como fazer uma nova previsao
# importar a nova_tabela com o pandas (a nova tabela tem que ter os dados de TV, Radio e Jornal)
# previsao = modelo_randomforest.predict(nova_tabela)
# print(previsao)
nova_tabela = pd.read_csv("novos.csv")
display(nova_tabela)
previsao = modelo_arvoredecisao.predict(nova_tabela)
print(previsao)
| datas/Aula 4 - Gabarito.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# 
# This notebook was created by [<NAME>](http://www.im.ufrj.br/bernardofpc),
# and is licensed under Creative Commons BY-SA
import numpy as np
import matplotlib.pyplot as plt
# # Como saber se os resultados estão convergindo corretamente?
#
# Vamos começar com um método bastante simples, mas muitas vezes eficaz:
# veremos como o método de Euler se comporta em casos em que conhecemos a resposta,
# e buscaremos aí algumas intuições para o caso geral.
from funcionario import euler_npts
# Vejamos novamente uma EDO: $y' = 2y$.
# +
def F(t, y):
    """Right-hand side of the ODE y' = 2y (t is unused but kept for the solver API)."""
    return 2 * y

def ansF(t, t0, y0):
    """Exact solution y(t) = y0 * exp(2 (t - t0)) of y' = 2y with y(t0) = y0."""
    return y0 * np.exp(2 * (t - t0))
# -
# ## Erros ao mudar o número de passos
# ### Gráficos das soluções
# +
ns = [10,20,30]
for n in ns:
ts, ys = euler_npts(F, [0,1], y0=1.2, npts=n, retpts=True)
plt.plot(ts, ys, 'x:', label=str(n))
# Tentar outros estilos: ., o; --, -., ...
plt.plot(ts, ansF(ts,0,1.2), label='exata')
plt.xlabel('t')
plt.legend(title='nstep')
plt.title('Euler explícito: solução')
plt.show()
# -
# ### Gráficos dos erros
# +
ns = [10,20,30]
for n in ns:
ts, ys = euler_npts(F, [0,1], y0=1.2, npts=n, retpts=True)
plt.plot(ts, ys - ansF(ts,0,1.2), 'x:', label=str(n))
plt.xlabel('t')
plt.legend(title='nstep')
plt.title('Euler explícito: erro')
plt.show()
# -
# ### Gráficos dos erros relativos
#
# O erro relativo é sempre dado por
# $$ \frac{\text{Valor calculado} - \text{Valor exato}}{\text{Valor exato}}. $$
#
# Ou, quando estamos com pressa de escrever:
# $$ \frac{\hat x - x}{x},$$
# onde subentende-se que $x$ é a resposta correta, enquanto $\hat x$, o valor que calculamos no computador.
# +
ns = [10,20,30]
for n in ns:
### Resposta aqui
plt.xlabel('t')
plt.legend(title='nstep')
plt.title('Euler explícito: erro relativo')
plt.show()
# -
# ### Generalizando: aumentando muito mais o número de passos
#
# Agora que temos um gráfico que parece ser bom para ver erros, vamos aumentar $n$ ;-)
ns = np.arange(100,500,step=30)
### Resposta aqui
# ### Exercício
#
# - Quão grande deve ser $n$ para o seu computador começar a demorar para calcular `ys`?
# - Quanto tempo ele leva para fazer o gráfico correspondente?
# - Será que você realmente precisa de todos estes $n \to \infty$ pontos **no gráfico**?
| comp-cientifica-I-2018-2/semana-2/Euler erros.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Accessing ICESat-2 Data
# ### Software Development Notebook
# This notebook outlines and begins development for functionality to ease ICESat-2 data access and download from the NASA NSIDC DAAC (NASA National Snow and Ice Data Center Distributed Active Archive Center). This space is meant to be transient and serve as a space for writing and testing code. Documentation and examples will be developed independently.
#
# #### Credits
# * contributers: <NAME>
# * based initially on and modified from the 'NSIDC DAAC ICESat-2 Customize and Access.ipynb' tutorial by <NAME>
# * some code from the ICESat-2 Hackweek topolib project was also modified and used in the development of is2_data.py
#
import os
import sys
print(sys.path)
sys.path.append(os.path.abspath('../../icepyx/core/'))
print(sys.path)
import os
import sys
print(sys.path)
sys.path.insert(0, os.path.abspath('../..'))
print(sys.path)
# ## Import packages, including icepyx
#
import requests
import getpass
import socket
import json
import zipfile
import io
import math
import os
import shutil
from pprint import pprint
import time
#import geopandas as gpd
#import matplotlib.pyplot as plt
#import fiona
import h5py
import re
# To read KML files with geopandas, we will need to enable KML support in fiona (disabled by default)
#fiona.drvsupport.supported_drivers['LIBKML'] = 'rw'
#from shapely.geometry import Polygon, mapping
#from shapely.geometry.polygon import orient
from statistics import mean
from requests.auth import HTTPBasicAuth
#change working directory
# %cd ../../..
# cd ./Scripts/github/icesat2py/icepyx
# %load_ext autoreload
from icepyx import icesat2data as ipd
# %autoreload 2
#in order to use "as ipd", you have to use autoreload 2, which will automatically reload any module not excluded by being imported with %aimport -[module]
# ## Test the icesat-2 data object class
# Build a query object for ATL06 over a bounding box (lon/lat order: W, S, E, N)
# and a short February 2019 window.
region_a = ipd.Icesat2Data('ATL06',[-55, 68, -48, 71],['2019-02-20','2019-02-28'], \
start_time='00:00:00', end_time='23:59:59', version='2')
# Inspect the derived query parameters (CMR search, request config, subsetter).
region_a.spatial_extent
region_a.CMRparams
region_a.reqparams
region_a.subsetparams
# Query CMR for granules matching the filters above (network call).
region_a.avail_granules()
# + jupyter={"outputs_hidden": true}
region_a.granules.avail
# -
# Echo back the stored query attributes.
print(region_a.dataset)
print(region_a.dates)
print(region_a.start_time)
print(region_a.end_time)
print(region_a.dataset_version)
print(region_a.spatial_extent)
print(region_a.latest_version())
region_a.dataset_summary_info()
# %matplotlib inline
region_a.visualize_spatial_extent()
# ### Test the IS2 Class with polygon inputs
region_ap = ipd.Icesat2Data('ATL06',[(-55, 68), (-55, 71), (-48, 71), (-48, 68), (-55, 68)],\
['2019-02-20','2019-02-28'], \
start_time='00:00:00', end_time='23:59:59', version='3')
region_ap.CMRparams
region_ap = ipd.Icesat2Data('ATL06',[(-55, 68), (-55.2, 70), (-55, 71), (-50, 71.3), (-48, 71), (-47.9, 69), (-48, 68), (-51, 68.5), (-55, 68)],\
['2019-02-20','2019-02-28'], \
start_time='00:00:00', end_time='23:59:59', version='3')
region_ap.spatial_extent
region_ap._spat_extent
region_ap.visualize_spatial_extent()
region_ap2 = ipd.Icesat2Data('ATL06',[-55, 68, -55, 71, -48, 71, -48, 68, -55, 68],\
['2019-02-20','2019-02-28'], \
start_time='00:00:00', end_time='23:59:59', version='3')
region_ap2.spatial_extent
region_ap2._spat_extent
region_ap.subsetparams()
# + jupyter={"outputs_hidden": true}
region_ap.avail_granules()
# -
region_ap.earthdata_login('jessica.scheick', '<EMAIL>')
region_ap.order_granules()
# %matplotlib inline
region_ap2.visualize_spatial_extent()
region_p = ipd.Icesat2Data('ATL06','/home/jovyan/icepyx/doc/examples/supporting_files/data-access_PineIsland/glims_polygons.kml',\
['2019-10-01','2019-10-05'], \
start_time='00:00:00', end_time='23:59:59', version='2')
# + jupyter={"outputs_hidden": true}
region_p.spatial_extent
# -
region_p._spat_extent
# %matplotlib inline
region_p.visualize_spatial_extent()
region_p.avail_granules()
# NOTE(review): the lines below were pasted API output, not Python code; a bare
# `'key': 'value'` dict fragment spanning multiple lines is a SyntaxError at
# module level, so it is kept here only as a comment for reference.
# 'Boundingshape': '{"type": "FeatureCollection", "features": [{"id": "0", "type": "Feature", "properties": {},
# "geometry": {"type": "Polygon", "coordinates": [[[-55.0, 68.0], [-48.0, 68.0], [-48.0, 71.0], [-55.0, 71.0], [-55.0, 68.0]]]},
# "bbox": [-55.0, 68.0, -48.0, 71.0]}], "bbox": [-55.0, 68.0, -48.0, 71.0]}'
# del region_p._subsetparams
region_p.subsetparams()
region_p.CMRparams
region_t = ipd.Icesat2Data('ATL06',[-86.622742,-74.908126,-86.561712,-74.870913,-86.868859,-74.730522,-86.962905,-74.605038,-89.02594,-74.316754,-89.630517,-74.192147,-89.830808,-74.065919,-90.746478,-73.956258,-91.668214,-74.023169,-92.049815,-73.929387,-93.420791,-73.929327,-93.997163,-73.882768,-94.277701,-73.714183,-95.133017,-73.966355,-96.513501,-74.127404,-99.889802,-74.085347,-100.114438,-74.019422,-100.355131,-74.080906,-100.462734,-74.240864,-100.827076,-74.373988,-101.795349,-74.369597,-102.424826,-74.497263,-101.188725,-74.7179,-101.564382,-75.02971,-103.37484,-75.273725,-103.914847,-75.426057,-104.012128,-75.5223,-103.029452,-75.748774,-102.350567,-75.749245,-101.837882,-75.943066,-101.899461,-76.014086,-101.280944,-76.192769,-101.325735,-76.246168,-101.190803,-76.27106,-101.250474,-76.342292,-101.175067,-76.345822,-101.402436,-76.52035,-101.326063,-76.523929,-101.449791,-76.666392,-101.310795,-76.691373,-101.357407,-76.744819,-101.217404,-76.769752,-101.295133,-76.85887,-101.058051,-76.962123,-100.447336,-77.117686,-98.433698,-77.320866,-97.28308,-77.355688,-97.491148,-77.423178,-96.514174,-77.485919,-96.552494,-77.558236,-96.384656,-77.562336,-96.441516,-77.670857,-97.139363,-77.836566,-97.193451,-77.926901,-97.64271,-78.080044,-96.297869,-78.388943,-96.327803,-78.44329,-95.721466,-78.511065,-95.748962,-78.565482,-94.940425,-78.617072,-94.988611,-78.726066,-94.911669,-78.763976,-95.609268,-78.843079,-95.637038,-78.897535,-95.37191,-78.9391,-95.693408,-79.006456,-95.269903,-79.124145,-95.323729,-79.233172,-95.430206,-79.249633,-95.155505,-79.291032,-95.191045,-79.363748,-94.81352,-79.406486,-94.847075,-79.479253,-94.747448,-79.48078,-94.772403,-79.535367,-93.90411,-79.638844,-93.843651,-79.749409,-93.967323,-79.802836,-93.788723,-79.87821,-93.816393,-79.951128,-93.230546,-80.085534,-91.707475,-79.87748,-91.801545,-79.822143,-91.488897,-79.805457,-91.465152,-79.641131,-90.447349,-79.5894,-90.545492,-79.534464,-90.042319,-79.37062,-90.140775,-79.334083,-
90.041814,-79.24285,-88.982186,-79.076903,-90.230262,-78.914333,-90.32191,-78.804808,-90.689626,-78.676516,-91.150024,-78.638589,-92.035347,-78.414844,-92.106013,-78.30491,-91.651645,-78.271472,-91.365784,-78.127206,-91.188783,-78.128018,-91.090167,-78.019109,-90.737076,-77.983849,-90.909191,-77.946905,-90.732603,-77.911009,-90.727088,-77.819973,-91.070502,-77.800626,-91.14118,-77.636469,-91.90279,-77.613923,-91.984627,-77.595116,-91.972963,-77.522365,-92.466819,-77.463587,-92.199521,-77.374914,-92.352136,-77.300761,-92.335283,-77.209895,-91.434206,-77.234653,-91.426015,-77.16193,-91.015545,-77.145686,-91.008355,-77.054784,-91.086397,-77.018096,-91.647835,-76.97871,-91.640906,-76.924199,-91.873848,-76.868024,-91.779021,-76.759619,-90.823937,-76.710073,-90.345113,-76.52953,-86.988029,-75.856983,-86.945563,-75.711143,-86.872234,-75.710165,-87.034102,-75.63967,-86.965004,-75.620616,-87.075115,-75.440545,-87.003154,-75.439609,-87.021872,-75.349129,-86.835058,-75.219586,-86.850654,-75.147247,-86.717729,-75.109052,-86.737771,-75.018662,-86.602149,-74.998483,-86.622742,-74.908126],\
['2019-10-01','2019-10-05'], \
start_time='00:00:00', end_time='23:59:59', version='2')
region_t.visualize_spatial_extent()
region_t.earthdata_login('jessica.scheick','<EMAIL>')
region_t.subsetparams()
region_t.order_granules()
region_p.avail_granules()
path='/home/jovyan/icepyx/dev-notebooks/fakedir'
region_a.earthdata_login('icepyx_devteam','<EMAIL>')
region_a.show_custom_options()
region_a.CMRparams
region_a.subsetparams()
obs_keys = region_a.CMRparams.keys()
region_a.reqparams.keys()
# + jupyter={"outputs_hidden": true}
region_a.avail_granules()
# + jupyter={"outputs_hidden": true}
region_a.granules
# -
region_a.orderIDs
region_a.download_granules('/Users/jessica/Scripts/github/icesat2py/icepyx/download/', verbose=True)
# + [markdown] jupyter={"source_hidden": true}
# ## Steps required by the user
# - create icesat2data object with the minimum inputs (dataset, time period, spatial extent)
# - enter Earthdata login credentials and open an active session
# - download data (querying can be done prior to logging in)
# -
# ## Submitting the request - behind the scenes
# ### Submit the search query
#
# #### We will now populate dictionaries to be applied to our search query below based on spatial and temporal inputs. For additional search parameters, see the [The Common Metadata Repository API documentation](https://cmr.earthdata.nasa.gov/search/site/docs/search/api.html "CMR API documentation").
#
# +
# Build the CMR granule-search parameters. The spatial key depends on whether
# the user supplied a bounding box (aoi == '1') or a polygon (coordinate pairs
# or a shapefile/KML/KMZ), so the shared keys are set once and only the spatial
# key differs between the two cases.
params = {
    'short_name': short_name,
    'version': latest_version,
    'temporal': temporal,
    'page_size': 100,
    'page_num': 1,
}
if aoi == '1':
    # bounding box input:
    params['bounding_box'] = bounding_box
else:
    # polygon input (either via coordinate pairs or shapefile/KML/KMZ):
    params['polygon'] = polygon
print('CMR search parameters: ', params)
# -
# #### Input the parameter dictionary to the CMR granule search to query all granules that meet the criteria based on the granule metadata. Print the number of granules returned.
# +
# Page through CMR granule-search results until an empty page comes back,
# accumulating every matching granule entry along the way.
granule_search_url = 'https://cmr.earthdata.nasa.gov/search/granules'
granules = []
while True:
    resp = requests.get(granule_search_url, params=params, headers=headers)
    page_entries = json.loads(resp.content)['feed']['entry']
    if not page_entries:
        # An empty page means the results are exhausted.
        break
    granules.extend(page_entries)
    params['page_num'] += 1

# Number of granules over my area and time of interest
len(granules)
# -
granules = region_a.granules
len(granules)
# #### Although subsetting, reformatting, or reprojecting can alter the size of the granules, this "native" granule size can still be used to guide us towards the best download method to pursue, which we will come back to later on in this tutorial.
# ## Request data from the NSIDC data access API.
# #### We will now set up our data download request. The data access and service API (labeled EGI below) incorporates the CMR parameters that we explored above, plus customization service parameters as well as a few configuration parameters.
#
# 
#
# #### As described above, the API is structured as a URL with a base plus individual key-value-pairs (KVPs) separated by ‘&’. The base URL of the NSIDC API is: </br>
# `https://n5eil02u.ecs.nsidc.org/egi/request`
#
#Set NSIDC data access base URL
base_url = 'https://n5eil02u.ecs.nsidc.org/egi/request'
# #### Let's go over the configuration parameters:
#
# * `request_mode`
# * `page_size`
# * `page_num`
#
# `request_mode` is "synchronous" by default, meaning that the request relies on a direct, continuous connection between you and the API endpoint. Outputs are directly downloaded, or "streamed" to your working directory. For this tutorial, we will set the request mode to asynchronous, which will allow concurrent requests to be queued and processed without the need for a continuous connection.
#
# **Use the streaming `request_mode` with caution: While it can be beneficial to stream outputs directly to your local directory, note that timeout errors can result depending on the size of the request, and your request will not be queued in the system if NSIDC is experiencing high request volume. For best performance, I recommend setting `page_size=1` to download individual outputs, which will eliminate extra time needed to zip outputs and will ensure faster processing times per request. An example streaming request loop is available at the bottom of the tutorial below. **
#
# Recall that we queried the total number and volume of granules prior to applying customization services. `page_size` and `page_num` can be used to adjust the number of granules per request up to a limit of 2000 granules for asynchronous, and 100 granules for synchronous (streaming). For now, let's select 10 granules to be processed in each zipped request. For ATL06, the granule size can exceed 100 MB so we want to choose a granule count that provides us with a reasonable zipped download size.
# +
# Set number of granules requested per order, which we will initially set to 10.
page_size = 10
#Determine number of pages based on page_size and total granules. Loop requests by this value
page_num = math.ceil(len(granules)/page_size)
#Set request mode.
request_mode = 'async'
# Determine how many individual orders we will request based on the number of granules requested
print(page_num)
# -
# #### After all of these KVP inputs, what does our request look like? Here's a summary of all possible KVPs that we explored, both for CMR searching and for the subsetter:
#
# #### CMR search keys:
# * `short_name=`
# * `version=`
# * `temporal=`
# * `bounding_box=`
# * `polygon=`
#
# #### Customization service keys:
# * `time=`
# * `bbox=`
# * `bounding_shape=`
# * `format=`
# * `projection=`
# * `projection_parameters=`
# * `Coverage=`
#
# #### No customization (access only):
# * `agent=`
# * `include_meta=`
# * `Y` by default. `N` for No metadata requested.
#
# #### Request configuration keys:
# * `request_mode=`
# * `page_size=`
# * `page_num=`
# * `token=`
# * `email=`
# #### If we were to create an API request based on our request parameters and submit into a web browser for example, here's what we end up with:
#Print API base URL + request parameters --> for polygon
API_request = f'{base_url}?short_name={short_name}&version={latest_version}&temporal={temporal}&time={timevar}&polygon={polygon}&Coverage={coverage}&request_mode={request_mode}&page_size={page_size}&page_num={page_num}&token={token}&email={email}'
print(API_request)
#Print API base URL + request parameters --> for bbox
API_request = f'{base_url}?short_name={short_name}&version={latest_version}&temporal={temporal}&time={timevar}\
&bbox={bbox}&Coverage={coverage}&request_mode={request_mode}&page_size={page_size}&page_num={page_num}&token={token}&email={email}'
print(API_request)
# #### We'll also create a new dictionary of NSIDC API KVPs to be used in our subset request. Because we are looping through each page of requests, we'll add the `page_num` KVP to our dictionary within the loop below.
subset_params = {
'short_name': short_name,
'version': latest_version,
'temporal': temporal,
'time': timevar,
'polygon': polygon,
'Coverage': coverage,
'request_mode': request_mode,
'page_size': page_size,
'token': token,
'email': email,
}
print(subset_params)
subset_params = {
'short_name': short_name,
'version': latest_version,
'temporal': temporal,
'time': timevar,
'bbox': bbox,
'Coverage': coverage,
'request_mode': request_mode,
'page_size': page_size,
'token': token,
'email': email,
}
print(subset_params)
# #### We'll request the same data but without any subsetting services applied. Let's create another request parameter dictionary with the `time` and `coverage` service keys removed, and we'll add `agent=NO` instead.
# +
request_params = {
'short_name': short_name,
'version': latest_version,
'temporal': temporal,
'bbox': bbox, #'polygon': polygon,
'agent' : 'NO',
'include_meta' : 'Y',
'request_mode': request_mode,
'page_size': page_size,
'token': token,
'email': email,
}
print(request_params)
# -
# ## Request Data
#
# #### Finally, we'll download the data directly to this notebook directory in a new Outputs folder. The progress of each order will be reported.
#
# We'll start by creating an output folder if the folder does not already exist.
# Create an 'Outputs' folder under the current working directory to hold the
# downloaded (and unzipped) order contents.
path = os.path.join(os.getcwd(), 'Outputs')
# exist_ok avoids the check-then-create race of the original exists()/mkdir() pair.
os.makedirs(path, exist_ok=True)
# First we'll submit our request without subsetting services:
# +
# Request data service for each page number, poll each order until it finishes,
# then download and unzip the result into `path`.
for i in range(page_num):
    page_val = i + 1
    print('Order: ', page_val)
    request_params.update({'page_num': page_val})

    # For all requests other than spatial file upload, use get function
    request = session.get(base_url, params=request_params)
    print('Request HTTP response: ', request.status_code)
    # Raise bad request: loop will stop for bad response code.
    request.raise_for_status()
    print('Order request URL: ', request.url)
    esir_root = ET.fromstring(request.content)
    print('Order request response XML content: ', request.content)

    # Look up order ID
    orderlist = []
    for order in esir_root.findall("./order/"):
        orderlist.append(order.text)
    orderID = orderlist[0]
    print('order ID: ', orderID)

    # Create status URL
    statusURL = base_url + '/' + orderID
    print('status URL: ', statusURL)

    # Find order status
    request_response = session.get(statusURL)
    print('HTTP response from order response URL: ', request_response.status_code)
    # Raise bad request: loop will stop for bad response code.
    request_response.raise_for_status()
    request_root = ET.fromstring(request_response.content)
    statuslist = []
    for status in request_root.findall("./requestStatus/"):
        statuslist.append(status.text)
    status = statuslist[0]
    print('Data request ', page_val, ' is submitting...')
    print('Initial request status is ', status)

    # Poll while the request is still pending/processing.
    while status == 'pending' or status == 'processing':
        print('Status is not complete. Trying again.')
        time.sleep(10)
        loop_response = session.get(statusURL)
        # Raise bad request: loop will stop for bad response code.
        loop_response.raise_for_status()
        loop_root = ET.fromstring(loop_response.content)
        # find status
        statuslist = []
        for status in loop_root.findall("./requestStatus/"):
            statuslist.append(status.text)
        status = statuslist[0]
        print('Retry request status is: ', status)
        if status == 'pending' or status == 'processing':
            continue

    # Order can either complete, complete_with_errors, or fail:
    # Provide complete_with_errors error message:
    if status == 'complete_with_errors' or status == 'failed':
        messagelist = []
        # NOTE(review): if the order fails on the *initial* status check, the
        # poll loop never runs and loop_root is undefined here — TODO confirm
        # against the API's possible initial states.
        for message in loop_root.findall("./processInfo/"):
            messagelist.append(message.text)
        print('error messages:')
        # BUG FIX: `pprint` is imported as `from pprint import pprint`, so the
        # original `pprint.pprint(messagelist)` raised AttributeError.
        pprint(messagelist)

    # Download zipped order if status is complete or complete_with_errors
    if status == 'complete' or status == 'complete_with_errors':
        downloadURL = 'https://n5eil02u.ecs.nsidc.org/esir/' + orderID + '.zip'
        print('Zip download URL: ', downloadURL)
        print('Beginning download of zipped output...')
        zip_response = session.get(downloadURL)
        # Raise bad request: loop will stop for bad response code.
        zip_response.raise_for_status()
        with zipfile.ZipFile(io.BytesIO(zip_response.content)) as z:
            z.extractall(path)
        print('Data request', page_val, 'is complete.')
    else:
        print('Request failed.')
# -
# Let's run our request loop again, this time with subsetting services applied. We will post the KML file directly to the API:
# +
# Same request loop, but with the subsetting-service parameters applied.
for i in range(page_num):
    page_val = i + 1
    print('Order: ', page_val)
    subset_params.update({'page_num': page_val})

    # Post polygon to API endpoint for polygon subsetting to subset based on original, non-simplified KML file
    # shape_post = {'shapefile': open(kml_filepath, 'rb')}
    # request = session.post(base_url, params=subset_params, files=shape_post)

    # BUG FIX: this loop updates subset_params above but the original sent
    # request_params here, so the subsetting services were never applied.
    # For requests that do not upload a polygon file, use GET instead of POST:
    request = session.get(base_url, params=subset_params)
    print('Request HTTP response: ', request.status_code)
    # Raise bad request: loop will stop for bad response code.
    request.raise_for_status()
    print('Order request URL: ', request.url)
    esir_root = ET.fromstring(request.content)
    print('Order request response XML content: ', request.content)

    # Look up order ID
    orderlist = []
    for order in esir_root.findall("./order/"):
        orderlist.append(order.text)
    orderID = orderlist[0]
    print('order ID: ', orderID)

    # Create status URL
    statusURL = base_url + '/' + orderID
    print('status URL: ', statusURL)

    # Find order status
    request_response = session.get(statusURL)
    print('HTTP response from order response URL: ', request_response.status_code)
    # Raise bad request: loop will stop for bad response code.
    request_response.raise_for_status()
    request_root = ET.fromstring(request_response.content)
    statuslist = []
    for status in request_root.findall("./requestStatus/"):
        statuslist.append(status.text)
    status = statuslist[0]
    print('Data request ', page_val, ' is submitting...')
    print('Initial request status is ', status)

    # Continue to loop while request is still processing
    while status == 'pending' or status == 'processing':
        print('Status is not complete. Trying again.')
        time.sleep(10)
        loop_response = session.get(statusURL)
        # Raise bad request: loop will stop for bad response code.
        loop_response.raise_for_status()
        loop_root = ET.fromstring(loop_response.content)
        # Find status
        statuslist = []
        for status in loop_root.findall("./requestStatus/"):
            statuslist.append(status.text)
        status = statuslist[0]
        print('Retry request status is: ', status)
        if status == 'pending' or status == 'processing':
            continue

    # Order can either complete, complete_with_errors, or fail:
    # Provide complete_with_errors error message:
    if status == 'complete_with_errors' or status == 'failed':
        messagelist = []
        for message in loop_root.findall("./processInfo/"):
            messagelist.append(message.text)
        print('error messages:')
        # BUG FIX: pprint is the function itself (from pprint import pprint);
        # the original pprint.pprint(...) raised AttributeError.
        pprint(messagelist)

    # Download zipped order if status is complete or complete_with_errors
    if status == 'complete' or status == 'complete_with_errors':
        downloadURL = 'https://n5eil02u.ecs.nsidc.org/esir/' + orderID + '.zip'
        print('Zip download URL: ', downloadURL)
        print('Beginning download of zipped output...')
        zip_response = session.get(downloadURL)
        # Raise bad request: loop will stop for bad response code.
        zip_response.raise_for_status()
        with zipfile.ZipFile(io.BytesIO(zip_response.content)) as z:
            z.extractall(path)
        print('Data request', page_val, 'is complete.')
    else:
        print('Request failed.')
# -
# #### Why did we get an error?
#
# Errors can occur when our search filter overestimates the extent of the data contained within the granule. CMR uses orbit metadata to determine the extent of the file, including the following parameters:
#
# Collection-level:
# * `SwathWidth`
# * `Period`
# * `InclinationAngle`
# * `NumberOfOrbits`
# * `StartCircularLatitude`
#
# Granule level:
# * `AscendingCrossing`
# * `StartLatitude`
# * `StartDirection`
# * `EndLatitude`
# * `EndDirection`
#
# However, the values themselves are not inspected during our search. This can be a relatively common error for ICESat-2 search and access because of the limitations of the metadata, but it only means that more data were returned in the search results as a "false positive" compared to what the subsetter found when cropping the data values.
# #### Clean up the Output folder by removing individual order folders:
# +
# Flatten the Outputs folder: move every downloaded file up into `path`,
# then delete the now-empty per-order subfolders.
for dirpath, dirnames, filenames in os.walk(path, topdown=False):
    for filename in filenames:
        try:
            shutil.move(os.path.join(dirpath, filename), path)
        except OSError:
            # Same best-effort behavior as before: if a file with this name
            # already exists at the destination, keep the first copy.
            pass
for dirpath, dirnames, filenames in os.walk(path):
    for dirname in dirnames:
        os.rmdir(os.path.join(dirpath, dirname))
# -
# List the flattened files
sorted(os.listdir(path))
# If you're interested in the streaming request method, an example loop is below:
# +
# Streaming-mode request loop: one granule per request, written straight to disk.
# Set page size to 1 to improve performance
page_size = 1
request_params.update({'page_size': page_size})

# No metadata, so each request returns exactly one output file
request_params.update({'include_meta': 'N'})

# Determine number of pages based on page_size and total granules. Loop requests by this value
page_num = math.ceil(len(granules) / page_size)
print(page_num)

# Set request mode to streaming
request_params.update({'request_mode': 'stream'})
print(request_params)

os.chdir(path)

for i in range(page_num):
    page_val = i + 1
    print('Order: ', page_val)
    request_params.update({'page_num': page_val})
    request = session.get(base_url, params=request_params)
    print('HTTP response from order response URL: ', request.status_code)
    request.raise_for_status()
    d = request.headers['content-disposition']
    fname = re.findall('filename=(.+)', d)
    # SECURITY FIX: the original used eval() to strip the quotes around the
    # server-supplied filename; eval on response data can execute arbitrary
    # code. Stripping the surrounding quote characters is sufficient.
    outname = fname[0].strip('"').strip("'")
    # Use a context manager so the file handle is closed (the original leaked it).
    with open(outname, 'wb') as f:
        f.write(request.content)
    print('Data request', page_val, 'is complete.')
# -
# ### Before we request the data and download the outputs, let's explore some simple comparisons of the data from s3 that we've already requested.
# +
# Define paths for output folders
opath = '/home/jovyan/data-access/data-access-outputs'
sopath = '/home/jovyan/data-access/data-access-subsetted-outputs'
# Choose the same granule in native and subsetted form so the sizes are comparable
native_file = opath + '/ATL06_20190222031203_08500210_001_01.h5'
processed_file = sopath + '/processed_ATL06_20190222031203_08500210_001_01.h5'
# -
# Compare file sizes (in bytes; the subsetted file should be smaller):
os.path.getsize(native_file)
os.path.getsize(processed_file)
# Read the files using h5py and compare the HDF5 groups and datasets:
# +
# Read files using h5py package
native = h5py.File(native_file, 'r')
processed = h5py.File(processed_file, 'r')
# -
# Native file groups:
# Walk the top-level HDF5 groups of the native file and print every dataset
# they contain; the printGroups flag toggles the output.
# NOTE(review): source indentation was ambiguous here — the dataset loop is
# assumed to sit under the printGroups guard; confirm against the notebook.
printGroups = True
groups = list(native.keys())
for gname in groups:
    grp = native[gname]
    if printGroups:
        print('---')
        print('Group: {}'.format(gname))
        print('---')
        for dset in grp.keys():
            print(grp[dset])
# Subsetted file groups:
# Same listing for the subsetted file, for side-by-side comparison.
printGroups = True
groups = list(processed.keys())
for gname in groups:
    grp = processed[gname]
    if printGroups:
        print('---')
        print('Group: {}'.format(gname))
        print('---')
        for dset in grp.keys():
            print(grp[dset])
| doc/source/dev-notebooks/ICESat-2_DAAC_DataAccess_working.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ! export CURL_CA_BUNDLE=/etc/ssl/certs/ca-certificates.crt
# %env CURL_CA_BUNDLE=/etc/ssl/certs/ca-certificates.crt
import os
import boto3
from time import time
import xarray as xr
def _mkdir(directory):
try:
os.stat(directory)
except:
os.mkdir(directory)
_mkdir('./junkbox')
# !ls
def return_s3_list(working_bucket, prefix):
    """List the objects of `working_bucket` whose keys start with `prefix`.

    Returns a list of ('<bucket>/<key>', size_in_bytes) tuples, one per
    matching S3 object.
    """
    s3 = boto3.resource('s3')
    bucket = s3.Bucket(working_bucket)
    return [
        (working_bucket + '/' + obj.key, obj.size)
        for obj in bucket.objects.filter(Prefix=prefix)
    ]
# +
working_bucket = 'dev-et-data'
main_prefix = 'enduser/DelawareRiverBasin/Run09_13_2020/ward_sandford_customer/'
year = 2003
target_prefix = main_prefix + str(year)
the_list = return_s3_list(working_bucket, target_prefix)
# -
the_list
def create_s3_list_of_months(main_prefix, year, output_name='etasw_'):
    """Build the twelve monthly GeoTIFF object keys for `year`.

    Keys follow the layout ``<main_prefix><year>/<output_name><year><MM>.tif``
    with MM running from 01 to 12.
    """
    return [
        f'{main_prefix}{year}/{output_name}{year}{month:02d}.tif'
        for month in range(1, 13)
    ]
my_tifs = create_s3_list_of_months(main_prefix, year, output_name='etasw_')
my_tifs
def get_year_month(product, tif):
    """Extract the 'YYYYMM' token from a monthly tif object key.

    Takes the basename of `tif`, strips the `product` prefix and the '.tif'
    suffix, echoes the remaining token (as the original did), and returns it.
    """
    token = tif.split('/')[-1]
    token = token.replace(product, '').replace('.tif', '')
    print(token)
    return token
def xr_build_cube_concat_ds(tif_list, product):
    """Load monthly GeoTIFFs from S3 and stack them into one xarray Dataset.

    Each tif becomes one slice along a 'year_month' coordinate (labelled via
    get_year_month); the stacked DataArray is wrapped in a Dataset under the
    name `product`. The URI and elapsed time are printed per file.

    NOTE(review): the 's3://dev-et-data/' bucket prefix is hard-coded here even
    though the keys come from a `working_bucket` listing — confirm before
    reusing with another bucket. `xr.open_rasterio` is deprecated in recent
    xarray releases; see rioxarray.open_rasterio.
    """
    start = time()
    slices = []
    labels = []
    for tif in tif_list:
        uri = 's3://dev-et-data/' + tif
        print(uri)
        da = xr.open_rasterio(uri)
        #da = da.squeeze().drop(labels='band')
        #da.name=product
        slices.append(da)
        print(tif, time() - start)
        labels.append(get_year_month(product, tif))
    cube = xr.concat(slices, dim='band')
    cube = cube.rename({'band': 'year_month'})
    cube = cube.assign_coords(year_month=labels)
    return cube.to_dataset(name=product)
ds = xr_build_cube_concat_ds(my_tifs, 'etasw_')
ds
help(ds.to_netcdf)
ds.to_netcdf('./junkbox/etasw_2003_ensemble.nc', engine='h5netcdf')
# ! pip list | grep netcdf
# # NOTE
#
# ## Ran into this issue
#
# https://github.com/pydata/xarray/issues/3374
#
#
# - engine='h5netcdf'
#
# - was the fix for me
# ! ls -lh ./junkbox
| wzell-sums/00-build-xarrays-and -netcdfs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Traitement de signal
# ## Atelier \#5 : Transformée de Fourier
# ### Support de cours disponible à l'adresse : [https://www.github.com/a-mhamdi/isetbz](https://www.github.com/a-mhamdi/isetbz)
#
# ---
# Rappelons d'abord la définition de la transformée de Fourier d'un signal $x$, soit encore $\mathfrak{F}\left\{x(t)\right\}$ qu'on dénote par $\mathcal{X}(f)$ :
#
# $$
# \mathcal{X}(f) \;=\; \displaystyle\int_{-\infty}^{+\infty}x(t)\mathrm{e}^{-2\jmath\pi f t}\,dt
# $$
#
# Par examen de cette transformation, nous observons qu'il est impossible d'implémenter cette intégrale en temps continu sur un calculateur. Ce dernier ne travaille que sur des valeurs discrètes, nous ferons recours à la *Transformée de Fourier Discrète*. Elle consiste d'abord à discrétiser et à tronquer $x$ en une série $x_0,\,\cdots,\, x_{n-1}$. Les coefficients discrets de $\mathcal{X}(f)$ sont calculés par la suite conformément à la formule suivante :
#
# $$
# \mathcal{X}_l \;=\; \displaystyle\sum_{p=0}^{n-1} x_p \mathrm{e}^{-\displaystyle\frac{2\jmath\pi pl}{n}}, \quad\text{avec}\quad l\,=\,0,\,\cdots,\, n-1
# $$
#
# Néanmoins, le calcul des coefficients $\mathcal{X}_l$, pour $l\,=\,0,\,\cdots,\, n-1$, à partir de la définition est souvent gourmand en temps. Un autre algorithme très répandu dans les applications d’ingénierie est la *Transformée de Fourier Rapide*, souvent abrégée *FFT*. Cette approche de calcul permet de réduire énormément la complexité du calcul des termes susmentionnés.
# Une explication détaillée avec une implémentation en **Python** est accessible via le lien suivant : [https://towardsdatascience.com/fast-fourier-transform-937926e591cb](https://towardsdatascience.com/fast-fourier-transform-937926e591cb)
# Commençons d'abord par importer les modules requis
# +
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt

plt.style.use("ggplot")
plt.rcParams['figure.figsize'] = [15, 10]
# BUG FIX: plt.rc(group, **kwargs) expects a group *name* as its first
# argument; passing a dict as the group silently applied nothing. Updating
# rcParams directly actually applies the three settings.
plt.rcParams.update({"keymap.grid": "g", "font.serif": "Charter", "font.size": 10})
# -
# Nous générons maintenant un signal $x$ définit par :
#
# $$
# x(t) \;=\; \displaystyle\sum_{k=1}^{6} \mathcal{A}k \cos\left(2 \mathcal{A}k \pi t\right) \quad \text{avec}\quad \mathcal{A} = 10\,\text{Hz}.
# $$
nb_pts = 1000    # number of samples
Delta_t = 0.001  # sampling period (s)
t = np.linspace(0.0, nb_pts * Delta_t, nb_pts)  # time vector
wt = 2.0 * np.pi * t
# Sum of six harmonics: amplitude 10k at frequency 10k Hz, k = 1..6.
x_lst = []
for k in range(1, 7):
    x_lst.append(10 * k * np.cos(10 * k * wt))
xmat_t = np.asarray(x_lst, dtype=np.float32)
x_t = xmat_t.sum(axis=0)
# Plot the synthesized signal in the time domain over the first 0.35 s.
plt.plot(t, x_t)
plt.title(r'$x(t) \;=\; \sum_{k=1}^{6} 10 k \cos\left(20 k \pi t\right)$')
plt.grid()
plt.xlim(0, 0.35)
plt.xlabel("$t$ (sec)")
plt.show()
# Appliquons la transformée Fourier rapide *(en: FFT ou Fast Fourier Transform)* de $x(t)$. La quantité **x_f** dénote $\mathcal{X}(f)$.
# Discrete Fourier transform of the sampled signal; freqs gives the frequency
# bin centres (Hz) for a sample spacing of Delta_t seconds.
x_f = np.fft.fft(x_t)
freqs = np.fft.fftfreq(nb_pts, Delta_t)
# Traçons par la suite les parties réelle et imaginaire de $\mathcal{X}(f)$
# Plot the real part (top) and imaginary part (bottom) of X(f) up to 100 Hz.
plt.subplot(2, 1, 1)
plt.plot(freqs, x_f.real)
plt.xlabel("$f$ (Hz)")
plt.ylabel(r"$\mathcal{R}e(\mathcal{X}(f))$")
plt.xlim(0, 100)
plt.grid()
plt.subplot(2, 1, 2)
plt.plot(freqs, x_f.imag)
plt.xlabel("$f$ (Hz)")
plt.ylabel(r"$\mathcal{I}m(\mathcal{X}(f))$")
plt.xlim(0, 100)
plt.grid()
plt.show()
# Une autre manière de présentation de $\mathcal{X}(f)$ est de tracer les graphes de $\left|\mathcal{X}(f)\right|$ et $\angle{\mathcal{X}(f)}$.
# Alternative view: magnitude (top) and phase in degrees (bottom) of X(f).
plt.subplot(2, 1, 1)
plt.plot(freqs, np.abs(x_f))
plt.xlabel("$f$ (Hz)")
plt.ylabel(r"$\left|\mathcal{X}(f)\right|$")
plt.xlim(0, 100)
plt.grid()
plt.subplot(2, 1, 2)
plt.plot(freqs, np.angle(x_f, deg = True))
plt.xlabel("$f$ (Hz)")
plt.ylabel(r"$\angle{\mathcal{X}(f)}$")
plt.xlim(0, 100)
plt.grid()
plt.show()
# Essayons de reconstruire $x(t)$ par application de la transformée de Fourier inverse **ifft** *(en: Inverse Fast Fourier Transform)*.
# Reconstruct x(t) with the inverse FFT and plot real/imaginary parts; the
# imaginary part should be numerically negligible for a real input signal.
x_t_app = np.fft.ifft(x_f)
plt.plot(t, x_t_app.real, '-r', t, x_t_app.imag, '--b')
plt.title(r'$\hat{x}(t) \;=\; \mathfrak{F}^{-1}\left\{\mathcal{X}(f)\right\}$')
plt.legend(['$\mathcal{R}e$', '$\mathcal{I}m$'], fancybox = True, framealpha = 0.3, loc = 'best')
plt.grid()
plt.xlim(0, 0.35)
plt.xlabel("$t$ (sec)")
plt.show()
| Python/sig-proc/fouries-transform.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import matplotlib.pyplot as plt
# +
# Loading the run data and deleting irrelevant columns
# Change this to your local path
df = pd.read_csv(r"C:\Users\bruno\github\SEN1211_project\output\experiment_1.csv", skiprows = 6)
# Drop columns that are constant across runs or irrelevant to this analysis.
# FIX: pass the axis as a keyword -- the bare positional form `.drop([...], 1)`
# was deprecated in pandas 1.0 and removed in pandas 2.0.
df = df.drop(['percentage-female', 'percentage-stationary-staff',
              'percentage-children', 'alarm?',
              'verbose?', 'debug?', 'average-response-time',
              '[step]'], axis=1)
changes = {'precision ((count visitors with [evacuating? = false] / count visitors) * 100) 2': 'percentage_evacuating',
           '[run number]': 'run_number'}
# Renaming relevant column
df = df.rename(columns = changes)
df.head()
# -
# Extracting the evacuation time of each run (max over the run's rows).
runs = df.groupby(['run_number']).max()
runs.head()
# Getting the average evacuation time per percentage visitors go to main door.
# Note that in the final model this slider was modified to prefered exit door,
# but the actual behaviour of the model was not changed.
avg = runs.groupby(['percentage-visitors-go-to-main-door']).mean()
avg
# Plotting the average evacuation time per percentage visitors go to main door.
fig, ax = plt.subplots(figsize = (9,5))
avg['evacuation-duration'].plot.bar(ax= ax)
ax.set_title('Relation between familiarity and evacuation time')
ax.set_ylabel('Evacuation time')
| python/Experiment_1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Repeatedly locate the word 'программа' in the proverb; on every hit,
# remember the word immediately preceding it and drop everything up to and
# including the match (9 characters = len('программа')).
proverb = 'Хорошо написанная программа - это программа, написанная 2 раза'
pos = proverb.find('программа')
while pos != -1:
    secret = proverb[:pos].split()[-1]
    proverb = proverb[pos + 9:]
    pos = proverb.find('программа')
# -
# Single-step version of the loop above: find the first occurrence of
# 'программа' and extract the word right before it.
proverb = 'Хорошо написанная программа - это программа, написанная 2 раза'
index = proverb.find('программа')
proverb[:index].split()[-1]
# Extract the domain part of an e-mail address (everything after '@').
# NOTE(review): the address literal is redacted to '<EMAIL>' in this copy, so
# find('@') returns -1 and the slice [0:] yields the whole string.
email='<EMAIL>'
email[email.find('@')+1:]
# Sum the decimal digits of the fractional part of `number` (2 + 5 + 7).
number = 56.257
decimals = str(number).split('.')[1]
final = sum(int(digit) for digit in decimals)
print(final)
# Count how many addresses fall under each mail domain (the part after '@').
# NOTE: the literals below are redacted placeholders, so with find('@') == -1
# every entry contributes its full text as the "domain".
emails_list = ['<EMAIL>',
               '<EMAIL>',
               '<EMAIL>',
               '<EMAIL>',
               '<EMAIL>',
               '<EMAIL>',
               '<EMAIL>',
               '<EMAIL>',
               '<EMAIL>',
               '<EMAIL>']
emails_dict = {}
for address in emails_list:
    domain = address[address.find('@') + 1:]
    emails_dict[domain] = emails_dict.get(domain, 0) + 1
emails_dict
# Case-insensitive comparison: normalise with lower() before testing.
food = "Овсянка"
if food.lower() == 'овсянка': print('Да Вы гурман!')
import string
string1 = 'Тяжёлая интернет-зависимость - это когда ты выходишь из интернета, а он из тебя нет.'
new_string = string1
# Replace every punctuation character with ':)'. The loop iterates over the
# ORIGINAL string while replacements accumulate in new_string; string1 is
# copied back only when a NON-punctuation character is seen, so a replacement
# made on the very last character (the trailing '.') never reaches string1 --
# the printed text still ends with '.'.
for i, letter in enumerate(string1):
    if letter in string.punctuation:
        new_string = new_string.replace(letter, ':)')
    else:
        string1 = new_string
print(string1)
print(string.punctuation)
# Same exercise with a hand-picked punctuation set. NOTE(review): this rebinds
# the name `string`, shadowing the `string` module imported above.
string = 'Тяжёлая интернет-зависимость - это когда ты выходишь из интернета, а он из тебя нет.'
punctuation = '.,:-!?'
new_string = string
for i, letter in enumerate(string):
    if letter in punctuation:
        new_string = new_string.replace(letter, ':)')
    else:
        # Copy-back happens only on non-punctuation characters, so the
        # replacement of the trailing '.' is not reflected in `string`.
        string = new_string
print(string)
# Classify each letter of the name as a vowel or a consonant.
name = 'Севастиан'
# Vowels used for the check. NOTE(review): 'ы' is not in this set.
odds = 'аоуиеяюэё'
for i in name:
    if i.lower() in odds:
        print('{} - гласная буква'.format(i))
    else:
        print('{} - согласная буква'.format(i))
# Sample text for the regular-expression exercises below.
text = 'Разработка языка Python была начата в конце 1980-х годов сотрудником голландского института CWI Гвидо ван Россумом. Для распределённой ОС Amoeba требовался расширяемый скриптовый язык, и Гвидо начал писать Python на досуге, позаимствовав некоторые наработки для языка ABC (Гвидо участвовал в разработке этого языка, ориентированного на обучение программированию). В феврале 1991 года Гвидо опубликовал исходный текст в группе новостей alt.sources. Название языка произошло вовсе не от вида пресмыкающихся. Автор назвал язык в честь популярного британского комедийного телешоу 1970-х "Летающий цирк <NAME>".'
import re
# One or more consecutive digits; search() returns only the first match.
pattern = re.compile('\d+')
pattern.search(text)
# Runs of Latin letters.
pattern = re.compile('[A-Za-z]+')
pattern.findall(text)
# Cyrillic words ending in 'ка'.
pattern = re.compile('[А-Яа-я]+ка')
pattern.findall(text)
# Letter 'а', any two word characters, then 'и'.
pattern = re.compile('а\w\wи')
pattern.findall(text)
# A word ending in a vowel, whitespace, then a word whose first character is
# neither a vowel nor a digit.
pattern = re.compile('[А-Яа-я]*[аоуеиыёюэяАОИУЕЁЮЭЯ]\s[^аоуеиыёюэяАОИУЕЁЮЭЯ0-9][А-Яа-я]*')
pattern.findall(text)
# Print the raw CSV line by line. NOTE(review): only this first handle is
# closed explicitly; the handles opened below are left to the garbage
# collector -- a `with open(...)` block would be safer.
f = open('StudentsPerformance.csv')
for line in f:
    print(line)
f.close()
# Count rows by gender. Fields are quoted, so [1:-1] strips the quotes;
# the header row ('"gender"') matches neither branch and is skipped.
f = open('StudentsPerformance.csv')
males = 0
females = 0
for line in f:
    info = line.split(',')
    gender = info[0][1:-1]
    if gender == 'female':
        females += 1
    elif gender == 'male':
        males +=1
print('Мальчиков: {}, девочек: {}'.format(males, females))
# Count rows whose parental-education field mentions 'bachelor'.
f = open('StudentsPerformance.csv')
bachelors = 0
for line in f:
    info = line.split(',')
    if 'bachelor' in info[2]:
        bachelors += 1
print(bachelors)
# Tally every distinct parental-education value (header row skipped).
f = open('StudentsPerformance.csv')
edus = {}
for line in f:
    info = line.split(',')
    if 'gender' in info[0]:
        continue
    if info[2] not in edus:
        edus[info[2]] = 0
    edus[info[2]] += 1
print(edus)
# Percentage of students with a 'standard' lunch (header row skipped via the
# 'lunch' check; `total` counts only data rows).
f = open('StudentsPerformance.csv')
lunch = 0
total = 0
for line in f:
    info = line.split(',')
    if 'lunch' in info[3]:
        continue
    if 'standard' in info[3]:
        lunch += 1
    total += 1
print(lunch/total*100)
# Count students in ethnicity 'group C' (header row skipped).
f = open('StudentsPerformance.csv')
group = 0
for line in f:
    info = line.split(',')
    if 'race/ethnicity' in info[1]:
        continue
    if 'group C' in info[1]:
        group += 1
print(group)
# Tally every ethnicity group.
f = open('StudentsPerformance.csv')
groups = {}
for line in f:
    info = line.split(',')
    if 'race/ethnicity' in info[1]:
        continue
    if info[1] not in groups:
        groups[info[1]] = 0
    groups[info[1]] += 1
print(groups)
# +
import re
# Parse the CSV into `exams`, a list of rows where purely numeric fields are
# converted to int (via the first digit run found) and all other fields have
# their surrounding quotes stripped with [1:-1]. The header row is skipped.
pattern = re.compile('\d+')
exams = []
f = open('StudentsPerformance.csv')
for line in f:
    info = line.split(',')
    if info[0] == '"gender"':
        continue
    else:
        new_line = []
        for item in info:
            if pattern.search(item) != None:
                new_line.append(int(pattern.search(item)[0]))
            else:
                new_line.append(item[1:-1])
        exams.append(new_line)
# -
# Mean of column 6 over all rows. NOTE(review): by the variable names below,
# index 5 appears to be the math score, 6 the reading score and 7 the writing
# score -- confirm against the CSV header.
reading = 0
for i in exams:
    reading += i[6]
print(reading/len(exams))
# Count students below the mean reading score (hard-coded from the previous
# cell's output).
reading = 69.169
under = 0
for i in exams:
    if i[6] < reading:
        under +=1
print(under)
# Mean reading score among female students.
reading = 0
females = 0
for i in exams:
    if i[0] == 'female':
        reading += i[6]
        females += 1
print(reading/females)
# Count writing scores above 90.
writing = 0
for i in exams:
    if i[7] > 90:
        writing += 1
print(writing)
# Share of those top writers who had a standard lunch. NOTE(review): the
# divisor 68 is the hard-coded count from the previous cell.
writing = 0
for i in exams:
    if i[7] > 90 and i[3] == 'standard':
        writing += 1
print(writing/68*100)
# +
# Nested tally: for each ethnicity group, count students per parental
# education level (read again from the raw file; header row skipped).
students = {}
f = open('StudentsPerformance.csv')
for line in f:
    info = line.split(',')
    if info[0] == '"gender"':
        continue
    else:
        ethnicity = info[1][1:-1]
        parents = info[2][1:-1]
        if ethnicity in students:
            if parents in students[ethnicity]:
                students[ethnicity][parents] += 1
            else:
                students[ethnicity][parents] = 1
        else:
            students[ethnicity] = {}
            students[ethnicity][parents] = 1
print(students)
# -
# Male students with a standard lunch.
lunch = 0
for i in exams:
    if i[0] == 'male' and i[3] == 'standard':
        lunch += 1
print(lunch)
# Male students who completed the test-preparation course.
prep = 0
for i in exams:
    if i[0] == 'male' and i[4] == 'completed':
        prep += 1
print(prep)
# Female students whose parents hold a master's degree.
degree = 0
for i in exams:
    if i[0] == 'female' and i[2] == "master's degree":
        degree += 1
print(degree)
# Students in group C who completed the preparation course.
prep = 0
for i in exams:
    if i[1] == 'group C' and i[4] == 'completed':
        prep += 1
print(prep)
# Female students with master's-degree parents and a score above 90.
mark = 0
for i in exams:
    if i[0] == 'female' and i[2] == "master's degree" and i[5] > 90:
        mark += 1
print(mark)
# Reverse a string with an extended slice.
string = 'very long string'
print(string[::-1])
# Mean reading score among male students.
reading = 0
males = 0
for i in exams:
    if i[0] == 'male':
        reading += i[6]
        males += 1
print(reading/males)
# Find the maximum score in column 5 ...
max_mark = 0
for i in exams:
    if i[5] > max_mark:
        max_mark = i[5]
# ... then average the reading score of the students who achieved it.
reading = 0
studs = 0
for i in exams:
    if i[5] == max_mark:
        reading += i[6]
        studs += 1
print(reading/studs)
print(studs)
# Mean writing score among students on a free/reduced lunch.
writing = 0
studs = 0
for i in exams:
    if 'free/reduced' in i[3]:
        writing += i[7]
        studs += 1
print(writing/studs)
| DATA_ANALYST/PYTHON-3/3-Notebook.ipynb |
# # The bike rides dataset
#
# In this notebook, we will present the "Bike Ride" dataset. This dataset is
# located in the directory `datasets` in a comma separated values (CSV) format.
#
# We open this dataset using pandas.
# +
import pandas as pd
# Load the ride recordings; each row is one sensor sample.
cycling = pd.read_csv("../datasets/bike_rides.csv")
cycling.head()
# -
# The first column, `timestamp`, contains information regarding the time and
# date of each record, while the other columns contain the numerical values
# of specific measurements. Let's check the data types of the columns in
# more detail.
cycling.info()
# Indeed, the CSV format stores data as text. Pandas tries to infer numerical
# types by default. That is why all features but `timestamp` are encoded as
# floating point values. However, we see that `timestamp` is stored as an
# `object` column. It means that the data in this column are stored as `str`
# rather than as a specialized `datetime` data type.
#
# In fact, one needs to set an option such that pandas is directed to infer
# such data type when opening the file. In addition, we will want to use
# `timestamp` as an index. Thus, we can reopen the file with some extra
# arguments to help pandas at reading properly our CSV file.
# Re-read the file using the first column as the index, parsed as datetimes
# so the dataframe gets a DatetimeIndex.
cycling = pd.read_csv("../datasets/bike_rides.csv", index_col=0,
                      parse_dates=True)
cycling.index.name = ""
cycling.head()
cycling.info()
# By specifying to pandas to parse the date, we obtain a `DatetimeIndex` that
# is really handy when filtering data based on date.
#
# We can now have a look at the data stored in our dataframe. It will help us
# to frame the data science problem that we try to solve.
#
# The records correspond at information derived from GPS recordings of a
# cyclist (`speed`, `acceleration`, `slope`) and some extra information
# acquired from other sensors: `heart-rate` that corresponds to the number of
# beats per minute of the cyclist heart, `cadence` that is the rate at which a
# cyclist is turning the pedals, and `power` that corresponds to the work
# required by the cyclist to go forward.
#
# The power might be slightly an abstract quantity so let's give a more
# intuitive explanation.
#
# Let's take the example of a soup blender that one uses to blend vegetable.
# The engine of this blender develop an instantaneous power of ~300 Watts to
# blend the vegetable. Here, our cyclist is just the engine of the blender (at
# the difference that an average cyclist will develop an instantaneous power
# around ~150 Watts) and blending the vegetable corresponds to move the
# cyclist's bike forward.
#
# Professional cyclists are using power to calibrate their training and track
# the energy spent during a ride. For instance, riding at a higher power
# requires more energy and thus, you need to provide resources to create this
# energy. With human, this resource is food. For our soup blender, this
# resource can be uranium, petrol, natural gas, coal, etc. Our body serves as a
# power plant to transform the resources into energy.
#
# The issue with measuring power is linked to the cost of the sensor: a cycling
# power meter. The cost of such sensor vary from $400 to $1000. Thus, our
# data science problem is quite easy: can we predict instantaneous cyclist
# power from other (cheaper) sensors.
target_name = "power"
# Split into the feature matrix and the target series.
data, target = cycling.drop(columns=target_name), cycling[target_name]
# We can have a first look at the target distribution.
# +
import matplotlib.pyplot as plt
# Distribution of the instantaneous power output.
target.plot.hist(bins=50, edgecolor="black")
plt.xlabel("Power (W)")
# -
# We see a peak at 0 Watts; it corresponds to whenever our cyclist does not
# pedal (descent, stopped). On average, this cyclist delivers a power of around
# ~200 Watts. We also see a long tail from ~300 Watts to ~400 Watts. You can
# think that this range of data correspond to effort a cyclist will train to
# reproduce to be able to breakout in the final kilometers of a cycling race.
# However, this is costly for the human body and no one can cruise with this
# power output.
#
# Now, let's have a look at the data.
data.head()
# We can first have a closer look to the index of the dataframe.
data.index
# We see that records are acquired every second.
data.index.min(), data.index.max()
# The starting date is August 18, 2020 and the ending date is
# September 13, 2020. However, it is obvious that our cyclist did not ride
# every second between these dates. Indeed, only a few dates should be
# present in the dataframe, corresponding to the number of cycling rides.
data.index.normalize().nunique()
# Indeed, we have only four different dates corresponding to four rides. Let's
# extract only the first ride of August 18, 2020.
date_first_ride = "2020-08-18"
# Select every sample recorded on the first ride's date.
cycling_ride = cycling.loc[date_first_ride]
data_ride, target_ride = data.loc[date_first_ride], target.loc[date_first_ride]
data_ride.plot()
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
_ = plt.title("Sensor values for different cyclist measurements")
# Since the unit and range of each measurement (feature) is different, it is
# rather difficult to interpret the plot. Also, the high temporal resolution
# make it difficult to make any observation. We could resample the data to get
# a smoother visualization.
# Average the samples over 60-second windows for a smoother view.
data_ride.resample("60S").mean().plot()
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
_ = plt.title("Sensor values for different cyclist measurements")
# We can check the range of the different features:
# Per-feature histograms, labelled with each measurement's physical unit.
axs = data_ride.hist(figsize=(10, 12), bins=50, edgecolor="black", grid=False)
# add the units to the plots
units = ["beats per minute", "rotations per minute", "meters per second",
         "meters per second squared", "%"]
for unit, ax in zip(units, axs.ravel()):
    ax.set_xlabel(unit)
plt.subplots_adjust(hspace=0.6)
# From these plots, we can see some interesting information: a cyclist is
# spending some time without pedaling. This samples should be associated with
# a null power. We also see that the slope have large extremum.
#
# Let's make a pair plot on a subset of data samples to see if we can confirm
# some of these intuitions.
# +
import numpy as np
# Draw a reproducible random subset of 500 samples for the pair plot.
rng = np.random.RandomState(0)
indices = rng.choice(np.arange(cycling_ride.shape[0]), size=500, replace=False)
# -
subset = cycling_ride.iloc[indices].copy()
# Quantize the target and keep the midpoint for each interval
subset["power"] = pd.qcut(subset["power"], 6, retbins=False)
subset["power"] = subset["power"].apply(lambda x: x.mid)
# +
import seaborn as sns
_ = sns.pairplot(data=subset, hue="power", palette="viridis")
# -
# Indeed, we see that low cadence is associated with low power. We can also
# see a link between higher slope / high heart-rate and higher power: a cyclist
# needs to develop more energy to go uphill, enforcing a stronger physiological
# stimuli on the body. We can confirm this intuition by looking at the
# interaction between the slope and the speed: a lower speed with a higher
# slope is usually associated with higher power.
| notebooks/datasets_bike_rides.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # PyTorch Basics
# Importing PyTorch and Numpy
import torch
import numpy as np
from torch.autograd import Variable
# +
# Declaring Variable in Pytorch.
# NOTE(review): torch.autograd.Variable has been deprecated since PyTorch
# 0.4; wrapping a tensor this way now simply returns a Tensor.
a = Variable(torch.Tensor([19]))
b = torch.Tensor([97])
# Compare the printed values and the resulting types of both forms.
print(a)
print(b)
print(type(a))
print(type(b))
# -
# Variables and Operations: leaf tensors with gradient tracking enabled.
x = Variable(torch.Tensor([1]), requires_grad=True)
w = Variable(torch.Tensor([2]), requires_grad=True)
b = Variable(torch.Tensor([3]), requires_grad=True)
# Forward pass: y = w*x + b = 2 * 1 + 3
y = w * x + b
y
# Backpropagate to populate .grad on each leaf.
y.backward()
print(x.grad)  # dy/dx = w = 2
print(w.grad)  # dy/dw = x = 1
print(b.grad)  # dy/db = 1
# +
# Tensors of MxN Dimensions.
# Creates a Tensor (A Matrix) of size 5x3.
# NOTE: torch.Tensor(5, 3) allocates *uninitialized* memory, so the printed
# values are arbitrary.
t = torch.Tensor(5, 3)
print(t)
print(t.size())
# +
# Operations on Tensors
# Creating Tensors
p = torch.Tensor(4,4)
q = torch.Tensor(4,4)
ones = torch.ones(4,4)
# -
# BUG FIX: the original printed `y` (the autograd result from a previous
# cell) instead of the freshly created `q`.
print(p, q, ones)
print("Addition:{}".format(p + q))
print("Subtraction:{}".format(p - ones))
print("Multiplication:{}".format(p * ones))
print("Division:{}".format(q / ones))
# ### Basic NeuralNetwork Introduction In PyTorch
# +
# Creating a basic neural-network layer in PyTorch: random input batch x
# (5 samples, 3 features) and target y (5 samples, 2 outputs).
x = Variable(torch.randn(5, 3))
y = Variable(torch.randn(5, 2))
# Importing NN
import torch.nn as nn
# +
# Fully connected (affine) layer mapping 3 inputs to 2 outputs.
linear = nn.Linear(3, 2)
print(linear)
# Type of Linear
print(type(linear))
# -
# Randomly initialised parameters of the layer.
print ('Weights: ', linear.weight)
print ('Bias: ', linear.bias)
# Forward pass of the batch through the layer.
pred = linear(x)
print(pred)
| 03 Diving into PyTorch.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="OQTla_oS3O0E"
# # Step By Step Supervised Learning
#
# In this colaboratory notebook, you learn to work with python and machine learning models. and how to step by step develop machine learning project using Python.
#
#
#
# + [markdown] id="857zUnkM5HON"
# Python Packages
# learn to load and use machine learning packages in Python.
#
#
#
#
# 1. **numpy**
#
# NumPy is a library for the Python programming language, adding support for large, **multi-dimensional arrays and matrices**, along with a large collection of high-level mathematical functions to operate on these arrays.
#
# 2. **matplotlib**
#
# Matplotlib is a plotting library for the Python programming language and its numerical mathematics extension NumPy.
#
# 3. **pandas**
#
# pandas is an open source library providing high-performance, easy-to-use **data structures** and **data analysis tools** for the Python programming language.
#
# 4. **sklearn**
#
# Scikit-learn (formerly scikits.learn) is a free software machine learning library for the Python programming language.
#
# 6. **scipy**
#
# SciPy is a free and open-source Python library used for scientific computing and technical computing.
# + [markdown] id="gImLytR89eol"
# ### Import libraries
# We need to import all the modules, functions before using them
# + id="FbWaB53I5iMP"
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# + id="_caN2j0FoR7y" colab={"base_uri": "https://localhost:8080/"} outputId="2bb09e00-ad34-4887-91d9-50fe6381182a"
# Mount Google Drive so the notebook can read files stored there
# (only works inside Google Colab).
from google.colab import drive
drive.mount('/content/gdrive')
# + [markdown] id="823rW-QYofPU"
# To make sure that every single line will be printed, even if they're in the same cell, we can use the following config:
# + id="H1rVke7zoXxM" colab={"base_uri": "https://localhost:8080/", "height": 402} outputId="91983a3a-8c1c-4fdd-aefd-2ed9d096bdde"
# Make Jupyter display every expression in a cell, not just the last one.
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
# Load the iris data from Google Drive; the first CSV column is the row index.
dataset = pd.read_csv('/content/gdrive/My Drive/Colab Notebooks/iris.csv',index_col=0)
dataset
# + [markdown] id="_-jRmYuU-4ql"
# ### Load Dataset
#
# **Iris Data**
#
# We want to use some data sets.
# We start with the Iris dataset, which is commonly used to teach the fundamentals of machine learning algorithms.
#
# Data set has been used by almost every data science beginner. This data set is the machine learning practitioner’s equivalent of “Hello, World!”.
#
# Our goal is to train a machine learning model to correctly predict the flower species from the measured attributes.
#
#
# **species**:
#
# * Iris Setosa
# * Iris Versicolor
# * Iris Virginica
#
#
# **Variable**
#
# Each species of flower is quantified via four numerical attributes, all measured in centimeters:
#
# * Sepal length
# * Sepal width
# * Petal length
# * Petal width
#
#
# + id="tFBfkEL5-7MJ" colab={"base_uri": "https://localhost:8080/", "height": 402} outputId="0d43afb6-fe57-4f3a-e688-c518411d1ae8"
# Load the same iris data from GitHub (replaces the Drive copy loaded above).
url = "https://raw.githubusercontent.com/FarnooshKh/Machine-Learning/master/Data/iris.csv"
dataset = pd.read_csv(url,index_col=0)
dataset
# + [markdown] id="kl9Vc9qXpg5w"
# ### Summarize the Dataset
#
#
#
# * Dimensions of the dataset.
# * Description of the dataset.
#
# Statistical summary of all attributes.
#
# * Class Distribution
#
#
# + id="y3LetoKjpkTc" colab={"base_uri": "https://localhost:8080/"} outputId="2a276536-adbb-4c97-e077-eb9f48b15ad4"
# shape: (rows, columns)
print(dataset.shape)
# Split into features (all columns but the last two) and the numeric target.
# NOTE(review): assumes the last two columns are 'class' and 'target' --
# confirm against the CSV layout.
iris_data = dataset.iloc[:,:-2]
iris_target = dataset['target']
# + [markdown] id="nzzNwsom7gRk"
# **Statistical Summary**
# + id="3qrwM_QBqd0X" colab={"base_uri": "https://localhost:8080/"} outputId="8af60dd2-8165-4aa7-ff00-ff8c2a5cbc97"
# descriptions: count/mean/std/min/quartiles/max for each feature
print(iris_data.describe())
# + id="6tmegtr62RTW" colab={"base_uri": "https://localhost:8080/", "height": 168} outputId="494d9d2b-673d-4d32-8e81-e55134fbb98f"
# Column dtypes and non-null counts.
iris_data.info()
# + id="1Q0f0Tu4KTwp" colab={"base_uri": "https://localhost:8080/", "height": 314} outputId="19ad90cd-b10d-4669-afa6-54838f6b16a9"
# The Pearson correlation or simply “correlation”.
# Pearson correlations are suitable only for metric variables
# (Because calculations are allowed, we typically analyze them with descriptive statistics)
# Both matrices are displayed thanks to the "all" interactivity setting above.
iris_data.corr(method='pearson')
iris_data.corr(method='spearman')
# + id="6CNJkZ6F7vWq" colab={"base_uri": "https://localhost:8080/", "height": 101} outputId="cc120cfa-ac43-4fcd-c6ad-974cec28b980"
# class distribution: number of samples per iris species
print(dataset.groupby('class').size())
#dataset.groupby('class').describe()
# + id="SdIg4KMkI57Z" colab={"base_uri": "https://localhost:8080/", "height": 134} outputId="1328c041-fd75-4a47-cff4-685a3b8e5724"
# datatype of every column
dataset.dtypes
# + [markdown] id="FB03nh89Dpuk"
# ### Data Visualization
# + [markdown] id="daAqVT55L08U"
#
#
#
#
# * Univariate plots
#
# Better understanding of each attribute.
#
# Plots of each individual variable.
#
# This gives us a much clearer idea of the distribution of the input attributes. For example if variable have Gaussian distribution then we can choose algorithms that use this assumption.
#
# * Multivariate plots
#
# Better understand the relationships between attributes.
#
# Interactions between the variables.
#
# Correlation between variables
#
#
#
#
#
#
#
# + id="2M1SRxU78UzJ" colab={"base_uri": "https://localhost:8080/", "height": 353} outputId="ef47cd48-79b2-41d1-9b66-f5194ecbf04a"
# two different ways of plots
#dataset.iloc[:,:-2].hist(bins=20,figsize=(9,7),grid=False)
# Histogram of each of the four features in a 2x2 grid.
iris_data.plot(kind='hist', subplots=True, layout=(2,2), sharex=False, sharey=False)
plt.show()
# + id="OaGAjb0Z8Xkm" colab={"base_uri": "https://localhost:8080/", "height": 351} outputId="67d4472a-3299-4ed7-81c9-43cd4b2203d0"
# Box plot per feature: median, quartiles and outliers.
iris_data.plot(kind='box', subplots=True, layout=(2,2), sharex=False, sharey=False)
plt.show()
# + id="2rShZy5BLlng" colab={"base_uri": "https://localhost:8080/", "height": 743} outputId="48ecba28-fae0-4867-b856-ac272ad51752"
# NOTE(review): scatter_matrix is imported but not used in this cell.
from pandas.plotting import scatter_matrix
import seaborn as sns
# Pairwise scatter plots of the features, coloured by species.
sns.pairplot(dataset.drop("target", axis=1), kind="scatter", hue="class")
plt.show()
# + [markdown] id="2VUpvRFbvsm2"
# ## Building classification models
# Among the available classification methods in Python, we focus on the following five to build classification models of tissue type of the cancer cell lines in our dataset:
#
# * **K- nearest neighbour**
# * Logistic regression
# * Naive Bayes
# * Random forest
#
#
#
# + [markdown] id="Tcjuf63LLERE"
# ### Splitting data to training and testing sets
#
# If we use one of our labeled iris examples in the data that we use to train the classifier, we can't also use that same iris data as a test sample to also evaluate the classifier.
#
# The machine learning Algorithm needs to work well on any input sample (Training Set), any new pieces of data that we might see in the future(Test Set).
#
#
#
# To investigate performance of our model, we need to split the data to training and testing sets(validation set). This will help us to check potential overfitting in our model training.
#
# **random_state** as the name suggests, is used for initializing the internal random number generator, which will decide the splitting of data into train and test indices in your case.
# + id="fuPez5sILHne"
from sklearn.model_selection import train_test_split
# Hold out 30% of the samples for testing; fixed random_state for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(iris_data, dataset['class'], test_size=0.30, random_state=5)
# + id="Erm_hL66Q5pI" colab={"base_uri": "https://localhost:8080/"} outputId="553cd983-2c98-4f37-c230-7898f68edf4c"
# NOTE(review): DataFrame.size counts elements (rows x columns), not rows.
print(f'train: {X_train.size}')
print(f'test: {X_test.size}')
# + [markdown] id="RXJQ0SgBRlF7"
# ## K nearest neighbour(KNN)
# K nearest neighbour uses a distance metric like Euclidean distance to identify the similarity of a target data point (sample) in the test or validation set to the data points (samples) in the training set. Then based on the user-specified k, it finds the k closest points (samples) to the target data point. Afterward, it chooses the most frequent label among the k closest points (majority voting) as the class label of the target sample. The class labels can also be assigned based on weighted voting of the k closest data points to the data point.
# + id="opESR8PXQ7RW" colab={"base_uri": "https://localhost:8080/"} outputId="df6be790-fdbd-4fc2-b17d-0d6ae92ec68f"
from sklearn.neighbors import KNeighborsClassifier
# Initialize our classifier, weight("uniform")
# weights='distance' makes closer neighbours count more in the vote.
knn = KNeighborsClassifier(n_neighbors=2, weights='distance')
# Fitting the model with the data
knn.fit(X_train, y_train)
# + id="5XWwSRM6R105" colab={"base_uri": "https://localhost:8080/"} outputId="c3283a21-7883-42b1-f58d-8cbca13bd0e3"
# Predict the class of every test sample and compare with the true labels.
y_pred = knn.predict(X_test)
print(y_pred)
print(y_test.values)
# + [markdown] id="s5wtdouySo0B"
# ## Performance measure
#
# To assess performance of the machine learning model, we can use the following measure of the performance of the model:
#
#
#
# * **precision** is also referred to as positive predictive value (PPV)
#
# How many selected item are relevant
#
# $${\displaystyle {\text{Precision}}=\text{True positive rate} = {\frac {tp}{tp+fp}}\,}$$
#
# * **Recall** in this context is also referred to as the true positive rate or sensitivity
#
# How many relevant item are selected
#
#
#
#
# $${\displaystyle {\text{Recall}}={\frac {tp}{tp+fn}}\,} $$
#
#
#
# * **specificity** True negative rate
#
#
#
# $${\displaystyle {\text{True negative rate}}={\frac {tn}{tn+fp}}\,}$$
#
# * **Accuracy**: This measure gives you a sense of performance for all the classes together as follows:
#
# $$ {\displaystyle {\text{Accuracy}}={\frac {tp+tn}{tp+tn+fp+fn}}\,}$$
#
#
# \begin{equation*} Accuracy=\frac{Number\:of\:correct\:predictions}{(Total\:number\:of\:data\:points (samples))} \end{equation*}
#
# $${\displaystyle {\text{Balanced Accuracy}}={\frac {Recall+Specificity
# }{2}}\,}$$
#
# * **Confusion matrix (or error matrix)**: True and false classification of the samples in all the classes can be shown in a matrix which is called confusion (or error) matrix. The columns are usually considered as the predicted classes and rows as actual classes. Hence, the diagonal elements of the matrix will be the total number of true classifcation in each class.
#
# + id="TT10jmXASEW0" colab={"base_uri": "https://localhost:8080/"} outputId="3278b76f-a100-4519-d290-900479f80c5c"
from sklearn import metrics
print("Confusion matrix of the predictions:\n", metrics.confusion_matrix(y_test, y_pred))
print("accuracy of the predictions:", metrics.accuracy_score(y_test, y_pred))
# average=None returns one precision value per class.
print("precision", metrics.precision_score(y_test, y_pred, average=None))
# + id="Ff_8H-hb065r" colab={"base_uri": "https://localhost:8080/"} outputId="7822e737-03c0-4715-8cfd-bc31db8dee28"
# Additional metrics: balanced accuracy averages recall over the classes;
# MCC is the Matthews correlation coefficient.
print("accuracy of the predictions:", metrics.accuracy_score(y_test, y_pred))
print("blanced accuracy of the predictions:", metrics.balanced_accuracy_score(y_test, y_pred))
print("MCC of the predictions:", metrics.matthews_corrcoef(y_test, y_pred))
print("Confusion matrix of the predictions:", metrics.confusion_matrix(y_test, y_pred))
# + id="Zjli08Uk-RhY" colab={"base_uri": "https://localhost:8080/", "height": 314} outputId="d61a197f-6b74-4886-bbde-4c7e3a2c5dd3"
from sklearn.metrics import confusion_matrix
# NOTE(review): unique_labels is imported but never used below.
from sklearn.utils.multiclass import unique_labels
def plot_confusion_matrix(y_true, y_pred, classes,
                          title=None,
                          cmap=plt.cm.Blues):
    """Render the confusion matrix of y_true vs y_pred as a colour-mapped image.

    Parameters
    ----------
    y_true : array-like of true labels.
    y_pred : array-like of predicted labels.
    classes : sequence of class names used to label both axes.
    title : optional plot title.
    cmap : matplotlib colormap for the cells.

    Returns
    -------
    The matplotlib Axes the matrix was drawn on.
    """
    # Compute confusion matrix
    cm = confusion_matrix(y_true, y_pred)
    fig, ax = plt.subplots()
    im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
    ax.figure.colorbar(im, ax=ax)
    # We want to show all ticks...
    ax.set(xticks=np.arange(cm.shape[1]),
           yticks=np.arange(cm.shape[0]),
           # ... and label them with the respective list entries
           xticklabels=classes, yticklabels=classes,
           title=title,
           ylabel='True label',
           xlabel='Predicted label')
    # Rotate the tick labels and set their alignment.
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
             rotation_mode="anchor")
    # Loop over data dimensions and create text annotations.
    fmt = 'd'
    # Threshold deciding white vs black annotation text for contrast.
    thresh = cm.max() / 2.
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            ax.text(j, i, format(cm[i, j], fmt),
                    ha="center", va="center",
                    color="white" if cm[i, j] > thresh else "black")
    fig.tight_layout()
    return ax
# Plot non-normalized confusion matrix
plot_confusion_matrix(y_test, y_pred, classes= dataset['class'].unique(),
                      title='Confusion matrix')
plt.show()
# + id="WOQA6HIJSy2f" colab={"base_uri": "https://localhost:8080/", "height": 987} outputId="161e45c0-958d-463e-82b2-8e38b70522f1"
# Evaluate test accuracy for k = 1..19 to pick a good neighbourhood size.
k_range = list(range(1,20))
scores = []
for k in k_range:
    knn = KNeighborsClassifier(n_neighbors=k)
    knn.fit(X_train, y_train)
    y_pred = knn.predict(X_test)
    scores.append(metrics.accuracy_score(y_test, y_pred))
plt.plot(k_range, scores)
plt.xlabel('Value of k for KNN')
plt.ylabel('Accuracy Score')
plt.title('Accuracy Scores for Values of k of k-Nearest-Neighbors')
plt.show()
# + id="UvLZ4BLOZS1x" colab={"base_uri": "https://localhost:8080/", "height": 733} outputId="e3b54175-1193-46b7-9318-34cf281bf424"
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import neighbors, datasets
n_neighbors = 3
# import some data to play with
iris = datasets.load_iris()
# we only take the first two features. We could avoid this ugly
# slicing by using a two-dim dataset
X = iris.data[:, :2]
y = iris.target
h = .02  # step size in the mesh
# Create color maps (light shades for regions, bold for training points)
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
# Draw the decision regions once with uniform and once with distance weighting.
for weights in ['uniform', 'distance']:
    # we create an instance of Neighbours Classifier and fit the data.
    clf = neighbors.KNeighborsClassifier(n_neighbors, weights=weights)
    clf.fit(X, y)
    # Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    # Put the result into a color plot
    Z = Z.reshape(xx.shape)
    plt.figure()
    plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
    # Plot also the training points
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold,
                edgecolor='k', s=20)
    plt.xlim(xx.min(), xx.max())
    plt.ylim(yy.min(), yy.max())
    plt.title("3-Class classification (k = %i, weights = '%s')"
              % (n_neighbors, weights))
plt.show()
# + [markdown] id="JgQfOTkabaUB"
# ## Load Dataset **diabetes**
#
#
# Ten baseline variables, age, sex, body mass index, average blood pressure, and six blood serum measurements were obtained for each of n = 442 diabetes patients, as well as the response of interest, a quantitative measure of disease progression one year after baseline.
#
# >- | -
# >---|---
# > Samples total |442
# > Dimensionality |10
# > Target Features | real, -.2 < x < .2
#
#
#
#
# **data : Bunch**
#
# Dictionary-like object, the interesting attributes are: ‘data’, the data to learn, ‘target’, the regression target for each sample, ‘data_filename’, the physical location of diabetes data csv dataset, and ‘target_filename’, the physical location of diabetes targets csv datataset (added in version 0.20).
#
# (data, target) : tuple if return_X_y is True
#
#
#
#
# + id="as270CambaUB"
from sklearn import datasets, linear_model
from sklearn.metrics import mean_squared_error, r2_score
from sklearn import metrics
# Load the diabetes dataset
diabetes = datasets.load_diabetes()
# Use only one feature (column index 2 -- 'bmi' per sklearn's feature_names);
# np.newaxis keeps a 2-D column shape for sklearn estimators.
diabetes_X = diabetes.data[:, np.newaxis, 2]
# Assemble features and target into a single labelled dataframe.
diabetes_df = pd.DataFrame(data= np.c_[diabetes['data'], diabetes['target']],
                           columns= diabetes['feature_names'] + ['target'])
# + id="xldCOk_imIv0" colab={"base_uri": "https://localhost:8080/", "height": 402} outputId="03355aa8-7246-4fa1-8525-a343e10ae580"
diabetes_df
# + [markdown] id="lpovnmFgbaUD"
# ### Summarize the Dataset
#
#
#
# * Dimensions of the dataset.
# * Describtion of the dataset.
#
# Statistical summary of all attributes.
#
# * Class Distribution
#
#
# + id="ALc3xboObaUF" colab={"base_uri": "https://localhost:8080/"} outputId="d2a37b3a-a919-42d2-bd6c-2e28c30bd019"
# shape: (rows, columns) of the assembled diabetes DataFrame
print(diabetes_df.shape)
# + [markdown] id="Ltlli8dDbaUJ"
# **Statistical Summary**
# + id="mhQqBGQtbaUJ" colab={"base_uri": "https://localhost:8080/"} outputId="934f717d-9adc-49d9-f4f1-62c89b30a2e5"
# descriptions: count / mean / std / min / quartiles / max for the age column
print(diabetes_df[['age']].describe())
# + id="G0zeGQNpbaUL" colab={"base_uri": "https://localhost:8080/", "height": 218} outputId="63729a97-1bdd-414e-b49d-0cbf1ef189a1"
# datatype of each column
diabetes_df.dtypes
# + id="7nfdD-JsbaUO" colab={"base_uri": "https://localhost:8080/", "height": 373} outputId="cbc19c83-561c-44e0-9bb1-9a4e270514c3"
# Pairwise Pearson correlations between all columns, including the target.
diabetes_df.corr(method='pearson')
# + [markdown] id="rJkoHgLgbaUW"
# ## Splitting data to training and testing sets
#
# To investigate performance of our model, we need to split the data to training and testing sets(validation set). This will help us to check potential overfitting in our model training.
#
# **random_state** as the name suggests, is used for initializing the internal random number generator, which will decide the splitting of data into train and test indices in your case.
# + id="m3Z5icWVbaUX"
from sklearn.model_selection import train_test_split
# Hold out 20% of the samples as a test set; fixed random_state for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(diabetes_X, diabetes.target, test_size=0.20, random_state=5)
# + [markdown] id="ndWY3Nl9baUa"
# # Building regression models
#
#
#
# + [markdown] id="3uqR6dzxbaUb"
# ## Regression Model
# + id="foBrgZHIbaUc" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="c3c64ff7-4ef9-4101-e509-613d0351768f"
from sklearn.linear_model import LinearRegression
# Create linear regression object
regr = LinearRegression()
# Train the model using the training sets (single feature at this point)
regr.fit(X_train, y_train)
# + id="JiyQzD4HbaUf" colab={"base_uri": "https://localhost:8080/", "height": 319} outputId="7bff3ced-8c61-48f4-e821-09c47f8e8297"
# Make predictions using the testing set
y_pred = regr.predict(X_test)
print(y_pred)
# + id="QXoLxddWbaUi" colab={"base_uri": "https://localhost:8080/", "height": 353} outputId="3fc8c976-05b3-470f-f251-1e8d2fbfe5c2"
from sklearn import metrics
# The coefficients (slope of the fitted line, one value per feature)
print('Coefficients: \n', regr.coef_)
# The mean squared error
print("Mean squared error: %.2f"
      % metrics.mean_squared_error(y_test, y_pred))
print("Mean absolute error: %.2f"
      % metrics.mean_absolute_error(y_test, y_pred))
# Plot outputs: test points in black, fitted regression line in blue.
plt.scatter(X_test, y_test, color='black')
plt.plot(X_test, y_pred, color='blue', linewidth=3)
plt.xticks(())
plt.yticks(())
plt.show()
# + id="UgTUNljHbaUk"
# Re-split using all ten features this time, with a 30% test set.
X_train, X_test, y_train, y_test = train_test_split(diabetes_df[diabetes['feature_names']], diabetes.target, test_size=0.30, random_state=5)
# + id="hw1_ePgxbaUm" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="5d55c7db-31be-48b6-d651-3bbd78e68806"
# Train the model using the training sets
regr.fit(X_train, y_train)
# Make predictions using the testing set
y_pred = regr.predict(X_test)
# + [markdown] id="1tUY0rQYbaUi"
# # Performance measure
#
# Mean Absolute Error (MAE) and Mean squared error (MSE) are two of the most common metrics used to measure accuracy for continuous variables.
#
# Mean Absolute Error (MAE)
#
#
# $\text{MAE}(y, \hat{y}) = \frac{1}{n_{\text{samples}}} \sum_{i=0}^{n_{\text{samples}}-1} \left| y_i - \hat{y}_i \right|$
#
#
# Mean squared error (MSE)
#
#
# $\text{MSE}(y, \hat{y}) = \frac{1}{n_\text{samples}} \sum_{i=0}^{n_\text{samples} - 1} (y_i - \hat{y}_i)^2$
#
#
# + id="Wtk5vhmxbaUo" colab={"base_uri": "https://localhost:8080/", "height": 118} outputId="9c3fba7d-a589-4c63-fafd-8fbe205648ae"
# The coefficients (now one per feature)
print('Coefficients: \n', regr.coef_)
# The mean squared error
print("Mean squared error: %.2f"
      % metrics.mean_squared_error(y_test, y_pred))
print("Mean absolute error: %.2f"
      % metrics.mean_absolute_error(y_test, y_pred))
# R^2 coefficient of determination (1.0 would be a perfect fit).
print(metrics.r2_score(y_test, y_pred))
# + [markdown] id="1y2FD1XJ8T3i"
#
# ## breast_cancer DataSet
#
#
# The breast cancer dataset is a classic and easy to use binary classification dataset.
#
# >- | -
# >---|---
# > Classes | 2
# > Samples per class |212(M),357(B)
# > Samples total |569
# > Dimensionality |30
# > Features | real, positive
#
#
#
# **data : Bunch**
#
# Dictionary-like object,
# * the interesting attributes are: ‘data’
# * the data to learn, ‘target’,
# * the classification labels, ‘target_names’
# * the meaning of the labels, ‘feature_names’
# * the meaning of the features, and ‘DESCR’, the full description of the dataset
# * ‘filename’, the physical location of breast cancer csv dataset (added in version 0.20).
#
#
# #### load data from sklearn data sets
#
# scikit-learn - the machine learning algorithms used for data analysis and data mining tasks
#
#
#
# + id="VpnLz5c51Lq2"
from sklearn.datasets import load_breast_cancer
# Load the binary-classification breast-cancer dataset (569 samples, 30 features).
breast_data = load_breast_cancer()
# + [markdown] id="rrD97Gul2fKD"
# Let's check the shapes of the dataframes:
# + id="2-univG61YbU" colab={"base_uri": "https://localhost:8080/"} outputId="70dcffc9-e543-42a1-d630-3690d3535ed7"
# NOTE(review): only the final expression of a notebook cell is displayed,
# so the first two lines below produce no visible output.
breast_data.data.shape
breast_data.target.shape
list(breast_data.target_names)
# + [markdown] id="phb4vRQm9jUa"
# ## Splitting data to training and testing sets
#
# To investigate performance of our model, we need to split the data to training and testing sets. This will help us to check potential overfitting in our model training.
#
# **random_state** as the name suggests, is used for initializing the internal random number generator, which will decide the splitting of data into train and test indices in your case.
# + id="czRPFHbi9mgh"
from sklearn.model_selection import train_test_split
# 70/30 train/test split with a fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(breast_data.data, breast_data.target, test_size=0.30, random_state=5)
# + [markdown] id="DRsL_8AG5Gio"
# ## Logistic regression
# If we have set of features X1 to Xn, y can be obtained as:
# \begin{equation*} y=b0+b1X1+b2X2+...+bnXn\end{equation*}
#
# where y is the predicted value obtained by weighted sum of the feature values.
#
# Then probability of each class (for example tissue class BREAST) can be obtained using the logistic function
#
# \begin{equation*} p(class=BREAST)=\frac{1}{(1+exp(-y))} \end{equation*}
#
# Based on the given class labels and the features given in the training data, coefficients b0 to bn can be obtained during the optimization process.
#
# b0 to bn are fixed for all samples while X1 to Xn are feature values specific to each sample. Hence, the logistic function will give us probability of each class assigned to each sample. Finally, the model will choose the class with the highest probability for each sample.
#
#
# **Note.** The logistic regression model is parametric and the parameters are the regression coefficients b0 to bn.
#
# + id="74gD0HCTq46K" colab={"base_uri": "https://localhost:8080/", "height": 373} outputId="46c3cc87-d16a-4ef6-f2f6-61c73e808685"
from sklearn.linear_model import LogisticRegression as LR
# Initialize our classifier
logreg = LR()
# Fitting the model with the data
logreg.fit(X_train, y_train)
# prediction in test set
y_pred = logreg.predict(X_test)
# Display the predicted class labels (last expression of the cell is rendered).
y_pred
# + id="ZSuig7RfPlls" colab={"base_uri": "https://localhost:8080/", "height": 448} outputId="371c95b6-88be-4eb8-da26-1ec8f5a460b3"
from sklearn import metrics
# Evaluate the logistic-regression predictions on the held-out test set.
print("accuracy of the predictions:", metrics.accuracy_score(y_test, y_pred))
# Balanced accuracy averages recall over the classes, so it is robust to class imbalance.
# (Typo fixed: "blanced" -> "balanced".)
print("balanced accuracy of the predictions:", metrics.balanced_accuracy_score(y_test, y_pred))
# Matthews correlation coefficient: +1 perfect, 0 random, -1 total disagreement.
print("MCC of the predictions:", metrics.matthews_corrcoef(y_test, y_pred))
# Print the confusion matrix once, on its own line for readability (the
# original printed it twice, once inline and once with a newline).
print("Confusion matrix of the predictions:\n", metrics.confusion_matrix(y_test, y_pred))
# plot_confusion_matrix is a helper defined earlier in this notebook.
plot_confusion_matrix(y_test, y_pred, classes=[0,1],
                      title='Confusion matrix')
plt.show()
# + [markdown] id="62cLiquCQdd7"
# # Building classification models
#
# Among the available classification methods in Python, we focus on the following five to build classification models in our dataset:
#
# * Logistic regression
# * K- nearest neighbour
# * **Naive Bayes**
# * Random forest
#
# ## Naive Bayes
# To understand the Naive Bayes algorithm, we first need to know what Bayes theorem is. Bayes theorem relates conditional probabilities as follows:
#
# \begin{equation*} p(A|B)p(B)=p(B|A)p(A) \end{equation*}
# that can be rewritten as
#
# \begin{equation*} p(A|B)=\frac{p(B|A)p(A)}{p(B)} \end{equation*}
#
# where p(A) and p(B) are probabilities of events A and B, respectively. p(A|B) and p(B|A) are also conditional probabilities of A given B and B given A, respectively.
# **Example without numbers**
#
# Now let's assume we have 3 features X1, X2 and X3 and we want to identify the probability of class C for sample A with feature values *x1*, *x2* and *x3*:
#
# \begin{equation*} p(class=C|X1=x1, X2=x2 , X3=x3)=\frac{p(X1=x1|class=C)p(X2=x2|class=C)p(X3=x3|class=C)p(class=C)}{p(X1=x1)p(X2=x2)p(X3=x3)} \end{equation*}
#
# where
# \begin{equation*} p(X1=x1, X2=x2 , X3=x3)=p(X1=x1)p(X2=x2)p(X3=x3) \end{equation*}
# and
# \begin{equation*} p(X1=x1, X2=x2 , X3=x3|class=C)=p(X1=x1|class=C)p(X2=x2|class=C)p(X3=x3|class=C) \end{equation*}
#
# as the features are independent variables.
#
# **Real life example with numbers**
# We want to know the chance of having breast cancer if the diagnosis test is positive for a woman with the age between 40 and 60. This example is mainly for understanding Bayes theorem not Naive Bayes classifier. In case of Naive Bayes algorithm, this process can be easily extended to multiple features as described in the above example.
#
# ***Assumptions (not necessarily correct)***
# * 2% of women between 40 and 60 have breast cancer
# * True positive rate is 95% (if a woman has breast cancer, it will be diagnosed with 95% probability). Therefore, 5% of the time the women without breast cancer will be diagnosed positively by the test.
#
# Now the question is *What is the chance of having breast cancer if a woman has a positive result from a diagnosis test?*
#
# \begin{equation*} p(having \quad breast \quad cancer|positive)=\frac{p(positive|breast \quad cancer)p(breast cancer)}{p(positive)} \end{equation*}
#
# where
#
#
# \begin{equation*} p(positive) = p(positive|having \quad breast \quad cancer)p(having \quad breast \quad cancer) \\+ p(positive|not \quad having \quad breast \quad cancer)p(not \quad having \quad breast \quad cancer)\\=
# 0.95*0.02+0.05*0.98\\=0.068\end{equation*}
#
# Therefore,
#
# \begin{equation*} p(having \quad breast \quad cancer|positive)=\frac{p(positive|breast \quad cancer)p(having \quad breast \quad cancer)}{p(positive)}\\= \frac{0.95*0.02}{0.068}\\=0.28\end{equation*}
#
#
# As we can see, there is only a 28% chance of having cancer upon a positive test result. Although the numbers were not clinically valid numbers, we deal with similar results in disease diagnosis. This is one of the reasons that further checkups by physicians are mandatory upon positive results. Do not panic when you have a positive result but follow up with your doctor immediately.
#
# **Note.** Naive Bayes classifier is called ***Naive*** as it assumes each feature will independently contribute in prediction of a class for each data point (sample).
# + id="7yafr4ZVP9cY"
from sklearn.naive_bayes import GaussianNB
# Initialize our classifier
gnb = GaussianNB()
# Train our classifier; fit() returns the fitted estimator itself.
model = gnb.fit(X_train, y_train)
# + id="CQxu_bSpQ6LA" colab={"base_uri": "https://localhost:8080/", "height": 101} outputId="4e6f0909-0643-40ea-9d20-ddbb8baf7f84"
y_pred = model.predict(X_test)
print(y_pred)
# + id="YSIXwh-9rXr4" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="de82b466-55fe-4018-e2f2-1f87b5c64a88"
# Per-class probabilities for the training samples (one column per class).
model.predict_proba(X_train)
# + id="VpZA-bjiQ_tj" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="af836490-c16f-44fb-e0cd-2340f6709cd8"
# NOTE(review): this cell re-creates and refits a classifier that is never used
# afterwards ('model' above keeps serving the predictions); kept for the narrative.
gnb = GaussianNB()
# Fitting the model with the data
gnb.fit(X_train, y_train)
# + [markdown] id="rMFl22-JRGYw"
# Testing the model on the testing set:
# + id="SPN2o7qBRD7j" colab={"base_uri": "https://localhost:8080/", "height": 101} outputId="215e1e62-dbea-47a1-ff83-90738ff6ce69"
y_pred = model.predict(X_test)
print(y_pred)
# + id="Bnr5IfAlRIit" colab={"base_uri": "https://localhost:8080/", "height": 338} outputId="b06a7cb2-2f48-4319-d453-06a8ab9df61f"
print("accuracy of the predictions:", metrics.accuracy_score(y_test, y_pred))
# Typo fixed: "blanced" -> "balanced".
print("balanced accuracy of the predictions:", metrics.balanced_accuracy_score(y_test, y_pred))
print("Confusion matrix of the predictions:", metrics.confusion_matrix(y_test, y_pred))
# + id="tCEf1we6bIcc"
# + [markdown] id="T18GQ3dArCsh"
#
#
# # Building classification models
#
# Among the available classification methods in Python, we focus on the following five to build classification models in our dataset:
#
# * Logistic regression
# * K- nearest neighbour
# * Naive Bayes
# * **Random forest**
#
# + id="5bouEZY6y7kr" colab={"base_uri": "https://localhost:8080/"} outputId="ef106ab3-e518-4782-fd0a-63605478eadc"
from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import make_classification
# Shallow forest (max_depth=2); fixed random_state for reproducibility.
RFclf = RandomForestClassifier(max_depth=2, random_state=0)
RFclf.fit(X_train, y_train)
y_pred = RFclf.predict(X_test)
# Impurity-based importance of each input feature.
print(RFclf.feature_importances_)
# + id="7Ef9rOrPzfPX" colab={"base_uri": "https://localhost:8080/"} outputId="1e22dbb1-d55e-415d-f403-df7bc7e628ab"
# Random-forest performance on the held-out test set.
print("accuracy of the predictions:", metrics.accuracy_score(y_test, y_pred))
# Typo fixed: "blanced" -> "balanced".
print("balanced accuracy of the predictions:", metrics.balanced_accuracy_score(y_test, y_pred))
# Print the confusion matrix once, on its own line (the original printed it
# twice, once inline and once with a newline).
print("Confusion matrix of the predictions:\n", metrics.confusion_matrix(y_test, y_pred))
# + id="bag66EQ7Q-IU" colab={"base_uri": "https://localhost:8080/", "height": 398} outputId="5e9c6b41-75ab-4b99-8693-f2db28546c38"
print("accuracy of the predictions:", metrics.accuracy_score(y_test, y_pred))
print("balanced accuracy of the predictions:", metrics.balanced_accuracy_score(y_test, y_pred))
print("MCC of the predictions:", metrics.matthews_corrcoef(y_test, y_pred))
print("Confusion matrix of the predictions:", metrics.confusion_matrix(y_test, y_pred))
# plot_confusion_matrix is a helper defined earlier in this notebook.
plot_confusion_matrix(y_test, y_pred, classes=[0,1],
                      title='Confusion matrix')
plt.show()
# + id="XmIXXn0It6er"
| python/Session2_Supervised_Learning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Is hep-ex postdoc's affiliation an indicator of success?
# %%HTML
<style type="text/css">
table.dataframe td, table.dataframe th {
border: 1px black solid !important;
color: black !important;
}
</style>
# <img src="http://phdcomics.com/comics/archive/phd082313s.gif">
# As an experimental High Energy Physics (hep-ex) grad student, I often wonder which university/national lab should I choose for doing a postdoc to increase my odds of getting a faculty position, if I plan to stay in academia. But unlike other sub-fields in Physics, we have huge world-wide collaborations for hep-ex experiments like the Large Hadron Collider. In such collaborative environment, it is not very clear if it really matters where one does his/her postdoc, in terms of finding an academic faculty (research scientist) position. It might not be hard to convince oneself that there is actually no such correlation between a postdoc's affiliation and possibility of finding an academic job (faculty position) eventually. This has prompted me to put this hypothesis to test. So, let's explore here whether such a correlation between a postdoc's affiliation and future success in finding an academic faculty position in hep-ex exists.
import re
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
from sklearn.linear_model import LinearRegression
import statsmodels.api as sm
from sklearn import linear_model
# ## Data collection
#
# hepexrumor (https://sites.google.com/site/hepexrumor/) is a popular unofficial site which has the latest rumors about hep-ex jobs (in the US and outside). I parse this website for getting the job rumors from 2005-2019. For this short study, I did not consider temporal variation in job patterns and combined the data of all the years.
#
# I use the latest affiliation of a postdoc while applying for job. I only consider the postdocs who cleared the short-list round for a job as the total candidate pool, with a presumptuous assumption that postdocs not clearing the shortlist were not serious candidates for the job.
# Parsing hepexrumor: scrape one rumor page per year and keep the jobs table.
hepexjobsite = 'https://sites.google.com/site/hepexrumor/'
# Map each year to its (inconsistently named) page slug on the site.
# NOTE(review): 2010 maps to '2009-rumor' — looks deliberate given the site's
# naming, but worth confirming against the live pages.
year = {2005: '2005-rumor' ,
        2006: '2006-rumor' ,
        2007: '2007-rumor' ,
        2008: '2008-rumor' ,
        2009: '2009-rumor-1',
        2010: '2009-rumor' ,
        2011: '2011-rumors' ,
        2012: '2012-rumors' ,
        2013: '2013-rumors' ,
        2014: '2014-rumors' ,
        2015: '2015-rumors' ,
        2016: '2016-rumors' ,
        2017: '2016-2017' ,
        2018: '2018-rumors' ,
        2019: '2019-rumors' }
df = {}
for i in range(2005, 2020):
    # Each page contains several HTML tables; their layout changed in 2016.
    p = pd.read_html(hepexjobsite + year[i])
    print(i, len(p))
    if (i < 2016 ):
        # Pre-2016 pages: one table (index 3) lists all jobs; row 0 is the header.
        tUS = p[3].iloc[1:]
        tUS.columns = p[3].iloc[0]
    else:
        # From 2016 on, non-US (table 4) and US (table 5) jobs are separate.
        tnonUS = p[4].iloc[1:]
        tnonUS.columns = p[4].iloc[0]
        tnonUS = tnonUS.drop(columns=['Field'])
        tUS = p[5].iloc[1:]
        tUS.columns = p[5].iloc[0]
        tUS = tUS.drop(columns=['Field'])
        # BUG FIX: the original called tUS.append(tnonUS, ignore_index=True) on
        # every iteration and discarded the result, silently dropping all
        # non-US rows (and referencing tnonUS before 2016, where it is
        # undefined). Concatenate inside the else branch and keep the result.
        tUS = pd.concat([tUS, tnonUS], ignore_index=True)
    tUS.columns = ["Institution", "Short List", "Offers"]
    df[i] = tUS
df[2017].head()
# ## Data cleaning
#
# There is ambiguity associated to the names of some of the universities and labs, like Fermilab is listed as 'Fermilab' in some places and 'FNAL' elsewhere. The function below removes this ambiguity by replacing the ambiguous names to a standard name for the organizations:
def UniNameAmbiguityFix(dfk):
    """Canonicalise institution names in the 'Affiliation' column.

    Parameters
    ----------
    dfk : pandas.DataFrame
        Must have columns 'Applicant', 'Affiliation', 'Attempts'.

    Returns
    -------
    pandas.DataFrame
        Same three columns; alternative spellings (e.g. 'Fermilab' vs 'FNAL')
        are replaced by one canonical name, and 'Attempts' is summed per
        (Applicant, Affiliation) pair so renamed duplicates merge into one row.
    """
    # Alternative spelling -> canonical name.
    Uni_name_ambiguity = {'Argonne': 'ANL',
                          'Boston University': 'Boston U',
                          'BU': 'Boston U',
                          'Brown University': 'Brown',
                          'Cal Tech': 'Caltech',
                          'Carnegie': 'Carnegie Mellon',
                          'Colorado State University': 'Colorado State',
                          'Fermilab': 'FNAL',
                          'FNAL/Chicago': 'FNAL',
                          'Industry/Fermilab': 'FNAL',
                          'Chicago/FNAL': 'FNAL',
                          'Göttingen': 'Gottingen',
                          'Imperial': 'Imperial College London',
                          'Indiana': 'Indiana University',
                          'KSU': 'Kansas State',
                          'Los Alamos': 'LANL',
                          'LBL': 'LBNL',
                          'MSU': 'Michigan State',
                          'Northeastern University': 'Northeastern',
                          'Northwestern University': 'Northwestern',
                          'OSU': 'Ohio State',
                          'SUNY Stony Brook': 'Stony Brook',
                          'Texas A&M': 'TAMU',
                          'Triumf': 'TRIUMF',
                          'U Chicago': 'UChicago',
                          'Chicago': 'UChicago',
                          'University of Chicago': 'UChicago',
                          'Berkeley': 'UC Berkeley',
                          'University of Colorado Boulder': 'UC Boulder',
                          'CU Boulder': 'UC Boulder',
                          'Colorado': 'UC Boulder',
                          'Davis': 'UC Davis',
                          'Irvine': 'UC Irvine',
                          'UCSD': 'UC San Diego',
                          'UCSB': 'UC Santa Barbara',
                          'UCSC': 'UC Santa Cruz',
                          'UIC': 'University of Illinois Chicago',
                          'University of Illinois Urbana-Champaign': 'UIUC',
                          'University of North Carolina': 'UNC',
                          'University of Pennsylvania': 'UPenn',
                          'University of Texas Austin': 'UT Austin',
                          'Florida': 'University of Florida',
                          'Geneva': 'University of Geneva',
                          'Hawaii': 'University of Hawaii',
                          'Maryland': 'University of Maryland',
                          'Michigan': 'University of Michigan',
                          'Minnesota': 'University of Minnesota',
                          'Sheffield': 'University of Sheffield',
                          'Victoria': 'University of Victoria',
                          'Virginia': 'University of Virginia',
                          'Washington': 'University of Washington',
                          'University of Wisconsin Madison': 'UW Madison',
                          'Wisconsin': 'UW Madison',
                          'UW': 'UW Madison',
                          'UW-Madison': 'UW Madison'}
    # (Removed a no-op `Uni_name_ambiguity.keys()` statement from the original.)
    dfk = dfk.replace({'Affiliation': Uni_name_ambiguity})
    # After renaming, the same applicant+affiliation may appear twice; merge them.
    dfk = dfk.groupby(['Applicant', 'Affiliation'])['Attempts'].sum().reset_index()
    return dfk
# ## Extracting data about job interviews performances of postdocs
# Extracting tables for applicant job performance (along with their latest affiliation at the time of job application) from tables for job results.
# Build, per year, a table of (Applicant, Affiliation, Attempts, Offers).
ApplicantTable = {}
for i in range(2005, 2020):
    # -- Short-list column: count how many short lists each applicant made --
    # Entries look like "Name (Affiliation)"; split on ')' to separate candidates.
    attempt = df[i]['Short List'].str.split(r"\)", expand=True)
    attempt = attempt.unstack()
    # Strip footnote markers like "[1]" embedded in names.
    attempt = attempt.str.split(r"\[.*?\]").str.join('')
    attempt = attempt.str.strip()
    # Occurrence count == number of short lists the candidate appeared on.
    attempt = attempt.value_counts()
    attempt = attempt.to_frame()
    attempt.reset_index(level=0, inplace=True)
    attempt.columns = ['Applicant', 'Attempts']
    # Split "Name (Affiliation" into name and affiliation; n=1 guarantees
    # exactly two columns even if a name contains another '('.
    attemptTable = attempt['Applicant'].str.split('(', n=1, expand=True)
    attemptTable.columns = ['Applicant', 'Affiliation']
    attemptTable['Attempts'] = attempt['Attempts']
    attemptTable = attemptTable.iloc[1:]
    # Drop malformed rows whose name still contains any bracket character.
    # BUG FIX: the original used str.contains("\)" or "\(" or "[" or "]"),
    # which Python evaluates to just "\)" — only ')' was ever checked.
    indexDrop = attemptTable[attemptTable['Applicant'].str.contains(r"[()\[\]]")].index
    attemptTable.drop(indexDrop , inplace=True)
    # BUG FIX: the stripped result was discarded in the original; assign it
    # back so trailing whitespace does not defeat the name canonicalisation.
    attemptTable.Affiliation = attemptTable.Affiliation.str.strip()
    attemptTable = UniNameAmbiguityFix(attemptTable)
    # -- Offers column: count offers per applicant (affiliation text removed) --
    offerTable = df[i]['Offers'].str.split(r"\(.*?\)", expand=True)
    offerTable = offerTable.unstack()
    offerTable = offerTable.str.strip()
    offerTable = offerTable.value_counts()
    offerTable = offerTable.to_frame()
    offerTable.reset_index(level=0, inplace=True)
    offerTable.columns = ['Applicant', 'Offers']
    # Remove the dagger + non-breaking-space decoration used on the site.
    offerTable['Applicant'] = offerTable['Applicant'].str.replace(u'† \xa0', u'')
    offerTable = offerTable.iloc[1:]
    attemptTable.Applicant = attemptTable.Applicant.str.strip()
    offerTable.Applicant = offerTable.Applicant.str.strip()
    # Left-join so short-listed applicants with no offers are kept (Offers -> 0).
    ApplicantTable[i] = attemptTable.merge(offerTable, how='left', left_on='Applicant', right_on='Applicant')
    ApplicantTable[i] = ApplicantTable[i].fillna(0)
    ApplicantTable[i].Offers = ApplicantTable[i].Offers.astype(int)
    # applicants with no affiliations listed are dropped
    ApplicantTable[i].drop(ApplicantTable[i][ApplicantTable[i]['Affiliation'].str.strip() == ""].index , inplace=True)
    # blank applicant dropped
    ApplicantTable[i].drop(ApplicantTable[i][ApplicantTable[i]['Applicant'].str.strip() == ""].index , inplace=True)
    # theory or non-hep jobs to be dropped
    ApplicantTable[i].drop(ApplicantTable[i][ApplicantTable[i]['Applicant'].str.lower().str.contains('theory')].index , inplace=True)
    ApplicantTable[i].drop(ApplicantTable[i][ApplicantTable[i]['Applicant'].str.lower().str.contains('hep')].index , inplace=True)
    ApplicantTable[i].drop(ApplicantTable[i][ApplicantTable[i]['Affiliation'] == 'IAS'].index , inplace=True)
    ApplicantTable[i].drop(ApplicantTable[i][ApplicantTable[i]['Affiliation'] == 'theory'].index , inplace=True)
    # other misc. cleaning
    ApplicantTable[i].drop(ApplicantTable[i][ApplicantTable[i]['Affiliation'] == 'notes below'].index , inplace=True)
    ApplicantTable[i].drop(ApplicantTable[i][ApplicantTable[i]['Affiliation'] == 'Ultralytics'].index , inplace=True)
    ApplicantTable[i] = ApplicantTable[i].sort_values(by=['Offers', 'Attempts'], ascending=False)
# Display the table for the last processed year (`i` leaks from the loop).
ApplicantTable[i]
ApplicantTable[2015].head()
# Combining data of all the years.
# Combining data of all the years into one table; candidates appearing in
# several years are merged, summing their attempts and offers.
ApplicantTableAllYears = pd.concat(ApplicantTable, ignore_index=True)
# COMPAT FIX: select columns with a list [['Attempts', 'Offers']]; the tuple
# form groupby(...)['Attempts', 'Offers'] was deprecated and removed in pandas 2.0.
ApplicantTableAllYears = ApplicantTableAllYears.groupby(['Applicant', 'Affiliation'])[['Attempts', 'Offers']].sum().reset_index()
ApplicantTableAllYears = ApplicantTableAllYears.sort_values(by=['Offers', 'Attempts'], ascending=False)
ApplicantTableAllYears.head()
# I define a success as getting at least one job offer, ie assign an applicant success = 1. With no offers at all, I define the (short-listed) candidate to be unsuccessful, ie assign the applicant success = 0.
ApplicantTableAllYears['Success'] = (ApplicantTableAllYears['Offers'] > 0).astype(int)
ApplicantTableAllYears.head()
# ## University Metric
# In order to understand if there is any role of a university/lab in the success of its postdoc in finding a permanent job in academia, we define a few metrics to quantify the track record of a university/lab in producing successful postdocs (postdocs who could find permanent jobs immediately after finishing their current postdoc at that university/lab).
# For our positive hypothesis, we assume that every university/affiliation develops some qualities in its postdocs, which influences their employability in academia. Then the rate at which its postdocs get job offers every year (academic cycle) can be modelled by Poisson distribution:
#
# $$ P(k\ job\ offers\ per\ year\ from\ university\ u) = \frac{\lambda_u^{k} e^{-\lambda_u}}{k!} $$
#
# where the rate parameter $\lambda_u$ encoding those qualities which influence the overall employability of postdocs from university/lab $u$. Here k can theoretically range from 0, 1, 2, .., total no. of hepex job positions available globally for that year.
# Here, we made three assumptions:
# * Total number of jobs applied by all the postdocs from a university/lab in a year is very large.
# * All postdocs of a university/lab are of similar academic calibre when they start looking for permanent jobs, which the universities may ensure during their postdoc recruitment process and then through adequate research/academic training of their postdoctoral researchers throughout the term of their affiliation.
# * Success or failure of a postdoc in one job application does not affect the outcomes of other job application for that postdoc or other postdocs of that university in any way. (In reality, if one postdoc gets a job, that job becomes unavailable to other postdocs).
#
# With these three assumptions, $\lambda_u$ becomes an indicator of the contribution of a university/lab in the overall success of its postdoctoral fellows.
# **Average no. of successful offers/year** is a metric for estimating the rate at which postdocs of a university can crack hepex job interviews, as it is an unbiased estimator of $\lambda_u$.
# **Average no. of successful offers/year**, however, does not take into account the no. of the postdoc fellows in a university/lab, but the size of a research group often influences the skills of its members. The no. of postdocs a university hires varies from year to year based on various factors like - funding secured by the university/lab, no. of projects, no. of professors, etc.
# Since we assume that each postdoc's outcomes are independent of each other from the same university/lab, so model university's role for each postdoc success as independent poisson process. This assumption makes the rate $\lambda_u$ as:
# $\lambda_u$ = $\Sigma_{i_u = 0}^{N}$ $\lambda_{i_u}$ where N is the total no. of postdocs in a university/lab
# Here, $i_u$ is the i-th postdoc of the university/lab u for a given year. Now, since we also assume all the postdocs of the same university/lab are at par in academic/research calibre when applying for permanent jobs, we assume the rates $\lambda_{u_i}$ for each candidate as identical to others. Therefore, $\lambda_{u_i} = \lambda^{indiv}_{u}$ (constant).
# $\lambda_u = \Sigma_{i_u = 0}^{N}$ $\lambda_{i_u} = N\lambda^{indiv}_{u}$
#
# Therefore, $\lambda^{indiv}_{u} = \frac{\lambda_u}{N}$
# Although, N (no. of postdocs applying for jobs) varies every year based on many factors such as funding available to the university/lab, no. of projects the university/lab is working on, no. of principal investigators working at the time etc.
#
# To average out these variations, we use the **average no. of postdocs applying to jobs/year** from a university/lab as an estimator of N.
# Now we can define the ___university metric___ (estimator of $\lambda^{indiv}_{u}$):
#
# **Offers/candidate = $\frac{Average\ no.\ of\ successful\ offers\ per\ year}{Average\ no.\ of\ postdocs\ applying\ to\ jobs\ per\ year} = \frac{Total\ no.\ of\ successful\ offers\ from\ 2005-19}{Total\ no.\ of\ postdocs\ applying\ to\ jobs\ from\ 2005-19}$**
# Aggregate per university: total offers, successful and unsuccessful candidates.
UniversityTableAllYears = ApplicantTableAllYears.drop(columns=['Applicant', 'Attempts'])
UniversityTableAllYears['Failure'] = (UniversityTableAllYears['Offers'] == 0).astype(int)
# COMPAT FIX: list selection [['Offers', 'Success', 'Failure']] instead of the
# tuple form, which was deprecated and removed in pandas 2.0.
UniversityTableAllYears = UniversityTableAllYears.groupby(['Affiliation'])[['Offers', 'Success', 'Failure']].sum().reset_index()
# University metric: total offers divided by total candidates (success + failure).
UniversityTableAllYears['Offers/candidate'] = UniversityTableAllYears['Offers']*1./(UniversityTableAllYears['Success'] + UniversityTableAllYears['Failure'])
UniversityTableAllYears.columns = ['Affiliation', 'Total Offers', 'Total successful candidates', 'Total unsuccessful candidates', 'Offers/candidate']
UniversityTableAllYears = UniversityTableAllYears.sort_values(by=['Offers/candidate'], ascending=False)
UniversityTableAllYears.head()
# Candidates with at least one offer are counted as successful, while ones with no offer are counted as unsuccessful candidates.
# +
plt.style.use('ggplot')
u_total_success = UniversityTableAllYears.sort_values(by=['Total successful candidates'], ascending=False)
x_pos = [i for i, _ in enumerate(u_total_success['Affiliation'].iloc[:5])]
plt.bar(x_pos, u_total_success['Total successful candidates'].iloc[:5], color='green')
plt.xlabel("Postdoc affiliation")
plt.ylabel("Total successful candidates")
plt.title("Universities/labs which produced largest number of successful candidates (from 2005-2019)")
plt.xticks(x_pos, u_total_success['Affiliation'].iloc[:5])
plt.show()
# -
# FNAL (Fermilab) has a huge particle physics group especially during the tevatron days! :)
# +
plt.style.use('ggplot')
x_pos = [i for i, _ in enumerate(UniversityTableAllYears['Affiliation'].iloc[:5])]
plt.bar(x_pos, UniversityTableAllYears['Offers/candidate'].iloc[:5], color='green')
plt.xlabel("Postdoc affiliation")
plt.ylabel("Avg. offer per candidate")
plt.title("Universities/labs which have highest offers per candidate (from 2005-2019)")
plt.xticks(x_pos, UniversityTableAllYears['Affiliation'].iloc[:5])
plt.show()
# -
def checkmodeling(uniname):
    """Visual goodness-of-fit check for the Poisson model of offers per year.

    Rebuilds the per-year university tables from the module-level
    ``ApplicantTable``, collects the yearly offer counts for ``uniname``
    (2005-2019), and plots their normalised histogram against a Poisson pmf
    whose rate is the sample mean (the Poisson MLE).
    """
    uni_offers = []
    UniversityTable = {}
    for i in range(2005, 2020):
        UniversityTable[i] = ApplicantTable[i].sort_values(by=['Offers', 'Attempts'], ascending=False)
        # COMPAT FIX: list-based column selection; the tuple form
        # groupby(...)['Attempts', 'Offers'] was removed in pandas 2.0.
        UniversityTable[i] = UniversityTable[i].groupby(['Applicant', 'Affiliation'])[['Attempts', 'Offers']].sum().reset_index()
        UniversityTable[i]['Success'] = (UniversityTable[i]['Offers'] > 0).astype(int)
        UniversityTable[i] = UniversityTable[i].drop(columns=['Applicant', 'Attempts'])
        UniversityTable[i]['Failure'] = (UniversityTable[i]['Offers'] == 0).astype(int)
        UniversityTable[i] = UniversityTable[i].groupby(['Affiliation'])[['Offers', 'Success', 'Failure']].sum().reset_index()
        d = UniversityTable[i]
        o = d[d['Affiliation'] == uniname]['Offers']
        # Years in which the university had no short-listed applicant are skipped.
        # COMPAT FIX: int(Series) is deprecated; take the single element explicitly.
        if (len(o.values) != 0): uni_offers.append(int(o.iloc[0]))
    uni_offers = np.array(uni_offers)
    def factorial (n):
        # Simple recursive factorial; arguments here are small offer counts.
        if (n > 0): return n*factorial(n-1)
        else: return 1
    def poisson(k, lamb):
        """poisson pdf, parameter lamb is the fit parameter"""
        return (lamb**k/factorial(k)) * np.exp(-lamb)
    # The MLE of a Poisson rate is the sample mean.
    lamb = uni_offers.mean()
    uni_offers.sort()
    p = [poisson(_, lamb) for _ in range(uni_offers.max()+1)]
    # Centre the integer counts in unit-wide histogram bins.
    binboundary = np.array(range(-1,uni_offers.max()+1)) + 0.5
    # BUG FIX: `normed=True` was removed from matplotlib (3.1); use density=True.
    plt.hist(uni_offers, bins=binboundary, density=True, alpha=0.5, histtype='stepfilled', color='steelblue', edgecolor='none');
    plt.plot(range(uni_offers.max()+1), p, 'ro-', label='Poiss(%.2f)'%lamb)
    plt.xlabel("offers per year")
    plt.ylabel("Arbitrary units")
    plt.title("offers/year to %s postdocs (from 2005-2019)"%uniname)
    plt.legend()
    plt.show()
# Let's check for some universities/labs how well (badly) does the Poisson modeling of the offers per year work..
uninames = ['Columbia', 'FNAL', 'CERN', '<NAME>', 'UPenn']
[checkmodeling(uniname) for uniname in uninames]
# ## Postdoc Metrics
# We can define individual success of a postdoc using ___postdoc metric 1___:
#
# **Success odds** = $\frac{total\ offers}{total\ rejections}$ (for a postdoc)
# ___postdoc metric 2___ is the binary form of ___postdoc metric 1___:
#
# **Success** = 1 if (**success odds** > 0) else 0
#
# ie, if a postdoc got at least one job offer, that postdoc is counted as successful.
# Adding **success odds** to the table:
# Success odds = offers / rejections, where rejections = attempts - offers.
ApplicantTableAllYears['Success odds'] = ApplicantTableAllYears['Offers']/(ApplicantTableAllYears['Attempts'] - ApplicantTableAllYears['Offers'])
# Drop rows with NaN (no attempts) or +/-inf (zero rejections).  Use the
# explicit axis keyword: positional axis for DataFrame.any is deprecated.
ApplicantTableAllYears = ApplicantTableAllYears[~ApplicantTableAllYears.isin([np.nan, np.inf, -np.inf]).any(axis=1)]
ApplicantTableAllYears.head()
# Checking the distribution of **success odds**:
plt.hist(ApplicantTableAllYears['Success odds'], bins=20)
plt.xlabel("success odds")
plt.ylabel("no. of postdocs")
plt.title("postdocs from all uni/labs included (from 2005-2019)")
plt.show()
# **Success odds** distributions mostly 0 (no offers) and a peak at 1 (no. of offers = no. of rejections) per candidate.
UniApplicantTableAllYear = ApplicantTableAllYears.merge(UniversityTableAllYears[['Affiliation', 'Offers/candidate', 'Total successful candidates', 'Total unsuccessful candidates']], how='left', left_on='Affiliation', right_on='Affiliation')
UniApplicantTableAllYear[UniApplicantTableAllYear['Success']==0].head()
# ## Postdoc metrics vs. university metric
# ### Postdoc metric 1 (*success odds*) vs. university metric (*offers/candidate*)
plt.scatter(UniApplicantTableAllYear['Offers/candidate'], UniApplicantTableAllYear['Success odds'], marker = '.')
plt.xlabel('University metric: offers per candidate')
plt.ylabel('Postdoc metric: success odds')
# Pearson correlation:
correlation = UniApplicantTableAllYear[['Offers/candidate', 'Success odds']]
correlation.corr()
# Since there are other factors contributing to a postdocs success, the variation of the *median of* ***success odds*** w.r.t ***offers/candidate*** is useful in understanding the effect of university on postdoc's success.
bp = UniApplicantTableAllYear.boxplot(column='Success odds',by='Offers/candidate')
bp.set_xlabel('offers per candidate')
bp.set_ylabel('success odds')
bp.set_title('')
bp
# Homoscedasticity doesn't hold very well here
# Simple linear regression of postdoc success odds on the university metric.
x = UniApplicantTableAllYear['Offers/candidate'].values
y = UniApplicantTableAllYear['Success odds'].values
# sklearn expects 2-D feature/target arrays
x = x.reshape(-1, 1)
y = y.reshape(-1, 1)
regr = LinearRegression()
regr.fit(x, y)
plt.scatter(UniApplicantTableAllYear['Offers/candidate'],
            UniApplicantTableAllYear['Success odds'], alpha=0.5,
            color='blue', marker='.', label='data')
plt.plot(x, regr.predict(x), color='black', linewidth=3)
plt.xlabel('offers per candidate')  # fixed typo: was 'candidae'
plt.ylabel('success odds')
plt.legend()
plt.show()
# Work on an explicit copy: plain assignment aliased the original frame,
# so adding 'Success logit' below silently mutated UniApplicantTableAllYear too.
UniApplicantTableAllYearLog = UniApplicantTableAllYear.copy()
# logit = log of the odds; log(0) yields -inf for candidates with no offers
UniApplicantTableAllYearLog['Success logit'] = np.log(UniApplicantTableAllYearLog['Success odds'])
# drop NaN/inf rows; explicit axis keyword (positional form is deprecated in pandas)
UniApplicantTableAllYearLog = UniApplicantTableAllYearLog[~UniApplicantTableAllYearLog.isin([np.nan, np.inf, -np.inf]).any(axis=1)]
UniApplicantTableAllYearLog.head()
bp = UniApplicantTableAllYearLog.boxplot(column='Success logit',by='Offers/candidate')
bp.set_xlabel('offers per candidate')
bp.set_ylabel('success logit')
bp.set_title('')
bp
# Homoscedasticity better with **success logit**.
# +
x = UniApplicantTableAllYearLog['Offers/candidate'].values
logy = UniApplicantTableAllYearLog['Success logit'].values
x = x.reshape(-1, 1)
logy = logy.reshape(-1, 1)
#adding column of 1 to estimate slope and intercept
UniApplicantTableAllYearLog['const'] = 1
regrOLSlog = sm.OLS(UniApplicantTableAllYearLog['Success logit'],
UniApplicantTableAllYearLog[['Offers/candidate', 'const']]).fit()
regrlog = LinearRegression()
regrlog.fit(x, logy)
plt.scatter(UniApplicantTableAllYearLog['Offers/candidate'],
UniApplicantTableAllYearLog['Success logit'], alpha=0.5,
color='blue', marker='.', label='data')
plt.plot(x, regrlog.predict(x), color='black', linewidth=3)
plt.xlabel('offers per candidate')
plt.ylabel('success logit')
plt.legend()
plt.show()
# +
## slope of the regression
slope = regrlog.coef_[0][0]
## intercept of the regression
intercept = regrlog.intercept_[0]
## R^2 value
rsq = regrlog.score(x, logy)
slope, intercept, rsq
# -
print(regrOLSlog.summary())
# p-value of 0.062 for the slope of the linear regression suggests that the dependence of **success logit** on **offers/candidate** is ***NOT*** _statistically significant_ with 95% CL. So, the role of university in the success of its postdocs cannot be established with statistical significance using this pair of university and postdoc metrics.
# ### Postdoc metric 2 (*success*) vs. university metric (*offers/candidate*)
UniApplicantTableAllYear.head()
# +
t = UniApplicantTableAllYear[['Offers/candidate', 'Success']]
t = t.sort_values(by=['Offers/candidate'], ascending=False)
tsuccess = t[t['Success'] == 1]
tfailure = t[t['Success'] == 0]
bins=8
plt.hist(tsuccess['Offers/candidate'], bins, alpha=0.3, label='Success')
plt.hist(tfailure['Offers/candidate'], bins, alpha=0.3, label='Failure')
plt.xlabel('Offers/candidate')
plt.ylabel('no. of postdocs')
plt.legend(loc='best')
plt.show()
# -
logisticRegr = linear_model.LogisticRegression(C=1e5, solver='lbfgs')
logisticRegr.fit(UniApplicantTableAllYear[['Offers/candidate']], UniApplicantTableAllYear['Success'])
logisticRegr.score(UniApplicantTableAllYear[['Offers/candidate']], UniApplicantTableAllYear['Success'])
print(logisticRegr.coef_[0][0], logisticRegr.intercept_[0])
#xLR = np.arange(UniApplicantTableAllYear['Offers/candidate'].min(),
# UniApplicantTableAllYear['Offers/candidate'].max(), 0.01)
xLR = np.arange(-0.5, 2, 0.01)
xLR = xLR.reshape(-1,1)
from scipy.special import expit
logistic = expit(xLR * logisticRegr.coef_ + logisticRegr.intercept_).ravel()
plt.scatter(UniApplicantTableAllYear['Offers/candidate'],
UniApplicantTableAllYear['Success'], marker='.', alpha = 0.2, color='green')
plt.plot(xLR, logistic, color='blue', linewidth=3)
plt.xlabel('offers per candidate')
plt.ylabel('success')
plt.legend()
plt.show()
# **offers per candidate** metric is not very good in discriminating postdocs into successful (at least one job offer) and unsuccessful candidates, as the accuracy is ~69%.
# ## Summary
# * Statistically significant relationship between **Offers/candidate**, the _university metric_, and the _postdoc metrics_ could not be established.
# * We could not establish, with statistical significance, whether the affiliation of an experimental high energy physics (hep-ex) postdoc is an indicator of future success in finding a permanent academic position.
# # Future steps
#
# * temporal variations in hep-ex job market not taken into account
# * US and non-US jobs to be treated separately
# * should separate the study into energy, intensity and cosmic frontiers, as the job trends and funding are different for each
# * Look into other indicators of postdoc success like research productivity, etc.
| PostdocAffiliationSuccessCorrelation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
val=np.array([6,None,7,5,6])
val
# %timeit np.arange(1E5, dtype='object').sum()
# %timeit np.arange(1E5, dtype='int').sum()
val1=np.array([1,np.nan,7,1,8])
val1.dtype
6+np.nan
7*np.nan
val1
np.nansum(val1)
pd.Series([1,np.nan,2,None])
# Key pandas missing-data methods (reference list).  These are Series/DataFrame
# methods and cannot be called bare -- the original unqualified calls raised
# NameError at runtime:
#   isnull()  -- boolean mask marking missing values
#   notnull() -- inverse of isnull()
#   dropna()  -- drop missing values
#   fillna()  -- fill missing values
raw_data={'first_name':['Jason',np.nan,'Tina','John','Amy'],
'last_name':['Miller',np.nan,'Ali','Milner','Cooze'],
'age':[42,np.nan,36,24,73],
'sex':['m','m','f','f','f'],
'preTestScore':[4,np.nan,np.nan,2,3],
'postTestScore':[25,np.nan,np.nan,62,70]}
df=pd.DataFrame(raw_data,columns=['first_name','last_name','age','sex','preTestScore','postTestScore'])
df
df_no_missing=df.dropna()
df_no_missing
df_cleaned=df.dropna(how='all')
df_cleaned
df
df['location']=np.nan
df
df.dropna(axis=1,how='all')
df.dropna(thresh=5)
df.fillna(5)
df['preTestScore'].fillna(df['preTestScore'].mean(),inplace=True)
df
# # Fill in missing in postTestScore with each sex's mean value of postTestScore
df['postTestScore'].fillna(df.groupby('sex')['postTestScore'].transform('mean'),inplace=True)
df
# # Select some raws but ignore the missing data points
df
df[df['age'].notnull() & df['sex'].notnull()]
df
df.fillna(method='bfill')
df.fillna(method='bfill',axis=1)
df
raw_data={'regiment':['Nighthawks','Nighthawks','Nighthawks','Nighthawks','Dragoons','Dragoons','Dragoons','Dragoons','Scouts','Scouts','Scouts','Scouts'],
'company':['1st','1st','2nd','2nd','1st','1st','2nd','2nd','1st','1st','2nd','2nd'],
'name':['Miller','Jacobson','Ali','Milner','Cooze','Jacon','Ryaner','Sone','Sloan','Piger','Riani','Ali'],
'preTestScore':[4,24,31,2,3,4,24,31,2,3,2,3],
'postTestScore':[25,94,57,62,70,25,94,57,62,70,62,70]}
df=pd.DataFrame(raw_data,columns=['regiment','company','name','preTestScore','postTestScore'])
df
df.set_index(['regiment','company'])
df.set_index(['regiment','company'],inplace=True)
df
df.index
df.swaplevel('regiment','company')
df.sum(level='regiment')
#class method constructor
pd.MultiIndex.from_arrays([['a','a','b','b'],[1,2,1,2]])
# list of tuples
pd.MultiIndex.from_tuples([('a',1),('a',2),('b',1),('b',2)])
#Cartesian product of single indices
pd.MultiIndex.from_product([['a','b'],[1,2]])
pd.MultiIndex(levels=[['a','b'],[1,2]],
codes=[[0,0,1,1],[0,1,0,1]])
# hierarchical indices and columns
index=pd.MultiIndex.from_product([[2013,2014],[1,2]],
names=['year','visit'])
columns=pd.MultiIndex.from_product([['Bob','Guido','Sue'],['HR','Temp']],
names=['subject','type'])
data=np.round(np.random.randn(4,6),1)
health_data=pd.DataFrame(data,index=index,columns=columns)
health_data
#data[:,::2]*=10
data+=37
health_data=pd.DataFrame(data,index=index,columns=columns)
health_data
health_data['Guido','HR']
health_data.iloc[:2,:2]
health_data.loc[:,('Bob','HR')]
index=pd.MultiIndex.from_product([['b','a','c'],[1,2]])
data=pd.Series(np.random.rand(6),index=index)
data.index.names=['char','int']
data
try:
data['a':'b']
except KeyError as e:
print(type(e))
print(e)
data['a':'b']
data=data.sort_index()
data
data['a':'b']
| Data-Science-HYD-2k19/Day-based/Day 32.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="OBmELWHxAOR7" colab_type="text"
# ## Install
# + id="3hWX8kqN6_kk" colab_type="code" colab={}
# !pip3 install bayesian-optimization==0.6 --user
# + [markdown] id="ZgzBVDZX6_lP" colab_type="text"
# I use `bayesian-optimization==0.6`, my backend pretty much stick with this version, so migrating will break the code.
# + [markdown] id="A-4w4W5R__-l" colab_type="text"
# ## Imports
# + id="Xgenym7A6_lV" colab_type="code" colab={}
import numpy as np
import pandas as pd
import time
import matplotlib.pyplot as plt
import seaborn as sns
import random
from bayes_opt import BayesianOptimization
sns.set()
# + id="NYtPbMi-6_ll" colab_type="code" colab={}
import pkg_resources
import types
def get_imports():
    """Yield the top-level package name for every module or class in globals().

    Import names whose PyPI distribution name differs are mapped through a
    small alias table (e.g. PIL -> Pillow).
    """
    alias = {'PIL': 'Pillow', 'sklearn': 'scikit-learn'}
    for key, obj in globals().items():
        pkg = key
        if isinstance(obj, types.ModuleType):
            pkg = obj.__name__.split('.')[0]
        elif isinstance(obj, type):
            pkg = obj.__module__.split('.')[0]
        yield alias.get(pkg, pkg)
imports = list(set(get_imports()))
requirements = []
for m in pkg_resources.working_set:
if m.project_name in imports and m.project_name != 'pip':
requirements.append((m.project_name, m.version))
for r in requirements:
print('{}=={}'.format(*r))
# + [markdown] id="y7yYNUCO_6Su" colab_type="text"
# ## Data
# + [markdown] id="9rsN62uj6_mN" colab_type="text"
# TSLA Time Period: **Mar 23, 2018 - Mar 23, 2019**
# + id="I3TnTXyr7PQ8" colab_type="code" colab={}
# !mkdir dataset
# !wget https://raw.githubusercontent.com/huseinzol05/Stock-Prediction-Models/master/dataset/TSLA.csv -P dataset
# + id="zI83Cntk6_mR" colab_type="code" colab={}
df = pd.read_csv('../dataset/TSLA.csv')
df.head()
ticker = "TSLA"# display ticker name in plot
# + [markdown] id="X6Xs9mSZ_gXg" colab_type="text"
# ## Strategy, Model, & Agent
# + id="pDyWgvPW6_ml" colab_type="code" colab={}
close = df.Close.values.tolist()
window_size = 30
skip = 5
l = len(close) - 1
# + id="9LVBzfGI6_l-" colab_type="code" colab={}
def get_state(data, t, n):
    """Return a 1 x (n-1) array of consecutive price differences ending at t.

    When fewer than n prices exist up to index t, the window is left-padded
    with the first price so the output always has length n - 1.
    """
    start = t - n + 1
    if start >= 0:
        window = data[start : t + 1]
    else:
        window = [data[0]] * (-start) + data[0 : t + 1]
    diffs = [window[j + 1] - window[j] for j in range(n - 1)]
    return np.array([diffs])
# + id="qLCLaj336_nQ" colab_type="code" colab={}
class Deep_Evolution_Strategy:
    """Gradient-free optimizer (OpenAI-style evolution strategy).

    Perturbs the weight list with Gaussian noise, scores every perturbed
    candidate with `reward_function`, and moves the weights along the noise
    directions weighted by the standardized rewards.
    """
    # unused placeholder kept from the original implementation
    inputs = None
    def __init__(
        self, weights, reward_function, population_size, sigma, learning_rate
    ):
        # weights: list of numpy arrays, updated in place by train()
        self.weights = weights
        self.reward_function = reward_function
        self.population_size = population_size
        # sigma: std-dev of the exploration noise
        self.sigma = sigma
        self.learning_rate = learning_rate
    def _get_weight_from_population(self, weights, population):
        # Return `weights` shifted by sigma-scaled noise from `population`.
        weights_population = []
        for index, i in enumerate(population):
            jittered = self.sigma * i
            weights_population.append(weights[index] + jittered)
        return weights_population
    def get_weights(self):
        return self.weights
    def train(self, epoch = 100, print_every = 1):
        """Run `epoch` ES update steps, logging every `print_every` steps."""
        lasttime = time.time()
        for i in range(epoch):
            # draw one Gaussian-noise candidate (one array per weight) per member
            population = []
            rewards = np.zeros(self.population_size)
            for k in range(self.population_size):
                x = []
                for w in self.weights:
                    x.append(np.random.randn(*w.shape))
                population.append(x)
            # score every perturbed weight set
            for k in range(self.population_size):
                weights_population = self._get_weight_from_population(
                    self.weights, population[k]
                )
                rewards[k] = self.reward_function(weights_population)
            # standardize rewards; NOTE(review): divides by zero if all rewards are equal
            rewards = (rewards - np.mean(rewards)) / np.std(rewards)
            # gradient estimate: reward-weighted average of the noise directions
            for index, w in enumerate(self.weights):
                A = np.array([p[index] for p in population])
                self.weights[index] = (
                    w
                    + self.learning_rate
                    / (self.population_size * self.sigma)
                    * np.dot(A.T, rewards).T
                )
            if (i + 1) % print_every == 0:
                print(
                    'iter %d. reward: %f'
                    % (i + 1, self.reward_function(self.weights))
                )
        print('time taken to train:', time.time() - lasttime, 'seconds')
class Model:
    """Single-hidden-layer policy network evaluated with plain numpy."""

    def __init__(self, input_size, layer_size, output_size):
        # [input->hidden, hidden->action, hidden->buy-size, hidden bias]
        self.weights = [
            np.random.randn(input_size, layer_size),
            np.random.randn(layer_size, output_size),
            np.random.randn(layer_size, 1),
            np.random.randn(1, layer_size),
        ]

    def predict(self, inputs):
        """Return (action logits, buy-quantity signal) for a state row."""
        hidden = np.dot(inputs, self.weights[0]) + self.weights[-1]
        return np.dot(hidden, self.weights[1]), np.dot(hidden, self.weights[2])

    def get_weights(self):
        return self.weights

    def set_weights(self, weights):
        self.weights = weights
# + id="lBV-Yr3s6_nb" colab_type="code" colab={}
class Agent:
    """Trading agent: a Model policy trained with Deep_Evolution_Strategy.

    Reads the module-level price list `close` and its length `l`; decides
    every `skip` days among actions 0=hold, 1=buy, 2=sell, with per-trade
    unit caps `max_buy`/`max_sell`.
    """
    def __init__(
        self,
        population_size,
        sigma,
        learning_rate,
        model,
        money,
        max_buy,
        max_sell,
        skip,
        window_size,
    ):
        # state window length and step size (days between decisions)
        self.window_size = window_size
        self.skip = skip
        # evolution-strategy hyper-parameters
        self.POPULATION_SIZE = population_size
        self.SIGMA = sigma
        self.LEARNING_RATE = learning_rate
        self.model = model
        # starting cash balance for every simulated run
        self.initial_money = money
        self.max_buy = max_buy
        self.max_sell = max_sell
        # the optimizer maximizes get_reward over the model's weights
        self.es = Deep_Evolution_Strategy(
            self.model.get_weights(),
            self.get_reward,
            self.POPULATION_SIZE,
            self.SIGMA,
            self.LEARNING_RATE,
        )
    def act(self, sequence):
        # Return (action, units): argmax over action logits, and the raw
        # buy/sell size signal truncated to int.
        decision, buy = self.model.predict(np.array(sequence))
        return np.argmax(decision[0]), int(buy[0])
    def get_reward(self, weights):
        """Simulate one trading run with `weights`; return % gain (ES fitness)."""
        initial_money = self.initial_money
        starting_money = initial_money
        self.model.weights = weights
        state = get_state(close, 0, self.window_size + 1)
        inventory = []
        quantity = 0
        for t in range(0, l, self.skip):
            action, buy = self.act(state)
            next_state = get_state(close, t + 1, self.window_size + 1)
            if action == 1 and initial_money >= close[t]:
                # buy: clamp requested units to [1, max_buy]
                if buy < 0:
                    buy = 1
                if buy > self.max_buy:
                    buy_units = self.max_buy
                else:
                    buy_units = buy
                total_buy = buy_units * close[t]
                initial_money -= total_buy
                inventory.append(total_buy)
                quantity += buy_units
            elif action == 2 and len(inventory) > 0:
                # sell: at most max_sell units, never more than held
                if quantity > self.max_sell:
                    sell_units = self.max_sell
                else:
                    sell_units = quantity
                quantity -= sell_units
                total_sell = sell_units * close[t]
                initial_money += total_sell
            state = next_state
        # percentage return over the starting balance
        return ((initial_money - starting_money) / starting_money) * 100
    def fit(self, iterations, checkpoint):
        """Train the evolution strategy, logging every `checkpoint` iterations."""
        self.es.train(iterations, print_every = checkpoint)
    def buy(self):
        """Replay the trained policy once, printing each trade and plotting
        buy/sell markers over the close-price series."""
        initial_money = self.initial_money
        state = get_state(close, 0, self.window_size + 1)
        starting_money = initial_money
        states_sell = []
        states_buy = []
        inventory = []
        quantity = 0
        for t in range(0, l, self.skip):
            action, buy = self.act(state)
            next_state = get_state(close, t + 1, self.window_size + 1)
            if action == 1 and initial_money >= close[t]:
                # buy branch mirrors get_reward, plus trade logging
                if buy < 0:
                    buy = 1
                if buy > self.max_buy:
                    buy_units = self.max_buy
                else:
                    buy_units = buy
                total_buy = buy_units * close[t]
                initial_money -= total_buy
                inventory.append(total_buy)
                quantity += buy_units
                states_buy.append(t)
                print(
                    'day %d: buy %d units at price %f, total balance %f'
                    % (t, buy_units, total_buy, initial_money)
                )
            elif action == 2 and len(inventory) > 0:
                # FIFO cost basis used only for the per-trade investment print
                bought_price = inventory.pop(0)
                if quantity > self.max_sell:
                    sell_units = self.max_sell
                else:
                    sell_units = quantity
                if sell_units < 1:
                    continue
                quantity -= sell_units
                total_sell = sell_units * close[t]
                initial_money += total_sell
                states_sell.append(t)
                try:
                    invest = ((total_sell - bought_price) / bought_price) * 100
                except:
                    invest = 0
                print(
                    'day %d, sell %d units at price %f, investment %f %%, total balance %f,'
                    % (t, sell_units, total_sell, invest, initial_money)
                )
            state = next_state
        invest = ((initial_money - starting_money) / starting_money) * 100
        print(
            '\ntotal gained %f, total investment %f %%'
            % (initial_money - starting_money, invest)
        )
        # plot the price series with buy/sell markers
        plt.figure(figsize = (20, 10))
        plt.title(ticker + ": Predicted Buy/Sell for " +str(len(close))+ " Days with ROI: " + str(int(invest))+"%",fontsize=24, y=1)
        plt.plot(close, label = 'true close', c = 'g')
        plt.plot(
            close, 'X', label = 'predict buy', markevery = states_buy, c = 'b'
        )
        plt.plot(
            close, 'o', label = 'predict sell', markevery = states_sell, c = 'r'
        )
        plt.legend()
        plt.show()
# + [markdown] id="_d7Gdz_T_wXd" colab_type="text"
# ## Optimizer
# + id="-ZvP59gL6_nu" colab_type="code" colab={}
def best_agent(
    window_size, skip, population_size, sigma, learning_rate, size_network
):
    """Train a fresh Agent for 100 iterations and return its final reward (%).

    Returns 0 when a hyper-parameter draw makes training fail, so the
    Bayesian search can keep going.
    """
    model = Model(window_size, size_network, 3)
    agent = Agent(
        population_size,
        sigma,
        learning_rate,
        model,
        10000,
        5,
        5,
        skip,
        window_size,
    )
    try:
        agent.fit(100, 1000)
        return agent.es.reward_function(agent.es.weights)
    except Exception as e:
        # report the failure instead of silently swallowing every exception
        # (the original bare except also ate KeyboardInterrupt/SystemExit)
        print('training failed:', e)
        return 0
# + id="je6LFcLd6_oF" colab_type="code" colab={}
def find_best_agent(
    window_size, skip, population_size, sigma, learning_rate, size_network
):
    """Objective for BayesianOptimization.

    Rounds/clamps the raw continuous parameters into valid agent settings,
    trains an agent, and returns its investment return (%).  Tracks the best
    return seen so far in the module-level `accbest`.
    """
    global accbest
    param = {
        'window_size': int(np.around(window_size)),
        'skip': int(np.around(skip)),
        'population_size': int(np.around(population_size)),
        'sigma': max(min(sigma, 1), 0.0001),
        'learning_rate': max(min(learning_rate, 0.5), 0.000001),
        'size_network': int(np.around(size_network)),
    }
    print('\nSearch parameters %s' % (param))
    investment = best_agent(**param)
    print('stop after 100 iteration with investment %f' % (investment))
    if investment > accbest:
        # fix: the original assigned a never-used local `costbest`, so the
        # declared global `accbest` was never actually updated
        accbest = investment
    return investment
# + [markdown] id="rl7M4CTn_pcJ" colab_type="text"
# ## Run optimizer
# + id="Msa9_cMd6_oQ" colab_type="code" colab={}
accbest = 0.0
NN_BAYESIAN = BayesianOptimization(
find_best_agent,
{
'window_size': (2, 50),
'skip': (1, 15),
'population_size': (1, 50),
'sigma': (0.01, 0.99),
'learning_rate': (0.000001, 0.49),
'size_network': (10, 1000),
},
)
NN_BAYESIAN.maximize(init_points = 30, n_iter = 50, acq = 'ei', xi = 0.0)
# + id="7q7_Vrqu6_od" colab_type="code" colab={}
print('Best AGENT accuracy value: %f' % NN_BAYESIAN.res['max']['max_val'])
print('Best AGENT parameters: ', NN_BAYESIAN.res['max']['max_params'])
# + [markdown] id="Mz7zaOir6_or" colab_type="text"
# #### My selected parameters
# + id="ukgYprrX6_ou" colab_type="code" colab={}
best_agent(
window_size = 30,
skip = 1,
population_size = 15,
sigma = 0.1,
learning_rate = 0.03,
size_network = 500
)
# + [markdown] id="loQpyZ766_o8" colab_type="text"
# #### bayesian parameters
# + id="xNgQ3lPi6_pA" colab_type="code" colab={}
best_agent(
window_size = int(np.around(NN_BAYESIAN.res['max']['max_params']['window_size'])),
skip = int(np.around(NN_BAYESIAN.res['max']['max_params']['skip'])),
population_size = int(np.around(NN_BAYESIAN.res['max']['max_params']['population_size'])),
sigma = NN_BAYESIAN.res['max']['max_params']['sigma'],
learning_rate = NN_BAYESIAN.res['max']['max_params']['learning_rate'],
size_network = int(np.around(NN_BAYESIAN.res['max']['max_params']['size_network']))
)
# + [markdown] id="M4xtEYT_6_pY" colab_type="text"
# #### My selected parameters
# + id="vyLTMbeK6_pg" colab_type="code" colab={}
model = Model(input_size = 30,
layer_size = 500,
output_size = 3)
agent = Agent(population_size = 15,
sigma = 0.1,
learning_rate = 0.03,
model = model,
money = 10000,
max_buy = 5,
max_sell = 5,
skip = 1,
window_size = 30)
agent.fit(500, 100)
agent.buy()
# + [markdown] id="lXk6xQzn6_p7" colab_type="text"
# #### bayesian parameters
# + id="qlwR0CPO6_p_" colab_type="code" colab={}
model = Model(input_size = int(np.around(NN_BAYESIAN.res['max']['max_params']['window_size'])),
layer_size = int(np.around(NN_BAYESIAN.res['max']['max_params']['size_network'])),
output_size = 3)
agent = Agent(population_size = int(np.around(NN_BAYESIAN.res['max']['max_params']['population_size'])),
sigma = NN_BAYESIAN.res['max']['max_params']['sigma'],
learning_rate = NN_BAYESIAN.res['max']['max_params']['learning_rate'],
model = model,
money = 10000,
max_buy = 5,
max_sell = 5,
skip = int(np.around(NN_BAYESIAN.res['max']['max_params']['skip'])),
window_size = int(np.around(NN_BAYESIAN.res['max']['max_params']['window_size'])))
agent.fit(500, 100)
agent.buy()
# + id="g8t92K2B6_qL" colab_type="code" colab={}
| free-agent/evolution-strategy-bayesian-agent.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import sys
import os
import shutil
def create_directory(directory_path, remove=False):
    """Create `directory_path`.

    If it already exists and `remove` is True, it is wiped and recreated.
    Returns True on success, False on failure (including when the directory
    already exists and `remove` is False).
    """
    if remove and os.path.exists(directory_path):
        try:
            shutil.rmtree(directory_path)
            os.mkdir(directory_path)
        except OSError:
            # catch only filesystem errors, not e.g. KeyboardInterrupt
            print("Could not remove directory : ", directory_path)
            return False
    else:
        try:
            os.mkdir(directory_path)
        except OSError:
            # also reached when the directory already exists and remove=False
            print("Could not create directory: ", directory_path)
            return False
    return True
def remove_directory(directory_path):
    """Remove the directory tree at `directory_path` if it exists.

    Returns True on success (including when the path was already absent),
    False on failure.
    """
    if os.path.exists(directory_path):
        try:
            shutil.rmtree(directory_path)
        except OSError:
            # catch only filesystem errors, not e.g. KeyboardInterrupt
            print("Could not remove directory : ", directory_path)
            return False
    return True
def clear_directory(directory_path):
    """Delete every file and subdirectory inside `directory_path`.

    The directory itself is kept.  Always returns True; individual
    failures are printed and skipped.
    """
    dirs_files = os.listdir(directory_path)
    for item in dirs_files:
        # use os.path.join: the original string concatenation silently built
        # wrong paths whenever directory_path lacked a trailing separator,
        # so nothing was ever deleted
        item_path = os.path.join(directory_path, item)
        try:
            if os.path.isfile(item_path):
                os.unlink(item_path)
            elif os.path.isdir(item_path):
                shutil.rmtree(item_path)
        except Exception as e:
            print(e)
    return True
def remove_empty_folders(path, removeRoot=True):
    """Recursively delete folders under `path` that contain no files.

    `path` itself is only removed when `removeRoot` is True and it ends up
    empty after its subtree has been pruned.
    """
    if not os.path.isdir(path):
        return
    # prune subdirectories first so parents can become empty
    for entry in os.listdir(path):
        child = os.path.join(path, entry)
        if os.path.isdir(child):
            remove_empty_folders(child)
    # delete this folder if nothing is left in it
    if removeRoot and not os.listdir(path):
        print("Removing empty folder:", path)
        os.rmdir(path)
def dir_file_count(directory):
    """Count all files under `directory`, recursively."""
    total = 0
    for _root, _dirs, files in os.walk(directory):
        total += len(files)
    return total
| file_directory.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # The IS-LM model
# Imports and set magics:
# +
# %matplotlib inline
import numpy as np
from scipy import optimize
import sympy as sp
from sympy import symbols
import matplotlib.pyplot as plt
sp.init_printing()
# autoreload modules when code is run
# %load_ext autoreload
# %autoreload 2
# local modules
import modelproject
# -
# # Model description
# For this project we will analyze the famous IS-LM model, identifying a mutual equilibrium for the goods and the loans markets. The equilibrium is expressed in terms of a combination of the income and the interest rate. We will compute an IS schedule and a LM schedule where each point of the curves represents an equilibrium in the respective market.
#
# In order to derive the Investment-Saving schedule, we use the following 3 equations describing the output market:
# $$Y = C + I + \overline G$$
# where Y is the aggregate output demand, C is private consumption, I is private fixed capital investments, and G is public spending on goods and services. G is considered an exogenous variable, determined by the policy makers and thus represented as $\overline G$.
#
# Private consumption is given by the keynesian consumption function:
# $$C = \alpha + \beta(Y - T)$$
# where $\alpha$ ≥ 0 and $\beta \in (0,1)$ represents the level of consumption independent of income and the marginal propensity to consume, respectively. T represents taxes.
#
# Lastly, the output market is also described by the linear relationship between investment, I, and interest rate, i:
# $$I = \overline I - d * i$$
# where $\overline I$ is the level of I when i = 0, and d is the rate at which an increase in i leads to a fall in I.
# Next, we derive the Liquidity Preference-Money Supply (LM) schedule.
#
# The money supply is exogenous, decided upon by the central bank and thus independent of the interest rate. It is given by:
# $$\frac {M^S} P = \frac {M^S_0} P$$
#
# Here $M^S_0$ denotes the exogenous nominal money supply chosen by the central bank, so for a given price level P the real money supply $\frac{M^S_0}{P}$ is fixed and independent of the interest rate.
#
#
# The money demand, however, depends on the amount of money needed for precautionary savings, transactions and investments, given by the following equation:
#
# $$\frac {M^D} P = c_1 + c_2*Y - c_3*i$$
#
# where $c_1$, $c_2$, and $c_3$ represents precautionary reasons, transactional reasons, and speculative reasons, respectively.
#
# In both cases, P is the current price level, which is treated as an exogenous variable. In equilibrium, $M^S = M^D$.
#
# + [markdown] tags=[]
# ## Analytical solution
# -
# In this part we will solve the IS-LM relation analytically. The idea is to isolate for the interrest rate for both the IS and LM curve. In equilibrium, the interest rate should be equal in both the output and asset markets. Thus, by setting them equal to each other, we can find the repective level of outcome Y that matches the interest rate.
# ### The IS curve
# We will start by considering the IS relation in a closed economy:
#
# $$Y = C + I + \overline G$$
#
# If we replace the equations given above for C and I, we get the following expression:
#
# $$Y = \alpha + \beta(Y - T) + \overline I - d * i + \overline G$$
# In order to solve the model analytically, we first have to define all the variables as sympy symbols. Once that is done, we can express the above equation using sympy.
# +
# Defining known parameters of IS relation
alpha = sp.symbols('alpha', real=True)
beta = sp.symbols('beta', real=True)
G = sp.symbols('\overline{G}', real=True)
T = sp.symbols('T', real=True)
d = sp.symbols('d', real=True)
i = sp.symbols('i', real=True)
I = sp.symbols('I', real=True)
I_bar = sp.symbols('\overline{I}', real=True)
Y = sp.symbols('Y', real=True)
# Defining output and the IS equation
output = alpha + beta*(Y-T) + I_bar - (d*i) + G
IS = sp.Eq(Y, output)
display(IS)
# -
# Since we know that in equilibrium, the interest rate will be the same in both markets, we want to isolate for i. We can use the sympy solve function to do that:
# +
# Solving for interest rate in the IS curve
sol_IS = sp.solve(IS, i)[0]
display(sp.Eq(i, sol_IS))
# -
# ### The LM curve
# Now we will do the same for the LM curve that is solve for the interest rate. Recall that in equilbrium $M^S = M^D$. Thus, we want to isolate for i in the following equation:
#
# $$ \frac {M^S_0} P = c_1 + c_2*Y - c_3*i$$
# Again, we start by defining the variables as sympy symbols and displaying the above equation using sympy.
# +
#Defning known parameters of LM relation
Money = sp.symbols('M^S_0', real=True)
P = sp.symbols('P', real=True)
c_1 = sp.symbols('c_1', real=True)
c_2 = sp.symbols('c_2', real=True)
c_3 = sp.symbols('c_3', real=True)
i = sp.symbols('i', real=True)
Y = sp.symbols('Y', real=True)
#Showing money demand and the LM equation
M_demand = c_1+c_2*Y-c_3*i
LM = sp.Eq(Money/P, M_demand)
display(LM)
# -
# Now, solving for i we get the following:
# +
# Solving for interest rate in the LM curve
sol_LM = sp.solve(LM, i)[0]
display(sp.Eq(i, sol_LM))
# -
# ### Finding the equilibrium
# Now we have solved for the interest rate in both the IS and LM schedules. Thus, we can set them equal to each other and solve for the equilibrium output, $Y^*$.
# +
# Defining equilibrium parameters
Y_star = sp.symbols('Y^*', real=True)
i_star = sp.symbols('i^*', real=True)
# Setting IS=LM
ISLM = sp.Eq(sol_IS, sol_LM)
display(ISLM)
# -
# To solve for Y, we then use the sympy solve function.
# +
# Solving for Y
ISLM_Y = sp.solve(ISLM, Y)[0]
display(sp.Eq(Y_star, ISLM_Y))
# -
# The equilbrium $Y^*$ can then be plugged in to either of the equations for the equilibrium interest rate that we found above. That way we will obtain the general equilibrium $i^*$ which completes the analytical solution of the IS-LM model.
# +
# Replacing Y with the variable Y*
sol_LM_star1 = sol_LM.subs(Y, Y_star)
display(sp.Eq(i_star, sol_LM_star1))
# -
# Plugging in the above equation we found for $Y^*$ into the equation for $i^*$, we get the equilibrium expression for the interest rate:
# +
# Replacing the variable Y* with the equation for Y*
sol_LM_star2 = sol_LM_star1.subs(Y_star, ISLM_Y)
display(sp.Eq(i_star, sol_LM_star2))
# -
# The pair ($Y^*, i^*$) represents the analytical general equilibrium of the IS-LM model.
# ## Numerical solution
# Now that we have solved the IS-LM model analytically, we will proceed to solving it numerically. We start by looking at the IS schedule.
# ### The IS curve
# The IS curve is defined by the output, consumption and investment. Thus, in order to solve the model numerically, we start by assigning specific numeric values to the other paramters in the IS schedule.
# + tags=[]
# Assigning numerical values to parameters
Y_size = 100
Y = np.arange(Y_size)
alpha = 100
beta = 0.2
T = 1
d = 2
G_bar = 20
I_bar = 10
M_S0 = 23500
c_1 = 2500
c_2 = 0.75
c_3 = 5
P = 10
# -
# In the .py file, we have defined the function for C and for I, as stated in the model description. We vectorize these two variables by storing them inside the vector X1. Since the IS curve has Y on the x-axis and i on the y-axis, we need to define both of those variables. This is also done in the .py file, and this is where the vectorization of C and I comes in handy: by vectorizing, we are able to run both variables inside the function for Y_IS and i_IS.
def IS(T, beta, Y, G_bar, I_bar, alpha, d):
    """Interest rate on the IS schedule at output level Y.

    Solves the goods-market equilibrium for i:
    i = (beta*(Y - T) - Y + G_bar + I_bar + alpha) / d
    Works element-wise when Y is a numpy array.
    """
    planned_spending = beta * (Y - T) - Y + G_bar + I_bar + alpha
    return planned_spending / d
# Once this is done, we can plot the IS schedule in a graph to illustrate what it looks like.
# +
# Evaluate the IS schedule on the output grid and plot i against Y.
iIS = IS(T, beta, Y, G_bar, I_bar, alpha, d)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(Y, iIS)
ax.set_xlabel('Output (Y)')
ax.set_ylabel('Interest rate (i)')
ax.set_title('IS schedule')
# Tick labels are hidden: only the qualitative shape of the curve matters here.
ax.axes.xaxis.set_ticklabels([])
ax.axes.yaxis.set_ticklabels([])
plt.show()
# + tags=[]
def LM(M_S0, P, Y, c_1, c_2, c_3):
    """Interest rate on the LM schedule at output level Y.

    Solves the money-market equilibrium for i:
    i = (P*(c_2*Y + c_1) - M_S0) / (P*c_3)
    Works element-wise when Y is a numpy array.
    """
    money_demand = P * (c_2 * Y + c_1)
    return (money_demand - M_S0) / (P * c_3)
# +
# Evaluate the LM schedule on the output grid and plot i against Y.
iLM = LM(M_S0, P, Y, c_1, c_2, c_3)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(Y, iLM)
ax.set_xlabel('Output (Y)')
ax.set_ylabel('Interest rate (i)')
ax.set_title('LM schedule')
# Tick labels are hidden: only the qualitative shape of the curve matters here.
ax.axes.xaxis.set_ticklabels([])
ax.axes.yaxis.set_ticklabels([])
plt.show()
# + tags=[]
def obj(Y):
    """IS-minus-LM interest-rate gap at output Y; its root is equilibrium output."""
    gap = IS(T, beta, Y, G_bar, I_bar, alpha, d) - LM(M_S0, P, Y, c_1, c_2, c_3)
    return gap
# Find the output level where the IS and LM curves intersect, starting the
# root search from Y = 10.
Y_guess = 10
res = optimize.root(obj, Y_guess, method = 'broyden1')
res.x
#lecture 11 does numerical optimization, avsnitt 7.1
# -
iLM = LM(M_S0, P, c_1, c_2, c_3, Y=res.x)
# +
import ipywidgets as widgets

# BUG FIX: `def plot_islm( )` was missing the colon and a body, which is a
# SyntaxError and prevented this cell from running at all.
def plot_islm():
    """Placeholder for an interactive IS-LM plot.

    TODO: draw the IS and LM schedules and wrap the figure with
    ipywidgets.interact so the tax rate T can be varied with a slider
    (see lecture 7, avsnitt 3.2.1 for the interactive-plot recipe).
    """
    pass
# + [markdown] tags=[]
# # Further analysis
# -
# Make detailed vizualizations of how your model changes with parameter values.
#
# Try to make an extension of the model.
# The extension of this model will feature net export, which means we will be modelling an open economy from now on.
# # Conclusion
# Add concise conclusion.
| modelproject/modelproject-copy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# <h1>Functions</h1>
# <h2>Calling a function</h2>
# Demonstration: calling built-in functions and passing arguments.
x=5
y=7
z=max(x,y) #max is the function. x and y are the arguments
print(z) #print is the function. z is the argument
# <h2>Installing libraries and importing functions</h2>
# !pip install easygui
#pip: python installer program
# # ! run the program from the shell (not from python)
# easygui: a python library for GUI widgets
import easygui #Imports easygui into the current namespace. We now have access to functions and objects in this library
easygui.msgbox("To be or not to be","What Hamlet elocuted") #msgbox is a function in easygui.
# <h2>Importing functions</h2>
#
import math #imports the math namespace into our program namespace
math.sqrt(34.23) #Functions in the math namespace have to be disambiguated
import math as m #imports the math namespace into our program namespace but gives it the name 'm'
m.sqrt(34.23) #Functions in the math namespace have to be disambiguated using the name 'm' rather than 'math'
from math import sqrt #imports the sqrt function into our program namespace. No other math functions are accessible
sqrt(34.23) #No disambiguation necessary
# <h3>Returning values from a function</h3>
# The <b>return</b> statement tells a function what to return to the calling program
def spam(x,y,k):
    """Return the larger of x and y divided by k (true division).

    Demonstrates that only the value named in `return` leaves the function.
    """
    larger = x if x > y else y
    return larger / k

spam(6,4,2)
# <h3>If no return statement, python returns None </h3>
# +
def eggs(x,y):
    # Computes a value but has no return statement, so calling the
    # function evaluates to None — the point of this demonstration.
    z = x/y
print(eggs(4,2)) # prints None
# -
# <h3>Returning multiple values</h3>
# +
def foo(x,y,z):
    """Return x and y ordered according to z ('ASCENDING'/'DESCENDING'),
    together with z itself; any other z returns (x, y, z) unchanged.

    Demonstrates returning multiple values as a tuple.
    """
    lo, hi = sorted((x, y))
    if z == "DESCENDING":
        return hi, lo, z
    if z == "ASCENDING":
        return lo, hi, z
    return x, y, z
# -
a,b,c = foo(4,2,"ASCENDING")
print(a,b,c)
# <h4>Python unpacks the returned value into each of a,b, and c. If there is only one identifier on the LHS, it won't unpack</h4>
a = foo(4,2,"ASCENDING") # a is the whole 3-tuple
print(a)
# <h4>If there is a mismatch between the number of identifiers on the LHS and the number of values returned, you'll get an error</h4>
# NOTE: the next line raises ValueError on purpose (3 values, 2 targets).
a,b = foo(4,2,"DESCENDING")
# <h2>Value assignment to arguments</h2>
# <li>Left to right
# <li>Unless explicitly assigned to the argument identifiers in the function definition
def bar(x,y):
    return x/y
bar(4,2) #x takes the value 4 and y takes the value 2
# Redefining bar rebinds the name to this new function object.
def bar(x,y):
    return x/y
bar(y=4,x=2) #x takes the value 2 and y takes the value 4 (Explicit assignment)
# <h2>A function can have function arguments</h2>
#
# +
def order_by(a,b,order_function):
    """Apply `order_function` (e.g. min or max) to a and b and return its result.

    Demonstrates that functions are values and can be passed as arguments.
    """
    chosen = order_function(a, b)
    return chosen

print(order_by(4,2,min))
print(order_by(4,2,max))
# -
| _intro/Functions_part_1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Q.1
# #### (a) Draw n = 300 real numbers uniformly at random on [0, 1], call them x1, . . . , xn.
# #### (b) Draw n real numbers uniformly at random on [− 1 , 1 ], call them ν1,...,νn.
# +
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
# Fix the RNG seed so the random draws are reproducible across runs.
np.random.seed(42)

n=300
# (a) BUG FIX: np.random.randint(0, 2, ...) draws integers from {0, 1}; the
# exercise asks for *real* numbers uniform on [0, 1], so use np.random.uniform.
X = np.random.uniform(0, 1, size = n).reshape((n,1))
# (b) Noise terms drawn uniformly on [-0.1, 0.1], as column vector (n, 1).
V = np.random.uniform(-.1,.1,size = n).reshape((n,1))
# -
# #### (c) Let di = sin(20xi) + 3xi + νi, i = 1, . . . , n. Plot the points (xi, di), i = 1, . . . , n.
# (c) d_i = sin(20 x_i) + 3 x_i + v_i, then scatter the (x_i, d_i) points.
# BUG FIX: the noise vector was created above as V, but the original line
# referenced an undefined name MU, raising NameError.
D = np.sin(20*X) + 3*X + V
plt.plot(X,D,'b-o')
plt.show()
# We will consider a 1 × N × 1 neural network with one input, N = 24 hidden neurons, and 1 output neuron. The network will thus have 3N + 1 weights including biases. Let w denote the vector of all these 3N + 1 weights. The output neuron will use the activation function φ(v) = v; all other neurons will use the activation function φ(v) = tanhv. Given input x, we use the notation f(x,w) to represent the network output.
# +
# define a function to calculate the MSE:
def msefunction (w):
    """Mean-squared error of the network with weight vector w.

    NOTE(review): incomplete — np.sum() is called with no arguments, which
    raises TypeError when this function is invoked; the squared prediction
    errors over the n samples still need to be supplied to np.sum.
    """
    mse =np.sum()/n
    return mse

# Number of hidden layer's neurons
N = 24
# All 3N+1 weights (including biases) initialised uniformly on [-1, 1].
W = np.random.uniform(-1,1,size = (3*N+1)).reshape((3*N+1,1))
eta = 1e-2       # learning rate
epsilon = 5e-4   # stopping tolerance
mse = list()     # per-epoch MSE history
j=0              # epoch counter
# -
# Given n, N, and $\epsilon$.
#
# Initialize $\eta \in \mathbb{R}, W \in \mathbb{R}^{(3N+1) \times 1}$.
#
# Initialize epoch = 0.
#
# Initialize mse(epoch) = 0 for epoch = 0, 1, ... .
#
# Do
# for $i=1 to n$ do (this loop is where we !!!)
#
#
| Hwk5&6/Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# # Iocane Powder
#
# ## Overview
#
# > Man in Black: All right. Where is the poison? The battle of wits has begun. It ends when you decide and we both drink, and find out who is right... and who is dead.
#
# The line above is from the perennial favorite 1980s movie adaptation of <NAME>man's *The Princess Bride*, wherein a mysterious hero sits down to a battle of wits with a villainous Sicilian kidnapper. The setup: two cups positioned between the two, one of which (purportedly) contains a colorless, odorless, lethal poison (viz., iocane powder). After a guess is made as to which cup contains the poison, both drink, and the winner is the one left standing.
#
# For this machine problem you will write a program that simulates multiple rounds of this battle of wits, allowing the player to repeatedly guess which cup is poisoned. The computer will "place" the poison before the player guesses, and
# will reveal who is right... and who is dead, afterwards.
#
# At the outset, the computer will always place the poison in cup 2 before letting the player guess, but after enough guesses have been entered the computer will start to place the poison based on the pattern of previous guesses so as to outsmart the player.
#
# Here's a sample game session (note how the silly player keeps alternating guesses, and that the computer catches on to this fact after a while):
#
# Where is the iocane powder: my cup (1) or yours (2)? 1
# Wrong! Ha! Never bet against a Sicilian!
#
# You died 1 times, and I drank the poison 0 times
#
# Where is the iocane powder: my cup (1) or yours (2)? 2
# Good guess! Ack! I drank the poison!
#
# You died 1 times, and I drank the poison 1 times
#
# Where is the iocane powder: my cup (1) or yours (2)? 1
# Wrong! Ha! Never bet against a Sicilian!
#
# You died 2 times, and I drank the poison 1 times
#
# Where is the iocane powder: my cup (1) or yours (2)? 2
# Good guess! Ack! I drank the poison!
#
# You died 2 times, and I drank the poison 2 times
#
# Where is the iocane powder: my cup (1) or yours (2)? 1
# Wrong! Ha! Never bet against a Sicilian!
#
# You died 3 times, and I drank the poison 2 times
#
# Where is the iocane powder: my cup (1) or yours (2)? 2
# Wrong! Ha! Never bet against a Sicilian!
#
# You died 4 times, and I drank the poison 2 times
#
# Where is the iocane powder: my cup (1) or yours (2)? 1
# Wrong! Ha! Never bet against a Sicilian!
#
# You died 5 times, and I drank the poison 2 times
#
# Where is the iocane powder: my cup (1) or yours (2)? 2
# Wrong! Ha! Never bet against a Sicilian!
#
# You died 6 times, and I drank the poison 2 times
#
# Where is the iocane powder: my cup (1) or yours (2)? 1
# Wrong! Ha! Never bet against a Sicilian!
#
# You died 7 times, and I drank the poison 2 times
#
#
# ## Implementation
#
# To keep track of the pattern of previous guesses, you will use a dictionary that maps a pattern (of fixed length) to a list of counts for the subsequent guess.
#
# For instance, imagine that the computer observes the player continuing to alternate guesses across ten separate attempts, like so: '1', '2', '1', '2', '1', '2', '1', '2', '1', '2'. If we are using a pattern detection length of three, then after the fourth guess we can create an entry in our dictionary that maps the key '121' to the list [0, 1], where the second value (1) in the list indicates that the player guessed '2' following the sequence '1', '2', '1'. After the fifth guess, we create the entry '212' → [1, 0], and after the sixth guess we update the value for '121' to [0, 2] (since the user guesses '2' again, after the sequence '1', '2', '1').
#
# Once the player enters a series of guesses that matches a previously seen pattern, the computer should place the poison in the cup that the player is *least likely to guess next*. When the player enters the next guess, the dictionary should be updated to reflect the actual guess.
#
# This means that if the computer has yet to see a given pattern of guesses, or when the counts are tied, it will have to place the poison "blindly" --- your implementation should simply place the poison furthest away from itself (cup 2).
#
# ### `record_guess`
#
# The first function you are to complete is `record_guess`. It will take the following arguments:
#
# - a dictionary to update (possibly containing previously recorded pattern → list mappings)
# - a pattern string
# - a guess -- which is either '1' or '2'.
#
# If necessary, the function will create a new entry for the pattern (if one doesn't already exist), then record the updated count for the guess. Since the dictionary is updated in place (i.e., mutated), the function will not return anything.
#
# Complete the function below, checking your implementation with the test cases that follow when you're ready. Note that in the future, the bulk of the description for functions we ask you to implement will simply be placed in the functions' docstrings, as below.
#
# *Hints: the [`int`](https://docs.python.org/3/library/functions.html#int) function can be used to convert strings to integers, and you might find the dictionary's [`setdefault`](https://docs.python.org/3/library/stdtypes.html?highlight=setdefault#dict.setdefault) method useful.*
# +
# NOTE(review): this cell is unrelated to the Iocane Powder writeup — it looks
# like leftover Fashion-MNIST loading code pasted into the wrong notebook.
from keras.utils import to_categorical
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split

data_train = pd.read_csv(r"input\fashion-mnist_train.csv")
data_test = pd.read_csv(r"input\fashion-mnist_test.csv")

img_rows, img_cols = 28, 28
input_shape = (img_rows, img_cols, 1)

# First CSV column is the label; the remaining 784 columns are pixels.
X = np.array(data_train.iloc[:, 1:])
#y = to_categorical(np.array(data_train.iloc[:, 0]))
y = np.array(data_train.iloc[:, 0])
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size = 0.33)

X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
# -
# Redundant: X_train already has this shape after the reshape above.
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
X_train.shape
# + deletable=false nbgrader={"checksum": "c33c5a4d938bd527487d20fc5715775a", "grade": false, "grade_id": "record_guess", "locked": false, "schema_version": 1, "solution": true}
from numpy import array
def record_guess(pattern_dict, pattern, guess):
    """Record that `guess` ('1' or '2') followed the guess sequence `pattern`.

    Lazily creates a [count_of_1s, count_of_2s] entry for `pattern`, then
    bumps the slot corresponding to `guess`. Mutates `pattern_dict` in place
    and also returns it for convenience.
    """
    if pattern not in pattern_dict:
        pattern_dict[pattern] = [0, 0]
    pattern_dict[pattern][int(guess) - 1] += 1
    return pattern_dict
# + deletable=false editable=false nbgrader={"checksum": "faad914be24ffa38575ec1639a046b1c", "grade": true, "grade_id": "test_record_guess", "locked": true, "points": 5, "schema_version": 1, "solution": false}
# (5 points)
# Instructor-provided tests: record_guess must create entries lazily and
# accumulate per-guess counts across repeated calls.
from unittest import TestCase
tc = TestCase()
d = {}
record_guess(d, '121', '1')
tc.assertDictEqual(d, {'121': [1, 0]})
record_guess(d, '222', '2')
record_guess(d, '121', '1')
tc.assertDictEqual(d, {'121': [2, 0], '222': [0, 1]})
record_guess(d, '122', '2')
record_guess(d, '121', '2')
record_guess(d, '222', '2')
tc.assertDictEqual(d, {'121': [2, 1], '122': [0, 1], '222': [0, 2]})
# -
# ### `next_placement`
#
# The next function you'll write will take a dictionary of pattern → counts mappings and a string representing the pattern of most recent guesses, and return the next best location (either '1' or '2') for the poison (i.e., to try and outwit the player). If the pattern hasn't been seen previously or the counts are tied, the function should return '2'.
# + deletable=false nbgrader={"checksum": "3c8aef1ed4490aa2487995b54eeed79a", "grade": false, "grade_id": "next_placement", "locked": false, "schema_version": 1, "solution": true}
def next_placement(pattern_dict, pattern):
    """Return the cup ('1' or '2') in which to place the poison.

    Places it opposite the guess the player has made more often after
    `pattern`; defaults to '2' for unseen patterns and for ties, per the spec.
    """
    # dict.get replaces the original bare `except:` clause, which would also
    # have swallowed unrelated exceptions (even KeyboardInterrupt).
    counts = pattern_dict.get(pattern)
    if counts is None:
        return '2'
    return '1' if counts[0] < counts[1] else '2'
# + deletable=false editable=false nbgrader={"checksum": "8200c6cd8dece561de8217774bd0347b", "grade": true, "grade_id": "test_next_placement", "locked": true, "points": 5, "schema_version": 1, "solution": false}
# (5 points)
# Instructor-provided tests: '2' for unseen patterns and ties, otherwise the
# cup opposite the player's statistically more frequent next guess.
from unittest import TestCase
tc = TestCase()
tc.assertEqual(next_placement({}, '121'), '2')
tc.assertEqual(next_placement({'121': [2, 0]}, '121'), '2')
tc.assertEqual(next_placement({'121': [2, 5]}, '121'), '1')
tc.assertEqual(next_placement({'121': [2, 5]}, '212'), '2')
tc.assertEqual(next_placement({'121': [5, 5]}, '121'), '2')
tc.assertEqual(next_placement({'121': [15, 5]}, '121'), '2')
tc.assertEqual(next_placement({'121': [2, 5],
                               '212': [1, 1]}, '212'), '2')
tc.assertEqual(next_placement({'121': [2, 5],
                               '212': [1, 3]}, '212'), '1')
# -
# ### `play_interactive`
#
# Now for the fun bit. The function `play_interactive` will take just one argument --- the length of patterns to use as keys in the dictionary --- and will start an interactive game session, reading either '1' or '2' from the player as guesses, using the functions you wrote above and producing output as shown in the sample game session at the beginning of this writeup. If the player types in any other input (besides '1' or '2'), the game should terminate.
#
# *Hint: the [`input`](https://docs.python.org/3/library/functions.html#input) function can be used to read input from the user as a string.*
# + deletable=false nbgrader={"checksum": "9121e502052a36232a304dc9f34892b4", "grade": true, "grade_id": "play_interactive", "locked": false, "points": 0, "schema_version": 1, "solution": true}
def play_interactive(pattern_length=4):
    """Run the interactive battle-of-wits game loop.

    Reads guesses ('1' or '2') from stdin, placing the poison with
    next_placement() and learning the player's guess patterns of length
    `pattern_length` with record_guess(). Any other input ends the game.
    """
    pattern = ''       # most recent pattern_length guesses, joined into a key
    hist = []          # rolling window of recent guesses
    pattern_dict = {}  # pattern -> [count of '1' guesses, count of '2' guesses]
    win = 0            # times the player found the poison (computer "drank")
    loss = 0           # times the player guessed wrong ("died")
    while True:
        # Place the poison before reading the player's guess.
        placement = next_placement(pattern_dict, pattern)
        guess = input("Where is the iocane powder: my cup (1) or yours (2)? ")
        # BUG FIX: the spec requires the game to terminate on any input other
        # than '1' or '2'; the original check was commented out, and was also
        # written as `guess != '1' or '2'`, which is always truthy.
        if guess not in ('1', '2'):
            break
        hist.append(guess)
        if placement == guess:
            print("Good guess! Ack! I drank the poison!")
            win += 1
        else:
            print("Wrong! Ha! Never bet against a Sicilian!")
            loss += 1
        print("You died {} times, and I drank the poison {} times".format(loss, win))
        # Once the window is full, learn this guess against the previous
        # pattern, then slide the window forward by one guess.
        if len(hist) == pattern_length:
            record_guess(pattern_dict, pattern, guess)
            pattern = ''.join(hist)
            hist.pop(0)
# -
# ### `play_batch`
#
# Finally, so that we can check your implementation against a lengthier sequence of guesses without having to play an interactive session, implement the `play_batch` function, which will take the `pattern_length` argument as your `play_interactive` function did, but will also take a sequence of guesses. The function will return the total numbers of wins and losses, as determined by the same algorithm as before.
# + deletable=false nbgrader={"checksum": "f492f4d864337bfd3fea87a03170beff", "grade": false, "grade_id": "play_batch", "locked": false, "schema_version": 1, "solution": true}
def play_batch(guesses, pattern_length=4):
    """Simulate a full game over the iterable `guesses` of '1'/'2' strings.

    Uses the same placement/learning algorithm as the interactive game and
    returns the tuple (wins, losses) from the player's point of view.
    """
    pattern = ''       # most recent pattern_length guesses, joined into a key
    hist = []          # rolling window of recent guesses
    pattern_dict = {}  # pattern -> [count of '1' guesses, count of '2' guesses]
    win = 0
    loss = 0
    for guess in guesses:
        # Place the poison based on what has been learned so far.
        placement = next_placement(pattern_dict, pattern)
        # Record the guess in the rolling window.
        hist.append(guess)
        if placement == guess:
            win += 1
        else:
            loss += 1
        # Once the window is full, record this guess under the previous
        # pattern, then rebuild `pattern` from the window (which includes the
        # current guess) and slide the window forward by one.
        if len(hist) == pattern_length:
            record_guess(pattern_dict, pattern, guess)
            pattern = ''.join(hist)
            hist.pop(0)
    return (win, loss)
# + deletable=false editable=false nbgrader={"checksum": "deb2e6a906f75d1473cf49f3409ad421", "grade": true, "grade_id": "test_play_batch", "locked": true, "points": 10, "schema_version": 1, "solution": false}
# (10 points)
# Instructor-provided batch tests, including a seeded random 10k-guess game.
from unittest import TestCase
tc = TestCase()
tc.assertEqual(play_batch(['1', '1', '1', '1', '1', '1'], 3), (0, 6))
tc.assertEqual(play_batch(['1', '2', '1', '2', '1', '2'], 3), (2, 4))
tc.assertEqual(play_batch(['1', '2', '1', '2', '1', '2'], 4), (3, 3))
tc.assertEqual(play_batch(['1', '2'] * 100, 5), (3, 197))
tc.assertEqual(play_batch(['1', '1', '2', '1', '2', '1'] * 100, 2), (398, 202))
tc.assertEqual(play_batch(['1', '1', '2', '1', '2', '1'] * 100, 3), (201, 399))
tc.assertEqual(play_batch(['1', '1', '2', '1', '2', '1'] * 100, 5), (4, 596))
import random
random.seed(0, version=2)
tc.assertEqual(play_batch((random.choice(['1', '2']) for _ in range(10000)), 4), (5047, 4953))
| CS331/Py/Iocane.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.6.0
# language: julia
# name: julia-1.6
# ---
# # Lista 1 - MS211
# ## <NAME>
# ## Ex 1:
# ### Considere um sistema de ponto flutuante com base 10, 4 dígitos para mantissa e 2 para o expoente.
# ### (a) Qual o maior número (estritamente) positivo representável?
# A representação de ponto flutuante consiste na utilização de uma mantissa com os n termos mais significativos do número e um expoente para definir o grau de grandeza além do sinal do número.
#
# Sabendo disso, como podemos representar apenas 4 dígitos na mantissa e o maior expoente é 2, o maior número representado nesse sistema é 0,9999 * 10^2 = 99,99.
# ### (b) E qual é o menor?
# O menor número representado por esse sistema é 0,1000 * 10^(-2) = 0,001000
# ### (c) Qual o épsilon dessa máquina?
# O épsilon da máquina é o maior número que somado a 1 ainda é interpretado como 1. No sistema proposto o menor número representado é 0,001 , logo, se somarmos qualquer número entre 0 e 0,0005 o sistema interpretará como uma adição nula. Por isso épsilon da máquina é 0,0005.
# ### (d) Dado x ∈ R dentro da faixa dada pelos ítens (a) e (b). Seja x̄ sua respresentação no sistema de ponto flutuante. Quais os máximos erro absoluto e erro relativo que podem ocorrer ao se tentar representar x por x̄?
# Considerando os números entre 0 e 99,99 e sabendo que a fórmula do erro absoluto é dada por $|x-\bar{x}|$ e a formula do erro relativo é dado por $\frac{|x-\bar{x}|}{x}$ temos que o maior erro absoluto é:
#
# $$|99,984999999999... - 99,99| = 0,005$$
#
# e o maior erro relativo é:
#
# $$\frac{|0,0010004999... - 0,001000|}{0,0010004999...} = 0,0005$$
using Plots
using LaTeXStrings
pyplot()
# +
# Relative error of the approximation x_simples with respect to x.
expr(x_simples, x) = abs.(x - x_simples) / x

# Absolute error of the approximation x_simples with respect to x.
expr1(x_simples, x) = abs.(x - x_simples)
# Sample each decade with an appropriate rounding precision so every value
# keeps 4 significant digits (simulating the 4-digit-mantissa system).
x_6 = LinRange{Float64}(0.001, 0.01, 200)
x_simples_6 = round.(x_6, digits = 6)
x_5 = LinRange{Float64}(0.01, 0.1, 100)
x_simples_5 = round.(x_5, digits = 5)
x_4 = LinRange{Float64}(0.1, 1, 100)
x_simples_4 = round.(x_4, digits = 4)
x_3 = LinRange{Float64}(1, 10, 100)
x_simples_3 = round.(x_3, digits = 3)
x_2 = LinRange{Float64}(10, 100, 200)
x_simples_2 = round.(x_2, digits = 2)
# Concatenate all decades: 200+100+100+100+200 = 700 sample points.
x_simples = vcat(x_simples_6,x_simples_5,x_simples_4,x_simples_3,x_simples_2)
x = vcat(x_6,x_5,x_4,x_3,x_2)
# Linear scan for the largest relative error and where it occurs.
# NOTE(review): assigning maior/x1/x2 inside the loop relies on notebook
# soft-scope; in a plain .jl script these would need `global`.
maior = 0
x1 = 0
x2 = 0
for i in 1:700
    valor = expr(x_simples[i],x[i])
    if valor > maior
        maior = valor
        x1 = x_simples[i]
        x2 = x[i]
    end
end
println(maior)
println(x1)
println(x2)
# -
# erro absoluto (absolute representation error over the sampled range)
plot(x_simples, expr1.(x_simples, x))
# erro relativo (relative representation error over the sampled range)
plot(x_simples, expr.(x_simples, x))
# ## Ex 2:
# ### Para cada uma das expressões abaixo diga em que situação pode ocorrer erro de cancelamento e reescreva a expressão para evitar esse problema nessa situação:
# ### (a) $\sqrt{1 + x} − 1$
# Essa função apresenta erro de cancelamento quando x -> 0 por se tratar de uma subtração de números muito próximos.
#
# Esse erro pode ser evitado com a seguinte refatoração
#
# $$\sqrt{1+x}-1 => (\sqrt{1+x}-1)(\sqrt{1+x}+1) = 1+x-1 = x => \sqrt{1+x}-1 = \frac{x}{\sqrt{1+x}+1}$$
# Relative error of the approximation `x` with respect to the reference `xh`.
erro_rel(x, xh) = abs(x - xh) / abs(xh)
# +
# Naive form of sqrt(1+x) - 1: loses digits to cancellation as x -> 0.
original(x) = sqrt(1 + x) - 1

# Rationalized form x / (sqrt(1+x) + 1): algebraically identical, no cancellation.
arrumada(x) = x / (sqrt(1 + x) + 1)
# BUG FIX: `10^(-10)` throws a DomainError in Julia (integer base raised to a
# negative integer exponent); use the float literal 1e-10 instead.
x = LinRange(1e-10, 200, 100)

dupla = original.(x)            # Float64 ("double") reference values

x_simples = map(Float32, x)     # single-precision inputs
simples = original.(x_simples)  # naive formula evaluated in Float32
arrumado = arrumada.(x_simples) # rewritten formula evaluated in Float32

erro1 = log10.(erro_rel.(simples, dupla))
erro2 = log10.(erro_rel.(arrumado, dupla))

plot(x, [-erro1,-erro2], xaxis=:log10,
    title=L"Digitos corretos em funcao de $x$",
    ylabel="Digitos corretos", xlabel=L"$x$", label = ["simples" "arrumada"])
# -
# ### (b) log x − log y
# Essa função apresenta erro de cancelamento quando x -> y pois se trata da subtração de dois números muito próximos.
#
#
# Esse erro pode ser evitado com a seguinte refatoração
#
# $$log(x) - log(y) = log(\frac{x}{y})$$
# +
# Naive difference of logs: cancellation when x is close to 100.
original(x) = log(x) - log(100)

# Equivalent single-log form log(x/100): avoids the subtraction entirely.
arrumada(x) = log(x / 100)
# Compare Float32 ("simples") against Float64 ("dupla") around x = 100,
# where log(x) - log(100) cancels.
x = LinRange(50, 150, 100)
dupla = original.(x)
x_simples = map(Float32, x)
simples = original.(x_simples)
arrumado = arrumada.(x_simples)
erro1 = log10.(erro_rel.(simples, dupla))
erro2 = log10.(erro_rel.(arrumado, dupla))
plot(x, [-erro1,-erro2],
    title=L"Digitos corretos em funcao de $x$",
    ylabel="Digitos corretos", xlabel=L"$x$", label = ["simples" "arrumada"])
# -
# ### (c) (1 − cos x)/ sin x
# Essa função apresenta erro de cancelamento quando x -> 0 ou x -> $2\cdot \pi \cdot k$ por se tratar de uma subtração de números muito próximos.
#
# Esse erro pode ser evitado com a seguinte refatoração
#
# $$\frac{(1-cos(x))}{(sin(x))} = \sqrt{\frac{(1-cos(x))^2}{(sin(x))^2}} = \sqrt{\frac{(1-cos(x))^2}{1-cos^2(x)}} = \sqrt{\frac{1-cos(x)}{1+cos(x)}} = \sqrt{\frac{\frac{1-cos(x)}{2}}{\frac{1+cos(x)}{2}}} = \frac{sin(x/2)}{cos(x/2)} = tan(x/2)$$
# +
# Naive (1 - cos x)/sin x: cancellation when cos(x) is close to 1.
original(x) = (1 - cos(x)) / sin(x)

# Half-angle identity (1 - cos x)/sin x = tan(x/2): no cancellation.
arrumada(x) = tan(x / 2)
# Compare Float32 ("simples") against Float64 ("dupla") over [1, 10].
x = LinRange(1, 10, 100)
dupla = original.(x)
x_simples = map(Float32, x)
simples = original.(x_simples)
arrumado = arrumada.(x_simples)
erro1 = log10.(erro_rel.(simples, dupla))
erro2 = log10.(erro_rel.(arrumado, dupla))
plot(x, [-erro1,-erro2],
    title=L"Digitos corretos em funcao de $x$",
    ylabel="Digitos corretos", xlabel=L"$x$", label = ["simples" "arrumada"])
# -
# ## Ex 3
# ### Dadas $f(x) = x(\sqrt{x+1} - \sqrt{x})$ e $g(x) = \frac{x}{\sqrt{x+1}+\sqrt{x}}$
#
# ### calcule f (500) e g(500) em um sistema de ponto flutuante com 6 dígitos significativos (na mantissa) e compare a qualidade dos resultados. Obs: você pode usar uma calculadora para ajudar nas contas, é claro, mas lembre de representar o resultado de cada operação com 6 dígitos antes de continuar para o próximo passo.
# Função f em ponto flutuante:
#
# $f(500) = 500(\sqrt{500+1}-\sqrt{500}) = 500(22,3830 - 22,3606) = 500 (0,0224) = 11,2$
#
# Função g em ponto flutuante:
#
# $g(500) = \frac{500}{\sqrt{500+1}+\sqrt{500}} = \frac{500}{22,3830+22,3606} = \frac{500}{44,7436} = 11.1748$
#
# Resultado esperado: 11.174755300747198473819744625...
#
# Podemos perceber que a função g(x) consegue 3 casas decimais a mais de precisão do que a função f(x) quando computadas em ponto flutuante.
# ## Ex 4
# ### Apresente a fórmula geral do polinômio de Taylor de grau n de cada uma das expressões abaixo em torno do x 0 :
# Formula da série de Taylor:
# $$ \sum_{n=0}^{\infty} \frac{f^{(n)}(x_0)}{n!}(x-x_0)^n$$
# ### (a) $f(x) = \frac{1}{(1-x)}$, $x_0 = 0$
# Calculando os 5 primeiros termos:
#
# n = 0 -> 1, n = 1 -> x, n = 2 -> x^2, n = 3 -> x^3, n = 4 -> x^4
#
# com isso percebemos que:
# $$\frac{1}{1-x} = \sum_{n=0}^{\infty} x^n$$
# ### (b) $f(x) = sin(x)$, $x_0 = 0$
# Calculando os 4 primeiros termos:
#
# n = 0 -> $x = \frac{x^{1+2*0}}{(1+2*0)!}$
#
# n = 1 -> $-\frac{x^3}{6} = -\frac{x^{1+2*1}}{(1+2*1)!}$
#
# n = 2 -> $\frac{x^5}{120} = \frac{x^{1+2*2}}{(1+2*2)!}$
#
# n = 3 -> $-\frac{x^7}{5040} = -\frac{x^{1+2*3}}{(1+2*3)!}$
#
# Com isso percebemos que:
#
# $$sin(x) = \sum_{n=0}^{\infty} \frac{(-1)^n \cdot x^{1+2n}}{(1+2n)!}$$
# ### (c) $f(x) = \sqrt{x}$, $x_0 = 1$
# Calculando os 5 primeiros termos:
#
# n = 0 -> $1 = \frac{(x-1)^0}{0!}$
#
# n = 1 -> $(x-1) = \frac{(x-1)^1}{1!}$
#
# n = 2 -> $\frac{(x-1)^2}{2} = \frac{(x-1)^2}{2!}$
#
# n = 3 -> $\frac{(x-1)^3}{6} = \frac{(x-1)^3}{3!}$
#
# n = 4 -> $\frac{(x-1)^4}{24} = \frac{(x-1)^4}{4!}$
#
# Com isso percebemos que:
#
# $$\sqrt{x} = \sum_{n=0}^{\infty} \frac{(x-1)^n}{n!}$$
# ### (d) $f(x) = e^x$, $x_0 = 1$
# Calculando os 6 primeiros termos:
#
# n = 0 -> $e = \frac{e(x-1)^0}{0!}$
#
# n = 1 -> $e (x - 1) = \frac{e(x-1)^1}{1!}$
#
# n = 2 -> $1/2 e (x - 1)^2 = \frac{e(x-1)^2}{2!}$
#
# n = 3 -> $1/6 e (x - 1)^3 = \frac{e(x-1)^3}{3!}$
#
# n = 4 -> $1/24 e (x - 1)^4 = \frac{e(x-1)^4}{4!}$
#
# n = 5 -> $1/120 e (x - 1)^5 = \frac{e(x-1)^5}{5!}$
#
# Com isso percebemos que:
#
# $$e^{x} = \sum_{n=0}^{\infty} \frac{e(x-1)^n}{n!}$$
# ## Ex 5
# ### Mostre que matematicamente $f''(x) ≈ \frac{f (x + h) − 2f (x) + f (x − h)}{h^2}$ em que h é número pequeno em relação a x. O erro (matemático) que é feito nessa aproximação é proporcional a h ou a h^2 ?
# Para a primeira derivada podemos partir da fórmula:
# $$f'(x) = lim_{x->0} \frac{f(x)-f(x-h)}{h} = lim_{x->0} \frac{f(x+h)-f(h)}{h}$$
#
# aplicando o polinômio de Taylor em $f(x-h)$ e $f(x+h)$ obtemos as seguintes expressões:
# $$f(x+h) = f(x) + h \cdot f'(x) + \frac{h^2 \cdot f''(x)}{2} + \frac{h^3 \cdot f'''(z)}{6} + \frac{h^4 \cdot f^{(4)}(z)}{24}$$
#
# $$f(x-h) = f(x) - h \cdot f'(x) + \frac{h^2 \cdot f''(x)}{2} - \frac{h^3 \cdot f'''(z)}{6} + \frac{h^4 \cdot f^{(4)}(z)}{24}$$
#
# Agora somando esses dois polinômios conseguimos obter:
#
# $$f(x+h) + f(x-h) =$$
# $$ f(x) + h \cdot f'(x) + \frac{h^2 \cdot f''(x)}{2} + \frac{h^3 \cdot f'''(z)}{6} + \frac{h^4 \cdot f^{(4)}(z)}{24}+ $$
# $$ f(x) - h \cdot f'(x) + \frac{h^2 \cdot f''(x)}{2} - \frac{h^3 \cdot f'''(z)}{6} + \frac{h^4 \cdot f^{(4)}(z)}{24}$$
# $$f(x+h) + f(x-h) = 2f(x) + h^2 \cdot f''(x) + \frac{h^4 \cdot f^{(4)}(z)}{12}$$
#
# isolado $f''(x)$ temos:
#
# $$f''(x) = \frac{f(x+h) - 2f(x) + f(x-h)}{h^2} - \frac{h^2 \cdot f^{(4)}(z)}{12}$$
#
# com isso percebemos que o erro feito ($\frac{h^2 \cdot f^{(4)}(z)}{12}$) é proporcionla a $h^2$
# ## Ex 6
# ### Seguindo os passos da análise feita em sala de aula, determine qual é o valor ótimo do h a ser usado para calular a derivada de f (x) = ln(x) por diferenças centradas para pontos no intervalor [24, 26]. Lembre que a fórmula depende dos valores possíveis do máximos de f'' e f' que são facilmente calculáveis nesse caso.Verifique se o valor que você calculou é de fato bom fazendo o gráfico do erro relativo com respeito a derivada exata para h = 10 −1 , 10 −2 , . . . , 10 −14 em x = 25.
# $$f(x + h) = f(x) + f'(x)h + \frac{f''(z)}{2}h^2$$
# $ z = (x,x+h) \in [24,26]$ onde $26 = x+H$ e $24 = x-H$
#
# para encontrar a melhor estimativa de h podemos usar a expressão:
# $$|h| = 2\sqrt{\frac{L_f}{L_{f''}}}\sqrt{E_{mac}}$$
#
# sendo que $L_{f''}$ é o módulo máximo de $f''$ em [x-H,x+H]
#
# e $L_f$ é o limite superior para os valores de f no intervalo de interesse
#
# como a função estudada ($ln(x)$) apresenta valores sempre crescentes no eixo positivo, seu limite superior no intervalo é dado por $ln(26) = 3,258$
#
# e como a derivada segunda de $ln(x)$ é $-\frac{1}{x^2}$ temos que seu módulo máximo para o intervalo é $|-\frac{1}{24^2}| = 0,0017$
#
# Sendo $E_{mac}$ dado pela célula de código abaixo, temos que o h ideal é:
#
# $$|h| = 2\sqrt{\frac{3,258}{0,0017}}\sqrt{2,220446049250313 \cdot 10^(-16)} = 1.2910507797... × 10^-6$$
# Machine epsilon of Float64 (used above to derive the optimal step h).
eps(1.0)
# +
ponto = 25 # ponto estudado (the point x at which the derivative is evaluated)
# aproximação da derivada (forward difference, defined below)
# Forward-difference approximation of f'(x) with step h.
# BUG FIX: the default step was written `1.29105078 * 10^(-6)`; in Julia,
# `10^(-6)` throws a DomainError (integer base, negative integer exponent),
# so evaluating the default crashed. Use the float literal instead.
function dif(f, x, h = 1.29105078e-6)
    return (f(x + h) - f(x)) / h
end
# Test function f and its exact derivative df.
f(x) = log(x)
df(x) = 1 / x

# Relative error of the approximation `aprox` against the exact value `exato`.
erro_rel(aprox, exato) = abs(aprox - exato) / abs(exato)
# Teste da aproximação: compare the forward-difference value (default step)
# against the exact derivative at `ponto`.
println("Valor exato da derivada: ", df(ponto))
println("Valor da aproximação: ", dif(f, ponto))
println("Erro relativo na aproximação: ", erro_rel(dif(f, ponto), df(ponto)))
# -
# A ordem de grandeza do erro é de 8 negativo, ou seja, temos uma precisão de 8 casas. Comparando com o gráfico abaixo podemos ver que está é uma boa precisão, mas temos valores de h melhores
# Sweep h = 10^e for e in [-1, -14] and plot the correct digits of the
# forward-difference approximation at `ponto`.
expoentes = LinRange(-1, -14, 100)
h = 10.0.^expoentes
aproxs = dif.(f, ponto, h)
plot(h, -log10.(erro_rel.(aproxs, df(ponto))),
    xaxis=:log10, label="", marker=:c,
    xlabel=L"h", ylabel="Casas corretas")
title!(L"Número de casas corretas em função de $h$")
# Para tentar melhorar o resultado, utilizarei a fórmula da diferença centrada:
# $$f'(x) \approx \frac{f(x + h) - f(x - h)}{2h}.$$
#
# onde o erro é limitado por $\frac{L_{f'''}}{6} h^2$. Ou seja, é proporcional a $h^2$ ao invés de $h$ e podemos estimar o h ideal pela fórmula: $$h = (3\epsilon_{mac})^{1/3} = 6,055454452... * 10^{-6}$$
#
# com isso, como mostrado abaixo, consegui uma precisão de 10 casas.
eps(1.0)^(1/3)
# +
# Centered-difference approximation of f'(x); default step eps(1.0)^(1/3).
dif_centrada(f, x, h = eps(1.0)^(1 / 3)) = (f(x + h) - f(x - h)) / (2h)
# Teste da aproximação: centered difference with the default step at `ponto`.
println("Valor exato da derivada: ", df(ponto))
println("Valor da aproximação: ", dif_centrada(f, ponto))
println("Erro relativo na aproximação: ", erro_rel(dif_centrada(f, ponto), df(ponto)))
# -
# Apesar do excelente resultado, ao aumentar o número de pontos no gráfico acima percebi que aparentemente há um valor de h ainda melhor, gerando 12 casas de precisão, mas apenas consegui encontrá-lo por força bruta.
# +
expoentes = LinRange(-1, -14, 100000)
# BUG FIX: `casas` was computed from the stale `aproxs` array (the 100
# forward-difference values from the earlier sweep), so it had 100 entries
# while `expoentes` has 100000, and `expoentes[melhor[2]]` mapped the winning
# index onto the wrong exponent. Recompute the step sizes and the
# *centered*-difference errors on the refined grid (the follow-up cell
# evaluates dif_centrada at the winner).
h = 10.0.^expoentes
aproxs = dif_centrada.(f, ponto, h)
casas = -log10.(erro_rel.(aproxs, df(ponto)))
melhor = findmax(casas)
println(melhor)

plot(h, casas,
    xaxis=:log10, label="", marker=:c,
    xlabel=L"h", ylabel="Casas corretas")
title!(L"Número de casas corretas em função de $h$")
# +
# Exponent at which the error was smallest in the sweep above.
top_exp = expoentes[melhor[2]]
# Teste da aproximação
println("Melhor expoente: ", top_exp)
println("Valor exato da derivada: ", df(ponto))
println("Valor da aproximação: ", dif_centrada(f, ponto, 10.0^top_exp))
println("Erro relativo na aproximação: ", erro_rel(dif_centrada(f, ponto, 10.0^top_exp), df(ponto)))
| lista01-nota700/lista01.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Setup
# +
# To support both python2 and python 3
from __future__ import division,print_function,unicode_literals
# Common imports
import numpy as np
import os
# to make this notebook's output stable across runs
np.random.seed(42)

# To plot pretty figures
# %matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
# Larger axis/tick labels for every figure in this notebook.
mpl.rc('axes', labelsize=14)
mpl.rc('xtick', labelsize=12)
mpl.rc('ytick', labelsize=12)

# Where to save the figures: <PROJECT_ROOT_DIR>/images/<CHAPTER_ID>/
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "classification"
def save_fig(fig_id, tight_layout=True):
    """Save the current matplotlib figure as a 300-dpi PNG under
    images/<CHAPTER_ID>/<fig_id>.png, creating the directory if needed.

    Parameters
    ----------
    fig_id : str
        File name (without extension) for the figure.
    tight_layout : bool
        Apply plt.tight_layout() before saving (default True).
    """
    out_dir = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID)
    # Fixed: plt.savefig raises FileNotFoundError when the target directory
    # does not exist yet, so create it up front.
    os.makedirs(out_dir, exist_ok=True)
    path = os.path.join(out_dir, fig_id + ".png")
    print("Saving figure", fig_id)
    if tight_layout:
        plt.tight_layout()
    plt.savefig(path, format='png', dpi=300)
# +
# MNIST
| 03_classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %matplotlib inline
import gym
import numpy as np
from matplotlib import pyplot as plt
# -
env = gym.envs.make("MountainCar-v0")

# +
env.reset()
# Render the initial state of the environment as an RGB image.
plt.figure()
plt.imshow(env.render(mode='rgb_array'))
# Apply action 0 for 10000 steps; the comprehension is used only for its
# side effect. NOTE(review): in MountainCar action 0 presumably means
# "push left" — confirm against the gym version in use.
[env.step(0) for x in range(10000)]
# Render the state after those steps for comparison.
plt.figure()
plt.imshow(env.render(mode='rgb_array'))
env.close()
| FA/MountainCar Playground.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Team Convolution Workbook: AlexNet Submission
#
# We have created a general structure for our notebooks. This structure is followed below and changes can be made to it; however, it provides the user with an easy starting point for testing different types of network architectures, data augmentation techniques, and hyperparameters. As is explained in the README.md, we have attempted to make our project as modular as possible by encapsulating any "significant" amount of code in .py files. This has allowed the members of the team who are investigating the best network architectures and hyperparameters to quickly make use of any new functionality that is implemented, and allows the notebook to be kept clean and simple.
#
# The pretrained AlexNet achieved Team Convolution's best results on the public leaderboard, with an Area Under the Receiver Operating Characteristic Curve (AUC-ROC) score of 0.99256. However, AlexNet fell behind ResNet when it came to the larger private dataset: the final AUC-ROC score on the private leaderboard was 0.98146, a fairly significant difference. From insights provided by <NAME> about the nature of the public scoring, it seems that our AlexNet was overfitting on the prediction of COVID-19 patients.
#
# The choice made to use AlexNet was largely based on its wide success as a CNN in classification of medical imaging.
# + colab={"base_uri": "https://localhost:8080/"} id="NT9tSCBhnwcA" outputId="2f5c8ac0-9981-426a-c116-6d7e258b9ea9" tags=[]
# !pip install pycm livelossplot
# !pip install wandb
# %pylab inline
# + colab={"base_uri": "https://localhost:8080/"} id="5hjt6feqn3e8" outputId="2a1ff094-0eb4-4027-956f-aa0862eb79e3"
import os
import random
import copy
import numpy as np
import torch
from torchvision import models, datasets, transforms
import torchvision.models as models
from torch.utils.data import DataLoader, Subset, SubsetRandomSampler, TensorDataset
import torch.nn as nn
import torch.nn.functional as F
from sklearn.model_selection import StratifiedShuffleSplit
import wandb
# our modules
#----------------------------------------------------------------------------------
from utils.helper_functions import * # Importing Helper functions from helper_function.py
from utils.train_tools import * # Training Functions
from utils.networks import * # Import Network Architectures we would like to use
from utils.write_results import *
from utils.wandb import * #Import Weights and Biases tracking tools
#----------------------------------------------------------------------------------
# Select the compute device: fall back to CPU unless CUDA is available.
device = 'cpu'
if torch.cuda.device_count() > 0 and torch.cuda.is_available():
    print("Cuda installed! Running on GPU!")
    device = 'cuda'
else:
    print("No GPU available!")

wandb.login()  # authenticate with Weights & Biases for experiment tracking
# + [markdown] id="tPo7grner5T8"
# ## Download data (google colab only, this may cause some path issues)
# + id="h1YSldEpjgoa"
# Run line below if you want to download data directly from kaggle
# kaggle_download("kaggle.json")
#Run lines below if you want to mount the google drive to obtain data
#from google.colab import drive
#drive.mount('/content/gdrive/')
#if zip file manually uploaded run this line
# ##!unzip gdrive/MyDrive/acse4-ml-2020.zip #change to correct file directory
# + [markdown] id="UsEUijFzEkmG"
# ## Load Images
#
# Here data is loaded using ImageFolder - https://pytorch.org/vision/stable/datasets.html#id30
#
# The user can set some transformations that they would like to apply by editing transform_train, and transform_test.
#
# A few pointers: the normalizing transform used for transform_test should be the same as the one in transform_train.
#
# Pre-processing functionalities need to be implemented i.e. data augmentation
# + id="D8w6LGmnnwgI"
# set normalization means and stdevs
means = [0.485, 0.456, 0.406]
stdevs = [0.229, 0.224, 0.225] # <----- these are the ones for ImageNet, we use them as we are transfer learning

# set how many channels you would like for the data
channels = 3

full_data_set = get_data("xray-data/xray-data/train", transform=None, rgb=True)
# load in validation and trainset, apply augmentations to the trainset and normalizations to both; makes the split using stratified shuffle split
trainset, validset, _ = get_data_augmented("xray-data/xray-data/train", "xray-data/xray-data/testouter", 0.15, means, stdevs)
# + [markdown] id="3X6flO-nFMAA"
# ### Some initial plots to check the dataset has been loaded correctly
# + id="YpJIEVYXgdbY"
visualise_rand(full_data_set) # this function visualises a set of random nine images from xray_train_dat
# + [markdown] id="_iEfPW5nFRnX"
# ----------------------------------------------------------
# # Network Training
#
# This section of the notebook is where models are trained. The training can be split into three sections which the user may like to run:
#
# ----------------------
# 1) Training the model with the validation and training splits.
# - User manually sets hyperparameters
# - Doesn't preform a grid search i.e. runs only for one set of parameters
# - Displays a LiveLoss plot of how the area under the curve (AUC-ROC) accuracy changes with epoch
# - outputs a model trained test set with the validation split
#
# ----------------------
# 2) Train the model using Weights and Biases
# - Allows the user to perform a grid search that is tracked from wandb
# - Lots of useful metrics displayed
# - Doesn't output a trained model
# - Some grid parameters (optimizer and model) need to be set from within utils/wandb.py
# - NOTE: this will take a while to run as it will preform a grid search!
#
# ----------------------
# 3) Train the model on the full data set (produce submission model)
# - User manually sets hyperparameters
# - This can be seen as a "complete model" i.e. one from which we would make predictions for submission
#
# ----------------------
#
# Before you carry on with this lets set the Network. See next two cells.
#
# NOTE: if you would like to simply reproduce our submission model skip to part 3 in the notebook (after running the next two cells)
# -
"""
This cell shows how you can import Networks that are custom made.
The Network classes for networks that are user defined can be found in utils/networks.py
The example here shows how you would import the LeNet5() architecture from /networks.py
list of available options:
LeNet5() <----- LeNet5 network with (1, 299, 299) input size
AlexNet() <----- AlexNet network with (1, 299, 299) input size
"""
#Net = LeNet5()
#print(Net) # <----- print the network to check it looks alright!
# +
"""
This cell shows how you can import networks directly from pytorch
see https://pytorch.org/vision/stable/models.html for list of available models
Can import either a trained or pretrained network using model.<model name>(pretrained=True)
The user will need to change the final layer of the network such that it has 4 output features
as is shown below!
"""
Net = models.alexnet(pretrained=True)
Net.fc = nn.Linear(in_features=4096, out_features=4)
print(Net)
# -
# ## 1) Training the model with the validation and test splits.
#
# ##### Set training hyperparameters for training with validation split
# + id="A-SsodF5irFv"
# Set Training Hyperparameters for training with validation split
lr = 1e-4
momentum = 0.5
weight_decay = 0
batch_size = 32
test_batch_size = 1000
n_epochs = 12
seed = 42

# set data loaders
train_loader = DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=0)
validation_loader = DataLoader(validset, batch_size=test_batch_size, shuffle=False, num_workers=0)

# set seed
set_seed(seed)

# # copy the model
model = copy.deepcopy(Net).to(device)

# pick an optimizer
# Fixed: pass the weight_decay hyperparameter through instead of the
# hard-coded 0, so changing `weight_decay` above actually takes effect
# (behavior is unchanged for the current value of 0).
optimizer = torch.optim.AdamW(model.parameters(), lr=lr, weight_decay=weight_decay)

# pick a loss criterion
criterion = nn.CrossEntropyLoss()
# -

model = train_model(model, optimizer, criterion, train_loader, validation_loader, channels, device, lr, momentum, batch_size, test_batch_size, n_epochs)
# ## 2) Wandb.ai grid search run
# ##### Set params for grid search
# Set Training Hyperparameters for training with validation split (these won't all be used in the grid search)
lr = 1e-4
momentum = 0.5
weight_decay = 0
batch_size = 32
test_batch_size = 1000
n_epochs = 20
seed = 42

# Hyperparameters that we may want to vary during our run; can be extended upon as needed.
# NOTE(review): weight_decay is not in this dict even though the grid sweep
# below varies it — confirm sweep_wandb supplies it separately.
hyperparameters = {
    "learning_rate": lr,
    "momentum": momentum,
    "batch_size": batch_size,
    "n_epochs": n_epochs,
    "test_batch_size": test_batch_size,
    "channels": channels
}
# Sweep configuration 1: value ranges for a Bayesian search; wandb itself chooses promising hyperparameter combinations to try.
bayes_sweep_config = {
    "name": "bayes",
    "method": "bayes",
    # maximize validation ROC-AUC, the competition metric
    "metric": {
        "name": "validation_roc_auc",
        "goal": "maximize"
    },
    "parameters": {
        "learning_rate": {
            "min": 1e-4,
            "max": 1e-1
        },
        "momentum": {
            "min": 0.0,
            "max": 0.99999
        }
    },
    # hyperband early stopping: abandon runs that look bad partway through
    "early_terminate": {
        "type": "hyperband",
        "max_iter": hyperparameters["n_epochs"] // 2,
        "s": 5
    }
}

# sets the grid sweep configuration
# this particular example will run a grid sweep for learning rate and weight_decay using the AdamW optimizer
grid_sweep_config = {
    "name": "grid_search",
    "method": "grid",
    "parameters": {
        "learning_rate": { # values over which the grid search iterates
            "values": [1e-4] # grid search values
        },
        "weight_decay": {
            "values": [0, 0.1]
        }
    },
    "early_terminate": {
        "type": "hyperband",
        "max_iter": hyperparameters["n_epochs"] // 2, # stops earlier if a run looks bad
        "s": 5
    }
}
# ##### Run the grid search
# +
# Fresh copy of the base network so sweep runs don't share trained weights.
model = copy.deepcopy(Net).to(device)

sweep_wandb(seed, model, device, trainset, validset, grid_sweep_config, hyperparameters, count=4, entity="4-2-convolution", project="AlexNet_augment_ph320")
# -
# ## 3) Start Training with validation and training set (produce submission model)
# ##### Set params for final training
# +
# Hyperparameters for the final (train + validation) model.
lr = 1e-4
momentum = 0.5
weight_decay = 0
batch_size = 32
test_batch_size = 1000
n_epochs = 12
seed = 42

model = copy.deepcopy(Net).to(device)
# Fixed: use the weight_decay hyperparameter rather than a hard-coded 0
# (identical behavior today since weight_decay == 0).
optimizer = torch.optim.AdamW(model.parameters(), lr=lr, weight_decay=weight_decay)
criterion = nn.CrossEntropyLoss()
# -
# ###### Start training on full dataset (i.e. train + validation)
# Train the submission model on train + validation combined.
train_loader = DataLoader(full_data_set, batch_size=batch_size, shuffle=True, num_workers=0) # loads the full dataset i.e. validation + training
model = train_on_full(model, optimizer, criterion, train_loader, channels,
                      device, lr, momentum, batch_size, test_batch_size, n_epochs)
# ----------------------------------------------
# ## This is where we evaluate and save the model
#
# Models will be saved in the /saved_models folder.
#
# csv files containing our predictions for the test set will be saved in the /submissioncsv folder
#
# The cell below is where the user can, and should, control the save file names from to avoid things being overwritten!
# #### Set save names for model and prediction/submission csv
#
# These need to be changed by the individual
# Output filenames - change these per experiment to avoid overwriting results.
model_save_name = "AlexNet_pretrain_final.pt" # name to save model under
submission_csv_name = "AlexNet_pretrain_final.csv" # name to save submission csv under

# saving the trained model weights (state_dict only, not the architecture)
torch.save(model.state_dict(), "saved_models/" + model_save_name)
from torch.utils.data import Dataset
from PIL import Image
class CustomDataSet(Dataset):
    """
    Dataset over the unlabeled test images.

    Files are enumerated explicitly as test_0.png, test_1.png, ... so that
    predictions line up with the numeric order Kaggle expects in the
    submission csv (a plain os.listdir would sort lexicographically and put
    test_10 before test_2).
    """

    def __init__(self, main_dir, transform):
        self.main_dir = main_dir
        self.transform = transform
        # Fixed: the original listed the directory twice and left an unused
        # local (`all_imgs`); count the files once instead.
        n_images = len(os.listdir(main_dir))
        self.total_imgs = ["test_" + str(i) + ".png" for i in range(n_images)]

    def __len__(self):
        return len(self.total_imgs)

    def __getitem__(self, idx):
        img_loc = os.path.join(self.main_dir, self.total_imgs[idx])
        image = Image.open(img_loc).convert("RGB")  # force 3 channels
        return self.transform(image)
# NOTE(review): `train_transform` is not defined anywhere in this notebook —
# presumably it should be the normalization transform used for training;
# confirm where it is meant to come from.
my_dataset = CustomDataSet("xray-data/xray-data/testouter/test", transform=train_transform)
# NOTE(review): drop_last=True silently drops the final partial batch of test
# images; test_loader is also unused below (get_predictions iterates my_dataset).
test_loader = DataLoader(my_dataset , batch_size=batch_size, shuffle=False, num_workers=4, drop_last=True)
# get predictions made by model on my_dataset
def get_predictions(model, X, device, channels=3):
    """
    Run `model` over an iterable of image tensors and return the predicted
    class index for each one.

    Parameters
    ----------
    model : torch.nn.Module
        Trained classifier; put into eval mode here.
    X : iterable of tensors
        Each tensor must be reshapeable to (channels, 299, 299).
    device : str or torch.device
        Device to run inference on.
    channels : int
        Number of input channels (default 3).

    Returns
    -------
    list of int
        Predicted class index per input, in iteration order.
    """
    model.eval()  # disable dropout / batch-norm updates
    y_preds = []
    # Fixed: the `channels` parameter was ignored and 3 was hard-coded in the
    # reshape; also hoist no_grad out of the loop instead of re-entering it
    # per item.
    with torch.no_grad():
        for x in X:
            x = x.to(device)
            logits = model(x.view(-1, channels, 299, 299))
            # log_softmax is monotone, so this argmax equals argmax of logits
            y_pred = F.log_softmax(logits, dim=1).max(1)[1]
            y_preds.append(y_pred.item())
    return y_preds
# Predict a class index for every test image, in filename order.
preds = get_predictions(model, my_dataset, device, channels=3)
print('done')

# write predictions to csv file, csv saved in /submission_csvs
write_file(submission_csv_name, preds)
| alexnet_submission.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# String basics: indexing is 0-based, negative indices count from the end.
a="coding blocks"
print(a)
print(a[2])
print(a[-3])
print(len(a))
# strings are not mutable
a[1]= "a"  # raises TypeError on purpose: str does not support item assignment
a="code"
print(a)
a
# Two equivalent ways to iterate a string: by index and directly by character.
for i in range(len(a)):
    print(a[i])
for c in a:
    print(c)
a="code"
b="mango"
print(b)
b("mango")  # raises TypeError on purpose: a str object is not callable
a="mango"
b="juice"
a+b
print(a*5)
print(a*b)  # raises TypeError on purpose: can't multiply two strings
# Slicing examples: a[start:stop]; empty result when the range is inverted.
a[2:4]
a[2:-1]
a[2:-4]
a[:-1]
a[-1:]
a[:-2]
a[-1:0]
a[-1:]
a[-1:-5]
a[-2:-1]
# membership: check whether a substring occurs in a larger string
'go' in a
# string formatting (old %-style)
age=int(input())
print("i am %d"%(age))
# Triple-quoted strings preserve embedded newlines.
para="""this
is a
para"""
print(para)
print(para)
para.split()
l=para.split()
l
# Case helpers return new strings; the original is unchanged.
fruit="Mango"
a=fruit.upper()
print(a)
b=fruit
print(b)
c=fruit.lower()
print(c)
# lstrip/rstrip/strip remove leading/trailing/both-side whitespace.
shake = " apple shake "
print(shake)
print(len(shake))
shake=shake.lstrip()
print(shake)
print(len(shake))
shake=shake.rstrip()
print(shake)
print(len(shake))
shake=" apple juice"
shake=shake.strip()
print(shake)
print(len(shake))
# Character-class predicates.
a="812"
a.isdigit()
# +
a.isalpha()
# -
a="12adad"
print(a.isalnum())
print(a.isspace())
# find / rfind / index: locate substrings
a= "i love have apple juice and i love having apple"
print(a.rfind("apple"))
print(a.find("apple",30,40))# find returns -1 when not found in the slice
print(a.index("apple"))
a=a.replace("apple","mango")
a
a.count("mango")
l=a.split()
print(l)
print("_".join(l))
name= "ayush"
print(name.capitalize())
| python-basics/strings.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/ananaymital/Cifar-10_CNN/blob/master/Cifar10_CNN.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="b-t82xu7KWCt" colab_type="code" colab={}
import pandas as pd
import pickle
import matplotlib.pyplot as plt
from PIL import Image
import numpy as np
# + [markdown] id="hziI5iTbp5n0" colab_type="text"
# Unpickling training and testing data
# + id="yhLRFvyrNXSZ" colab_type="code" colab={}
def unpickle(file):
    """Load a CIFAR-10 batch file and return its dict (keys are byte strings).

    encoding='bytes' is required because the batches were pickled with
    Python 2.
    """
    # Fixed: the local variable previously shadowed the builtin `dict`.
    with open(file, 'rb') as fo:
        batch = pickle.load(fo, encoding='bytes')
    return batch
def reshape_rgb_values(x, h=32, w=32):
    """
    Convert a flat CIFAR row (all R values, then all G, then all B) into an
    (h, w, 3) nested list of [r, g, b] pixels.

    Fixed: the channel-plane offsets were hard-coded to 1024/2048, so the
    h and w parameters only worked for 32x32 images; offsets are now h*w
    and 2*h*w (identical behavior for the default 32x32 case).
    """
    plane = h * w  # number of values in one colour plane
    pixels = [[x[i], x[i + plane], x[i + 2 * plane]] for i in range(x.shape[0] // 3)]
    return np.reshape(pixels, (h, w, 3)).tolist()
def create_image(x):
    """Render one flat CIFAR row as a 32x32 RGB image in the notebook and
    return the nested [r, g, b] pixel list."""
    w, h = 32, 32
    np_img = reshape_rgb_values(x, h, w)
    # NOTE(review): np_img is a nested Python list (reshape(...).tolist());
    # Image.fromarray normally expects a uint8 ndarray — confirm this call
    # actually works with a list input.
    img = Image.fromarray(np_img, 'RGB')
    display(img)  # `display` is the IPython notebook helper
    return np_img
# + [markdown] id="Qu9QHbsJqD6e" colab_type="text"
# Image classes
# + id="9v1Mhfl-dqqv" colab_type="code" outputId="3d57d937-ef1d-4f4c-ae3d-9eb6ad351962" colab={"base_uri": "https://localhost:8080/", "height": 221}
# Class-name metadata for the 10 CIFAR-10 labels.
meta_data_filename = "/content/drive/My Drive/ComputerVision/Image classification/data/cifar-10-batches-py/batches.meta"
meta_data = unpickle(meta_data_filename)
meta_data
# + [markdown] id="zsPLsXSWqcph" colab_type="text"
# Reading Train files
# + id="juas3qEvNZAz" colab_type="code" outputId="92028260-795a-442c-8c51-cc79d2c2c271" colab={"base_uri": "https://localhost:8080/", "height": 289}
def _batch_to_df(data):
    """Turn one unpickled CIFAR batch dict into a DataFrame with columns
    `data` (32x32x3 nested pixel lists), `labels` (int) and
    `label_names` (str, mapped via the module-level `meta_data`)."""
    df = pd.DataFrame()
    df["data"] = pd.Series(x for x in data.get(b"data"))
    df["labels"] = data.get(b"labels")
    labels = [i.decode("utf-8") for i in meta_data.get(b"label_names")]
    df["label_names"] = df.labels.map(dict(enumerate(labels)))
    df.data = df.data.apply(reshape_rgb_values)
    return df


def read_files(filename=None):
    """
    Load CIFAR-10 batches into a DataFrame.

    With filename=None the five training batches are concatenated; otherwise
    the single named batch file is loaded.

    Fixed: the batch-parsing logic was duplicated in both branches; it is
    now shared via the _batch_to_df helper (behavior unchanged).
    """
    if filename is not None:
        return _batch_to_df(unpickle(filename))
    final_df = pd.DataFrame()
    for i in range(1, 6):
        in_filename = "/content/drive/My Drive/ComputerVision/Image classification/data/cifar-10-batches-py/data_batch_{}".format(i)
        final_df = pd.concat([final_df, _batch_to_df(unpickle(in_filename))])
        print("loaded {}".format(in_filename))
    return final_df
# Load and preview the five concatenated training batches.
train = read_files()
train.head()
# + [markdown] id="2_I47F6QqkQW" colab_type="text"
# Reading Test file
# + id="AP_n6py_kGcg" colab_type="code" colab={}
test = read_files("/content/drive/My Drive/ComputerVision/Image classification/data/cifar-10-batches-py/test_batch")
# + [markdown] id="_tzS_wbLq0uv" colab_type="text"
# Normalizing pixels and splitting into X_train, y_train, X_test, y_test
# + id="kGMeEZC1FvAl" colab_type="code" outputId="eb6c36b9-c2b5-4964-8dfa-03d0d8de973f" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Scale pixel values into [0, 1] and convert labels to float arrays.
y_train = train.labels.astype("float32")
y_test = test.labels.astype("float32")
X_train = np.asarray(train.data.apply(lambda x: np.asarray(x)/255.0).values.tolist())
X_test = np.asarray(test.data.apply(lambda x: np.asarray(x)/255.0).values.tolist())
X_train.shape, y_train.shape, X_test.shape, y_test.shape
# + id="rX4SM9nOlBC-" colab_type="code" colab={}
from tensorflow.nn import fractional_max_pool
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Lambda
from tensorflow.keras.layers import Conv2D, MaxPooling2D, BatchNormalization
from tensorflow.keras.losses import SparseCategoricalCrossentropy
# + [markdown] id="dOcK4e6QrF5p" colab_type="text"
# Building CNN model
# + id="ME0G2bHLk1De" colab_type="code" colab={}
def frac_max_pool(x):
    # Fractional max pooling with ratio sqrt(2); fractional_max_pool returns
    # a tuple whose first element is the pooled tensor, so keep only that.
    ratio = 2 ** 0.5
    return fractional_max_pool(x, ratio)[0]
def build_cnn():
    """Build and compile the CIFAR-10 CNN.

    Architecture: four conv blocks (Conv2D -> fractional max pool -> ReLU ->
    BatchNorm) followed by a dense head with dropout, ending in 10 class
    logits.

    Fixed: the final Dense layer previously used activation="relu", but the
    loss is SparseCategoricalCrossentropy(from_logits=True), which expects
    raw (signed) logits; ReLU clipped all negative logits to zero. The final
    layer is now linear. The four identical conv blocks are also emitted
    from a loop instead of being copy-pasted.
    """
    model = Sequential()
    for filters in (32, 32, 64, 64):
        model.add(Conv2D(filters, kernel_size=3, padding="same"))
        model.add(Lambda(frac_max_pool))
        model.add(Activation("relu"))
        model.add(BatchNormalization())
    model.add(Flatten())
    model.add(Dense(512, activation="relu"))
    model.add(Dropout(0.2))
    model.add(BatchNormalization())
    model.add(Dense(10))  # linear logits for from_logits=True
    model.compile(optimizer='adam',
                  loss=SparseCategoricalCrossentropy(from_logits=True),
                  metrics=['accuracy'])
    return model
# + [markdown] id="R0mNx8HSrmZ8" colab_type="text"
# Deleting train model to reduce memory consumption
# + id="HzNwSkKbw4rD" colab_type="code" colab={}
# Free the training DataFrame to reduce memory consumption; catch only
# NameError (cell re-run after `train` was already deleted) instead of a
# bare except that would also hide unrelated errors.
try:
    del train
    # del test
except NameError:
    pass
# + [markdown] id="SA-GRzGEsT_U" colab_type="text"
# Fitting on X_train & y_train on 25 epochs and validating on X_test & y_test
# + id="TedDkbYspNHJ" colab_type="code" outputId="baa09719-5fc3-4a6b-f61d-44baf4c1461a" colab={"base_uri": "https://localhost:8080/", "height": 867}
# Train for 25 epochs, validating on the held-out test batch each epoch.
model = build_cnn()
history = model.fit(x=X_train, y=y_train, epochs=25, validation_data=(X_test, y_test))
# + id="QseO74WTyyaM" colab_type="code" outputId="c960dc75-725c-4248-a595-18c897bee328" colab={"base_uri": "https://localhost:8080/", "height": 867}
model.summary()
# + [markdown] id="7_5YtSB-s514" colab_type="text"
# Saving the model
# + id="X6Iba3TAjcU1" colab_type="code" colab={}
# Persist the full model (architecture + weights) in HDF5 format.
model.save("/content/drive/My Drive/ComputerVision/Image classification/cifar10_cnn.h5")

# + [markdown] id="knxTbTZ8s9pO" colab_type="text"
# Deleting model to be loaded later

# + id="eyVGPD6JkM66" colab_type="code" colab={}
del model

# + [markdown] id="30UXFJbGtD-F" colab_type="text"
# Loading saved model

# + id="G1qHtrJzj7nA" colab_type="code" colab={}
from tensorflow import keras
saved_model = keras.models.load_model('/content/drive/My Drive/ComputerVision/Image classification/cifar10_cnn.h5')
# + [markdown] id="TicwkJ8JtG5S" colab_type="text"
# Predicting on the test set to analyze confusion matrix and f1 scores
# + id="RZGTd5cOy0GV" colab_type="code" outputId="b7b205b7-4ee2-4066-dd78-b3bac8673d66" colab={"base_uri": "https://localhost:8080/", "height": 204}
# Per-image class scores from the reloaded model.
pred = saved_model.predict(X_test)
pred[:5]

# + id="UE3fbEVtzQ3n" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 119} outputId="43675653-a6a5-44ed-9020-74dc5300e225"
# Reduce each row to the index of its largest score (the predicted class).
# NOTE(review): pred.argmax(axis=1) would do the same without the DataFrame
# round-trip.
df_pred = pd.DataFrame(pred)
df_pred = df_pred.apply(lambda x: x.idxmax(), axis=1)
df_pred.head()

# + id="Aa1o3Lpa0meW" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 187} outputId="adbcf9b6-dc84-4dce-cf8d-b41d7368f98b"
from sklearn.metrics import confusion_matrix, f1_score, accuracy_score, classification_report
confusion_matrix(y_true=test.labels, y_pred=df_pred)

# + id="DSXR4S2UOBEg" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="5aacf0d8-7e07-4c31-fda2-0856e50d9b01"
accuracy_score(y_true=test.labels, y_pred=df_pred)

# + id="kogSAAlL1m7e" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="7003948c-0283-49db-c52a-3234f7beb5b0"
f1_score(y_true=test.labels, y_pred=df_pred, average="weighted")

# + id="LL3miq9bbp-I" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 306} outputId="4b451c22-c96d-464d-a811-d2890b843604"
print(classification_report(test.labels, df_pred))
| Cifar10_CNN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # System identification and parameter estimation
#
# This tutorial goes through the necessary steps to estimate parameters and identify the system described by the model.
# This is jupyter notebook, with text-cells and code-cells with python code.
#
# You can run the code cells and execute scripts there by pressing `Shift-Enter`.
#
# There are Ellipsis in the code `...` which means you need edit the cell and place some correct value,parameter,identifier based on the task.
#
# We will simulate model of glucose insulin regulation which describes well the real regulation within human body. [Type-1 diabetes][1] is a form of diabetes in which very little or no insulin is produced by the pancreas.
#
# [1]:https://en.wikipedia.org/wiki/Type_1_diabetes
# ## 1. PyFmi library
# First you need to import pyfmi library. If the following command doesn't produce any error you may continue.
from pyfmi import load_fmu
# if error happens, you need to install pyfmi, e.g. `conda install -c conda-forge pyfmi`.
# ## 2. Simulate FMU in Python
#
# The GlucoseInsulin model was exported as FMU and is next to this notebook file. (In OpenModelica OMEdit, open the desired model and select FMI -> Export FMU)
#
# Load the FMU:
#
# Load the exported FMU of the glucose-insulin experiment.
model = load_fmu('seminar11hw.GIExperiment.fmu')

# Create default simulation options; we will adjust them further below.
opts = model.simulate_options()
def simulate_relativebeta(relbeta=1, sopts=opts):
    """Simulate 3 days of the glucose-insulin FMU with the beta parameter
    scaled to `relbeta` times its nominal value, returning the result object."""
    model.reset()
    model.instantiate()
    # Nominal beta is 1430/3600 (presumably a per-hour value converted to
    # per-second); scale it by the requested relative factor.
    nominal_beta = 1430 / 3600
    model.set('glucoseInsulinRegulation.beta.k', relbeta * nominal_beta)
    # 3 days expressed in seconds.
    return model.simulate(final_time=3 * 24 * 60 * 60, options=sopts)
# Now try to simulate with default values (relbeta `1` and the already defined global `opts`)
r1 = simulate_relativebeta()
# Now define a function to plot a graph from simulation the `%matplotlib inline%` creates noninteractive image:
def plot(x, y, z=None):
    """Plot y (red) and, when given, z (blue) against x as a static inline figure."""
    # %matplotlib inline
    import matplotlib.pyplot as plt
    fig = plt.figure()
    series = [(y, 'r')]
    if z is not None:
        series.append((z, 'b'))
    for values, colour in series:
        plt.plot(x, values, colour)
    plt.show()
plot(r1['time'],r1['glucoseInsulinRegulation.out2'])

# Task 2.2 simulate with relative beta at 50% of norm and show results
# (the `...` below is the student exercise; the solution is in the comments at the end of the notebook)
r2 = simulate_relativebeta(...)
plot(r2['time'],r2['glucoseInsulinRegulation.out2'])
# ## 3. Glucose tolerance test data
#
# Glucose tolerance test is a medical test in which glucose is given and blood samples taken afterward to determine how quickly it is cleared from the blood. The test is usually used to test for diabetes, insulin resistance, impaired beta cell function.[Wikipedia][1]
#
# The intravenous glucose tolerance test (GTT) aids in evaluating the time to onset of diabetes among persons expressing anti-islet autoantibodies.[ScienceDirect][2]
#
# [1]: https://en.wikipedia.org/wiki/Glucose_tolerance_test
# [2]: https://www.sciencedirect.com/topics/medicine-and-dentistry/intravenous-glucose-tolerance-test
#
# Import the GTT data containing concentration of insulin measured Data were taken for 2 hours every 12 minutes. The intravenous infusion of glucose starts at time 12 minutes (720s) with glucose infusion rate 27.78 mg/s for 12 minutes.
import numpy
# Read the glucose-tolerance-test measurements (time, insulin concentration).
my_data_raw = numpy.genfromtxt('PatientInsulinConcentration.csv',delimiter=',')

# See what is inside
my_data_raw

# First column contains time, second column contains concentration. In order to have the same shape as simulation data we need to delete the first row with nan and transpose i.e. have rows instead of columns. see https://docs.scipy.org/doc/numpy/reference/generated/numpy.delete.html

my_data = numpy.delete(my_data_raw,...,...) # 3.1 delete first row with nan (student task)
my_data = numpy.transpose(my_data) # transpose columns to rows
my_data

plot(my_data[...],my_data[...]) # task 3.2 plot time on x axis and insulin concentration on y axis
# ## 4. Calibrate model with data
#
# In order to compare the simulation model with the data, we first need to calibrate the corresponding data.
#
# * 4.1 find appropriate time interval - recalculate number of simulation steps equally spread among simulation time 3 days
# * 4.2 find corresponding area to compare with data
#
# ### Set simulation interval to be equal to experiment interval
# +
# Student task: express both durations in seconds so their ratio gives the step count.
simulation_time = ... # Task 4.1 3 days - model simulation will get into steady state during this time
sample_duration = ... # 12 minutes - to be equal to sample interval of experiment data
number_of_steps = simulation_time // sample_duration;
number_of_steps
# -
opts["ncp"] = number_of_steps # number of communication points for the simulation options

# simulate with options
# Fixed: this notebook defines simulate_relativebeta, not simulate_relativenu,
# so the original calls raised NameError (the solution comments at the end of
# the notebook also use simulate_relativebeta).
r2 = simulate_relativebeta(sopts=opts)

r2['time'].size # see the size of row - number of steps being taken during simulation

# If this differs from the desired 360 + 1 initial step - then numerical solver probably stores event points. Disable storing extra event points [more info in jmodelica sources][1]
#
# [1]: https://jmodelica.org/pyfmi/_modules/pyfmi/fmi_algorithm_drivers.html

opts["CVode_options"]["store_event_points"] = False # by default it is true and
# CVODE solver will create additional event points during simulation to the requested ncp points

r3 = simulate_relativebeta()

r3['time'].size
# Now the simulation step count agrees with the experiment.
r3['time']
# ### select corresponding simulation part

# simulation infusion is made sometime at day 2, count index at day 2
index = ... // 720; # task 4.2 // is integer division (student task)
index

# +
# how many samples do we need to compare? size of sample data
# -

sample_size = my_data[0].size
sample_size

# see when the infusion is really made
r3['glucoseInsulinRegulation.in1'][index:index+sample_size]

# Thus we need to increase index by 9.

index2 = index + 9
# NOTE(review): this prints `index`, not the newly computed `index2` —
# confirm which one was intended.
print(index)
print(r3['glucoseInsulinRegulation.in1'][index2:index2+sample_size])
print(r3['glucoseInsulinRegulation.out2'][index2:index2+sample_size])
my_data[1]

print("Number of steps to simulate model calibrated to experiment data:",number_of_steps)
print("Index of simulation data coresponding to experiment data:",index2)
print("Number of samples to be compared:",sample_size)
# ## 5. Compare data and simulation
#
# We define function `compareshow` which will simulate the model with desired parameter value and creates a chart comparing the insulin concentration from model simulation with experimental data
def compareshow(relbeta,comp_index=index2,comp_size=sample_size,data=my_data):
    """Simulate the model at the given relative beta and plot the simulated
    insulin concentration against the experimental data over the compared
    window (the `...` is a student task; the solution is in the comments at
    the end of the notebook)."""
    # simulate with parameter
    res = simulate_relativebeta(...) # task 5.1 which argument to be passed here
    # get the times on the interval to be compared
    times = res['time'][comp_index:comp_index+comp_size]
    # get subset of data on the interval to be compared
    simdata = res['glucoseInsulinRegulation.out2'][comp_index:comp_index+comp_size]
    # set matplotlib to generate inline graphs other options can be 'notebook' etc.
    # %matplotlib inline
    import matplotlib.pyplot as plt
    fig = plt.figure()
    plt.plot(times,simdata,'b', label='simulated data for '+str(relbeta))
    plt.plot(times,data[1],'r--', label='experimental data')
    plt.legend(loc='lower right')
    plt.show()

# task 5.1a - add optional parameter which will simulate in other relbeta values and plot additional chart
# thus having 3-4 curves in chart
compareshow(1)

# 5.2 task try to compare with simulation parameter at 120%, 50%, 20%, 5%, 1%
compareshow(...)
# ## 6 Parameter sweep model/experiment comparison
#
# In this secion we will sweep parameter over some range and compute objective function $ y=\sum_{i=1}^n (s_i - d_i)^2 $
#
#
def my_diff(arr1,arr2):
    """Objective function: sum of squared differences between two equally
    sized arrays, y = sum_i (s_i - d_i)^2 (the `...` is a student task)."""
    # NOTE(review): the local name `sum` shadows the builtin of the same name.
    sum = 0
    for i in range(0,arr1.size):
        sum+=... #task 6.2 - place how to compute difference as per equation above
    return sum

# Test the implementation of the my_diff function with the test below. The result should be 2.
arr1 = numpy.array([1,1,1,1,1,1,1,1])
arr2 = numpy.array([1,1,1,1,2,1,1,2])
arr3 = my_diff(arr1,arr2)
print(arr3) # should output 2
if (arr3 == 2):
    print('You can continue')
else:
    print('Wrong implementation of my_diff. Repair it!')
# Now we define `sweep_parameter` function which will call simulation with different parameters and sum the difference.
def sweep_parameter(min, max, step, data=my_data):
    """Simulate the model for each relative-beta value in arange(min, max, step)
    and return a list of [parameter value, objective-function value] pairs,
    where the objective is the distance (my_diff) between the simulated and
    the measured insulin concentration over the compared window."""
    import numpy
    diffs = []
    for value in numpy.arange(min, max, step):
        res = simulate_relativebeta(value)
        simulated = res['glucoseInsulinRegulation.out2'][index2:index2 + sample_size]
        diffs.append([value, my_diff(simulated, data[1])])
    return diffs
# +
#task 6.2 do parameter sweep and simulate with different parameters first from 0.5 to 1 with step 0.1
diffs = sweep_parameter(...,...,...)
# -
# Now we can inspect the `diffs` array. It contains a list of pairs: the first element is the parameter value, the second is the value of the objective function.
diffs
# ## Find minimum
# Find parameter value, where objective function is minimal.
#
# E.g. sort the diffs array by the second element
diffs.sort(key=lambda x:x[1])
diffs
# Now the first element of diffs contains parameter and value of objective function which is minimal.
#
# Show the model simulation and data in chart:
compareshow(...) # task 6.3
compareshow(...) # task 6.4 show second best solution
# # Summary
#
# In the above exercise we
# * imported Pyfmi library and load FMU of the Modelica model
# * simulated the model with different parameters and plot the simulation result
# * read and plot experiment data from csv
# * calibrate the model - find simulation parameters - number of intervals and index of result file corresponding to the experiment data
# * compared experiment data with simulation data
# * do parameter sweep - simulation performed per each parameter value
# * compute difference between experimental data and simulation data - value of objective function
# * minimum of objective function corresponds to optimal parameter value based on experimental data
# * this process is sometimes called also matching the curve, parameter estimation, system identification
# * solution to tasks within the python code is in comments bellow
#
# * Last but not least - you learn something about glucose insulin regulation and type1 diabetes
#
#2.1 259200 res = model.simulate(final_time=259200,options = sopts)
#2.2 r2 = simulate_relativebeta(0.5)
#3.1 numpy.delete(array,0,0)
#3.2 plot(my_data[0],my_data[1])
#4.1 simulation_time = 3 * 24 * 60 * 60
# sample_duration = 12 * 60
#4.2 index = 2*24*60*60 // 720; # task 4.2
#5.1 res = simulate_relativebeta(relbeta) # task 5.1 which argument to be passed here
#5.2 task try to compare with simulation parameter to 120%, 50%, 20%, 5%, 1%
#compareshow(1.2)
#compareshow(0.5)
#compareshow(0.2)
#compareshow(0.05)
#compareshow(0.01)
#6.1 sum+=(arr1[i]-arr2[i])**2
#6.2 diffs = sweep_parameter(0.5,1,0.1)
#6.3 compareshow(diffs[0][0])
| Seminar8FmiIdentification/ModelicaIdentification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python
# language: python3
# name: python3
# ---
# # Only
#
# This tests for support of the only directive with an html only block to follow this (unless option **jupyter_allow_html_only** is True)
#
# there should be no html block above this text
#
# and one related to Jupyter
#
# this should show up
#
# there should be: **this should show up** above this text
| tests/base/ipynb/only.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] id="8d0bbac2"
# # Finetuning FastPitch for a new speaker
#
# In this tutorial, we will finetune a single speaker FastPitch (with alignment) model on 5 mins of a new speaker's data. We will finetune the model parameters only on new speaker's text and speech pairs.
#
# We will download the training data, then generate and run a training command to finetune Fastpitch on 5 mins of data, and synthesize the audio from the trained checkpoint.
#
# A final section will describe approaches to improve audio quality past this notebook.
# + [markdown] id="nGw0CBaAtmQ6"
# ## License
#
# > Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# >
# > Licensed under the Apache License, Version 2.0 (the "License");
# > you may not use this file except in compliance with the License.
# > You may obtain a copy of the License at
# >
# > http://www.apache.org/licenses/LICENSE-2.0
# >
# > Unless required by applicable law or agreed to in writing, software
# > distributed under the License is distributed on an "AS IS" BASIS,
# > WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# > See the License for the specific language governing permissions and
# > limitations under the License.
# + id="U7bOoIgLttRC"
"""
You can either run this notebook locally (if you have all the dependencies and a GPU) or on Google Colab.
Instructions for setting up Colab are as follows:
1. Open a new Python 3 notebook.
2. Import this notebook from GitHub (File -> Upload Notebook -> "GITHUB" tab -> copy/paste GitHub URL)
3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select "GPU" for hardware accelerator)
4. Run this cell to set up dependencies.
"""
BRANCH = 'main'
# # If you're using Google Colab and not running locally, uncomment and run this cell.
# # !apt-get install sox libsndfile1 ffmpeg
# # !pip install wget unidecode
# # !python -m pip install git+https://github.com/NeMo/NeMo.git@$BRANCH#egg=nemo_toolkit[tts]
# + [markdown] id="2502cf61"
# ## Downloading Data
# ___
# + [markdown] id="81fa2c02"
# Download and untar the data.
#
# The data contains a 5 minute subset of audio from speaker 6097 from the HiFiTTS dataset.
# + id="VIFgqxLOpxha"
# !wget https://nemo-public.s3.us-east-2.amazonaws.com/6097_5_mins.tar.gz # Contains 10MB of data
# !tar -xzf 6097_5_mins.tar.gz
# + [markdown] id="gSQqq0fBqy8K"
# Looking at manifest.json, we see a standard NeMo json that contains the filepath, text, and duration. Please note that manifest.json only contains the relative path.
#
# ```
# {"audio_filepath": "audio/presentpictureofnsw_02_mann_0532.wav", "text": "not to stop more than ten minutes by the way", "duration": 2.6, "text_no_preprocessing": "not to stop more than ten minutes by the way,", "text_normalized": "not to stop more than ten minutes by the way,"}
# ```
#
# Let's take 2 samples from the dataset and split it off into a validation set. Then, split all other samples into the training set.
# + id="B8gVfp5SsuDd"
# !cat ./6097_5_mins/manifest.json | tail -n 2 > ./6097_manifest_dev_ns_all_local.json
# !cat ./6097_5_mins/manifest.json | head -n -2 > ./6097_manifest_train_dur_5_mins_local.json
# !ln -s ./6097_5_mins/audio audio
# + [markdown] id="ef75d1d5"
# ## Finetuning FastPitch
# ___
#
#
# + [markdown] id="lhhg2wBNtW0r"
# Let's first download the pretrained checkpoint that we want to finetune from. NeMo will save checkpoints to ~/.cache, so let's move that to our current directory.
#
# *Note: please, check that `home_path` refers to your home folder. Otherwise, change it manually.*
# -
home_path = !(echo $HOME)
home_path = home_path[0]
print(home_path)
# + id="LggELooctXCT"
import os
import json
import torch
import IPython.display as ipd
from matplotlib.pyplot import imshow
from matplotlib import pyplot as plt
from nemo.collections.tts.models import FastPitchModel
FastPitchModel.from_pretrained("tts_en_fastpitch")
from pathlib import Path
nemo_files = [p for p in Path(f"{home_path}/.cache/torch/NeMo/").glob("**/tts_en_fastpitch_align.nemo")]
print(f"Copying {nemo_files[0]} to ./")
Path("./tts_en_fastpitch_align.nemo").write_bytes(nemo_files[0].read_bytes())
# + [markdown] id="6c8b13b8"
# To finetune the FastPitch model on the above created filelists, we use `examples/tts/fastpitch_finetune.py` script to train the models with the `fastpitch_align.yaml` configuration.
#
# Let's grab those files.
# + id="3zg2H-32dNBU"
# !wget https://raw.githubusercontent.com/nvidia/NeMo/$BRANCH/examples/tts/fastpitch_finetune.py
# !mkdir -p conf && cd conf && wget https://raw.githubusercontent.com/nvidia/NeMo/$BRANCH/examples/tts/conf/fastpitch_align.yaml && cd ..
# + [markdown] id="12b5511c"
# We can now train our model with the following command:
#
# **NOTE: This will take about 50 minutes on colab's K80 GPUs.**
#
# `python fastpitch_finetune.py --config-name=fastpitch_align.yaml train_dataset=./6097_manifest_train_dur_5_mins_local.json validation_datasets=./6097_manifest_dev_ns_all_local.json +init_from_nemo_model=./tts_en_fastpitch_align.nemo +trainer.max_steps=1000 ~trainer.max_epochs trainer.check_val_every_n_epoch=25 prior_folder=./Priors6097 model.train_ds.dataloader_params.batch_size=24 model.validation_ds.dataloader_params.batch_size=24 exp_manager.exp_dir=./ljspeech_to_6097_no_mixing_5_mins model.n_speakers=1 model.pitch_avg=121.9 model.pitch_std=23.1 model.pitch_fmin=30 model.pitch_fmax=512 model.optim.lr=2e-4 ~model.optim.sched model.optim.name=adam trainer.devices=1 trainer.strategy=null`
# + id="reY1LV4lwWoq"
!(python fastpitch_finetune.py --config-name=fastpitch_align.yaml \
train_dataset=./6097_manifest_train_dur_5_mins_local.json \
validation_datasets=./6097_manifest_dev_ns_all_local.json \
+init_from_nemo_model=./tts_en_fastpitch_align.nemo \
+trainer.max_steps=1000 ~trainer.max_epochs \
trainer.check_val_every_n_epoch=25 \
prior_folder=./Priors6097 \
model.train_ds.dataloader_params.batch_size=24 \
model.validation_ds.dataloader_params.batch_size=24 \
exp_manager.exp_dir=./ljspeech_to_6097_no_mixing_5_mins \
model.n_speakers=1 model.pitch_avg=121.9 model.pitch_std=23.1 \
model.pitch_fmin=30 model.pitch_fmax=512 model.optim.lr=2e-4 \
~model.optim.sched model.optim.name=adam trainer.devices=1 trainer.strategy=null \
)
# + [markdown] id="j2svKvd1eMhf"
# Let's take a closer look at the training command:
#
# * `python fastpitch_finetune.py --config-name=fastpitch_align.yaml`
# * --config-name tells the script what config to use.
#
# * `train_dataset=./6097_manifest_train_dur_5_mins_local.json validation_datasets=./6097_manifest_dev_ns_all_local.json`
#   * We tell the model what manifest files we want to train and eval on.
#
# * `+init_from_nemo_model=./tts_en_fastpitch_align.nemo`
# * We tell the script what checkpoint to finetune from.
#
# * `+trainer.max_steps=1000 ~trainer.max_epochs trainer.check_val_every_n_epoch=25`
# * For this experiment, we need to tell the script to train for 1000 training steps/iterations. We need to remove max_epochs using `~trainer.max_epochs`.
#
# * `prior_folder=./Priors6097 model.train_ds.dataloader_params.batch_size=24 model.validation_ds.dataloader_params.batch_size=24`
# * Some dataset parameters. The dataset does some online processing and stores the processing steps to the `prior_folder`.
#
# * `exp_manager.exp_dir=./ljspeech_to_6097_no_mixing_5_mins`
# * Where we want to save our log files, tensorboard file, checkpoints, and more
#
# * `model.n_speakers=1`
# * The number of speakers in the data. There is only 1 for now, but we will revisit this parameter later in the notebook
#
# * `model.pitch_avg=121.9 model.pitch_std=23.1 model.pitch_fmin=30 model.pitch_fmax=512`
# * For the new speaker, we need to define new pitch hyperparameters for better audio quality.
# * These parameters work for speaker 6097 from the HiFiTTS dataset
# * For speaker 92, we suggest `model.pitch_avg=214.5 model.pitch_std=30.9 model.pitch_fmin=80 model.pitch_fmax=512`
# * fmin and fmax are hyperparameters to librosa's pyin function. We recommend tweaking these per speaker.
# * After fmin and fmax are defined, pitch mean and std can be easily extracted
#
# * `model.optim.lr=2e-4 ~model.optim.sched model.optim.name=adam`
# * For fine-tuning, we lower the learning rate
# * We use a fixed learning rate of 2e-4
# * We switch from the lamb optimizer to the adam optimizer
#
# * `trainer.devices=1 trainer.strategy=null`
# * For this notebook, we default to 1 gpu which means that we do not need ddp
# * If you have the compute resources, feel free to scale this up to the number of free gpus you have available
# * Please remove the `trainer.strategy=null` section if you intend on multi-gpu training
# + [markdown] id="c3bdf1ed"
# ## Synthesize Samples from Finetuned Checkpoints
#
# ---
#
#
# + [markdown] id="f2b46325"
# Once we have finetuned our FastPitch model, we can synthesize the audio samples for given text using the following inference steps. We use a HiFiGAN vocoder trained on LJSpeech.
#
# We define some helper functions as well.
# + id="886c91dc"
from nemo.collections.tts.models import HifiGanModel
from nemo.collections.tts.models import FastPitchModel
vocoder = HifiGanModel.from_pretrained("tts_hifigan")
vocoder = vocoder.eval().cuda()
# + id="0a4c986f"
def infer(spec_gen_model, vocoder_model, str_input, speaker = None):
    """
    Synthesize a spectrogram and audio waveform from a text string.

    Arguments:
    spec_gen_model -- Instance of FastPitch model
    vocoder_model -- Instance of a vocoder model (HiFiGAN in our case)
    str_input -- Text input for the synthesis
    speaker -- Speaker number (in the case of a multi-speaker model -- in the mixing case)
    Returns:
    spectrogram, waveform of the synthesized audio.
    """
    with torch.no_grad():
        tokens = spec_gen_model.parse(str_input)
        speaker_tensor = None
        if speaker is not None:
            speaker_tensor = torch.tensor([speaker]).long().cuda()
        spectrogram = spec_gen_model.generate_spectrogram(tokens=tokens, speaker = speaker_tensor)
        audio = vocoder_model.convert_spectrogram_to_audio(spec=spectrogram)
    # Move results to the CPU as numpy arrays and drop the batch dimension
    # from the spectrogram if present.
    if spectrogram is not None:
        if isinstance(spectrogram, torch.Tensor):
            spectrogram = spectrogram.to('cpu').numpy()
        if len(spectrogram.shape) == 3:
            spectrogram = spectrogram[0]
    if isinstance(audio, torch.Tensor):
        audio = audio.to('cpu').numpy()
    return spectrogram, audio
def get_best_ckpt(experiment_base_dir, new_speaker_id, duration_mins, mixing_enabled, original_speaker_id):
    """
    Gives the model checkpoint paths of an experiment we ran.

    Arguments:
    experiment_base_dir -- Base experiment directory (specified on top of this notebook as exp_base_dir)
    new_speaker_id -- Speaker id of new HiFiTTS speaker we finetuned FastPitch on
    duration_mins -- total minutes of the new speaker data
    mixing_enabled -- True or False depending on whether we want to mix the original speaker data or not
    original_speaker_id -- speaker id of the original HiFiTTS speaker
    Returns:
    List of (val_error, path) tuples sorted by validation error, path of the "last" checkpoint
    (None if no "last" checkpoint was found).
    """
    mixing_tag = "mixing" if mixing_enabled else "no_mixing"
    exp_dir = "{}/{}_to_{}_{}_{}_mins".format(
        experiment_base_dir, original_speaker_id, new_speaker_id, mixing_tag, duration_mins
    )

    ckpt_candidates = []
    last_ckpt = None
    for root, dirs, files in os.walk(exp_dir):
        for file in files:
            if not file.endswith(".ckpt"):
                continue
            full_path = os.path.join(root, file)
            if "last" in file:
                last_ckpt = full_path
            # Only parse a validation error when the filename actually carries
            # one. Previously a bare "last.ckpt" (no "v_loss=" marker) raised
            # an IndexError on the split below.
            if "v_loss=" in file:
                val_error = float(file.split("v_loss=")[1].split("-epoch")[0])
                ckpt_candidates.append((val_error, full_path))
    ckpt_candidates.sort()
    return ckpt_candidates, last_ckpt
# + [markdown] id="0153bd5a"
# Specify the speaker id, duration mins and mixing variable to find the relevant checkpoint from the exp_base_dir and compare the synthesized audio with validation samples of the new speaker.
# + id="8901f88b"
new_speaker_id = 6097
duration_mins = 5
mixing = False
original_speaker_id = "ljspeech"
_ ,last_ckpt = get_best_ckpt("./", new_speaker_id, duration_mins, mixing, original_speaker_id)
print(last_ckpt)
spec_model = FastPitchModel.load_from_checkpoint(last_ckpt)
spec_model.eval().cuda()
_speaker=None
if mixing:
_speaker = 1
num_val = 2
manifest_path = os.path.join("./", "{}_manifest_dev_ns_all_local.json".format(new_speaker_id))
val_records = []
with open(manifest_path, "r") as f:
for i, line in enumerate(f):
val_records.append( json.loads(line) )
if len(val_records) >= num_val:
break
for val_record in val_records:
print ("Real validation audio")
ipd.display(ipd.Audio(val_record['audio_filepath'], rate=22050))
print ("SYNTHESIZED FOR -- Speaker: {} | Dataset size: {} mins | Mixing:{} | Text: {}".format(new_speaker_id, duration_mins, mixing, val_record['text']))
spec, audio = infer(spec_model, vocoder, val_record['text'], speaker = _speaker)
ipd.display(ipd.Audio(audio, rate=22050))
# %matplotlib inline
#if spec is not None:
imshow(spec, origin="lower", aspect = "auto")
plt.show()
# + [markdown] id="ge2s7s9-w3py"
# ## Improving Speech Quality
# ___
#
# We see that from fine-tuning FastPitch, we were able to generate audio in a male voice but the audio quality is not as good as we expect. We recommend two steps to improve audio quality:
#
# * Finetuning HiFiGAN
# * Adding more data
#
# Both of these steps are outside the scope of the notebook due to the limited compute available on colab.
#
# ### Finetuning HiFiGAN
# From the synthesized samples, there might be audible audio crackling. To fix this, we need to finetune HiFiGAN on the new speaker's data. HiFiGAN shows improvement using synthesized mel spectrograms, so the first step is to generate mel spectrograms with our finetuned FastPitch model.
#
# ```python
# # Get records from the training manifest
# manifest_path = "./6097_manifest_train_dur_5_mins_local.json"
# records = []
# with open(manifest_path, "r") as f:
# for i, line in enumerate(f):
# records.append(json.loads(line))
#
# # Generate a spectrogram for each item
# for i, r in enumerate(records):
# with torch.no_grad():
# parsed = parser_model.parse(r['text'])
# spectrogram = spec_gen_model.generate_spectrogram(tokens=parsed)
# if isinstance(spectrogram, torch.Tensor):
# spectrogram = spectrogram.to('cpu').numpy()
# if len(spectrogram.shape) == 3:
# spectrogram = spectrogram[0]
# np.save(f"mel_{i}", spectrogram)
# r["mel_filepath"] = f"mel_{i}.npy"
#
# # Save to a new json
# with open("hifigan_train_ft.json", "w") as f:
# for r in records:
# f.write(json.dumps(r) + '\n')
#
# # Please do the same for the validation json. Code is omitted.
# ```
#
# We can then finetune hifigan similarly to fastpitch using NeMo's [hifigan_finetune.py](https://github.com/NVIDIA/NeMo/blob/main/examples/tts/hifigan_finetune.py) and [hifigan.yaml](https://github.com/NVIDIA/NeMo/blob/main/examples/tts/conf/hifigan/hifigan.yaml):
#
# `python examples/tts/hifigan_finetune.py --config_name=hifigan.yaml model.train_ds.dataloader_params.batch_size=32 model.max_steps=1000 ~model.sched model.optim.lr=0.0001 train_dataset=./hifigan_train_ft.json validation_datasets=./hifigan_val_ft.json exp_manager.exp_dir=hifigan_ft +init_from_nemo_model=tts_hifigan.nemo trainer.check_val_every_n_epoch=10`
#
# ### Improving TTS by Adding More Data
# We can add more data in two ways. They can be combined for the best effect:
#
# * Add more training data from the new speaker
#
# The entire notebook can be repeated from the top after a new .json is defined for the additional data. Modify your finetuning commands to point to the new json. Be sure to increase the number of steps as more data is added to both the fastpitch and hifigan finetuning. We recommend 1000 steps per minute of audio for fastpitch and 500 steps per minute of audio for hifigan.
#
# * Mix new speaker data with old speaker data
#
# We recommend to train fastpitch using both old speaker data (LJSpeech in this notebook) and the new speaker data. In this case, please modify the .json when finetuning fastpitch to include speaker information:
#
# `
# {"audio_filepath": "new_speaker.wav", "text": "sample", "duration": 2.6, "speaker": 1}
# {"audio_filepath": "old_speaker.wav", "text": "LJSpeech sample", "duration": 2.6, "speaker": 0}
# `
# 5 hours of data from the old speaker should be sufficient. Since we should have less data from the new speaker, we need to ensure that the model sees a similar amount of new data and old data. For each sample from the old speaker, please add a sample from the new speaker in the .json. The samples from the new speaker will be repeated.
#
# Modify the fastpitch training command to point to the new training and validation .jsons, and update `model.n_speakers=1` to `model.n_speakers=2`. Ensure the pitch statistics correspond to the new speaker.
#
# For HiFiGAN finetuning, the training should be done on the new speaker data.
| tutorials/tts/FastPitch_Finetuning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# + _cell_guid="a3cb0ee3-7bca-4b2b-8a27-be198d18818e" _uuid="075ab0f3fc310e293828b3681f1d80642f88c106" language="html"
# <style>
# .h1_cell, .just_text {
# box-sizing: border-box;
# padding-top:5px;
# padding-bottom:5px;
# font-family: "Times New Roman", Georgia, Serif;
# font-size: 125%;
# line-height: 22px; /* 5px +12px + 5px */
# text-indent: 25px;
# background-color: #fbfbea;
# padding: 10px;
# }
#
# hr {
# display: block;
# margin-top: 0.5em;
# margin-bottom: 0.5em;
# margin-left: auto;
# margin-right: auto;
# border-style: inset;
# border-width: 2px;
# }
# </style>
# -
# <h1>
# <center>
# Module 5 - Fun with document vectors
# </center>
# </h1>
# <div class=h1_cell>
# <p>
# I want to continue to explore using the values in bag-of-words to build vectors. The general idea is that we will generate a vector for EAP, a vector for HPL and a vector for MWS. How do we get these vectors? Simple. We take a column from bag-of-words. Before going further, let's read in bag-of-words from week 4.
# </div>
# +
#I am using dropbox so got the url to my file. If you have on local drive, then use file reading code
url = '...'
import urllib, json
response = urllib.urlopen(url)
bag_of_words = json.loads(response.read())
sorted_items = sorted(bag_of_words.items()) # need to sort to make sure vectors align
sorted_items[:10]
# -
# <h2>
# Challenge 1
# </h2>
# <div class=h1_cell>
# <p>
# Let's write a better version of sentence_wrangler. What I noticed this week when going through new books is that I was letting some strange words through. For instance, my sentence_wrangler from last week lets numbers through. And it also lets byte codes through. I think a better design would be to switch from a blacklist (define chars don't want) to a whitelist (define chars that are ok). Change the 3rd argument to the set of legal characters you allow.
# <p>
# If you want to be fancy, be my guest. Use the 3rd argument to pass in an re pattern that needs to match against each word. Much more elegant.
# </div>
def sentence_wrangler(sentence, swords, legal_chars):
    # Exercise stub (Challenge 1): tokenize `sentence`, drop stop words found
    # in `swords`, and keep only words composed entirely of characters in the
    # `legal_chars` whitelist.
    # NOTE(review): `result` and `removed_words` are not defined here -- the
    # student is expected to build them in the body.
    return result, removed_words
# +
#Here is my whitelist - re pattern would be better. Extra credit if you do it
#legals = r'...'
legals = 'abcdefghijklmnopqrstuvwxyz'
# -
# <div class=h1_cell>
# <p>
# Some other odds and ends. We will need cosine_similarity from prior module, stop words and tokenizer.
# <p>
# </div>
def cosine_similarity(v1,v2):
from nltk.corpus import stopwords
swords = stopwords.words('english')
from nltk.tokenize import WordPunctTokenizer
word_punct_tokenizer = WordPunctTokenizer()
# <h2>
# Ok, let's get to it
# </h2>
# <div class=h1_cell>
# <p>
# What I want to know is how "close" 2 books are to each other. I'll build a word-count vector for each book. And then take the cosine similarity. I'll give you a start.
# </div>
# +
#item in sorted_items: (word, (eap_val, hpl_val, mws_val))
eap_vector = [pair[1][0] for pair in sorted_items]
hpl_vector = [pair[1][1] for pair in sorted_items]
# -
eap_hpl = cosine_similarity(eap_vector, hpl_vector)
eap_hpl
# <h2>
# Is that close?
# </h2>
# <div class=h1_cell>
# <p>
# The range of the cosine similarity for us is 0..1. Does that make .75 high? It is hard to answer this without having the values for other book combinations. I would say it is high enough to warrant a further look if I was searching for plagiarism. Let's check out some other combos.
# </div>
# <h2>
# Challenge 2
# </h2>
# <div class=h1_cell>
# <p>
# Go ahead and do the other 2 comparisons.
# </div>
# +
eap_mws
# +
mws_hpl
# -
# <h2>
# Kind of interesting
# </h2>
# <div class=h1_cell>
# <p>
# All 3 have roughly same similarity score. I would expect that given they are all gothic novels. Do you think we are catching the gothic/horror genre in our vectors through use of words?
# </div>
# <h2>
# Challenge 3
# </h2>
# <div class=h1_cell>
# <p>
# Let's test our conjecture that we are capturing something about gothicness. Let's compare the 3 against Huckleberry Finn by <NAME>. My gut feeling is that this should not be high on gothic scale. Your goal is to build a huck_vector that you can compare against our existing vectors. Here is what you need to do:
# <p>
# <ol>
# <li>Initialize a huck_dict that has same keys as bag_of_words and each key's value is a count of that word in the Huck Finn book.
# <li>Find an online version of Huck Finn. Hint: Project Gutenberg is a great source.
# <li>Figure out how to read the book in and to break the book into sentences.
# <li>Pass each sentence through sentence_wrangler to get words.
# <li>For each word, increase the count for huck_dict[word], but only if word is in bag_of_words. If the word is not in bag_of_words, add it to the list huck_left_out.
# </ol>
# <p>
# Check your results against mine.
# </div>
# +
# -
len(all_huck_words) # we expect this to be 24944, the len of bag_of_words
sorted(all_huck_words.items())[:10]
len(set(huck_left_out)) #number of unique words left out
sorted(list(set(huck_left_out)), reverse=False)[:10] #first 10
sorted(list(set(huck_left_out)), reverse=True)[:10] #last 10
# <h2>
# A note about these left out words
# </h2>
# <div class=h1_cell>
# <p>
# I am keeping bag_of_words static for simplicity. But in reality, it is a growing thing. We should really add these left out words into bag_of_words and zero them out for eap, hpl and mws. As it is, we are kind of playing by gothic rules, only using the words we saw in gothic authors. What would happen if we expanded bag_of_words to include all the new words we see in each new book? Would that move us closer or farther away from similarity with the gothic authors?
# </div>
# <h2>
# Challenge 4
# </h2>
# <div class=h1_cell>
# <p>
# Build the huck_vector and compare with other 3. Remember to sort items so vectors align.
# </div>
huck_sorted_items =
huck_vector =
huck_vector[:10]
# +
eap_huck
# +
hpl_huck
# +
mws_huck
# -
#
# <div class=h1_cell>
# <p>
# Huck Finn is definitely less similar. Closest to Lovecraft. Hmmmmm. They were writing at roughly the same time period.
# </div>
# <h2>
# Challenge 4
# </h2>
# <div class=h1_cell>
# <p>
# Let's try one my literary colleague tells me is the antithesis of gothic: Oliver Twist by <NAME>.
# <p>
# Same routine as Huckleberry Finn. Find it, bag it, vectorize it, cosine it with other 4.
# </div>
len(all_twist_words)
twist_sorted_items =
twist_sorted_items[:10]
len(set(twist_left_out))
sorted(list(set(twist_left_out)), reverse=False)[:10]
sorted(list(set(twist_left_out)), reverse=True)[:10]
# <h2>
# Dang
# </h2>
# <div class=h1_cell>
# <p>
# Looks like sentence_wrangler is letting through preface page numbers, e.g., xxxv. If we know we are dealing with books, I suppose we could write a special sentence_wrangler that knows about the weird things we will see in books and throw them out. I kind of like the idea of having a library of sentence wranglers that are tuned to specific styles of text. Then you can choose the one that makes the most sense.
# </div>
# <h2>
# Same left outs?
# </h2>
# <div class=h1_cell>
# <p>
# I wonder how much an overlap there is between words being left out of Huck Finn and words left out of Oliver Twist.
# </div>
intersection = set(twist_left_out).difference(set(huck_left_out))
len(intersection)
# <div class=h1_cell>
# <p>
# A pretty big overlap.
# </div>
# <h2>
# Ok, back to the problem
# </h2>
# <div class=h1_cell>
# <p>
# Build the twist_vector and compare with other 4.
# </div>
twist_vector =
# +
eap_twist
# +
hpl_twist
# +
mws_twist
# +
huck_twist
# -
# <h2>
# Poe is winner this time
# </h2>
# <div class=h1_cell>
# <p>
# Wonder if Poe and Dickens knew each other. They were writing at roughly the same time. Maybe we are picking up on the language and jargon of the time?
# </div>
# <h2>
# Challenge 5
# </h2>
# <div class=h1_cell>
# <p>
# I'm putting down a challenge. Find a book that has a cosine similarity value of below .5 for all 3 gothic authors. I was able to get below .4! You will get a shout-out if you can beat me.
# <p>
# To make exploration easier, I packaged up the code to produce the 3 values into a single function. For each book I explored, I saved it in my dropbox account and then got the url to it. That is what I passed into my function. You could do something similar with Google docs. Or change the url to a file path.
# </div>
def check_book(url, bag, swords, legals):
    # Exercise stub (Challenge 5): download the book at `url`, wrangle its
    # sentences with `swords`/`legals`, build a word-count vector over the
    # keys of `bag`, and return the cosine similarity against each gothic
    # author's vector.
    # NOTE(review): eap_x/hpl_x/mws_x are not defined here -- the student
    # fills in the body.
    return (eap_x, hpl_x, mws_x)
# test to make sure get same values as by hand above
check_book('...twist.txt?raw=1', bag_of_words, swords, legals) # twist
check_book('...', bag_of_words, swords, legals) # close
check_book('...', bag_of_words, swords, legals) # my winner
# <h2>
# Closing note
# </h2>
# <div class=h1_cell>
# <p>
# I'm still interested in using these word-frequency vectors to see what we can do. Next week we will take a look at another way to reason with words, i.e. word co-occurrence matrices.
# </div>
| UpperDivisionClasses/Data_Science/week5/.ipynb_checkpoints/week5_handout_ref-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Basic tour of the Bayesian Optimization package
#
# This is a constrained global optimization package built upon bayesian inference and gaussian process, that attempts to find the maximum value of an unknown function in as few iterations as possible. This technique is particularly suited for optimization of high cost functions, situations where the balance between exploration and exploitation is important.
#
# Bayesian optimization works by constructing a posterior distribution of functions (gaussian process) that best describes the function you want to optimize. As the number of observations grows, the posterior distribution improves, and the algorithm becomes more certain of which regions in parameter space are worth exploring and which are not, as seen in the picture below.
#
# As you iterate over and over, the algorithm balances its needs of exploration and exploitation taking into account what it knows about the target function. At each step a Gaussian Process is fitted to the known samples (points previously explored), and the posterior distribution, combined with a exploration strategy (such as UCB (Upper Confidence Bound), or EI (Expected Improvement)), are used to determine the next point that should be explored (see the gif below).
#
# This process is designed to minimize the number of steps required to find a combination of parameters that are close to the optimal combination. To do so, this method uses a proxy optimization problem (finding the maximum of the acquisition function) that, albeit still a hard problem, is cheaper (in the computational sense) and common tools can be employed. Therefore Bayesian Optimization is most adequate for situations where sampling the function to be optimized is a very expensive endeavor. See the references for a proper discussion of this method.
# ## 1. Specifying the function to be optimized
#
# This is a function optimization package, therefore the first and most important ingredient is, of course, the function to be optimized.
#
# **DISCLAIMER:** We know exactly how the output of the function below depends on its parameter. Obviously this is just an example, and you shouldn't expect to know it in a real scenario. However, it should be clear that you don't need to. All you need in order to use this package (and more generally, this technique) is a function `f` that takes a known set of parameters and outputs a real number.
def black_box_function(x, y):
    """Toy objective with unknown internals that we wish to maximize.

    Serves only as an example: treat the process generating the output as
    a black box. The true maximum is 1, reached at (x, y) = (0, 1).
    """
    penalty_x = x ** 2
    penalty_y = (y - 1) ** 2
    return 1 - penalty_x - penalty_y
# ## 2. Getting Started
#
# All we need to get started is to instantiate a `BayesianOptimization` object specifying a function to be optimized `f`, and its parameters with their corresponding bounds, `pbounds`. This is a constrained optimization technique, so you must specify the minimum and maximum values that can be probed for each parameter in order for it to work.
import os
import sys
module_path = os.path.abspath(os.path.join('../'))
if module_path not in sys.path:
print(module_path)
sys.path.append(module_path)
from bayes_opt import BayesianOptimization
# Bounded region of parameter space
pbounds = {'x': (2, 4), 'y': (-3, 3)}
optimizer = BayesianOptimization(
f=black_box_function,
pbounds=pbounds,
verbose=2, # verbose = 1 prints only when a maximum is observed, verbose = 0 is silent
random_state=1,
)
# The BayesianOptimization object will work out of the box without much tuning needed. The main method you should be aware of is `maximize`, which does exactly what you think it does.
#
# There are many parameters you can pass to maximize, nonetheless, the most important ones are:
# - `n_iter`: How many steps of bayesian optimization you want to perform. The more steps the more likely to find a good maximum you are.
# - `init_points`: How many steps of **random** exploration you want to perform. Random exploration can help by diversifying the exploration space.
optimizer.maximize(
init_points=2,
n_iter=3,
)
# The best combination of parameters and target value found can be accessed via the property `bo.max`.
print(optimizer.max)
# While the list of all parameters probed and their corresponding target values is available via the property `bo.res`.
for i, res in enumerate(optimizer.res):
print("Iteration {}: \n\t{}".format(i, res))
# ### 2.1 Changing bounds
#
# During the optimization process you may realize the bounds chosen for some parameters are not adequate. For these situations you can invoke the method `set_bounds` to alter them. You can pass any combination of **existing** parameters and their associated new bounds.
optimizer.set_bounds(new_bounds={"x": (-2, 3)})
optimizer.maximize(
init_points=0,
n_iter=5,
)
# ## 3. Guiding the optimization
#
# It is often the case that we have an idea of regions of the parameter space where the maximum of our function might lie. For these situations the `BayesianOptimization` object allows the user to specify specific points to be probed. By default these will be explored lazily (`lazy=True`), meaning these points will be evaluated only the next time you call `maximize`. This probing process happens before the gaussian process takes over.
#
# Parameters can be passed as dictionaries such as below:
optimizer.probe(
params={"x": 0.5, "y": 0.7},
lazy=True,
)
# Or as an iterable. Beware that the order has to be alphabetical. You can use `optimizer.space.keys` for guidance.
print(optimizer.space.keys)
optimizer.probe(
params=[-0.3, 0.1],
lazy=True,
)
optimizer.maximize(init_points=2, n_iter=3)
# ## 4. Saving, loading and restarting
#
# By default you can follow the progress of your optimization by setting `verbose>0` when instanciating the `BayesianOptimization` object. If you need more control over logging/alerting you will need to use an observer. For more information about observers checkout the advanced tour notebook. Here we will only see how to use the native `JSONLogger` object to save to and load progress from files.
#
# ### 4.1 Saving progress
from bayes_opt.logger import JSONLogger
from bayes_opt.event import Events
# The observer paradigm works by:
# 1. Instantiating an observer object.
# 2. Tying the observer object to a particular event fired by an optimizer.
#
# The `BayesianOptimization` object fires a number of internal events during optimization, in particular, everytime it probes the function and obtains a new parameter-target combination it will fire an `Events.OPTIMIZATION_STEP` event, which our logger will listen to.
#
# **Caveat:** The logger will not look back at previously probed points.
logger = JSONLogger(path="./logs.json")
optimizer.subscribe(Events.OPTIMIZATION_STEP, logger)
optimizer.maximize(
init_points=2,
n_iter=3,
)
# ### 4.2 Loading progress
#
# Naturally, if you stored progress you will be able to load that onto a new instance of `BayesianOptimization`. The easiest way to do it is by invoking the `load_logs` function, from the `util` submodule.
from bayes_opt.util import load_logs
new_optimizer = BayesianOptimization(
f=black_box_function,
pbounds={"x": (-2, 2), "y": (-2, 2)},
verbose=2,
random_state=7,
)
print(len(new_optimizer.space))
load_logs(new_optimizer, logs=["./logs.json"]);
print("New optimizer is now aware of {} points.".format(len(new_optimizer.space)))
new_optimizer.maximize(
init_points=0,
n_iter=10,
)
# ## Next Steps
#
# This tour should be enough to cover most usage scenarios of this package. If, however, you feel like you need to know more, please checkout the `advanced-tour` notebook. There you will be able to find other, more advanced features of this package that could be what you're looking for. Also, browse the examples folder for implementation tips and ideas.
| examples/basic-tour.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Big data analytics data preparation
#
# This file details the data preparation process for one of my master's projects (the details can be found on my [website](www.jamiefawcett.org.uk)).
#
import ast
from datetime import datetime

import numpy as np
import pandas as pd
# ## Loading data
all_data = pd.read_csv("Full_2019-03-10.csv",index_col=0)
display(all_data.head())
print(len(all_data))
# ## Cleaning up the data
#reset index
all_data = all_data.reset_index()
#making numeric
def makeInt(x):
    """Coerce a scraped count value to int.

    Handles the thousands separators the forum renders (e.g. "1,234"),
    and passes missing values through as None. Pandas represents missing
    entries as NaN, which the original `x == None` check never caught and
    which would make int() raise — treat NaN as missing too.
    """
    if x is None:
        return None
    if isinstance(x, float) and x != x:  # NaN guard (NaN != NaN)
        return None
    if type(x) == str:
        x = x.replace(",", "")
    return int(x)
for var in ["topic_reply_count", "topic_view_count", "forum_topic_count", "forum_post_count"]: ###posttypeid -- also a number but its a category -- choose not to make integer
all_data[var] = all_data[var].map(lambda num: makeInt(num))
# +
#fixing time
#fix today and yesterday
def makeDates(x):
    """Convert a forum timestamp string to a datetime.

    The scrape (taken 2019-03-10) labels recent posts "Today"/"Yesterday";
    those are rewritten to the absolute scrape dates before parsing.
    Requires `from datetime import datetime` (missing in the original
    notebook imports — now added at the top of the file).
    """
    if "Today" in x:
        x = x.replace("Today", "2019-03-10")
    if "Yesterday" in x:
        x = x.replace("Yesterday", "2019-03-09")
    return datetime.strptime(x, '%Y-%m-%d %H:%M:%S')
# -
#convert to datetime objects
all_data["time"] = all_data["time"].map(lambda date: makeDates(date))
def getAdditional(x, key):
    """Extract one field from the string-serialized "additional" dict.

    `x` is a Python-literal dict stored as a string (e.g. "{'role': 'x'}");
    it is parsed safely with ast.literal_eval. Returns the whitespace-
    stripped value for `key`, or None when the key is absent. Requires
    `import ast` (missing in the original notebook imports — now added
    at the top of the file).
    """
    adit_dict = ast.literal_eval(x)
    if key in adit_dict:
        # values are scraped strings with stray whitespace
        return adit_dict[key].strip()
    return None
# +
keys = ['role', 'from', 'registr', 'num_post']
for key in keys:
all_data[key] = all_data["additional"].map(lambda x: getAdditional(x,key))
all_data['num_post'] = all_data['num_post'].map(lambda num: makeInt(num))
# -
# ## Aggregating data
#number of posts
topic_posts_author = all_data.groupby(['topic_title','forum_title','topic_link'])[['index','author']].nunique()
#views
topic_views = pd.Series(all_data.groupby(['topic_title','forum_title','topic_link'])['topic_view_count'].unique())
#replies
topic_replies = pd.Series(all_data.groupby(['topic_title','forum_title','topic_link'])['topic_reply_count'].unique())
#timestamps
post_timestamps = pd.Series(all_data.groupby(['topic_title','forum_title','topic_link'])['time'].unique())
topic_df = pd.concat([topic_posts_author,topic_views,topic_replies,post_timestamps], axis =1, sort= False)
# +
#rename index as post
topic_df = topic_df.rename(columns={'index': 'posts'})
#unpack the topic_view count
topic_df['topic_view_count'] = topic_df['topic_view_count'].map(lambda l: l[0])
# topic_df['topic_view_count_unpack_num'] = topic_df['topic_view_count'].map(lambda l: len(l)) #check if any two still named the same
topic_df['topic_reply_count'] = topic_df['topic_reply_count'].map(lambda l: l[0])
# +
#make time again
def makeTime(x):
    """Parse a 'YYYY-MM-DD HH:MM:SS' string into a datetime.

    Requires `from datetime import datetime` (missing in the original
    notebook imports — now added at the top of the file).

    NOTE(review): the 'time' column was already mapped through makeDates
    above, so the grouped values may arrive as datetime-like objects
    rather than strings — confirm before relying on this re-parse.
    """
    return datetime.strptime(x, '%Y-%m-%d %H:%M:%S')
topic_df['real_time'] = topic_df['time'].map(lambda time_list: [makeTime(time) for time in time_list])
# +
#average time between replies
#average time between replies
def getAverageTime(time_list):
    """Mean gap between consecutive timestamps as a pandas Timedelta.

    A topic with a single post has no gaps and yields Timedelta(0).
    """
    if len(time_list) > 1:
        gap = np.diff(time_list).mean()
    else:
        gap = 0
    return pd.to_timedelta(gap)
#time from the first post to the first reply
def getFirstResponse(time_list):
    """Gap between the first two timestamps as a pandas Timedelta.

    A topic with a single post has no reply and yields Timedelta(0).
    """
    if len(time_list) > 1:
        gap = np.diff(time_list)[0]
    else:
        gap = 0
    return pd.to_timedelta(gap)
# -
#get first day, last day and two averages
topic_df['first_post'] = topic_df['real_time'].map(lambda time_list: min(time_list))
topic_df['last_post'] = topic_df['real_time'].map(lambda time_list: max(time_list))
topic_df['conversation_length'] = topic_df.apply(lambda row: row['last_post']-row['first_post'],axis=1)
topic_df['total_average_response'] = topic_df.apply(lambda row: row['conversation_length']/row['posts'],axis=1) #kinda hacky
topic_df['running_average_response'] = topic_df['real_time'].map(lambda time_list: getAverageTime(time_list))
topic_df['first_response_time'] = topic_df['real_time'].map(lambda time_list: getFirstResponse(time_list))
# +
# time to seconds
#Intervals to seconds
interval_cols =['conversation_length', 'total_average_response',"running_average_response",'first_response_time']
for column in interval_cols:
print(type(topic_df[column][0]))
topic_df["seconds_{}".format(column)] = topic_df[column].map(lambda x: x.total_seconds())
print(type(topic_df["seconds_{}".format(column)][0]))
# +
def dt2ut(dt):
    """Convert a datetime-like value to Unix time (seconds since 1970-01-01)."""
    unix_epoch = pd.to_datetime('1970-01-01 00:00:00')
    delta = dt - unix_epoch
    return delta.total_seconds()
#Dates ready for R
date_cols = ['first_post', 'last_post']
for column in date_cols:
print(type(topic_df[column][0]))
topic_df["unix_{}".format(column)] = topic_df[column].apply(dt2ut)
print(type(topic_df["unix_{}".format(column)][0]))
# -
display(topic_df.head())
topic_df.to_csv("Data-2019-03-10/Topic_2019-03-10.csv", mode ='w')
#For R
for col in ['first_post', 'last_post','conversation_length', 'total_average_response',"running_average_response",'first_response_time','time','real_time']:
del topic_df[col]
topic_df.to_csv("Data-2019-03-10/Topic_BDA_2019-03-10.csv", mode ='w')
| OSM_forum_analysis-BDA-topic.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Concepts in Algorithms
# ***
# ### Bottom-Up vs. Top-Down
# * Bottom-Up algorithms solve for a base case and use it to build up bigger and bigger cases until arriving at the final computation
# * Top-Down algorithms solve the final computation by recursively solving sub-problems down to a base case
#
# Bottom-Up is inherently dynamic programming with memoization. Top-Down can easily use dynamic programming and memoization.
# ###### Bottom-Up Fibonacci
# Solve Fib(3) using Fib(1) and Fib(2), then up to Fib(n) using Fib(n-2) and Fib(n-1)
def bottomUpFib(n):
    """Iterative (bottom-up) Fibonacci with F(1) = 0, F(2) = 1.

    Builds up from the base cases with two rolling variables (O(1) space
    instead of the list-based version). n <= 1 returns 0.
    """
    if n <= 1:
        return 0
    if n < 3:
        return 1
    prev, curr = 0, 1
    for _ in range(n - 2):
        prev, curr = curr, prev + curr
    return curr
for i in range(1,1000):
print(bottomUpFib(i))
# ##### Top-Down Fibonacci
# Solve Fib(n) by solving Fib(n - 2) + Fib(n - 1) and recurse down to Fib(<=2)
def topDownFib(n):
    """Naive recursive (top-down) Fibonacci with F(1) = 0, F(2) = 1.

    Exponential time — kept deliberately simple to contrast with the
    memoized variant. n <= 1 returns 0.
    """
    if n <= 1:
        return 0
    return 1 if n == 2 else topDownFib(n - 2) + topDownFib(n - 1)
for i in range(1,100):
print(topDownFib(i))
# ##### Top-Down Fibonacci w/ Memoization
# Recursively solve Fib(n) by solving Fib(n - 2), storing that value for future use, and solving Fib(n - 1)
def topDownFibDynamic(n, mem):
    """Memoized top-down Fibonacci with F(1) = 0, F(2) = 1.

    `mem` maps n -> F(n) and is filled in as sub-problems are solved,
    making the recursion linear in n. n <= 1 returns 0.
    """
    if n <= 1:
        return 0
    if n == 2:
        return 1
    # solve (and cache) each missing sub-problem exactly once
    for k in (n - 2, n - 1):
        if k not in mem:
            mem[k] = topDownFibDynamic(k, mem)
    return mem[n - 2] + mem[n - 1]
for i in range(1,1000):
print(topDownFibDynamic(i, {}))
#
| concepts/Concepts.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19"
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.preprocessing import StandardScaler,LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix,classification_report
from sklearn.linear_model import LogisticRegression
import seaborn as sns
import matplotlib.pyplot as plt
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# + _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a" _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0"
data = pd.read_csv('../input/hr-analytics/HR_comma_sep.csv')
# -
data.head()
data.info()
data.describe()
data.shape
data.isna().sum()
data["Department"].unique()
data["salary"].unique()
pd.crosstab(data.salary,data.left).plot(kind='bar')
pd.crosstab(data.Department,data.left).plot(kind='bar')
Label_encoder_depart=LabelEncoder()
Label_encoder_salary=LabelEncoder()
depart=Label_encoder_depart.fit_transform(data["Department"])
salary=Label_encoder_salary.fit_transform(data["salary"])
data["Department"]=depart
data["salary"]=salary
target = data["left"]
# BUG FIX: drop() returns a new frame — the original discarded the result,
# so "left" stayed in `data` and leaked the target into the feature matrix.
data = data.drop(columns="left")
data.shape
target.shape
# all remaining feature columns (9 after dropping "left")
inputs = data.iloc[:, 0:9].values
inputs.shape
x_train,x_test,y_train,y_test=train_test_split(inputs,target,test_size=0.2)
model=LogisticRegression()
model.fit(x_train,y_train)
predict=model.predict(x_test)
cm=confusion_matrix(y_test,predict)
cm
| logistic-regression-hr-analytics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="kJCMwRx9_4rN"
# # Baseline (01)
# + [markdown] id="M7yzKetZAK3s"
# # Imports
# + id="iLciMje9AOmB"
import pandas as pd
import numpy as np
import os
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.preprocessing.image import ImageDataGenerator
#from wcs.google import google_drive_share
import pandas as pd
import urllib.request
from urllib.parse import urlparse
#from google.colab import drive
import warnings
warnings.simplefilter(action='ignore')
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
# -
# !pip install gdown
# + [markdown] id="YcfsOKfzC2N6"
# # Load Data
# + id="SRMNiJ9oMGY2"
BASE_DIR ="./"
IMAGES_DIR = "/kaggle/input/movie-poster-genre-2021/images/"
# + [markdown] id="eA1fV-j_KX0L"
# # Preproc
# + id="oE24fz77duaZ"
def retrieveFilename(url):
    """Return the final path component (the file name) of a URL.

    Query strings and fragments are ignored because only the parsed
    path is used.
    """
    parsed = urlparse(url)
    return os.path.basename(parsed.path)
# +
import gdown
parquet_fname = "./df.parquet.gzip"
if not os.path.exists(parquet_fname):
url = 'https://drive.google.com/uc?id=178mXeXUC1400lj-LrWyhD3yfW5_KsVYw'
gdown.download(url, parquet_fname, quiet=False)
df = pd.read_parquet("./df.parquet.gzip")
df["filename"] = df.loc[~df["poster_url"].isnull(),"poster_url"].apply(retrieveFilename)
# use only mot null rows
df = df.dropna()
# set data path to basename of the file
#df['data_path'] = df['poster_path'].apply(lambda x: x.split('/')[-1])
# remove rows with empty genre_id list and set correct list type
df['genre_id'] = df['genre_id'].apply(lambda x: np.nan if len(eval(x)) == 0 else x)
df = df.dropna()
df['genre_id'] = df['genre_id'].apply(lambda x: eval(x))
print(f'len of df: {len(df)}')
df["file_exists"] = df["filename"].apply(lambda x: os.path.exists(IMAGES_DIR + x))
df.to_parquet(parquet_fname,compression='gzip')
else:
df = pd.read_parquet("./df.parquet.gzip")
# + id="acVfQ8KqKlaI"
# #!rm $parquet_fname
# -
df.head()
#print(df.shape)
#keep only rows where file exists in data set
df = df.loc[df["file_exists"]]
# + [markdown] id="v1ChqgNRK0py"
# Create ImageGenerators
# -
IMAGES_DIR
# + id="tz11Ruq2KyQs"
datagen = ImageDataGenerator(rescale=1 / 255., validation_split=0.1)
BATCH_SIZE = 64
train_generator = datagen.flow_from_dataframe(
dataframe=df,
directory=IMAGES_DIR,
x_col="filename",
y_col="genre_id",
batch_size=BATCH_SIZE,
seed=42,
shuffle=True,
class_mode="categorical",
target_size=(299, 299),
subset='training',
validate_filenames=True
)
valid_generator = datagen.flow_from_dataframe(
dataframe=df,
directory=IMAGES_DIR,
x_col="filename",
y_col="genre_id",
batch_size=BATCH_SIZE,
seed=42,
shuffle=True,
class_mode="categorical",
target_size=(299, 299),
subset='validation',
validate_filenames=True
)
# + id="1a6YhAuOLWFC"
# show class indicies
print(train_generator.class_indices)
print('length:', len(train_generator.class_indices))
# + id="irTOneeqduah"
list(train_generator.class_indices.keys())
# + id="SIVWsXqFduai"
#https://datascience.stackexchange.com/questions/13490/how-to-set-class-weights-for-imbalanced-classes-in-keras
from sklearn.utils import class_weight
#In order to calculate the class weight do the following
class_weights = class_weight.compute_class_weight('balanced',
np.array(list(train_generator.class_indices.keys()),dtype="int"),
np.array(df.genre_id.explode(),dtype="int"))
#np.unique(y_train),
#y_train)
class_weights_genre_id = dict(zip(list(train_generator.class_indices), class_weights))
display(class_weights_genre_id)
class_weights = dict(zip(list(range(len(class_weights))), class_weights))
class_weights
# + id="MXYlHNqVduaj"
map_gender={"28":"Action",
"12":"Adventure",
"16":"Animation",
"35":"Comedy",
"80":"Crime",
"99":"Documentary",
"18":"Drama",
"10751":"Family",
"14":"Fantasy",
"36": "History",
"27":"Horror",
"10402" :"Music",
"9648":"Mystery",
"10749":"Romance",
"878" :"Science Fiction",
"10770":"TV Movie",
"53":"Thriller",
"10752":"War",
"37":"Western"}
series_genre_id_counts = df.genre_id.explode().value_counts()
series_genre_id_counts
df_genre = pd.DataFrame(series_genre_id_counts)
df_genre["id"] = df_genre.index
df_genre.rename(columns={"genre_id" : "count"},inplace=True)
df_genre["name"] = df_genre["id"].apply(lambda x : map_gender[str(x)])
df_genre["weight"] = df_genre["id"].apply(lambda x : class_weights_genre_id[x])
df_genre.sort_values(by="count")
# + id="4Uc9-PAmduar"
# + [markdown] id="cPXBrisCLgl2"
# # Simple Model
# + [markdown] id="SL2yeEslLcMe"
# model = keras.Sequential(
# [
# layers.Conv2D(32, (3, 3), padding='same', input_shape=(299, 299, 3)),
# layers.Activation('relu'),
# layers.Conv2D(32, (3, 3)),
# layers.Activation('relu'),
# layers.MaxPooling2D(pool_size=(2, 2)),
# layers.Dropout(0.25),
# layers.Conv2D(64, (3, 3), padding='same'),
# layers.Activation('relu'),
# layers.Conv2D(64, (3, 3)),
# layers.Activation('relu'),
# layers.MaxPooling2D(pool_size=(2, 2)),
# layers.Dropout(0.25),
# layers.Flatten(),
# layers.Dense(512),
# layers.Activation('relu'),
# layers.Dropout(0.5),
# layers.Dense(len(train_generator.class_indices), activation='sigmoid')
# ]
# )
#
#
# #model.compile(optimizer='adam', loss="binary_crossentropy", metrics=["accuracy"])
# + [markdown] id="e8DgtGWiduau"
# model = keras.Sequential(
# [
# layers.Conv2D(32, (3, 3), padding='same', input_shape=(299, 299, 3)),
# layers.Activation('relu'),
# layers.Conv2D(64, (3, 3), activation='relu'),
# layers.MaxPooling2D(pool_size=(2, 2)),
# layers.Dropout(0.25),
# layers.Conv2D(128, kernel_size=(3, 3), activation='relu'),
# layers.Conv2D(64, (3, 3), activation='relu'),
# layers.MaxPooling2D(pool_size=(2, 2)),
# layers.Dropout(0.25),
# layers.Flatten(),
# layers.Dense(128, activation='relu'),
# layers.Dropout(0.5),
# layers.Dense(len(train_generator.class_indices), activation='sigmoid')
# #layers.Dense(len(train_generator.class_indices), activation='softmax')
#
# ])
# + id="hwX1XsBiduav"
#https://machinelearningmastery.com/how-to-use-transfer-learning-when-developing-convolutional-neural-network-models/
from keras.applications.inception_v3 import InceptionV3
from keras.applications.vgg16 import VGG16
from keras.applications.densenet import DenseNet169
from keras.models import Model
# load model
#model = VGG16(include_top=False, input_shape=(299,299, 3))
model = DenseNet169(include_top=False, input_shape=(299,299, 3))
# summarize the model
model.summary()
# -
# Finally, we implemented a standard DenseNet-169 architecture with similar modifications. The final
# fully-connected layer of 1000 units was once again replaced by 3 sequential fully-connected layers of
# 3
# 1024, 128, and 7 units with ReLU, ReLU, and sigmoid activations respectively. The entire model
# consists of 14,479,943 parameters, out of which, 14,321,543 were trainable.
# + id="0F0PXrUbdua_"
x= layers.Flatten()(model.layers[-1].output)
#x= layers.Dense(128, activation='relu')(x)
x= layers.Dense(1024, activation='relu')(x)
x= layers.Dense(128, activation='relu')(x)
#x= layers.Dropout(0.5)(x)
classifications = layers.Dense(len(train_generator.class_indices), activation='sigmoid')(x)
# + id="nKhnvQ75dubS"
model = Model(inputs=model.inputs, outputs=classifications)
model.summary()
# + id="RXgyUO_Mdua_"
# mark loaded layers as not trainable
# except last layer
leng = len(model.layers)
print(leng)
for i,layer in enumerate(model.layers):
if leng-i == 500:
print("stopping at",i)
break
layer.trainable = False
model.summary()
# +
from keras import backend as K
def recall_m(y_true, y_pred):
    # Recall = TP / (TP + FN), written with Keras backend ops so it can be
    # passed as a metric to model.compile(). clip+round binarizes the
    # (sigmoid) predictions; K.epsilon() avoids division by zero when a
    # batch contains no positive labels.
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    recall = true_positives / (possible_positives + K.epsilon())
    return recall
def precision_m(y_true, y_pred):
    # Precision = TP / (TP + FP), written with Keras backend ops so it can
    # be passed as a metric to model.compile(). clip+round binarizes the
    # predictions; K.epsilon() avoids division by zero when nothing is
    # predicted positive.
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    return precision
def f1_m(y_true, y_pred):
    # F1 = harmonic mean of precision and recall, built from the two
    # backend-metric helpers above; K.epsilon() guards the zero/zero case.
    precision = precision_m(y_true, y_pred)
    recall = recall_m(y_true, y_pred)
    return 2*((precision*recall)/(precision+recall+K.epsilon()))
# + id="c0lPlJz1dubT"
from keras import metrics
#https://neptune.ai/blog/keras-metrics
model.compile(optimizer='adam', loss="binary_crossentropy",
metrics=[metrics.categorical_accuracy,
tf.keras.metrics.AUC(),
tf.keras.metrics.Precision(),
tf.keras.metrics.Recall(),
f1_m,
])
# + id="PALSOI_Sjd45"
import os.path
fname_model = BASE_DIR + "DenseNet169_1"
if os.path.exists(fname_model) :
print("Load model")
model = keras.models.load_model(fname_model)
# + [markdown] id="vIRDrzYqM_Vw"
# # Train
# + id="-6NBrltEM2Xy" outputId="41257bb9-d813-470d-8c4c-a8a5b5534007"
history = model.fit(
train_generator,
validation_data=valid_generator,
epochs=20,# 5,
#batch_size=BATCH_SIZE,
#steps_per_epoch=1430,
class_weight = class_weights
)
# + id="eL6vW2zjji4o"
model.save(fname_model)
# + id="Jah0S5tcNKDV"
#num_samples // batch_size
183069 / 32 / 4
# + [markdown] id="YvVjyLQW7HfZ"
# Epoch 1/50
# 520/520 [==============================] - 174s 334ms/step - loss: 0.7014 - accuracy: 0.2550 - val_loss: 0.5431 - val_accuracy: 0.3258
# Epoch 2/50
# 520/520 [==============================] - 172s 331ms/step - loss: 0.5158 - accuracy: 0.2788 - val_loss: 0.4592 - val_accuracy: 0.3258
# Epoch 3/50
# 520/520 [==============================] - 172s 331ms/step - loss: 0.4422 - accuracy: 0.2797 - val_loss: 0.4133 - val_accuracy: 0.3258
# Epoch 4/50
# 520/520 [==============================] - 171s 330ms/step - loss: 0.4013 - accuracy: 0.2786 - val_loss: 0.3887 - val_accuracy: 0.3258
# + id="eQJFXU5DdubU"
| notebooks/05_aida-movie-genre-1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="iWOZV94kYsbM" colab_type="text"
# 在实际应用中许多数据都以图(graph)的形式存在,比如,互联网、社交网络都可以看作是一个图。图数据上的机器学习具有理论与应用上的重要意义。pageRank算法是图的链接分析 (link analysis)的代表性算法,属于图数据上的无监督学习方法。
#
# pageRank算法最初作为互联网网页重要度的计算方法,1996年由page和Brin提出,并用于谷歌搜索引擎的网页排序。事实上,pageRank可以定义在任意有向图上,后来被应用到社会影响力分析、文本摘要等多个问题。
#
# pageRank算法的基本想法是在有向图上定义一个随机游走模型,即一阶马尔可夫链,描述随机游走者沿着有向图随机访问各个结点的行为。在一定条件下,极限情况访问每个结点的概率收敛到平稳分布, 这时各个结点的平稳概率值就是其 pageRank值,表示结点的重要度。 pageRank是递归定义的,pageRank的计算可以通过迭代算法进行。
# + id="fAN4q0cqYn-f" colab_type="code" colab={}
#https://gist.github.com/diogojc/1338222/84d767a68da711a154778fb1d00e772d65322187
import numpy as np
from scipy.sparse import csc_matrix
def pageRank(G, s = .85, maxerr = .0001):
    """
    Computes the pagerank for each of the n states
    Parameters
    ----------
    G: matrix representing state transitions
       Gij is a binary value representing a transition from state i to j.
    s: probability of following a transition. 1-s probability of teleporting
       to another state.
    maxerr: if the sum of pageranks between iterations is bellow this we will
            have converged.
    """
    n = G.shape[0]

    # transform G into markov matrix A
    # (np.float was removed in NumPy 1.24+; use the concrete np.float64)
    A = csc_matrix(G, dtype=np.float64)
    rsums = np.array(A.sum(1))[:,0]
    ri, ci = A.nonzero()
    # row-normalize so each row is a probability distribution
    A.data /= rsums[ri]

    # bool array of sink states (rows with no outgoing links)
    sink = rsums==0

    # Compute pagerank r until we converge
    ro, r = np.zeros(n), np.ones(n)
    while np.sum(np.abs(r-ro)) > maxerr:
        ro = r.copy()
        # calculate each pagerank at a time
        for i in range(0,n):
            # inlinks of state i
            Ai = np.array(A[:,i].todense())[:,0]
            # sink states redistribute their mass uniformly
            Di = sink / float(n)
            # teleportation to state i with probability (1-s)
            Ei = np.ones(n) / float(n)
            r[i] = ro.dot( Ai*s + Di*s + Ei*(1-s) )

    # return normalized pagerank
    return r/float(sum(r))
# + id="Ds-wQEFFZ1F7" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 53} outputId="b2860902-8712-4583-ab47-bec602c6791b"
# Example extracted from 'Introduction to Information Retrieval'
G = np.array([[0,0,1,0,0,0,0],
[0,1,1,0,0,0,0],
[1,0,1,1,0,0,0],
[0,0,0,1,1,0,0],
[0,0,0,0,0,0,1],
[0,0,0,0,0,1,1],
[0,0,0,1,1,0,1]])
print(pageRank(G,s=.86))
| 第21章 PageRank算法/PageRank.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
# -
# First let's make sure that we have access to a subset of image files from the PASCAL VOC dataset:
# +
import os.path as op
from zipfile import ZipFile
if not op.exists("images_resize"):
print('Extracting image files...')
zf = ZipFile('images_pascalVOC.zip')
zf.extractall('.')
# -
# # Using a pretrained model
#
# Objectives:
#
# - Load a pre-trained ResNet50 pre-trained model using Keras Zoo
# - Build a headless model and compute representations of images
# - Explore the quality of representation with t-SNE
# - Retrain last layer on cat vs dog dataset
# +
from keras.applications.resnet50 import ResNet50
from keras.models import Model
from keras.preprocessing import image
model = ResNet50(include_top=True, weights='imagenet')
# -
print(model.summary())
# ### Classification of an image
#
# **Exercise**
# - Open an image, preprocess it and build a batch of 1 image
# - Use the model to classify this image
# - Decode the predictions using `decode_predictions` from Keras
#
# Notes:
# - Test your code with `"images_resize/000007.jpg"`
# - You may need `preprocess_input` for preprocessing the image.
# - The Keras resnet expects floating point images of size `(224, 224)` with a dynamic in `[0, 255]` before preprocessing. [skimage's resize](http://scikit-image.org/docs/stable/api/skimage.transform.html#skimage.transform.resize) has a `preserve_range` flag that you might find useful.
# +
from skimage.io import imread
from skimage.transform import resize
from keras.applications.imagenet_utils import preprocess_input
from keras.applications.imagenet_utils import decode_predictions
path = "images_resize/000007.jpg"
# TODO
# +
# # %load solutions/predict_image.py
# -
# ### Computing the representation of an image
#
# Let's build a new model that maps the image input space to the output of the layer before the last layer of the pretrained Resnet 50 model. We call this new model the "base model":
input = model.layers[0].input
output = model.layers[-2].output
base_model = Model(input, output)
base_model.output_shape
# The base model can transform any image into a flat, high dimensional, semantic feature vector:
representation = base_model.predict(img_batch)
print("Shape of representation:", representation.shape)
print("Proportion of zeros in the feature vector: %0.3f"
% np.mean(representation[0] == 0))
# Computing representations of all images can be time consuming.
# This is usually made by large batches on a GPU for massive performance gains.
#
# For the remaining part, we will use pre-computed representations saved in h5 format.
#
# For those interested, this is done using the `process_images.py` script
import os
paths = ["images_resize/" + path
for path in sorted(os.listdir("images_resize/"))]
# +
import h5py
with h5py.File('img_emb.h5', 'r') as h5f:
out_tensors = h5f['img_emb'][:]
out_tensors.shape
# -
out_tensors.dtype
# **Exercise**
# - What is the proportion of 0 values in this representation?
# - Can you find any negative values?
# - Why are there so many zero values?
# - Are the zero always located in the same dimensions for different input images?
# +
# # %load solutions/representations.py
# -
# Let's find a 2D representation of that high dimensional feature space using T-SNE:
# +
from sklearn.manifold import TSNE
img_emb_tsne = TSNE(perplexity=30).fit_transform(out_tensors)
# +
import matplotlib.pyplot as plt
plt.figure(figsize=(10, 10))
plt.scatter(img_emb_tsne[:, 0], img_emb_tsne[:, 1]);
plt.xticks(()); plt.yticks(());
plt.show()
# -
# Let's add thumnails of the original images at their TSNE locations:
# +
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.offsetbox import OffsetImage, AnnotationBbox
from skimage.io import imread
from skimage.transform import resize
def imscatter(x, y, paths, ax=None, zoom=1, linewidth=0):
    """Scatter-plot image thumbnails from `paths` at positions (x, y).

    Each image is loaded, resized to 224x224 and attached to `ax` as an
    AnnotationBbox; unreadable paths are printed and skipped. Returns the
    list of artists added to the axes.
    """
    if ax is None:
        ax = plt.gca()
    x, y = np.atleast_1d(x, y)
    artists = []
    for x0, y0, p in zip(x, y, paths):
        try:
            im = imread(p)
        except Exception:
            # narrowed from a bare `except:`, which would also swallow
            # KeyboardInterrupt/SystemExit; unreadable images are skipped
            print(p)
            continue
        im = resize(im, (224, 224), preserve_range=False, mode='reflect')
        im = OffsetImage(im, zoom=zoom)
        ab = AnnotationBbox(im, (x0, y0), xycoords='data',
                            frameon=True, pad=0.1,
                            bboxprops=dict(edgecolor='red',
                                           linewidth=linewidth))
        artists.append(ax.add_artist(ab))
    ax.update_datalim(np.column_stack([x, y]))
    ax.autoscale()
    return artists
# -
fig, ax = plt.subplots(figsize=(50, 50))
imscatter(img_emb_tsne[:, 0], img_emb_tsne[:, 1], paths, zoom=0.5, ax=ax)
plt.savefig('tsne.png')
# ### Visual Search: finding similar images
def display(img):
    # Load the image at path `img` and show it in a new matplotlib figure.
    plt.figure()
    img = imread(img)
    plt.imshow(img)
# +
idx = 57
def most_similar(idx, top_n=5):
    """Return the indices of the `top_n` embeddings closest (L2) to row `idx`.

    Uses the module-level `out_tensors` embedding matrix; the query image
    itself is at distance 0, so it is always the first result.
    """
    distances = np.linalg.norm(out_tensors - out_tensors[idx], axis=1)
    ranking = np.argsort(distances)
    return ranking[:top_n]
sim = most_similar(idx)
[display(paths[s]) for s in sim];
# -
# # Classification from Nearest Neighbors?
#
# Using these representations, it may be possible to build a nearest neighbor classifier. However, the representations are learnt on ImageNet, which consists of centered images, whereas the PascalVOC images we feed in are more plausible inputs for a real-world system.
#
# The next section explores this possibility by computing the histogram of similarities between one image and the others.
# Cosine similarity needs unit-norm vectors: normalize each embedding row.
out_norms = np.linalg.norm(out_tensors, axis=1, keepdims=True)
normed_out_tensors = out_tensors / out_norms
# Reference image whose similarity distribution we inspect.
item_idx = 208
# Euclidean distances from every embedding to the reference item.
dists_to_item = np.linalg.norm(out_tensors - out_tensors[item_idx],
                               axis=1)
# Cosine similarities (dot products of unit vectors) to the reference item.
cos_to_item = np.dot(normed_out_tensors, normed_out_tensors[item_idx])
plt.hist(cos_to_item, bins=30)
display(paths[item_idx])
# Unfortunately there is no clear separation of class boundaries visible in the histogram of similarities alone. We need some supervision to be able to classify images.
#
# With a labeled dataset, even with very little labels per class, one would be able to:
# - build a k-Nearest Neighbor model,
# - build a classification model such as a SVM.
#
# These approximate classifiers are useful in practice.
# See the `cat vs dog` home assignment with GPU for another example of this approach.
# Show every image whose cosine similarity to the reference item exceeds
# 0.5; note the threshold is ad hoc, not a learned decision boundary.
items = np.where(cos_to_item > 0.5)
print(items)
[display(paths[s]) for s in items[0]];
| labs/04_conv_nets/02_Pretrained_ConvNets_with_Keras.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
from typing import Tuple
from numpy.random import default_rng
# Module-level generator (unseeded, so results differ from run to run).
rng = default_rng()
def make_blob(loc: Tuple[float, ...], scale: float, size: int,
              distortion: Tuple[float, ...] = ()) -> np.ndarray:
    """Make an individual, randomly distributed blob of points.

    A better controlled output of blobs than the sklearn function.

    Args:
        loc (Tuple of floats): The location (center) of the blob in space;
            its length sets the dimensionality of the output.
        scale (float): std of the normal distribution of points.
        size (int): Number of points (rows) in the returned array.
            (Was annotated ``float``, but numpy requires an integer size.)
        distortion (Tuple of floats): Optional, scaling factors of the
            distribution, must be same size as loc.

    Returns:
        np.ndarray: Output of shape (size, len(loc)).
    """
    # Sample an isotropic Gaussian cloud centered at the origin,
    # using the module-level `rng`.
    X = rng.normal(loc=0, scale=scale, size=(size, len(loc)))
    # Optionally stretch each axis, then translate the cloud to `loc`.
    if distortion:
        for i, v in enumerate(distortion):
            X[:, i] *= v
    for i, v in enumerate(loc):
        X[:, i] += v
    return X
X = make_blob(loc=(10,0), scale=0.3, size=1000, distortion=(2,1))
# NOTE(review): the second scatter draws the same points in blue directly
# over the black ones; one of the two calls looks redundant — confirm.
plt.scatter(X[:,0],X[:,1], alpha=0.3, c='k')
plt.scatter(X[:,0],X[:,1], alpha=0.3, c='b')
| blob_classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6
# language: python
# name: python3.6
# ---
# <h1>Data Preparation</h1>
#
# <div class="alert alert-info"><p>Throughout this notebook, actions that you are supposed to take are written in a blue box like this.</p></div>
#
# <h3>Breast Cancer Data</h3>
#
# <p>For this notebook and some of the later exercises we will use RNA-Seq data from [The Cancer Genome Atlas (TCGA)](https://cancergenome.nih.gov/). We have selected 20 breast cancer primary tumors (BRCA) with their matched normal samples. We are starting off with the read counts files for each tumor and normal sample (40 in total) downloaded from the <a href="https://cancergenome.nih.gov/" target="_blank">TCGA data portal</a> and placed on a web server that permits unrestricted access. We also have a <a href="http://software.broadinstitute.org/cancer/software/genepattern/file-formats-guide#sample-information-file" target="_blank">sample information file</a> that we created in a spreadsheet that has the filenames, sample names and an indication if it is a tumor or a matched normal sample.</p>
#
# **While the example we are using is for a specific selection of breast cancer samples, you can use the steps in this notebook to prepare any TCGA-derived data for use in GenePattern analyses.**
#
# <h2>Scientific summary</h2>
#
# <p>For our data preparation phase, we will perform the following steps:</p>
#
# <h3>1. Compile multiple read count files into a matrix and a file describing the phenotypes</h3>
#
# <p>We will provide the read counts files (one per sample) and a sample info file to the MergeHTSeqCounts module. It will generate a <a href="http://software.broadinstitute.org/cancer/software/genepattern/file-formats-guide#GCT" target="_blank">GCT</a> file with each sample as one column and each Ensembl gene id as a row. The module will use the 'filename' column of the <a href="http://software.broadinstitute.org/cancer/software/genepattern/file-formats-guide#sample-information-file" target="_blank">sample information file</a> to identify which row corresponds to which read counts filename. It uses the **samplename** column to replace the read counts filename with a more informative sample name in the output files. It uses the second column (**primary tumor/normal**) to generate a <a href="http://software.broadinstitute.org/cancer/software/genepattern/file-formats-guide#CLS" target="_blank">CLS file</a> assigning a phenotype to each sample.</p>
#
# <h3>2. Remove version suffix from Ensembl gene ids</h3>
#
# <p>The Ensembl gene ids in the read counts files include a version suffix (e.g. ENSG00000000419.11) but the module we will use in Step 3 (CollapseDataset) does not accept Ensembl ids that include the version. We will load the GCT file generated in step 1 into this notebook, strip the versions from the ids (using Python), and then save it back to the GenePattern server to be used in the next step.</p>
#
# <h3>3. Replace Ensembl gene ids with HUGO symbols and remove duplicates</h3>
#
# <p>To make the dataset more human-friendly for analysis we will replace the Ensembl gene ids with HUGO symbols. Since more than one Ensembl gene id can map to a single symbol, we need to collapse any rows with duplicate symbols to a single row. The CollapseDataset module does both the remapping and collapsing for us.</p>
#
# <h3>4. Normalize for downstream analysis</h3>
# <p>Preprocess RNA-Seq count data in a GCT file so that it is suitable for use in GenePattern analyses.</p>
#
# <h3>Login</h3>
#
# <div class="alert alert-info"><ul><li>If you are not logged in, enter your username and password in the cell below and click **Login**.</ul></li></div>
#
# <p>The logins to the notebook server and the GenePattern server are separate to allow you to run analyses hosted on different GenePattern servers in the same notebook.</p>
#
# + genepattern={"server": "https://gp-beta-ami.genepattern.org/gp", "type": "auth"}
# Requires GenePattern Notebook: pip install genepattern-notebook
import gp
import genepattern
# Username and password removed for security reasons.
genepattern.display(genepattern.session.register("https://gp-beta-ami.genepattern.org/gp", "", ""))
# -
#
# <h3>Begin the analysis</h3>
#
# <p>We will use the cell below (MergeHTSeqCounts). This module will take the 40 read count files and the sample info file. We also tell it which columns of the sample info file have the filename, the sample name we want to use going forward, and a class distinction to use to generate a companion <b>cls</b> file.
#
# <p><i>We have prefilled the sample info file and the 40 input file URLs into the next cell to save time.</i></p>
#
# <h3>Step 1. Compile multiple read count files into a matrix and a file describing the phenotypes</h3>
# <p><div class="alert alert-info"><ul><li>Click **Run** in the cell below to generate the compiled read count (<strong>gct</strong>) and phenotype (<strong>cls</strong>) files.</ul></li></div></p>
#
# <p>Once you hit run, a new GenePattern output cell will appear. You can watch the job's status change in its top right corner. Once it is complete it will show you links to the output files in the job status cell.</p>
# + genepattern={"show_code": true, "type": "task"}
# MergeHTSeqCounts: merges the per-sample HTSeq count files into one GCT
# matrix and emits a CLS phenotype file (tumor vs. matched normal).
mergehtseqcounts_task = gp.GPTask(genepattern.get_session(0), 'urn:lsid:broad.mit.edu:cancer.software.genepattern.module.analysis:00354')
mergehtseqcounts_job_spec = mergehtseqcounts_task.make_job_spec()
mergehtseqcounts_job_spec.set_parameter("input.files", ["https://datasets.genepattern.org/data/TCGA_BRCA/BRCA_HTSeqCounts/TCGA-A7-A0CE-01.htseq.counts", "https://datasets.genepattern.org/data/TCGA_BRCA/BRCA_HTSeqCounts/TCGA-A7-A0CE-11.htseq.counts", "https://datasets.genepattern.org/data/TCGA_BRCA/BRCA_HTSeqCounts/TCGA-A7-A0CH-01.htseq.counts", "https://datasets.genepattern.org/data/TCGA_BRCA/BRCA_HTSeqCounts/TCGA-A7-A0CH-11.htseq.counts", "https://datasets.genepattern.org/data/TCGA_BRCA/BRCA_HTSeqCounts/TCGA-A7-A0D9-01.htseq.counts", "https://datasets.genepattern.org/data/TCGA_BRCA/BRCA_HTSeqCounts/TCGA-A7-A0D9-11.htseq.counts", "https://datasets.genepattern.org/data/TCGA_BRCA/BRCA_HTSeqCounts/TCGA-A7-A0DB-01.htseq.counts", "https://datasets.genepattern.org/data/TCGA_BRCA/BRCA_HTSeqCounts/TCGA-A7-A0DB-11.htseq.counts", "https://datasets.genepattern.org/data/TCGA_BRCA/BRCA_HTSeqCounts/TCGA-A7-A13E-01.htseq.counts", "https://datasets.genepattern.org/data/TCGA_BRCA/BRCA_HTSeqCounts/TCGA-A7-A13E-11.htseq.counts", "https://datasets.genepattern.org/data/TCGA_BRCA/BRCA_HTSeqCounts/TCGA-A7-A13F-01.htseq.counts", "https://datasets.genepattern.org/data/TCGA_BRCA/BRCA_HTSeqCounts/TCGA-A7-A13F-11.htseq.counts", "https://datasets.genepattern.org/data/TCGA_BRCA/BRCA_HTSeqCounts/TCGA-A7-A13G-01.htseq.counts", "https://datasets.genepattern.org/data/TCGA_BRCA/BRCA_HTSeqCounts/TCGA-A7-A13G-11.htseq.counts", "https://datasets.genepattern.org/data/TCGA_BRCA/BRCA_HTSeqCounts/TCGA-AC-A23H-01.htseq.counts", "https://datasets.genepattern.org/data/TCGA_BRCA/BRCA_HTSeqCounts/TCGA-AC-A23H-11.htseq.counts", "https://datasets.genepattern.org/data/TCGA_BRCA/BRCA_HTSeqCounts/TCGA-AC-A2FB-01.htseq.counts", "https://datasets.genepattern.org/data/TCGA_BRCA/BRCA_HTSeqCounts/TCGA-AC-A2FB-11.htseq.counts", "https://datasets.genepattern.org/data/TCGA_BRCA/BRCA_HTSeqCounts/TCGA-AC-A2FF-01.htseq.counts", "https://datasets.genepattern.org/data/TCGA_BRCA/BRCA_HTSeqCounts/TCGA-AC-A2FF-11.htseq.counts", 
"https://datasets.genepattern.org/data/TCGA_BRCA/BRCA_HTSeqCounts/TCGA-AC-A2FM-01.htseq.counts", "https://datasets.genepattern.org/data/TCGA_BRCA/BRCA_HTSeqCounts/TCGA-AC-A2FM-11.htseq.counts", "https://datasets.genepattern.org/data/TCGA_BRCA/BRCA_HTSeqCounts/TCGA-BH-A0AU-01.htseq.counts", "https://datasets.genepattern.org/data/TCGA_BRCA/BRCA_HTSeqCounts/TCGA-BH-A0AU-11.htseq.counts", "https://datasets.genepattern.org/data/TCGA_BRCA/BRCA_HTSeqCounts/TCGA-BH-A0AY-01.htseq.counts", "https://datasets.genepattern.org/data/TCGA_BRCA/BRCA_HTSeqCounts/TCGA-BH-A0AY-11.htseq.counts", "https://datasets.genepattern.org/data/TCGA_BRCA/BRCA_HTSeqCounts/TCGA-BH-A0AZ-01.htseq.counts", "https://datasets.genepattern.org/data/TCGA_BRCA/BRCA_HTSeqCounts/TCGA-BH-A0AZ-11.htseq.counts", "https://datasets.genepattern.org/data/TCGA_BRCA/BRCA_HTSeqCounts/TCGA-BH-A0B3-01.htseq.counts", "https://datasets.genepattern.org/data/TCGA_BRCA/BRCA_HTSeqCounts/TCGA-BH-A0B3-11.htseq.counts", "https://datasets.genepattern.org/data/TCGA_BRCA/BRCA_HTSeqCounts/TCGA-BH-A0B5-01.htseq.counts", "https://datasets.genepattern.org/data/TCGA_BRCA/BRCA_HTSeqCounts/TCGA-BH-A0B5-11.htseq.counts", "https://datasets.genepattern.org/data/TCGA_BRCA/BRCA_HTSeqCounts/TCGA-BH-A0B7-01.htseq.counts", "https://datasets.genepattern.org/data/TCGA_BRCA/BRCA_HTSeqCounts/TCGA-BH-A0B7-11.htseq.counts", "https://datasets.genepattern.org/data/TCGA_BRCA/BRCA_HTSeqCounts/TCGA-BH-A0B8-01.htseq.counts", "https://datasets.genepattern.org/data/TCGA_BRCA/BRCA_HTSeqCounts/TCGA-BH-A0B8-11.htseq.counts", "https://datasets.genepattern.org/data/TCGA_BRCA/BRCA_HTSeqCounts/TCGA-BH-A0BA-01.htseq.counts", "https://datasets.genepattern.org/data/TCGA_BRCA/BRCA_HTSeqCounts/TCGA-BH-A0BA-11.htseq.counts", "https://datasets.genepattern.org/data/TCGA_BRCA/BRCA_HTSeqCounts/TCGA-BH-A0BC-01.htseq.counts", "https://datasets.genepattern.org/data/TCGA_BRCA/BRCA_HTSeqCounts/TCGA-BH-A0BC-11.htseq.counts"])
# Prefix for the merged GCT/CLS output files.
mergehtseqcounts_job_spec.set_parameter("output.prefix", "BRCA_with_versioned_ensemble_ids")
# Sample information file mapping read-count filenames to sample names and
# the tumor/normal phenotype used to build the CLS file.
mergehtseqcounts_job_spec.set_parameter("sampleinfo.file", "https://datasets.genepattern.org/data/TCGA_BRCA/BRCA_HTSeqCounts/TCGA_BRCA_SAMPLEINFO.txt")
# Column of the sample info file holding each input filename.
mergehtseqcounts_job_spec.set_parameter("filenames.column", "filename")
# Second column of the sample info file holds the class division
# (primary tumor/normal), per the narrative description in this notebook.
mergehtseqcounts_job_spec.set_parameter("class.division.column", "2")
# Column holding the human-friendly sample name used in the outputs.
mergehtseqcounts_job_spec.set_parameter("sample.name.column", "samplename")
genepattern.display(mergehtseqcounts_task)
# -
# <h3>Examine your output files</h3>
#
# <p><div class="alert alert-info"><ul><li>Click on the <strong>cls</strong> and <strong>gct</strong> files you just generated and select the `Open in new tab` option to view them in your browser.</li></ul></div></p>
# <p>Alternatively, for the <strong>gct</strong> file you can click on it and select `Send to data frame` to look at the resulting gct file within this Jupyter notebook.</p>
#
# <p> </p>
#
# <h3>Step 2. Remove version suffix from Ensembl gene ids</h3>
# <p><div class="alert alert-info">
# <ol>
# <li>Click on the `MergeHTSeqCounts gct file` input in the cell below and select <b>BRCA_with_versioned_ensemble_ids.gct</b> as its input. Leave the output variable unchanged.</li>
# <li>Run the cell below.</li>
# </ol>
# </div></p>
#
# + genepattern={"output_variable": "BRCA_with_versioned_ensemble_ids_stripped", "param_values": {"MergeHTSeqCounts_gct_file": "", "output_var": ""}, "show_code": false, "type": "uibuilder"}
import os
from gp.data import GCT
# (The previous module-level `global my_local_url` statement was a no-op;
# the declaration that matters is inside the function below.)
@genepattern.build_ui( name="Strip Ensembl Version and write a new GCT",
                  description="Strip out the version from the Ensembl ids in a gct file and save it as "
                 +" a new gct file on the GenePattern server. Returns the URL to be used in the next job "
                 +"and also writes it to a notebook variable called \"my_local_url\". The method requires the gct "
                 +" file from a completed MergeHTSeqCounts job.", parameters={
    "MergeHTSeqCounts_gct_file": {
        "type": "file",
        "kinds": ["gct"]
    }
})
def stripEnsembleIdAndGetLocalUrl(MergeHTSeqCounts_gct_file):
    """Strip Ensembl version suffixes from a GCT file and re-upload it.

    Downloads the GCT produced by a MergeHTSeqCounts job, removes the
    ".NN" version suffix from each Ensembl gene id, uploads the result to
    the GenePattern server, and returns (and stores in the notebook
    variable `my_local_url`) the uploaded file's URL.
    """
    # BUG FIX: without this declaration the assignment below created a
    # function-local variable, so the promised notebook variable
    # `my_local_url` was never actually set at module scope.
    global my_local_url
    output_gct_filename = "BRCA_unversioned_ensembl_ids.gct"
    # get the input filename and job number from the job result URL
    jobNum = MergeHTSeqCounts_gct_file.split("/")[-2]
    input_gct_file_Name = MergeHTSeqCounts_gct_file.split("/")[-1]
    # get the GenePattern job object for the completed job
    # (removed the unused `myUserId` lookup)
    lastJob = gp.GPJob(genepattern.get_session(0), jobNum)
    # this is the part that actually removes the version id:
    # "ENSG00000000419.11" -> "ENSG00000000419"
    input_gct = GCT(lastJob.get_file(input_gct_file_Name))
    df2 = input_gct.dataframe.reset_index()
    df2['Name'] = df2['Name'].apply(lambda x: x.split(".")[0])
    # reset the index on name and Description in case we want to look at this dataframe later
    #df2.set_index(['Name', 'Description'])
    # now save it back as a new file local to the Notebook server,
    # writing the GCT header (version line, then rows/cols counts;
    # the 2 non-sample columns are Name and Description)
    with open(output_gct_filename, 'w') as f:
        f.writelines('#1.2\n{}\t{}\n'.format(df2.shape[0], df2.shape[1] - 2))
        df2.to_csv(f, sep='\t', index= False)
    # upload the local file onto the GenePattern server so we can use it in the next module
    uploaded = genepattern.get_session(0).upload_file(output_gct_filename, output_gct_filename)
    my_local_url = uploaded.get_url()
    print("Stripped GCT file url is: "+ my_local_url)
    return my_local_url
# -
# <h3>Step 3. Replace Ensembl gene ids with HUGO symbols and remove duplicates</h3>
#
# <p>When you looked into the gct file that was output, you may have noticed that it uses Ensembl IDs for the rows (counts). We would like to change this to HUGO symbols to make this more human-friendly. When we do this, we will end up with multiple Ensembl transcripts that all map to a single HUGO symbol. However the analysis we will do later does not like to see duplicate rows, so we will want to collapse instances of multiple transcripts down to a single row.</p>
#
# <p>To do this we will use the CollapseDataset module which can collapse the rows and replace the Ensembl IDs with HUGO symbols in one step.</p>
# <p><div class="alert alert-info">
# <ol>
# <li>Drag the "Stripped GCT file" URL from above, to <em>dataset file</em> parameter in <b>CollapseDataset</b> below</li></ol>
# </div></p><br/>
# + genepattern={"show_code": true, "type": "task"}
# CollapseDataset: maps Ensembl gene ids to HUGO symbols via the chip file
# and collapses rows with duplicate symbols to one row (taking the Maximum).
collapsedataset_task = gp.GPTask(genepattern.get_session(0), 'urn:lsid:broad.mit.edu:cancer.software.genepattern.module.analysis:00134')
collapsedataset_job_spec = collapsedataset_task.make_job_spec()
# dataset.file is left blank: the user drags the stripped GCT URL here.
collapsedataset_job_spec.set_parameter("dataset.file", "")
collapsedataset_job_spec.set_parameter("chip.platform", "ftp://ftp.broadinstitute.org/pub/gsea/annotations/ENSEMBL_human_gene.chip")
collapsedataset_job_spec.set_parameter("collapse.mode", "Maximum")
collapsedataset_job_spec.set_parameter("output.file.name", "<dataset.file_basename>.collapsed")
genepattern.display(collapsedataset_task)
# -
# ### Step 4. Normalize for downstream analysis
#
# Preprocess RNA-Seq count data in a GCT file so that it is suitable for use in GenePattern analyses.
#
# * The **PreprocessReadCounts** module is used to preprocess RNA-Seq data into a form suitable for use downstream in other GenePattern analyses such as Gene Set Enrichment Analysis (GSEA), ComparativeMarkerSelection, HierarchicalClustering, as well as visualizers.
# * Many of these approaches assume that the data is distributed normally, yet this is not true of RNA-seq read count data. The PreprocessReadCounts module provides one approach to accommodate this. It uses a mean-variance modeling technique to transform the dataset to fit an approximation of a normal distribution, with the goal of being able to apply statistical methods and workflows that assume a normal distribution.
# * Learn more by reading about the [PreprocessReadCounts](http://software.broadinstitute.org/cancer/software/genepattern/modules/docs/PreprocessReadCounts/1) module.
# <div class="alert alert-info">
#
# 1. Click on the <b>input file*</b> parameter below and select the `BRCA_unversioned_ensembl_ids.collapsed.gct` result file from the analysis you just did.
# 2. Click on the <b>cls file*</b> parameter below and select the <a href="https://raw.githubusercontent.com/genepattern/example-notebooks/master/2017-11-07_CCMI_workshop/BRCA_40_samples.cls">BRCA_with_versioned_ensemble_ids.cls</a> from the MergeHTSeqCounts analysis you performed at the beginning of this notebook.
# 3. Click on the + at the right edge of the *Advanced Parameters* header to display additional parameters.
# 4. Set the <strong><em>expression.value.filter.threshold</em></strong> parameter to 4 (so as to reduce the number of rows, and thus the time for computation).
# 5. Copy and paste the name `BRCA_HUGO_symbols.preprocessed.gct` into the **output file** parameter.
# 6. Click the button <strong><em>Run</em></strong> on the analysis below.
#
# + genepattern={"show_code": true, "type": "task"}
# PreprocessReadCounts: transforms raw RNA-Seq counts via mean-variance
# modeling so tools that assume normally distributed data can be applied.
preprocessreadcounts_task = gp.GPTask(genepattern.get_session(0), 'urn:lsid:broad.mit.edu:cancer.software.genepattern.module.analysis:00355')
preprocessreadcounts_job_spec = preprocessreadcounts_task.make_job_spec()
# input.file / cls.file / output.file are left blank: the user fills them
# in through the GenePattern UI per the step-by-step instructions.
preprocessreadcounts_job_spec.set_parameter("input.file", "")
preprocessreadcounts_job_spec.set_parameter("cls.file", "")
preprocessreadcounts_job_spec.set_parameter("output.file", "")
# Default threshold; the instructions ask the user to raise it to 4.
preprocessreadcounts_job_spec.set_parameter("expression.value.filter.threshold", "1")
genepattern.display(preprocessreadcounts_task)
# -
# <h3>Review the newly generated file</h3>
# <div class="alert alert-info"><ol>
# <li>Click on the output file called `BRCA_HUGO_symbols.preprocessed.gct`. </li>
# <li>Select `Send to existing GenePattern Cell`. </li>
# <li>Select `HeatMapViewer` (below).</li>
# <li>Run the HeatMapViewer cell.</li>
# </ol></div>
#
#
# + genepattern={"show_code": true, "type": "task"}
# HeatMapViewer: visualizer for the preprocessed GCT; the dataset parameter
# is populated by sending a result file to this cell from the UI.
heatmapviewer_task = gp.GPTask(genepattern.get_session(0), 'urn:lsid:broad.mit.edu:cancer.software.genepattern.module.visualizer:00010')
heatmapviewer_job_spec = heatmapviewer_task.make_job_spec()
heatmapviewer_job_spec.set_parameter("dataset", "")
genepattern.display(heatmapviewer_task)
# -
# <h3>Extra Credit</h3>
#
# <p>In the example above, we provided the cell with the MergeHTSeqCounts module pre-selected and with all of the files pre-entered as input. If you would like to go through the steps to set this up for yourself, here is what you can do:</p>
#
# <ol>
# <li>Create a cell and change its type to "GenePattern".</li>
# <li>When the select module dialog opens, search to find the MergeHTSeqCounts module and select it.</li>
# <li>Open the datasets page from the link below in a separate window.</li>
# <li>Drag the file, <b>TCGA_BRCA_SAMPLEINFO.txt</b>, from the datasets page into the <strong>sampleinfo file</strong> parameter.</li>
# <li>Drag all of the the htseq.counts files from datasets page (see below) to the <strong>input files</strong> parameter. <strong>You can select and drag all the htseq.counts files as a single block rather than one at a time.</strong></li>
# <li>Set the appropriate values for the columns. Click on the link for the sample info file to see its format. In this file, the columns are <strong>filename</strong>, <strong>samplename</strong> and <strong>primary tumor/normal</strong>. You can use either these names or their column index to identify the columns to the module.</li>
# <li>Run the module and confirm that the results match the example above.</li>
# <li>Repeat the rest of the steps from the example above.</li>
# </ol>
#
# <p> </p>
#
# <h3>Input Files:</h3>
#
# <p>You can drag and drop the input files from the page at the link below. Do not worry about selecting just the links as the GenePattern file drop parameter will do the right thing if you grab the whole page. Make sure you do differentiate between the <strong>"*.htseq.counts"</strong><em> </em>files and the sample info file<em>, </em><strong>TCGA_BRCA_SAMPLEINFO.txt</strong></p>
# <p><a href="https://datasets.genepattern.org/index.html?prefix=data/TCGA_BRCA/BRCA_HTSeqCounts/" target="_blank">https://datasets.genepattern.org/index.html?prefix=data/TCGA_BRCA/BRCA_HTSeqCounts</a></p>
#
| 2018-02-05_and_06_12_BroadE_workshop/notebooks/2018-06-12/2018-06-12_03_BroadE_DataPrep.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# importing the required libraries
library(keras)  # MNIST data set and model building
library(grid)   # raster visualization of images
library(abind)  # binding image arrays along a dimension
# loading the MNIST data (list with $train and $test partitions)
data <- dataset_mnist()
# loading the training and test partitions of input data
x_train <- data$train$x
x_test <- data$test$x
# normalizing the data (0..255 pixel values) to the range of 0 and 1
x_train_norm <- x_train/255
x_test_norm <- x_test/255
# reshaping to 4D arrays (samples, height, width, channels) as expected
# by the convolutional layers
x_train_norm <- array_reshape(x_train_norm,dim = c(60000,28,28,1))
x_test_norm <- array_reshape(x_test_norm,dim = c(10000,28,28,1))
# checking dimensions of normalized data
dim(x_train_norm)
dim(x_test_norm)
# visualizing sample image
options(repr.plot.width = 0.5,repr.plot.height = 1)
library(grid)  # already attached above; this repeat is harmless
grid.raster(x_train_norm[2,,,])
# adding a normal random noise centered at 0.5 and std=0.5 to the input data
# BUG FIX: n_train/n_test were never defined, and rnorm() was given a
# vector (seq(0, 1, by = 0.02)) as its first argument, so only
# length(seq(...)) = 51 random values were drawn and recycled across the
# whole array. Draw one independent noise value per pixel instead.
n_train <- dim(x_train_norm)[1]
n_test <- dim(x_test_norm)[1]
noise_train <- array(data = rnorm(n_train * 28 * 28, mean = 0.5, sd = 0.5), dim = c(n_train, 28, 28, 1))
dim(noise_train)
noise_test <- array(data = rnorm(n_test * 28 * 28, mean = 0.5, sd = 0.5), dim = c(n_test, 28, 28, 1))
dim(noise_test)
x_train_norm_noise <- x_train_norm + noise_train
x_test_norm_noise <- x_test_norm + noise_test
# clipping the corrupted input data (input data + noise) for maintaining
# the pixel values in the range of 0 and 1
x_train_norm_noise[x_train_norm_noise < 0] <- 0
x_train_norm_noise[x_train_norm_noise > 1] <- 1
x_test_norm_noise[x_test_norm_noise < 0] <- 0
x_test_norm_noise[x_test_norm_noise > 1] <- 1
# +
# creating the encoder part
# input layer: 28x28 grayscale images
inputs <- layer_input(shape = c(28, 28, 1))
x = inputs
# -
# configuring the layers for the encoder model: two conv blocks with ReLU
x <- x %>%
  layer_conv_2d(filter = 32, kernel_size = 3,padding = "same", input_shape = c(28, 28, 1)) %>%
  layer_activation("relu") %>%
  layer_conv_2d(filter = 64, kernel_size = 3) %>%
  layer_activation("relu")
# extracting the shape of the output tensor from the encoder network
# (needed later to size the decoder's dense and reshape layers)
shape = k_int_shape(x)
shape
# adding a flattened layer and a dense layer at the end of the encoder
# model; the 16-unit dense layer is the latent (bottleneck) representation
x = x %>% layer_flatten()
latent = x %>% layer_dense(16,name = "latent")
# instantiating the encoder model
encoder = keras_model(inputs, latent)
# summary of the encoder model
summary(encoder)
# +
# decoder part of the network; output of encoder (16-dim latent vector)
# is fed as input for the decoder part
latent_inputs = layer_input(shape=16, name='decoder_input')
# dense + reshape back to the encoder's pre-flatten tensor shape
x = latent_inputs %>% layer_dense(shape[[2]] * shape[[3]] * shape[[4]]) %>%
  layer_reshape(c(shape[[2]],shape[[3]], shape[[4]]))
# +
# configuring the layers of the decoder part (transposed convolutions)
x <- x %>%
  layer_conv_2d_transpose(
    filter = 64, kernel_size = 3, padding = "same",
    input_shape = c(28, 28, 1)
  ) %>%
  layer_activation("relu") %>%
  # Second hidden layer
  layer_conv_2d_transpose(filter = 32, kernel_size =3) %>%
  layer_activation("relu")
# final transposed convolution maps back to a single channel
x = x %>% layer_conv_2d_transpose(filters=1,
                                  kernel_size= 3,
                                  padding='same')
# sigmoid keeps reconstructed pixel values in [0, 1]
outputs = x %>% layer_activation('sigmoid', name='decoder_output')
# -
# +
# building the autoencoder model: encoder followed by decoder
autoencoder = keras_model(inputs, decoder(encoder(inputs)))
summary(autoencoder)
# -
# compiling the autoencoder model (mean squared reconstruction error)
autoencoder %>% compile(loss = 'mse',optimizer = 'adam')
# training the autoencoder model: noisy images in, clean images as targets
autoencoder %>% fit(x_train_norm_noise,
                    x_train_norm,
                    validation_data=list(x_test_norm_noise, x_test_norm),
                    epochs=30,batch_size= 128
                    )
# saving model
save_model_hdf5(autoencoder,"autoencoder_denoising_r_1.h5")
# generating predictions (denoised reconstructions) for the test data
prediction <- autoencoder %>% predict(x_test_norm_noise)
# visualizing the reconstructed inputs after the predictions
options(repr.plot.width=10, repr.plot.height=1)
# +
# Strip of noisy test inputs.
# NOTE(review): grid1 starts with noisy test image 20 while grid2 below
# starts with prediction 2, yet both loops append images 40:50 — the first
# panel of the two strips therefore shows different images; confirm which
# starting index was intended.
grid1 = x_test_norm_noise[20,,,]
for(i in seq(40,50)){
  grid1 = abind(grid1,x_test_norm_noise[i,,,],along = 2)
}
grid.raster(grid1,interpolate=FALSE)
# +
# Strip of the corresponding denoised reconstructions.
grid2 = prediction[2,,,]
for(i in seq(40,50)){
  grid2 = abind(grid2,prediction[i,,,],along = 2)
}
grid.raster(grid2,interpolate=FALSE)
| Chapter04/Denoising autoencoder mnist.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import tensorflow as tf
import matplotlib.pyplot as plt
import matplotlib
import numpy
from skimage import color, io
from skimage.transform import resize
from skimage.io import imsave
def loadImage(path):
    """Load an image and return its L (lightness) channels plus the original.

    Returns a tuple of:
      - the (256, 256, 1) float32 L channel of the resized image (network input),
      - the full-resolution L channel of the original image,
      - the original RGB image array.
    """
    original_img = io.imread(path)[:,:,:3]  # drop any alpha channel
    L = color.rgb2lab(original_img)[:,:,0]
    # NOTE(review): the third positional argument of skimage's resize is
    # `order`; passing True coerces to order=1 — confirm this was intended
    # rather than e.g. anti_aliasing=True.
    img = resize(original_img, (256, 256), True)
    img = img.reshape((img.shape[0], img.shape[1], 3))
    img = img.astype(numpy.float32)
    img = color.rgb2lab(img)[:,:,0]
    return img.reshape((img.shape[0], img.shape[1], 1)), L, original_img
def loadGraph(frozen_graph_filename):
    """Deserialize a frozen TensorFlow GraphDef and import it into a new graph.

    All imported ops are prefixed with "prefix", so tensors are addressed
    as "prefix/<op_name>:<index>".
    """
    # Parse the serialized protobuf from disk.
    with tf.gfile.GFile(frozen_graph_filename, "rb") as fh:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(fh.read())
    # Import into a brand-new graph so the default graph stays untouched.
    imported_graph = tf.Graph()
    with imported_graph.as_default():
        # The name argument prefixes every op/node in the graph; since we
        # load everything into a fresh graph this is mainly for addressing.
        tf.import_graph_def(graph_def, name="prefix")
    return imported_graph
# +
# Run the frozen colorization network on one grayscale image and compare
# input / prediction / ground truth side by side.
matplotlib.rcParams['figure.figsize'] = (20.0, 10.0)
img, L, original_image = loadImage('../data/img_0.jpg')
graph = loadGraph("model/frozen_model.pb")
# Input batch placeholder and colorized output tensor of the frozen graph.
x = graph.get_tensor_by_name('prefix/batch:0')
y = graph.get_tensor_by_name('prefix/colorized_image:0')
# NOTE(review): `filler` is never used below — confirm it can be removed.
filler = numpy.zeros((256, 256, 1))
with tf.Session(graph=graph) as sess:
    # Feed a batch of one image (the 256x256x1 L channel from loadImage).
    y_out = sess.run(y, feed_dict={
        x: [img],
    })
# Convert the predicted Lab image to RGB and upscale to the original size.
rgb = color.lab2rgb(y_out[0].astype(numpy.float64))
rgb = resize(rgb, (original_image.shape[0], original_image.shape[1]), True)
lab = color.rgb2lab(rgb)
# Combine the original full-resolution L channel with the predicted a/b
# (color) channels so luminance detail is not lost to the 256x256 pass.
final = numpy.zeros((original_image.shape[0], original_image.shape[1], 3))
final[:,:,0] = L
final[:,:,1] = lab[:,:,1]
final[:,:,2] = lab[:,:,2]
final = color.lab2rgb(final)
# NOTE(review): `final` is computed but never displayed or saved — the
# Prediction panel below shows `rgb`; confirm whether `final` was intended.
plt.subplot(131)
plt.title('Input', fontsize=22)
plt.imshow(L, cmap='gray')
plt.axis('off')
plt.subplot(132)
plt.title('Prediction', fontsize=22)
plt.imshow(rgb)
plt.axis('off')
plt.subplot(133)
plt.title('Actual', fontsize=22)
plt.imshow(original_image)
plt.axis('off')
fig = plt.gcf()
plt.show()
fig.savefig('results.png')
# -
| notebooks/color_one_pic.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import matplotlib
# Use a non-interactive backend so the script can run headless.
matplotlib.use('Agg')
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.INFO)
sess_config = tf.ConfigProto()
import sys
import os
import glob
# Paths to the COCO data and to the Mask R-CNN / Siamese Mask R-CNN code.
COCO_DATA = '../data/coco'
MASK_RCNN_MODEL_PATH = '../lib/Mask_RCNN/'
SIAMESE_MASK_RCNN_PATH = '../'
# Make both libraries importable without installing them.
if MASK_RCNN_MODEL_PATH not in sys.path:
    sys.path.append(MASK_RCNN_MODEL_PATH)
if SIAMESE_MASK_RCNN_PATH not in sys.path:
    sys.path.append(SIAMESE_MASK_RCNN_PATH)
from samples.coco import coco
from mrcnn import utils
from mrcnn import model as modellib
from mrcnn import visualize
from lib import utils as siamese_utils
from lib import model as siamese_model
from lib import config as siamese_config
from collections import OrderedDict
import time
import datetime
import random
import numpy as np
import skimage.io
import imgaug
import pickle
# Root directory of the project
ROOT_DIR = os.getcwd()
# Directory to save logs and trained model
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
# -
# Which of the four class splits to hold out; used in EXPERIMENT naming
# and in the exclude_classes computation further down.
index = 4
# +
# +
class TrainConfig(siamese_config.Config):
    """Training configuration for Siamese Mask R-CNN on COCO (split i4)."""
    # Effective batch size = GPU_COUNT * IMAGES_PER_GPU = 12.
    # (The original comment claimed batch size 1 for inference; this
    # config is used for multi-GPU training.)
    GPU_COUNT = 4
    IMAGES_PER_GPU = 3
    NUM_CLASSES = 1 + 1
    NAME = 'parallel_coco'
    EXPERIMENT = 'i{}'.format(index)  # experiment tag from the global `index`
    CHECKPOINT_DIR = '../checkpoints/'
    # Reduced image sizes
    TARGET_MAX_DIM = 192
    TARGET_MIN_DIM = 150
    IMAGE_MIN_DIM = 800
    IMAGE_MAX_DIM = 1024
    # Reduce model size
    FPN_CLASSIF_FC_LAYERS_SIZE = 1024
    FPN_FEATUREMAPS = 256
    # Reduce number of rois at all stages
    RPN_ANCHOR_STRIDE = 1
    RPN_TRAIN_ANCHORS_PER_IMAGE = 256
    POST_NMS_ROIS_TRAINING = 2000
    POST_NMS_ROIS_INFERENCE = 1000
    TRAIN_ROIS_PER_IMAGE = 200
    DETECTION_MAX_INSTANCES = 100
    MAX_GT_INSTANCES = 100
    # Adapt NMS Threshold
    DETECTION_NMS_THRESHOLD = 0.5
    # Adapt loss weights
    LOSS_WEIGHTS = {'rpn_class_loss': 2.0,
                    'rpn_bbox_loss': 0.1,
                    'mrcnn_class_loss': 2.0,
                    'mrcnn_bbox_loss': 0.5,
                    'mrcnn_mask_loss': 1.0}


config = TrainConfig()
config.display()
# -
# Directory name for this experiment's outputs.
exp_dir = os.path.join(ROOT_DIR, "{}_{}".format(config.NAME.lower(), config.EXPERIMENT.lower()))
# Hold out every 4th COCO class id (offset by `index`): 20 of the 80
# classes are excluded, leaving 60 class ids in train_classes.
exclude_classes = [4*i+index for i in range(20)]
train_classes = np.array(range(1,81))[np.array([i not in exclude_classes for i in range(1,81)])]
# +
# Load COCO/train dataset
coco_train = siamese_utils.IndexedCocoDataset()
coco_train.load_coco(COCO_DATA, subset="train", subsubset="train", year="2017")
coco_train.prepare()
coco_train.build_indices()
# Restrict sampling to the non-held-out classes.
coco_train.ACTIVE_CLASSES = train_classes

# Load COCO/val dataset
# NOTE(review): the validation split is also taken from subset="train"
# (subsubset="val") — confirm this custom split is intended rather than
# COCO's official val set.
coco_val = siamese_utils.IndexedCocoDataset()
coco_val.load_coco(COCO_DATA, subset="train", subsubset="val", year="2017")
coco_val.prepare()
coco_val.build_indices()
coco_val.ACTIVE_CLASSES = train_classes
# -
# ### Train
# Create model object in training mode (the original comment incorrectly
# said "inference mode").
model = siamese_model.SiameseMaskRCNN(mode="training", model_dir=MODEL_DIR, config=config)

# Training schedule: the key is the epoch to train up to; the value gives
# the learning rate and the set of layers to train during that phase.
train_schedule = OrderedDict()
train_schedule[1] = {"learning_rate": config.LEARNING_RATE, "layers": "heads"}
train_schedule[120] = {"learning_rate": config.LEARNING_RATE, "layers": "all"}
train_schedule[160] = {"learning_rate": config.LEARNING_RATE/10, "layers": "all"}

# Resume from the latest checkpoint if one exists; otherwise fall back to
# ImageNet-pretrained weights. Fixed: the original bare `except:` also
# swallowed KeyboardInterrupt/SystemExit.
try:
    model.load_latest_checkpoint(training_schedule=train_schedule)
except Exception:
    model.load_imagenet_weights(pretraining='imagenet-687')

# Run each phase of the schedule in order.
for epochs, parameters in train_schedule.items():
    print("")
    print("training layers {} until epoch {} with learning_rate {}".format(parameters["layers"],
                                                                           epochs,
                                                                           parameters["learning_rate"]))
    model.train(coco_train, coco_val,
                learning_rate=parameters["learning_rate"],
                epochs=epochs,
                layers=parameters["layers"])
| experiments/train_parallel_coco_i4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # 1. Set up the notebook
# Import modules.
import numpy as np
import sympy as sym
import json
import matplotlib.pyplot as plt
from scipy import linalg
from scipy.interpolate import interp1d
# Define a function to load data from hardware (and resample it at 100 Hz using linear interpolation). If `only_in_flight=True`, then only data for which the desired position was positive (i.e., "trying to fly" rather than "sitting on the ground" data) will be returned.
def load_hardware_data(filename, t_min_offset=0, t_max_offset=0, only_in_flight=False):
    """
    Load logged drone data from a JSON file and resample it at 100 Hz
    with linear interpolation.

    Parameters
    ----------
    filename : str
        Path to a JSON file mapping channel names to dicts holding a
        'time' array (milliseconds) and a 'data' array.
    t_min_offset, t_max_offset : float
        Seconds of data to drop from the start / end of the common window.
    only_in_flight : bool
        If True, keep only the span where 'ae483log.o_z_des' is positive.

    Returns
    -------
    dict
        {'time': seconds (numpy array), <channel>: resampled values, ...}
    """
    with open(filename, 'r') as f:
        raw = json.load(f)

    # lists -> numpy arrays
    for channel in raw.values():
        for name in channel.keys():
            channel[name] = np.array(channel[name])

    # common time window shared by every channel (raw times are in ms)
    t_min = -np.inf
    t_max = np.inf
    for channel in raw.values():
        t_min = max(t_min, channel['time'][0])
        t_max = min(t_max, channel['time'][-1])
    t_min += t_min_offset * 1000
    t_max -= t_max_offset * 1000

    # uniform 100 Hz sample grid, expressed in seconds
    nt = int(1 + np.floor((t_max - t_min) / 10.))
    t = np.arange(0, 10 * nt, 10) / 1000.
    resampled = {'time': t}

    # linearly interpolate each channel onto the grid
    for name, channel in raw.items():
        interpolant = interp1d((channel['time'] - t_min) / 1000., channel['data'])
        resampled[name] = interpolant(t)

    # optionally truncate to the span where the desired height is positive
    if only_in_flight:
        if 'ae483log.o_z_des' not in resampled.keys():
            raise Exception('"ae483log.o_z_des" must be logged')
        idx = np.argwhere(resampled['ae483log.o_z_des'] > 0).flatten()
        if len(idx) == 0:
            raise Exception('o_z_des was never positive')
        if len(idx) < 2:
            raise Exception('o_z_des was only positive for one time step')
        # NOTE: the end slice is exclusive, so the final positive sample is
        # dropped (matches the original behavior).
        for name in resampled.keys():
            resampled[name] = resampled[name][idx[0]:idx[-1]]

    return resampled
# # 2. Derive models
# ## 2.1 Define symbolic variables
# Define states.
# +
# components of position (meters)
o_x, o_y, o_z = sym.symbols('o_x, o_y, o_z')
# yaw, pitch, and roll angles (radians)
psi, theta, phi = sym.symbols('psi, theta, phi')
# components of linear velocity (meters / second)
v_x, v_y, v_z = sym.symbols('v_x, v_y, v_z')
# -
# Define inputs.
# +
# gyroscope measurements - components of angular velocity (radians / second)
w_x, w_y, w_z = sym.symbols('w_x, w_y, w_z')
# z-axis accelerometer measurement - specific force (meters / second^2)
a_z = sym.symbols('a_z')
# -
# Define outputs.
n_x, n_y, r = sym.symbols('n_x, n_y, r')
# Define parameters.
g, k_flow = sym.symbols('g, k_flow')
# Create linear and angular velocity vectors (in coordinates of the body frame).
v_01in1 = sym.Matrix([[v_x], [v_y], [v_z]])
w_01in1 = sym.Matrix([[w_x], [w_y], [w_z]])
# ## 2.2 Define kinematics of orientation
# ### 2.2.1 Rotation matrix in terms of yaw, pitch, roll angles
# Define individual rotation matrices.
# +
Rz = sym.Matrix([[sym.cos(psi), -sym.sin(psi), 0],
[sym.sin(psi), sym.cos(psi), 0],
[0, 0, 1]])
Ry = sym.Matrix([[sym.cos(theta), 0, sym.sin(theta)],
[0, 1, 0],
[-sym.sin(theta), 0, sym.cos(theta)]])
Rx = sym.Matrix([[1, 0, 0],
[0, sym.cos(phi), -sym.sin(phi)],
[0, sym.sin(phi), sym.cos(phi)]])
# -
# Apply sequential transformation to compute the rotation matrix that describes the orientation of the drone (i.e., of frame 1 in the coordinates of frame 0).
R_1in0 = Rz * Ry * Rx
# ### 2.2.2 Map from angular velocity to angular rates
# Recall that
#
# $$\begin{bmatrix} \dot{\psi} \\ \dot{\theta} \\ \dot{\phi} \end{bmatrix} = N w_{0, 1}^{1}$$
#
# for some matrix $N$. Here is how to compute that matrix for a ZYX (yaw, pitch, roll) Euler angle sequence. First, we compute its inverse:
Ninv = sym.Matrix.hstack((Ry * Rx).T * sym.Matrix([[0], [0], [1]]),
(Rx).T * sym.Matrix([[0], [1], [0]]),
sym.Matrix([[1], [0], [0]]))
# Then, we compute $N$ by taking the inverse of $N^{-1}$:
N = sym.simplify(Ninv.inv())
# ## 2.3 Derive equations of motion
# Ratio of net thrust to mass in terms of z-axis accelerometer measurement.
f_z_over_m = a_z + (w_01in1.cross(v_01in1))[2]
# Ratio of forces to mass.
f_in1_over_m = R_1in0.T * sym.Matrix([[0], [0], [-g]]) + sym.Matrix([[0], [0], [f_z_over_m]])
# Equations of motion.
f = sym.Matrix.vstack(
R_1in0 * v_01in1,
N * w_01in1,
(f_in1_over_m - w_01in1.cross(v_01in1)),
)
# Show equations of motion, which have the form
#
# $$\dot{s} = f(s, i, p)$$
#
# where
#
# $$
# s = \begin{bmatrix} o_x \\ o_y \\ o_z \\ \psi \\ \theta \\ \phi \\ v_x \\ v_y \\ v_z \end{bmatrix}
# \qquad\qquad
# i = \begin{bmatrix} w_x \\ w_y \\ w_z \\ a_z \end{bmatrix}
# \qquad\qquad
# p = \begin{bmatrix} g \\ k_\text{flow} \end{bmatrix}.
# $$
f
# ## 2.4 Derive measurement equations
# Create measurement equations.
h = sym.Matrix([
k_flow * (v_x - o_z * w_y) / o_z, # <-- x flow (n_x)
k_flow * (v_y + o_z * w_x) / o_z, # <-- y flow (n_y)
o_z / (sym.cos(phi) * sym.cos(theta)), # <-- z range (r)
])
# Show measurement equations, which have the form
#
# $$o = h(s, i, p)$$
#
# where
#
# $$
# o = \begin{bmatrix} n_x \\ n_y \\ r \end{bmatrix}
# \qquad\qquad
# s = \begin{bmatrix} o_x \\ o_y \\ o_z \\ \psi \\ \theta \\ \phi \\ v_x \\ v_y \\ v_z \end{bmatrix}
# \qquad\qquad
# i = \begin{bmatrix} w_x \\ w_y \\ w_z \\ a_z \end{bmatrix}
# \qquad\qquad
# p = \begin{bmatrix} g \\ k_\text{flow} \end{bmatrix}.
# $$
h
# # 3. Derive state-space model
# ## 3.1 Choose equilibrium point
#
# An equilibrium point of the nonlinear system is a choice of states $s_\text{eq}$ and inputs $i_\text{eq}$ - along with constant parameters $p_\text{eq}$ - for which
#
# $$0 = f(s_\text{eq}, i_\text{eq}, p_\text{eq}).$$
# Create a symbolic variable to describe the equilibrium value of $o_z$.
o_z_eq = sym.symbols('o_z_eq')
# Create a list of states, inputs, outputs, and parameters as symbolic variables.
s = [o_x, o_y, o_z, psi, theta, phi, v_x, v_y, v_z]
i = [w_x, w_y, w_z, a_z]
o = [n_x, n_y, r]
p = [g, k_flow]
# Create a list of state and input values at equilibrium in the **same order** as before.
s_eq = [0, 0, o_z_eq, 0, 0, 0, 0, 0, 0]
i_eq = [0, 0, 0, g]
# Make sure all equilibrium values are symbolic.
s_eq = [sym.nsimplify(a) for a in s_eq]
i_eq = [sym.nsimplify(a) for a in i_eq]
# Evaluate the equations of motion at the equilibrium point - if it actually *is* an equilibrium point, then the result should be a matrix of zeros:
f.subs(tuple(zip(s, s_eq))).subs(tuple(zip(i, i_eq)))
# ## 3.2 Find $A$, $B$, $C$, and $D$
# Recall that:
#
# $$
# A = \frac{\partial f}{\partial s}\biggr\vert_{(s, i, p) = (s_\text{eq}, i_\text{eq}, p_\text{eq})}
# \qquad\quad
# B = \frac{\partial f}{\partial i}\biggr\vert_{(s, i, p) = (s_\text{eq}, i_\text{eq}, p_\text{eq})}
# \qquad\quad
# C = \frac{\partial h}{\partial s}\biggr\vert_{(s, i, p) = (s_\text{eq}, i_\text{eq}, p_\text{eq})}
# \qquad\quad
# D = \frac{\partial h}{\partial i}\biggr\vert_{(s, i, p) = (s_\text{eq}, i_\text{eq}, p_\text{eq})}.
# $$
#
# Compute each Jacobian and plug in the equilibrium values as follows.
A = f.jacobian(s).subs(tuple(zip(s, s_eq))).subs(tuple(zip(i, i_eq)))
B = f.jacobian(i).subs(tuple(zip(s, s_eq))).subs(tuple(zip(i, i_eq)))
C = h.jacobian(s).subs(tuple(zip(s, s_eq))).subs(tuple(zip(i, i_eq)))
D = h.jacobian(i).subs(tuple(zip(s, s_eq))).subs(tuple(zip(i, i_eq)))
# Show $A$:
A
# Show $B$:
B
# Show $C$:
C
# Show $D$ (note that it is *not* zero in this case):
D
# ## 3.3 Write linearized models
# Define the state, input, and output of the state-space system (i.e., the linearized model of the equations of motion and of the measurement equations).
x = sym.Matrix(s) - sym.Matrix(s_eq)
u = sym.Matrix(i) - sym.Matrix(i_eq)
y = sym.Matrix(o) - h.subs(tuple(zip(s, s_eq))).subs(tuple(zip(i, i_eq)))
# Show the linearized equations of motion $Ax+Bu$.
A * x + B * u
# Show the linearized measurement equations $Cx+Du$.
C * x + D * u
# Show the output (which our model tells us should be $Cx+Du$).
y
# # 4. Study error in linearized models
# #### Describe the flight test
# Replace this cell with the following information, at minimum:
#
# * A description of the flight trajectory, both in words and (if you like) a snippet of code from `flight.py`.
# * A description of the flight conditions (e.g., where was the flight conducted, did you power cycle the drone just before flying, were you using the positioning system or only the onboard sensors, etc.).
# #### Show a video of the flight test
# Replace this cell with a video of your flight. Here are two ways to do this. (What you should *not* do is drag-and-drop your video into this notebook.)
#
# ##### Markdown
#
# Put your video in the same directory as this notebook. Suppose this video is called `hardware_video.mov`. Then put the following code in a cell of type `Markdown` and evaluate it:
# ```
# 
# ```
#
# ##### HTML
# Put your video in the same directory as this notebook. Suppose this video is called `hardware_video.mov`. Then put the following code in a cell of type `Code` and evaluate it:
# ```
# # %%HTML
# <video width="480" controls>
# <source src="hardware_video.mov">
# </video>
# ```
# You can change the `width` parameter to resize your video.
# ## 4.1 Load and parse flight data
# Load flight data.
data = load_hardware_data(
'hardware_data.json', # <-- replace with name of file with hardware data
t_min_offset=0., # <-- (optional) replace with how many seconds of data to ignore at start
t_max_offset=0., # <-- (optional) replace with how many seconds of data to ignore at end
only_in_flight=True, # <-- (optional) only loads data for which o_z_des is positive
)
# Parse flight data.
# +
# time
t = data['time']
# states
o_x = data['ae483log.o_x']
o_y = data['ae483log.o_y']
o_z = data['ae483log.o_z']
psi = data['ae483log.psi']
theta = data['ae483log.theta']
phi = data['ae483log.phi']
v_x = data['ae483log.v_x']
v_y = data['ae483log.v_y']
v_z = data['ae483log.v_z']
# inputs
w_x = data['ae483log.w_x']
w_y = data['ae483log.w_y']
w_z = data['ae483log.w_z']
a_z = data['ae483log.a_z']
# outputs
n_x = data['ae483log.n_x']
n_y = data['ae483log.n_y']
r = data['ae483log.r']
# -
# Find time step (should be 0.01).
dt = t[1] - t[0]
print(f'dt = {dt:.4f}')
# Define numerical values of each parameter.
# +
# Acceleration of gravity
g = 9.81
# Optical flow constant (do not modify)
k_flow = 0.01 * 30.0 / np.deg2rad(4.2)
# Equilibrium value of o_z
o_z_eq = 0.0 # <-- FIXME
# -
# ## 4.2 Error in linearized equations of motion
# ### 4.2.1 Error in linear model of $\dot{o}_x$
# Approximate $\dot{o}_x$ by finite difference and call this "ground truth."
o_x_dot_true = (o_x[1:] - o_x[:-1]) / dt
# Remember that, because of the way it is computed, the length of the finite difference approximation `o_x_dot_true` is one less than the length of `o_x` (and of `t`):
print(f'len(o_x_dot_true) = {len(o_x_dot_true)}')
print(f' len(o_x) = {len(o_x)}')
print(f' len(t) = {len(t)}')
# Predict $\dot{o}_x$ with linearized equations of motion.
#
# In particular, note that the first element of $\dot{x}$ is $\dot{o}_x$, and that the first element of $Ax+Bu$ is $v_x$. So, our state-space model tells us that $\dot{o}_x \approx v_x$.
o_x_dot_predicted = v_x
# Compare the true value and the predicted values of $\dot{o}_x$ in a plot.
plt.figure(figsize=(10, 5))
# FIX: raw strings for the LaTeX labels — '\d' is an invalid escape sequence
# in a normal string literal (SyntaxWarning on modern Python). The runtime
# string bytes are unchanged.
plt.plot(t[:-1], o_x_dot_true, label=r'$\dot{o}_x$ (true)', linewidth=1)
plt.plot(t, o_x_dot_predicted, '--', label=r'$\dot{o}_x$ (predicted)', linewidth=2)
plt.legend(fontsize=14)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.xlabel('time (s)', fontsize=14)
plt.show()
# Compute the error in the linear model, i.e., the difference between the predicted and true values of $\dot{o}_x$.
# The last predicted sample is dropped so lengths match the finite-difference truth.
o_x_dot_err = o_x_dot_predicted[:-1] - o_x_dot_true
# Plot a histogram of the error, showing mean and standard deviation.
plt.figure(figsize=(5, 5))
plt.hist(o_x_dot_err, 50)
plt.xlabel('error', fontsize=14)
plt.ylabel('count', fontsize=14)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.title(
    # '\\d' keeps a literal backslash for mathtext while '\n' stays a real newline
    'Error in prediction of $\\dot{o}_x$\n' +
    f'(mean = {np.mean(o_x_dot_err):6.3f}, std = {np.std(o_x_dot_err):6.3f})',
    fontsize=14,
)
plt.show()
# ### 4.2.2 Error in linear model of $\dot{o}_y$
# ### 4.2.3 Error in linear model of $\dot{o}_z$
# ### 4.2.4 Error in linear model of $\dot{\psi}$
# ### 4.2.5 Error in linear model of $\dot{\theta}$
# ### 4.2.6 Error in linear model of $\dot{\phi}$
# ### 4.2.7 Error in linear model of $\dot{v}_x$
# ### 4.2.8 Error in linear model of $\dot{v}_y$
# ### 4.2.9 Error in linear model of $\dot{v}_z$
# ## 4.3 Error in linearized measurement equations
# ### 4.3.1 Error in linear model of $n_x$
# Predict $n_x$ with the linearized measurement equations.
#
# In particular, note that the first element of $y$ is $n_x$, and that the first element of $Cx+Du$ is
#
# $$k_\text{flow} \left( \dfrac{v_x}{o_\text{z, eq}} - w_y \right),$$
#
# so our linear model tells us that
#
# $$n_x \approx k_\text{flow} \left( \dfrac{v_x}{o_\text{z, eq}} - w_y \right).$$
n_x_predicted = k_flow * ((v_x / o_z_eq) - w_y)
# Compare the true value and the predicted values of $n_x$ in a plot.
plt.figure(figsize=(10, 5))
plt.plot(t, n_x, label='$n_x$ (true)', linewidth=1)
plt.plot(t, n_x_predicted, '--', label='$n_x$ (predicted)', linewidth=2)
plt.legend(fontsize=14)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.xlabel('time (s)', fontsize=14)
plt.show()
# Compute the error in the linear model, i.e., the difference between the predicted and true values of $n_x$.
n_x_err = n_x_predicted - n_x
# Plot a histogram of the error, showing mean and standard deviation.
plt.figure(figsize=(5, 5))
plt.hist(n_x_err, 50)
plt.xlabel('error', fontsize=14)
plt.ylabel('count', fontsize=14)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.title(
'Error in prediction of $n_x$\n' +
f'(mean = {np.mean(n_x_err):6.3f}, std = {np.std(n_x_err):6.3f})',
fontsize=14,
)
plt.show()
# ### 4.3.2 Error in linear model of $n_y$
# ### 4.3.3 Error in linear model of $r$
# ## 4.4 Summary
# The following table reports the mean and standard deviation of error in the linearized equations of motion:
#
# | | $\dot{o}_x$ | $\dot{o}_y$ | $\dot{o}_z$ | $\dot{\psi}$ | $\dot{\theta}$ | $\dot{\phi}$ | $\dot{v}_x$ | $\dot{v}_y$ | $\dot{v}_z$ |
# | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: |
# | mean | 0.123 | 0.456 | 0.789 | 0.789 | 0.456 | 0.123 | 0.123 | 0.123 | 0.123 |
# | std | 0.123 | 0.456 | 0.789 | 0.789 | 0.456 | 0.123 | 0.123 | 0.123 | 0.123 |
# The following table reports the mean and standard deviation of error in the linearized measurement equations:
#
# | | $n_x$ | $n_y$ | $r$ |
# | :--: | :--: | :--: | :--: |
# | mean | 0.123 | 0.456 | 0.789 |
# | std | 0.123 | 0.456 | 0.789 |
# **Modify the text in this cell** to answer the following questions:
#
# * Which equations of motion do you trust most (or least) and why?
# * Which measurement equations do you trust most (or least) and why?
| lab07/analysis-template.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import xarray as xr
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seawater as sw
import cartopy.crs as ccrs # import projections
import cartopy.feature as cf # import features
fig_dir='C:/Users/gentemann/Google Drive/f_drive/docs/projects/misst-arctic/Saildrone/'
data_dir = 'F:/data/cruise_data/saildrone/2019_arctic/post_mission/'
adir_sbe='F:/data/cruise_data/saildrone/2019_arctic/sbe56/sd-'
data_dir_sbe_combined = 'F:/data/cruise_data/saildrone/2019_arctic/post_mission_combined/'
# -
# # Metadata
# - SBE56 info, set some attributes and depths for metadata
# - put filenames and depts into a xarray dataset
#
# +
vattrs = {'long_name': 'sea surface depth','coordinates': 'time',
'standard_name': 'sea_water_temperature', 'installed_height':'-0.295 m' ,
'serial_number' : 'SBE05608196', 'model_number': '56',
'sensor_description': 'Seabird 56 Temperature Logger',
'model_name': 'SBE 56', 'product_page':'http://www.seabird.com/sbe56-temperature-logger',
'nominal_sampling_schedule': '2 sec',
'units':'degrees_C','valid_min':-10.0,'valid_max':50.0}
astr_sbe36=['SBE05609153_2019-11-25.csv','SBE05609154_2019-11-25.csv',
'SBE05609158_2019-11-25.csv','SBE05609150_2019-11-25.csv',
'SBE05609151_2019-11-25.csv','SBE05609152_2019-11-25.csv',
'SBE05609157_2019-11-25.csv']
depth36=[330,473,823,1048,1209,1436,1700]
astr_sbe37=['SBE05608196_2019-11-25.csv','SBE05604705_2019-11-25.csv',
'SBE05605200_2019-11-25.csv','SBE05608476_2019-11-25.csv',
'SBE05609159_2019-11-25.csv','SBE05609155_2019-11-26.csv',
'SBE05609156_2019-11-26.csv']
depth37=[324,474,805,1035,1196,1413,1725]
adepth=[depth36,depth37]
astr = [astr_sbe36,astr_sbe37]
ds_info = xr.Dataset(data_vars={'fname':(['trajectory','z'],astr),
'depth':(['trajectory','z'],adepth)},
coords={'z':np.arange(7),'trajectory':[1036,1037]})
# -
# # Combine the .cvs files into a single netcdf file with metadata
# Combine the per-sensor SBE56 CSV files for each vehicle (sd-1036, sd-1037)
# into a single netCDF file with one temperature variable per depth level.
for itt in range(0,2):
    for iz in range(7):
        if (itt==0) and (iz==3): #the third sbe56 fell off & file doesn't exist
            continue
        fstr = str(ds_info.fname[itt,iz].data)
        avehicle=str(ds_info.trajectory[itt].data)
        filename=adir_sbe+avehicle+'/'+fstr
        print(filename)
        # CSV has an 11-line header; columns are Date, Time, Temperature
        df = pd.read_csv(filename,header=11) #, sheet_name='data')
        df['time']=pd.to_datetime(df['Date'] + ' ' + df['Time'])
        del df['Date']
        del df['Time']
        df.index=df['time']
        xf=df.to_xarray()
        vname='sea_water_temperature_'+str(iz).zfill(2)
        xf=xf.rename({'Temperature':vname})
        # BUG FIX: take a per-sensor copy of the attribute template. The
        # original code mutated and attached the *same* dict object to every
        # variable, so when the file was written all variables shared the
        # last sensor's serial_number and installed_height.
        attrs_iz = dict(vattrs)
        attrs_iz['installed_height']=str(-1*ds_info.depth[itt,iz].data/1000.)+' m'
        attrs_iz['serial_number']=fstr[:11]
        xf.attrs=attrs_iz
        if iz==0:
            ds_sbe=xr.Dataset({'sea_water_temperature_00':xf[vname]})
            ds_sbe[vname].attrs=attrs_iz
        else:
            ds_sbe=ds_sbe.assign(vname=xf[vname])
            ds_sbe=ds_sbe.rename({'vname':vname})
            ds_sbe[vname].attrs=attrs_iz
    if itt==0:
        # sd-1036 logger clocks appear to be 8 h off — shift applied only to
        # that vehicle. TODO confirm against cruise notes.
        ds_sbe['time']=ds_sbe.time+np.timedelta64(8,'h')
    fout=adir_sbe+'combined'+avehicle+'.nc'
    ds_sbe.to_netcdf(fout)
# # Create files with 1 minute averages of data using :54 to :06 seconds
# +
# Make 1-minute averages of the SBE data, using only samples from second :54
# to second :06 of each minute (centered on the minute boundary).
for itt in range(2):
    avehicle=str(ds_info.trajectory[itt].data)
    filename=adir_sbe+'combined'+avehicle+'.nc'
    filename_out=adir_sbe+'combined_1minave'+avehicle+'.nc'
    xf=xr.open_dataset(filename)
    for iz in range(7):
        if (itt==0) and (iz==3):  # third sd-1036 sensor fell off; no data
            continue
        print(itt,iz)
        vname='sea_water_temperature_'+str(iz).zfill(2)
        vnamemn='sea_water_temperature_'+str(iz).zfill(2)+'_mean'
        vnamestd='sea_water_temperature_'+str(iz).zfill(2)+'_std'
        offset = pd.to_timedelta(6, unit='s')
        xftem=xf[vname]
        pt = pd.to_datetime(xftem.time.data)+offset #add 6 seconds
        psec=pt.second
        # after the +6 s shift, shifted seconds 0..12 correspond to original
        # seconds :54..:06 of each minute
        tem=xftem[(psec <=12)]
        tem['time']=tem['time']+offset #resample bins run :00-:59 so shift +6sec
        xf_avg1 = tem.resample(time='1min').mean(keep_attrs=True)
        xf_std1 = tem.resample(time='1min').std(ddof=1,keep_attrs=True)
        # BUG FIX: copy the attrs instead of aliasing them. The original code
        # attached xf[vname].attrs itself to the mean and then mutated it, so
        # the mean's long_name ended up reading "... standard dev" and the
        # std variable's attrs were never updated at all.
        sattrs=dict(xf[vname].attrs)
        sattrs['nominal_sampling_schedule']='1 minute averages from :54 to :06 seconds'
        sattrs['long_name']='sea surface depth 1-min mean'
        xf_avg1.attrs=sattrs
        sattrs_std=dict(sattrs)
        sattrs_std['long_name']='sea surface depth 1-min standard dev'
        xf_std1.attrs=sattrs_std
        if iz==0:
            ds_sbe=xr.Dataset({'vname':xf_avg1})
            ds_sbe=ds_sbe.rename({'vname':vnamemn})
            ds_sbe=ds_sbe.assign(vname=xf_std1)
            ds_sbe=ds_sbe.rename({'vname':vnamestd})
        else:
            ds_sbe=ds_sbe.assign(vname=xf_avg1)
            ds_sbe=ds_sbe.rename({'vname':vnamemn})
            ds_sbe=ds_sbe.assign(vname=xf_std1)
            ds_sbe=ds_sbe.rename({'vname':vnamestd})
    # drop minutes where the shallowest sensor has no data
    ds_sbe2=ds_sbe.where(np.isfinite(ds_sbe.sea_water_temperature_00_mean),drop=True)
    ds_sbe2.to_netcdf(filename_out)
# -
# Copy the global attributes from the matching Saildrone file onto the
# combined 1-minute SBE file, trim to the mission time window, and re-save.
for iusv in range(2):
    if iusv==0:
        fname='saildrone-gen_5-arctic_misst_2019-sd1036-20190514T230000-20191011T183000-1_minutes-v1.1575336154680.nc'
        fname2='sd-combined_1minave1036.nc'
        fname_out='sd-combined_1minave1036_withattrs.nc'
    if iusv==1:
        fname='saildrone-gen_5-arctic_misst_2019-sd1037-20190514T230000-20191011T183000-1_minutes-v1.1575487464625.nc'
        fname2='sd-combined_1minave1037.nc'
        fname_out='sd-combined_1minave1037_withattrs.nc'
    ds = xr.open_dataset(data_dir+fname)
    ds.close()
    ds_sbe=xr.open_dataset(adir_sbe[:-3]+fname2)
    ds_sbe.close()
    gattrs=ds.attrs
    ds_sbe.attrs=gattrs
    # BUG FIX: .sel() returns a new dataset; the original discarded the
    # result, so the time subsetting was never applied before writing.
    ds_sbe = ds_sbe.sel(time=slice('2019-05-14T23:00','2019-10-11T18:30'))
    ds_sbe.to_netcdf(adir_sbe[:-3]+fname_out)
ds_sbe
# # combine the 1-min ave SBE data with the saildrone file of all data
# Merge the 1-minute SBE averages into the full Saildrone dataset for each
# vehicle and write the combined file.
for iusv in range(2):
    if iusv==0:
        fname='saildrone-gen_5-arctic_misst_2019-sd1036-20190514T230000-20191011T183000-1_minutes-v1.1575336154680.nc'
        fname2='sd-combined_1minave1036.nc'
    if iusv==1:
        fname='saildrone-gen_5-arctic_misst_2019-sd1037-20190514T230000-20191011T183000-1_minutes-v1.1575487464625.nc'
        fname2='sd-combined_1minave1037.nc'
    ds = xr.open_dataset(data_dir+fname)
    ds.close()
    ds = ds.rename({'latitude':'lat','longitude':'lon'})
    ds['time']=ds.time[0,:]
    ds = ds.swap_dims({'obs':'time'})
    # derive wind speed and direction from the U/V components
    ds['wspd_MEAN']=np.sqrt(ds.UWND_MEAN**2+ds.VWND_MEAN**2)
    ds['wdir_MEAN']=np.arctan2(ds.VWND_MEAN,ds.UWND_MEAN)*180./np.pi
    ds_sbe=xr.open_dataset(adir_sbe[:-3]+fname2)
    ds_sbe.close()
    # nearest-neighbor match the SBE record onto the Saildrone time base
    ds_tem=ds_sbe.interp(time=ds.time,method='nearest')
    ds = ds.isel(trajectory=0)
    for var in ds_tem:
        # BUG FIX: copy the *interpolated* variables. The original assigned
        # ds_sbe[var], leaving the nearest-neighbor interpolation unused.
        ds[var]=ds_tem[var]
    fname_out=fname[:-3]+'-withSBE.nc'
    ds.to_netcdf(data_dir_sbe_combined+fname_out)
ds = xr.open_mfdataset(data_dir_sbe_combined+'*.nc',combine='nested',concat_dim='trajectory').load()
ds
| temp_loggers/.ipynb_checkpoints/Arctic_temp_loggers_single_file_and_collocate-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # M2. Data preparation
import numpy as np
import pickle
from c15ym.items_data import ItemsData
from c15ym.scores_data import ScoresData
from sklearn.metrics.pairwise import cosine_similarity
from timer.timer import Timer
timer = Timer()
processed_data_dir_path = './../processed_data/'
items_data = pickle.load(open(processed_data_dir_path + 'c15ym_dataset_1_items_data.pickle', 'rb'))
scores_data = pickle.load(open(processed_data_dir_path + 'c15ym_dataset_1_train_scores_data.pickle', 'rb'))
users_area = np.arange(40000)
users_area_size = users_area.shape[0]
# for quick access to users' ratings in ratings matrices
users_2_area_indices = users_area # it is useless with users_area == [0..19999] but if you choose different users area, you will probably need in users_2_area_indices
all_artists = np.copy(items_data._artists)
all_artists_size = all_artists.shape[0]
all_artists_n_tracks = np.array(list(map(lambda artist: items_data.get_artist_tracks(artist).shape[0], all_artists)), dtype=np.int32)
all_artists_n_albums = np.array(list(map(lambda artist: items_data.get_artist_albums(artist).shape[0], all_artists)), dtype=np.int32)
# we are interested in artists with at least 1 related track or album
all_artists_flags = (all_artists_n_tracks + all_artists_n_albums) > 0
artists = all_artists[all_artists_flags]
artists_size = artists.shape[0]
artists_n_items = all_artists_n_tracks[all_artists_flags] + all_artists_n_albums[all_artists_flags]
# for quick access to artists' ratings in ratings matrices
artists_2_indices = np.full(shape=np.max(all_artists) + 1, fill_value=np.int32(-1), dtype=np.int32)
for i in np.arange(artists_size):
artists_2_indices[artists[i]] = i
artists_ratings = np.full(shape=(artists_size, users_area_size), fill_value=np.float32(-1), dtype=np.float32)
artists_items_acc = np.zeros(shape=(artists_size, users_area_size), dtype=np.float32)
artists_items_c = np.zeros(shape=(artists_size, users_area_size), dtype=np.int32)
# Accumulate, for every (artist, user) pair in the users area:
#   - artists_ratings:   the user's explicit artist rating (-1 = unrated)
#   - artists_items_acc: sum of the user's scores on the artist's tracks/albums
#   - artists_items_c:   count of the artist's tracks/albums the user scored
for users_area_i in np.arange(users_area_size):
    items, scores = scores_data.get_user_data(user=users_area[users_area_i])
    # tracks: credit each score to the track's artist
    tracks_indices = np.flatnonzero(items_data.is_track(items))
    for track, score in zip(items[tracks_indices], scores[tracks_indices]):
        track_artist = items_data.get_track_artist(track)
        if track_artist != np.int32(-1):  # track has a known artist
            artist_i = artists_2_indices[track_artist]
            if artist_i != np.int32(-1):  # artist kept by the earlier filter
                artists_items_acc[artist_i, users_area_i] += score
                artists_items_c[artist_i, users_area_i] += 1
    # albums: same crediting scheme as tracks
    albums_indices = np.flatnonzero(items_data.is_album(items))
    for album, score in zip(items[albums_indices], scores[albums_indices]):
        album_artist = items_data.get_album_artist(album)
        if album_artist != np.int32(-1):
            artist_i = artists_2_indices[album_artist]
            if artist_i != np.int32(-1):
                artists_items_acc[artist_i, users_area_i] += score
                artists_items_c[artist_i, users_area_i] += 1
    # direct artist ratings overwrite the -1 sentinel
    artists_indices = np.flatnonzero(items_data.is_artist(items))
    for artist, score in zip(items[artists_indices], scores[artists_indices]):
        artist_i = artists_2_indices[artist]
        if artist_i != np.int32(-1):
            artists_ratings[artist_i, users_area_i] = score
def get_artists_matrix(artists_ratings, artists_items_acc, artists_items_c):
    """
    Build a {-1, 0, +1} preference matrix (artists x users).

    A cell is +1/-1 from the explicit artist rating when present (rating
    != -1); otherwise from the mean of the user's track/album scores for
    that artist when at least one exists; otherwise 0 (no signal).
    Ratings at or above 0.5 map to +1, below to -1.
    """
    threshold = np.float32(0.5)

    def to_sign(values):
        # values >= threshold -> +1.0, otherwise -> -1.0 (float32)
        return np.array([-1, 1], dtype=np.float32)[np.int32(values >= threshold)]

    rated_directly = artists_ratings != np.float32(-1)
    rated_via_items = np.logical_and(np.logical_not(rated_directly),
                                     artists_items_c >= np.int32(1))

    matrix = np.zeros(shape=artists_ratings.shape, dtype=np.float32)
    matrix[rated_directly] = to_sign(artists_ratings[rated_directly])
    matrix[rated_via_items] = to_sign(
        artists_items_acc[rated_via_items] / artists_items_c[rated_via_items])
    return matrix
train_artists_matrix = get_artists_matrix(artists_ratings[:, :20000], artists_items_acc[:, :20000], artists_items_c[:, :20000])
artists_sim = cosine_similarity(train_artists_matrix)
# +
# Confusion-style accumulator: rows = true label (0 = dislike, 1 = like),
# columns = predicted label (0 = dislike, 1 = like, 2 = no prediction).
results_acc = np.zeros(shape=(2, 3), dtype=np.int32)
timer.start()
for users_area_i in np.arange(20000, 30000):
    # all artists this user rated, shuffled
    artists_indices = np.flatnonzero(artists_ratings[:, users_area_i] != np.float32(-1))
    artists_indices = artists_indices[np.random.permutation(artists_indices.shape[0])]
    train_size = np.int32(np.round(np.float32(0.5) * artists_indices.shape[0]))
    # 50% of the user's ratings are revealed to the predictor
    # NOTE(review): items_acc / items_c are gathered but unused below
    train_artists_indices = artists_indices[:train_size]
    train_user_artists_ratings = artists_ratings[train_artists_indices, users_area_i]
    train_user_artists_items_acc = artists_items_acc[train_artists_indices, users_area_i]
    train_user_artists_items_c = artists_items_c[train_artists_indices, users_area_i]
    # the remaining 50% are held out for evaluation
    test_artists_indices = artists_indices[train_size:]
    test_user_artists_ratings = artists_ratings[test_artists_indices, users_area_i]
    # predict each held-out rating by voting among similar revealed artists
    for test_artist_i, test_rating in zip(test_artists_indices, test_user_artists_ratings):
        # true label: "like" if rating >= 0.5
        result_i = 0
        if test_rating >= np.float32(0.5):
            result_i = 1
        # count like/dislike votes from revealed artists whose cosine
        # similarity to the test artist is at least 0.2
        pos = np.int32(0)
        neg = np.int32(0)
        #
        for train_rating_i, train_rating in zip(train_artists_indices, train_user_artists_ratings):
            if artists_sim[test_artist_i, train_rating_i] >= np.float32(0.2):
                if train_rating >= np.float32(0.5):
                    pos += 1
                else:
                    neg += 1
        # predicted label: majority vote, ties favor "like"; no votes -> 2
        result_j = 2
        if neg > 0 and neg > pos:
            result_j = 0
        if pos > 0 and pos >= neg:
            result_j = 1
        #
        results_acc[result_i, result_j] += 1
print(timer.stop())
# -
print(results_acc)
print(np.round(np.float32(results_acc[0, :]) / np.sum(results_acc[0, :]), 4))
print(np.round(np.float32(results_acc[1, :]) / np.sum(results_acc[1, :]), 4))
# + active=""
#
# +
# Scaffold for the next experiment: split each user's rated artists 75/25
# into revealed/held-out sets, partitioned by like (rating >= 0.5) vs.
# dislike. The evaluation itself is not implemented yet.
for users_area_i in np.arange(20000, 40000):
    artists_indices = np.flatnonzero(artists_ratings[:, users_area_i] != np.float32(-1))
    artists_indices = artists_indices[np.random.permutation(artists_indices.shape[0])]
    train_size = np.int32(np.round(np.float32(0.75) * artists_indices.shape[0]))
    # train (revealed) portion, split into dislike (0) / like (1) indices
    train_artists_indices = artists_indices[:train_size]
    train_artists_ratings = artists_ratings[train_artists_indices, users_area_i]
    train_artists_likes = train_artists_ratings >= np.float32(0.5)
    train_artists_0_indices = train_artists_indices[np.logical_not(train_artists_likes)]
    train_artists_1_indices = train_artists_indices[train_artists_likes]
    # test (held-out) portion, split the same way
    test_artists_indices = artists_indices[train_size:]
    test_artists_ratings = artists_ratings[test_artists_indices, users_area_i]
    test_artists_likes = test_artists_ratings >= np.float32(0.5)
    test_artists_0_indices = test_artists_indices[np.logical_not(test_artists_likes)]
    test_artists_1_indices = test_artists_indices[test_artists_likes]
    # testing (TODO: evaluation not implemented)
| m2_data_preparation__.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from datetime import datetime
import matplotlib.pyplot as plt
import pandas as pd
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
import MetaTrader5 as mt5
# connect to the MetaTrader 5 terminal
if not mt5.initialize():
    print("initialize() failed")
    mt5.shutdown()
    # NOTE(review): execution continues after a failed initialize — the
    # calls below would then fail; consider exiting here.
# request the connection status and parameters
print(mt5.terminal_info())
# get the MetaTrader 5 version information
print(mt5.version())
# request 1000 EURAUD ticks starting from 2021.06.18 13:00
euraud_ticks = mt5.copy_ticks_from("EURAUD", datetime(2021,6,18,13), 1000, mt5.COPY_TICKS_ALL)
# request AUDUSD ticks in the interval 2021.06.17 13:00 - 2021.06.18 13:00
audusd_ticks = mt5.copy_ticks_range("AUDUSD", datetime(2021,6,17,13), datetime(2021,6,18,13), mt5.COPY_TICKS_ALL)
# fetch bars for several instruments using the different copy_rates_* methods
eurusd_rates = mt5.copy_rates_from("EURUSD", mt5.TIMEFRAME_M1, datetime(2021,6,18,13), 1000)
eurgbp_rates = mt5.copy_rates_from_pos("EURGBP", mt5.TIMEFRAME_M1, 0, 1000)
eurcad_rates = mt5.copy_rates_range("EURCAD", mt5.TIMEFRAME_M1, datetime(2021,6,17,13), datetime(2021,6,18,13))
# shut down the connection to MetaTrader 5
mt5.shutdown()
# DATA: print the first 10 records of each request
print('euraud_ticks(', len(euraud_ticks), ')')
for val in euraud_ticks[:10]: print(val)
print('audusd_ticks(', len(audusd_ticks), ')')
for val in audusd_ticks[:10]: print(val)
print('eurusd_rates(', len(eurusd_rates), ')')
for val in eurusd_rates[:10]: print(val)
print('eurgbp_rates(', len(eurgbp_rates), ')')
for val in eurgbp_rates[:10]: print(val)
print('eurcad_rates(', len(eurcad_rates), ')')
for val in eurcad_rates[:10]: print(val)
# PLOT
# build a DataFrame from the received ticks
ticks_frame = pd.DataFrame(euraud_ticks)
# draw the bid/ask ticks on the chart
plt.plot(ticks_frame['time'], ticks_frame['ask'], 'r-', label='ask')
plt.plot(ticks_frame['time'], ticks_frame['bid'], 'b-', label='bid')
# show the legend
plt.legend(loc='upper left')
# add the title
plt.title('EURAUD ticks')
# display the chart
plt.show()
# -
ticks_frame
# +
import mplfinance as mpf
import pandas as pd
import yahoo_fin.stock_info as yf
from datetime import date
from datetime import timedelta
ticker = "^IXIC"
start_date = date.today() - timedelta(days=500)
end_date = date.today() + timedelta(days=1)
start_date = start_date.strftime("%Y/%m/%d")
end_date= end_date.strftime("%Y/%m/%d")
# Default Interval is 1 day
data = yf.get_data(ticker, start_date = start_date, end_date= end_date,
index_as_date= False)
data_candle = yf.get_data(ticker, start_date = start_date, end_date= end_date,
index_as_date= True)
del data_candle['adjclose']
del data_candle['ticker']
data_candle.rename(columns={'open':'Open',
'high':'High',
'low':'Low',
'close': 'Close',
'volume': 'Volume'},
inplace=True)
print(data_candle)
kwargs = dict(type = 'candle', volume = True, figratio=(30,8), figscale=0.85)
mpf.plot(data_candle, **kwargs, style = 'yahoo', title = '{}, {} - {}'.format(ticker, start_date, end_date))
# +
import pandas as pd
import numpy as np
import talib
import matplotlib.pyplot as plt
inputs = {
'open': pd.Series(data['open']),
'high': pd.Series(data['high']),
'low': pd.Series(data['low']),
'close': pd.Series(data['close']),
'adjclose': pd.Series(data['adjclose']),
'volume': pd.Series(data['volume'])
}
# -
# ## Bollinger Bands
import pandas as pd
from pandas_datareader import data as pdr
import numpy as np
import datetime as date
import matplotlib.pyplot as plt
enddate = date.datetime(2021,6,18)
startdate = date.datetime(2020,6,18)
tick = 'AAPL'
data = pdr.get_data_yahoo(tick, start = startdate, end = enddate)
data.head()
data.Close.plot()
def BBANDS(df, n):
    """Return a copy of ``df`` with Bollinger Band columns appended.

    Parameters
    ----------
    df : pandas.DataFrame with a 'Close' column.
    n : int, rolling window length in periods.

    Returns
    -------
    pandas.DataFrame: ``df`` plus 'BollingerSuperior' (mean + 2 std),
    'BollingerInferior' (mean - 2 std) and 'BollingerM_' (rolling mean).
    The first ``n - 1`` rows are NaN because the window is not yet full.
    """
    # Idiomatic rolling calls (the original used the unbound
    # `pd.Series.rolling(series, n)` form) and a single join instead of
    # three separate joins, each of which copied the whole frame.
    rolling_close = df['Close'].rolling(n)
    middle = rolling_close.mean()
    deviation = rolling_close.std()
    bands = pd.DataFrame({
        'BollingerSuperior': middle + 2 * deviation,
        'BollingerInferior': middle - 2 * deviation,
        'BollingerM_': middle,
    })
    return df.join(bands)
# Compute 20-period Bollinger Bands and plot them together with the close price.
df = BBANDS(data,20)
df2 = df[['Close','BollingerSuperior','BollingerInferior','BollingerM_']]
df2.plot(figsize = (16,8),grid=True)
# +
# Classify each day by where the close sits relative to the Bollinger Bands
# and derive a buy/sell recommendation (labels intentionally in Spanish).
a = df['BollingerSuperior']
b = df['BollingerInferior']
# Behaviour: between the bands / below the lower band / above the upper band.
data.loc[(data['Close'] < a) & (data['Close'] > b), 'Comportamiento'] = 'Entre bandas'
data.loc[data['Close'] <b, 'Comportamiento'] = 'Cruce de banda inferior'
data.loc[data['Close'] >a, 'Comportamiento'] = 'Cruce de banda superior'
# Recommendation: buy on a lower-band cross, sell on an upper-band cross.
data.loc[(data['Close'] < a) & (data['Close'] > b), 'Recomendacion'] = '-'
data.loc[data['Close'] <b, 'Recomendacion'] = 'Se recomienda comprar la accion.'
data.loc[data['Close'] >a, 'Recomendacion'] = 'Se recomienda vender la accion'
# Rows where the bands are still NaN (warm-up window) fall through to defaults.
data["Recomendacion"] = data["Recomendacion"].fillna("-")
data["Comportamiento"] = data["Comportamiento"].fillna("Sin informacion")
# -
# Show every row (disable pandas row truncation) with its classification.
pd.set_option('display.max_rows', None)
data[['Close','Comportamiento', 'Recomendacion']]
# Side-by-side table of the two bands and the close price.
tabla=pd.DataFrame(a)
tabla2=pd.DataFrame(b)
tabla= tabla.join(tabla2).join(data['Close'])
tabla
# ## Oscilador Estocástico
def STO(df, nK, nD, nS=1):
    """Append stochastic-oscillator columns and a recommendation to ``df``.

    %K ('SOk<nK>') is the close's position inside the last ``nK``-period
    high/low range; %D ('SOd<nD>') is its exponential smoothing over ``nD``
    periods; ``nS`` optionally smooths both lines again (``nS=1`` is a no-op).
    'Recomendacion' marks %K/%D crossovers that happen in overbought
    (%D > 0.8) or oversold (%D < 0.2) territory; every other row gets '-'.
    """
    lowest = df['Low'].rolling(nK).min()
    highest = df['High'].rolling(nK).max()
    fast = pd.Series((df['Close'] - lowest) / (highest - lowest), name='SOk' + str(nK))
    slow = pd.Series(fast.ewm(ignore_na=False, span=nD, min_periods=nD - 1, adjust=True).mean(), name='SOd' + str(nD))
    # Optional second smoothing pass applied to both lines.
    fast = fast.ewm(ignore_na=False, span=nS, min_periods=nS - 1, adjust=True).mean()
    slow = slow.ewm(ignore_na=False, span=nS, min_periods=nS - 1, adjust=True).mean()
    df = df.join(fast)
    df = df.join(slow)
    df['Recomendacion'] = 0
    # Sell when %K crosses below %D while overbought; buy on the mirror case.
    sell_cross = (fast < slow) & (fast.shift(1) > slow.shift(1)) & (slow > 0.8)
    buy_cross = (fast > slow) & (fast.shift(1) < slow.shift(1)) & (slow < 0.2)
    df.loc[sell_cross, 'Recomendacion'] = 'Vender'
    df.loc[buy_cross, 'Recomendacion'] = 'Comprar'
    df.loc[df['Recomendacion'] == 0, 'Recomendacion'] = '-'
    return df
# Compute the 14/3 stochastic oscillator and plot price vs. the oscillator.
df2 = STO(data, 14, 3)
fig = plt.figure(figsize = (16,8))
ax1 = plt.subplot(211)
plt.plot(df2.Close)
plt.grid(True)
plt.title('Precio')
# Second panel: %K (red) and %D (green) with overbought/oversold guide lines.
ax2 = plt.subplot(212, sharex=ax1)
plt.plot(df2.SOk14, color = 'r')
plt.plot(df2.SOd3, color = 'g')
plt.axhline(y=0.8, color = 'k', linestyle = '--')
plt.axhline(y=0.2, color = 'k', linestyle = '--')
plt.title('Oscilador Estocástico')
plt.grid(True)
plt.show()
# +
# Show the close price next to the generated recommendation.
df2[['Close', 'Recomendacion']]
# -
# ## ROC
# +
def ROC(df, n):
    """Append rate-of-change columns and a (Spanish) recommendation to ``df``.

    'ROC_<n>' is the ``n-1``-period price change divided by the price
    ``n-1`` periods ago; 'Cr' is the previous close and 'ROC_<n>_r' the
    previous ROC. 'Recomendación' flags zero-line crossings of the ROC,
    with a '(DIV)' variant when price and ROC diverge.
    """
    change = df['Close'].diff(n - 1)
    base = df['Close'].shift(n - 1)
    roc = pd.Series(change / base, name='ROC_' + str(n))
    df = df.join(roc)
    prev_close = pd.Series(df['Close'].shift(1), name='Cr')
    df = df.join(prev_close)
    prev_roc = pd.Series(roc.shift(1), name='ROC_' + str(n) + '_r')
    df = df.join(prev_roc)
    df['Recomendación'] = '-'
    # Zero-line crossings: up-cross suggests buying, down-cross selling.
    crossed_up = (roc > 0) & (prev_roc < 0)
    crossed_down = (roc < 0) & (prev_roc > 0)
    df.loc[crossed_up, 'Recomendación'] = 'Se recomienda COMPRAR la acción.'
    df.loc[crossed_down, 'Recomendación'] = 'Se recomienda VENDER la acción.'
    # Divergence variants: price moves against the crossing direction.
    df.loc[(df['Close'] < prev_close) & crossed_up & (roc > prev_roc), 'Recomendación'] = 'Se recomienda COMPRAR la acción. (DIV)'
    df.loc[(df['Close'] > prev_close) & crossed_down & (roc < prev_roc), 'Recomendación'] = 'Se recomienda VENDER la acción. (DIV)'
    return df
# Compute the 10-period rate of change on the AAPL data.
df2 = ROC(data, 10)
# -
# Price on top, ROC (with its zero line) below.
fig = plt.figure(figsize = (16,8))
plt.subplot(2, 1, 1)
plt.plot(df2.Close)
plt.title('Precio del Indice')
plt.grid()
plt.subplot(2, 1, 2)
plt.plot(df2.ROC_10)
plt.axhline(y=0, color= 'r')
plt.grid()
plt.show()
# List only the days that produced a buy/sell recommendation.
df2.loc[df2['Recomendación'] != '-',['Close', 'ROC_10', 'Recomendación']]
| MT5.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import matplotlib.pyplot as plt
# Labels drawn on each horizontal bar: the count and the series/platform name.
count = ['138','170','155','243','232']
platform =['Tribune de Genève','Le Temps','Machin Truc','Asdf','Gnagnagna']
def autolabel(rects):
    """Annotate each bar of a horizontal bar chart.

    Writes the count at the bar end (inside the bar for long bars, just
    outside for short ones) and the platform name at a fixed offset.
    Relies on the module-level globals `ax`, `count` and `platform`, so it
    must be called after the axes are created and with bars matching the
    two lists' length.
    """
    # attach some text labels
    for ii,rect in enumerate(rects):
        width = int(rect.get_width())
        height = rect.get_height()
        print(height,width)
        # Vertical centre of the bar, reused for both annotations.
        yloc1=rect.get_y() + height /2.0
        yloc2=rect.get_y() + height /2.0
        if (width <= 5):
            # Shift the text to the right side of the right edge
            xloc1 = width + 1
            yloc2=yloc2+0.3
            # Black against white background
            clr = 'black'
            align = 'left'
        else:
            # Shift the text to the left side of the right edge
            xloc1 = 0.98*width
            # White on blue
            clr = 'white'
            align = 'right'
        yloc1=rect.get_y() + height /2.0
        print(xloc1,yloc1,yloc2)
        # Count label at the bar end...
        ax.text(xloc1,yloc1, '%s'% (count[ii]),horizontalalignment=align,
                 verticalalignment='center',color=clr,weight='bold',
                 clip_on=True)
        # ...and the platform name at a fixed x position inside the bar.
        ax.text(5,yloc2, '%s'% (platform[ii]),horizontalalignment='left',
                 verticalalignment='center',color=clr,weight='bold',
                 clip_on=True)
# Demo data: bar lengths and (partly placeholder) y-axis categories.
val = [138,170,155,243,232]
print(val)# the bar lengths or count in your case
pos = [ 'sadf' , 1997, 1998, 1999, 2000] # the bar centers on the y axis
print(pos)
fig = plt.figure()
ax = fig.add_subplot(111)
rects = ax.barh(pos,val, align='center',height=0.4)
print(rects)
# Annotate the bars (autolabel reads the global `ax` created just above).
autolabel(rects)
ax.set_ylabel('Ici pas ')
ax.set_xlabel('Count')
ax.set_title('horizontal bar chart')
ax.grid(False)
plt.savefig("horizontal.pdf")
plt.show()
| utils/Seaborn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: PySpark
# language: ''
# name: pysparkkernel
# ---
# # Sparkify Project
# Install pandas on EMR Notebook
# Install the analysis/plotting packages on the EMR cluster (PySpark kernel).
sc.install_pypi_package("pandas==1.0.3")
sc.install_pypi_package("matplotlib==3.1.3")
sc.install_pypi_package("seaborn==0.10.1")
# +
# import libraries
from pyspark.sql import SparkSession, Window
import pyspark.sql.functions as F
from pyspark.sql.functions import col, udf
from pyspark.sql.types import IntegerType, FloatType
from pyspark.ml.stat import Correlation
from pyspark.ml import Pipeline
from pyspark.ml.feature import VectorAssembler, StandardScaler
from pyspark.ml.evaluation import BinaryClassificationEvaluator, Evaluator
from pyspark.ml.classification import RandomForestClassifier, GBTClassifier
from pyspark.ml.tuning import CrossValidator, ParamGridBuilder
import time
from datetime import datetime
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# -
# create a Spark session (gets or reuses the kernel's existing session)
spark = SparkSession.builder\
    .master("local")\
    .appName('Sparkify')\
    .getOrCreate()
spark
# Check <a href="https://livy.apache.org/">Apache Livy</a> magic commands in Jupyter:
# %help
# Useful: `%%spark -o df` will let the Spark Dataframe available locally as a Pandas dataframe
# # Load and Clean Dataset
# Using tiny subset (128MB) of the full dataset available (12GB) for local development.
#
# full dataset: `s3n://udacity-dsnd/sparkify/full_sparkify_event_data.json`
#
# Full dataset: `s3n://udacity-dsnd/sparkify/sparkify_event_data.json`
#
# Path of the full (12GB) Sparkify event log on S3.
event_data = "s3n://udacity-dsnd/sparkify/sparkify_event_data.json"
df = spark.read.json(event_data)
# Persist data on cluster
df.persist()
# Check schema and column types
df.printSchema()
df.head(5)
# + magic_args="-o df_head" language="spark"
# df_head = df.limit(5)
# -
# %local
df_head
# Basic size and duplication sanity checks (each count triggers a full scan).
print('Number of rows full dataset', df.count())
print('Number of columns full dataset', len(df.columns))
print('Number of duplicated rows in full dataset', df.count() - df.dropDuplicates().count())
# #### Let's check our data for numerical columns:
# NOTE: the comprehension variable `col` shadows pyspark's `col` only inside
# the comprehension's own scope, so the module-level import stays intact.
numCols = [col[0] for col in df.dtypes if not col[1]=='string']; numCols
df.select(numCols).describe().show()
# For some informations above (`registration`, `sessionId`, `status`, `ts`) it doesn't make sense to analyze as numbers. We will dive deeper in the next Exploratory Data Analysis session of this notebook.
# #### Let's check our data for categorical columns:
text_cols = [col[0] for col in df.dtypes if col[1]=='string']; text_cols
# Although not expected by inspecting the data, `userId` is actually a string.
#
# We check other information about our text data (not interested in `firstName` and `lastName`):
# +
# Count distinct artists, songs (distinct titles per artist, summed), user
# agents and locations; a synthetic monotonically-increasing `id` lets the
# four single-row frames be joined into one summary row.
dist_artists = df.select(F.countDistinct('artist').alias('numberOfDistinctArtists')).withColumn("id", F.monotonically_increasing_id())
dist_songs = df.select(['song','artist']).groupBy('song').agg(F.countDistinct('artist').alias('countDistinctArtists')).\
    select(F.sum('countDistinctArtists').alias('numberOfDistinctSongs')).withColumn("id", F.monotonically_increasing_id())
dist_user_agents = df.select(F.countDistinct('userAgent').alias('numberOfuserAgents')).withColumn("id", F.monotonically_increasing_id())
dist_locations = df.select(F.countDistinct('location').alias('numberOfDistinctLocations')).withColumn("id", F.monotonically_increasing_id())
text_cols_info = dist_artists.join(dist_songs, "id", "outer")\
                    .join(dist_user_agents, "id", "outer")\
                    .join(dist_locations, "id", "outer").drop('id')
# Fix: the assignment above used to end with a stray trailing backslash that
# glued this statement onto it and made the whole cell a SyntaxError.
text_cols_info.show()
# -
# Sparkify's full dataset contains 38337 different artists, 311148 songs, users in 886 different locations that use 85 different types of devices/software to access the app.
# +
# We calculate value counts for:
# (the low-cardinality categorical columns; one aggregation per column)
text_cols_value_counts = ['auth',
                          'gender',
                          'level',
                          'method',
                          'page',
                          'status']
for column in text_cols_value_counts:
    df.select(column).groupBy(column).count().orderBy('count').show(30, truncate=False)
# -
# #### Loading, cleaning the dataset and checking for invalid or missing data - for example, records without userids or sessionids.
# Checking if there are NaNs (per-column count of NaN values)
df.select([F.count(F.when(F.isnan(c), c)).alias(c+'IsNan') for c in df.columns]).toPandas()
# No Nans in the full dataset.
# Checking if there are null values, column by column
df.select([F.count(F.when(col(c).isNull(), c)).alias(c+'IsNull') for c in df.columns]).toPandas()
# Appearently missing data is correlated (missing counts of columns consistently appear having 8346 or 58392 Null values).
# Let's check how missing values are correlated:
# +
# Check null values: 1 is null and 0 not null
df_is_null = df.select([F.when(col(c).isNull(), 1).otherwise(0).alias(c) for c in df.columns])
# Keep only the stddev/max rows of describe(): an indicator column with
# stddev '0.0' and max '0' means that column never contains a null.
df_is_null_describe = df_is_null.describe()
df_is_null_describe = df_is_null_describe.filter(
    (df_is_null_describe['summary']=='stddev') |
    (df_is_null_describe['summary'] == 'max')
)
# Handle the std equals to zero (all values are the same) and without any null value
zero_std_max_cols = [col for col in df_is_null_describe.columns if df_is_null_describe.select(F.collect_list(col)).head().asDict()['collect_list('+col+')'] == ['0.0', '0']]
# Drop all columns with Standard Deviation equals zero and no missing values
df_is_null = df_is_null.drop(*zero_std_max_cols)
# Create vectors
assembler = VectorAssembler(inputCols=df_is_null.columns, outputCol='vector')
assembled = assembler.transform(df_is_null).drop(*df_is_null.columns)
# Calculate and print Pearson correlation matrix for missing values
pearson_corr = Correlation.corr(assembled, 'vector').head()
pearson_corr = pd.DataFrame(data=pearson_corr[0].toArray(), columns=df_is_null.columns, index=df_is_null.columns)
# -
# Heatmap showing how column nulls co-occur across the dataset.
fig, ax = plt.subplots(figsize=(8, 10))
sns.heatmap(pearson_corr, ax=ax, annot=True);
# %matplot plt
# When there's a null in `artist` column also a null in `length` and `song` happen. Hence, this data may be related and length appears to be the length in seconds of songs.
#
# Similarly, data related to users are related and when a null happens in either `firstName`, `lastName`, `gender`, `location`, `userAgent` and `registration` the others are null too. The column registration seems to be related to the timestamp of when a user registers himself/herself in the application.
# Let's check `userId`:
# userId is a string and should always have length greater than 0
df.select('userId', F.length(col('userId')).alias('userIdLength')).distinct().orderBy(col('userIdLength')).show(5)
# number of users with userId equals to ''
df.filter(df.userId=='').count()
# In the full dataset there are no users with userId equals to `''` (length of the string userId is zero). Perhaps those users without userId are those who have not signed up yet or that are signed out and are about to log in and somehow were only in the mini-dataset.
#
# We'll drop them from our dataframe (in order for the analysis of individual users make sense):
# Drop userId equals to ''
df = df.filter(df.userId!='')
# Full dataset has 26,259,199 rows. It should stay the same
df.count()
print(f"There are {df.select('userId').distinct().count()} users in the full dataset.")
# # Exploratory Data Analysis
#
# ### Define Churn
#
# We create a column `Churn` to use as the label for our model. We choose the `Cancellation Confirmation` events to define the churn, which happen for both paid and free users. We also analyze the `Downgrade` events.
# Create Churn column
# Users with at least one 'Cancellation Confirmation' event are churners.
churner_ids = df.select('userId').where(col('page')=='Cancellation Confirmation').toPandas()
churner_ids = churner_ids.values.squeeze().tolist()
print('churner_ids:', churner_ids[:20])
print('\nTotal churners in full dataset:', len(churner_ids))
all_ids = df.select('userId').distinct().toPandas()
all_ids = all_ids.values.squeeze().tolist()
print('Total distinct users in full dataset:', len(all_ids))
not_churner_ids = np.setdiff1d(all_ids, churner_ids).tolist()
print('not_churner_ids:', not_churner_ids[:20], '...')
# Fix: this message previously said "Total churners" although it reports the
# complement set (users who did NOT churn).
print('\nTotal non-churners in full dataset:', len(not_churner_ids))
# +
# %spark -o df_head
# Tag every event row with churn=1 when its user is a churner.
is_churn = udf(lambda usrIdCol: 1 if usrIdCol in churner_ids else 0, IntegerType())
df = df.withColumn('churn', is_churn('userId'))
df_head = df.limit(5)
# -
# %local
df_head
# ### Explore Data
# Analyzing the behavior for users who stayed vs users who churned.
# We explore aggregates on these two groups of users, observing how much of a specific action they experienced per a certain time unit or number of songs played.
# Without knowing much about the business nor the actual application, we can think the following possible reasons why users cancel the service:
# 1. user is not using Sparkify
# 2. Sparkify doesn't have specific songs/artists
# 3. bad recommendations
# 4. bad experience (streaming getting stuck, interface not intuitive in version of app, too many ads in the app, etc.)
# 5. bugs (commands not responding or don't do what they should, crashes in the app, etc.)
# 6. Sparkify too expensive
# 7. don't have friends using Sparkify
# 8. external influences (some news about company benefits/harms its image, country where users live imposes some limits, cost increase, etc.)
#
# From the hypothesis above:
# - `1.` we can check by counting number of interations in a given timeframe
# - `2.` we cannot check directly since we don't have logs for search queries which would be better (showing that the search query returned exactly what the user wanted)
# - `3.` we have the Add to Playlist, Thumbs Down and Thumbs Up `pages` which could indicate the quality of recommendations
# - `4.` and `5.` we don't have application logs which could indicate loading times, interrupting streaming. However, <a href="https://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.3">status</a> could give us some information about the application and the Error and Help `pages`. In addition, <a href="https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/User-Agent">userAgent</a> can also give us some information about Sparkify applications that are not behaving as expected (Windows, MacOS, specific browser, platform, etc.). The Roll Advert `page` indicates Advertising events and if it is affecting too much user experience (if we could reduce it or enphasize that user can upgrade plan)
# - `6.` as "expensive" may be an ambiguous definition and depend on many factors, with the dataset given we won't be able to infer anything.
# - `7.` Add Friend `page` could indicate that friends are also using app (in the provided dataset we don't have relationships between users which would be better)
# - `8.` again we would need more data related to the business and context to infer anything here
#
# We check the data to answer those questions and compare customers who churn with those who don't:
# Split the event log into the two cohorts and count distinct users in each.
not_churn_users = df.filter(col('churn')==0)
churn_users = df.filter(col('churn')==1)
num_churn_users = churn_users.select('userId').distinct().count()
num_not_churn_users = not_churn_users.select('userId').distinct().count()
# Sanity check (there should be 22278 users in total in the full dataset)
print('Number of users who churn:', num_churn_users)
print("Number of users who don't churn:", num_not_churn_users)
# Fix: the total here counts *users* (22,278 per the sanity comment above),
# not the 26,259,199 event rows the old message claimed.
print('Total (should be 22,278 users):', num_churn_users + num_not_churn_users)
# As we see, there is class imbalance in the full dataset with ~22% of users who churn and ~78% of users who don't churn.
# #### Some analysis
# +
# How many songs do users listen to on average between visiting our home page (calculation shown in the Udacity course)
fun = udf(lambda ishome : int(ishome == 'Home'), IntegerType())
# Per-user window in descending time order: the running sum of `homevisit`
# numbers the listening periods between consecutive Home visits.
user_window = Window \
    .partitionBy('userID') \
    .orderBy(F.desc('ts')) \
    .rangeBetween(Window.unboundedPreceding, 0)
cusum_churn = churn_users.filter((col('page') == 'NextSong') | (col('page') == 'Home')) \
    .select('userID', 'page', 'ts') \
    .withColumn('homevisit', fun(col('page'))) \
    .withColumn('period', F.sum('homevisit').over(user_window))
cusum_churn = cusum_churn.filter((col('page') == 'NextSong')) \
    .groupBy('userID', 'period') \
    .agg({'period':'count'}) \
    .agg({'count(period)':'avg'}).withColumnRenamed('avg(count(period))', 'churnAvg(count(period))') \
    .withColumn("id", F.monotonically_increasing_id())
cusum_not_churn = not_churn_users.filter((col('page') == 'NextSong') | (col('page') == 'Home')) \
    .select('userID', 'page', 'ts') \
    .withColumn('homevisit', fun(col('page'))) \
    .withColumn('period', F.sum('homevisit').over(user_window))
cusum_not_churn = cusum_not_churn.filter((col('page') == 'NextSong')) \
    .groupBy('userID', 'period') \
    .agg({'period':'count'}) \
    .agg({'count(period)':'avg'}).withColumnRenamed('avg(count(period))', 'notChurnAvg(count(period))') \
    .withColumn("id", F.monotonically_increasing_id())
result = cusum_churn.join(cusum_not_churn, "id", "outer").drop('id')
# Fix: the assignment above carried a stray trailing backslash that merged it
# with this statement and broke the cell with a SyntaxError.
result.show()
# -
# Interestingly in the full-dataset the number of songs users listen to on average between visiting our home page is similar in the not churn group and in the group of users who churn.
# +
# Calculating number of songs played given time frame
def add_time_columns(df):
    """Count 'NextSong' plays per hour of day and per weekday.

    Returns a pair of pandas DataFrames: (plays grouped by hour 0-23,
    plays grouped by weekday with 'Mon'..'Sun' labels). Timestamps are
    epoch milliseconds, hence the /1000 conversion in the UDFs.
    """
    # Hour-of-day column derived from the millisecond timestamp.
    to_hour = udf(lambda ms: int(datetime.fromtimestamp(ms / 1000.0).hour), IntegerType())
    timed = df.withColumn('hour', to_hour(df.ts))
    hourly_counts = timed.filter(timed.page == 'NextSong').groupby('hour').count().orderBy('hour')
    hourly_pd = hourly_counts.toPandas()
    # Weekday column (0 = Monday) from the same timestamp.
    to_weekday = udf(lambda ms: int(datetime.fromtimestamp(ms / 1000.0).weekday()), IntegerType())
    timed = timed.withColumn('weekday', to_weekday(timed.ts))
    weekly_counts = timed.filter(timed.page == 'NextSong').groupby('weekday').count().orderBy('weekday')
    weekly_pd = weekly_counts.toPandas()
    weekly_pd.weekday = weekly_pd.weekday.map({0: 'Mon', 1: 'Tue', 2: 'Wed', 3: 'Thu', 4: 'Fri', 5: 'Sat', 6: 'Sun'})
    return hourly_pd, weekly_pd
# Hourly/weekday play counts for each cohort.
churn_songs_hour, churn_songs_weekday = add_time_columns(churn_users)
not_churn_songs_hour, not_churn_songs_weekday = add_time_columns(not_churn_users)
# -
# Normalise counts by cohort size so the two groups are comparable per user.
fig, ax = plt.subplots(1,2, figsize=(12,5))
ax[0].scatter(churn_songs_hour['hour'], churn_songs_hour['count']/num_churn_users, label='churn')
ax[0].scatter(not_churn_songs_hour['hour'], not_churn_songs_hour['count']/num_not_churn_users, label='not churn')
ax[0].set_xlim(-1, 24)
ax[0].set_ylim(0, 1.2 * max(not_churn_songs_hour["count"]/num_not_churn_users))
ax[0].set_xlabel("Hour")
ax[0].set_ylabel("Songs played per user")
ax[0].set_title("Songs played by Hour")
ax[0].legend(loc='best')
ax[1].scatter(churn_songs_weekday['weekday'], churn_songs_weekday['count']/num_churn_users, label='churn')
ax[1].scatter(not_churn_songs_weekday['weekday'], not_churn_songs_weekday['count']/num_not_churn_users, label='not churn')
ax[1].set_xlim(-0.5, 6.5)
ax[1].set_ylim(0, 1.2 * max(not_churn_songs_weekday["count"]/num_not_churn_users))
ax[1].set_xlabel("Week day")
ax[1].set_ylabel("Songs played per user")
ax[1].set_title("Songs played by Week day")
ax[1].legend(loc='best')
fig.tight_layout();
# %matplot plt
# Users that churn and those who don't behave similarly w.r.t. the time, however playing less songs in the churn group.
# +
# How the number of interactions change over time, since user registrates
# (head() after orderBy returns the row with the min/max timestamp)
earliest = df.select('ts').orderBy(col('ts')).head()
latest = df.select('ts').orderBy(col('ts'), ascending=False).head()
print(f'Earliest record in full dataset is {datetime.fromtimestamp(earliest.ts / 1000.0)}')
print(f'Latest record in full dataset is {datetime.fromtimestamp(latest.ts / 1000.0)}')
# +
# Count actions per user per day
# We randomly select 1 user who have churned and 1 user who haven't churned for comparison
churner = np.random.choice(churner_ids, size=1, replace=False).tolist()
not_churner = np.random.choice(not_churner_ids, size=1, replace=False).tolist()
def get_actions_by_day(df, ids):
    """Return a per-day action-count frame for the given userIds.

    One row per (userId, day), one column per page type, plus a
    'runningDaysFromFirstInteration' column numbering each day relative to
    the user's first interaction (misspelling kept: downstream plotting
    refers to this exact column name).
    """
    actions = df.where(df.userId.isin(ids))\
        .withColumn('day', F.date_trunc('day', F.from_unixtime(col('ts')/1000)))\
        .groupBy('userId', 'day', 'page').agg({'page': 'count'})\
        .orderBy('userId','day', 'page')
    # We want each line to be a day, and columns for counts of each page action
    actions = actions.groupBy(col('userId'), col('day')).pivot('page')\
        .agg(F.first('count(page)')).drop('page').orderBy('userId','day')
    # In order to compare users, we transform day of the month in a running day number (e.g. a user's first active day becomes day 0)
    first_interactions = actions.select('userId', 'day').groupBy('userId').agg({'day': 'min'})
    actions = actions.join(first_interactions, on='userId').withColumn('runningDaysFromFirstInteration', F.datediff('day',col('min(day)'))).drop('min(day)')
    # Fill nulls with zeros (no actions of that type in the day)
    actions = actions.fillna(0)
    return actions
churner_actions = get_actions_by_day(df, churner)
not_churner_actions = get_actions_by_day(df, not_churner)
# -
# Move both per-day frames to pandas and label each cohort before concatenating.
churner_actions_pd = churner_actions.toPandas()
not_churner_actions_pd = not_churner_actions.toPandas()
churner_actions_pd['churn'] = pd.Series(np.ones(churner_actions_pd.shape[0]))
# Fix: the zeros vector must match the *non-churner* frame's length — the old
# code sized it with churner_actions_pd.shape[0], producing NaNs (or a short
# column) whenever the two users had different numbers of active days.
not_churner_actions_pd['churn'] = pd.Series(np.zeros(not_churner_actions_pd.shape[0]))
churner_actions_pd.drop('day', axis=1, inplace=True)
not_churner_actions_pd.drop('day', axis=1, inplace=True)
actions = pd.concat([churner_actions_pd, not_churner_actions_pd])
# Page-action columns: skip userId at the front and the running-day/churn columns at the end.
cols = churner_actions_pd.columns[1:-2]
# +
# One subplot per page-action column, laid out 5 per row.
ax_i = int(np.ceil(cols.shape[0]/5))
ax_j = 5
fig, ax = plt.subplots(ax_i, ax_j, figsize=(26,16))
for i in range(ax_i):
    for j in range(ax_j):
        sns.lineplot(x='runningDaysFromFirstInteration', y=cols[j + 5*i], hue='churn', data=actions, ax=ax[i][j])
        ax[i][j].set_title(f'Action "{cols[j + 5*i]}" vs day')
        # Stop after the last column; this only breaks the inner loop, but the
        # last column always falls in the last row, so no index overflow occurs.
        if (j + 5*i) == len(cols)-1:
            break
fig.tight_layout();
# -
# %matplot plt
# +
# Users who Downgrade Their Accounts
# We find when users downgrade their accounts and then flag those log entries.
# Then we use a window function to create 2 phases (0 for pre-downgrade 1 for pos-downgrade) using a cumulative sum for each user.
flag_downgrade_event = udf(lambda x: 1 if x == 'Submit Downgrade' else 0, IntegerType())
df_downgraded = df.withColumn('downgraded', flag_downgrade_event('page'))
# Descending-time window: the cumulative sum flips from 0 to 1 at the downgrade event.
windowval = Window.partitionBy('userId').orderBy(F.desc('ts')).rangeBetween(Window.unboundedPreceding, 0)
df_downgraded = df_downgraded.withColumn('phase', F.sum('downgraded').over(windowval))
# -
# %spark -o df_downgraded_1994680
# Taking userId 1994680 as example
# Fix: the statement below used to end with a stray trailing backslash, which
# swallowed the next line and made the cell a SyntaxError.
df_downgraded_1994680 = df_downgraded.select(["userId", "firstname", "ts", "page", "level", "downgraded", "phase"])\
    .where(col('userId') == "1994680").sort(F.desc("ts"))
# %local
df_downgraded_1994680.iloc[7:26,:]
# # Feature Engineering
# We need to make each row of our dataset to be an user.
# Possible features (considering only all account time, not aggregations in recent vs distant events):
# - artist: distinct artists listened
# - song: distinct songs listened
# - length: average length of songs listened
# - gender: one-hot encode (M/F)
# - itemInSession: average items in session
# - sessionId: can be used calculate average length of sessions
# - level: one-hot encode (Free/Paid)
# - location: percentage of interactions of user at specific location
# - page: counts by page type
# - status: counts by status codes
# - userAgent: get percentage of interactions of user using specific user agent - device, system information, platform, etc.
# - accountLifeTime: time an user has an account, from the first interaction until the present or the moment he/she cancels account
# +
# Helper functions for cleaning and calculating percentages
def get_mappings(cols, prefix):
    """Return a dict mapping generated safe column names to original names.

    Example: get_mappings(['NY', 'LA'], 'loc_') ->
    {'loc_0': 'NY', 'loc_1': 'LA'}. Used to replace raw location/user-agent
    strings (which may contain awkward characters) with sequential names.
    """
    return {prefix + str(position): original for position, original in enumerate(cols)}
def calculate_perc(percentage_df , mappings):
    """Rename pivoted count columns to their safe names and turn each count
    into a fraction of the user's total interactions.

    `mappings` maps new safe name -> existing raw column name (as built by
    get_mappings). The 'userTotalInterations' helper column (spelling kept —
    it matches the column created by the callers) is dropped at the end.
    """
    for new_name, existing_name in mappings.items():
        percentage_df = percentage_df.withColumnRenamed(existing_name, new_name)
        percentage_df = percentage_df.withColumn(new_name, col(new_name)/col('userTotalInterations'))
    percentage_df = percentage_df.drop('userTotalInterations')
    return percentage_df
# +
def compute_features(df):
    '''
    Compute one row of model features per user from the raw event log.

    Parameters
    ----------
    df : Spark dataframe with the schema of the raw dataset

    Returns
    -------
    df_features: Spark dataframe with one row per userId and computed features
    location_mappings: dict mapping encoded location columns to raw locations
    sys_agent_mappings: dict mapping encoded columns to user-agent system info
    plat_agent_mappings: dict mapping encoded columns to user-agent platforms
    '''
    # Create `day` column for aggregating days and keeping information about month and year
    df_features = df
    # print('df_features RAW shape (row, cow)', df_features.count(), len(df_features.columns))
    df_features = df_features.withColumn('day', F.date_trunc('day', F.from_unixtime(col('ts')/1000)))
    # Create `userAgentSystemInformation` and `userAgentPlatform` columns for retrieving separate information from `userAgent`
    # (system info = text inside the parentheses, platform = text after them)
    df_features = df_features.withColumn('userAgentSystemInformation', F.regexp_extract(col('userAgent'),'(?<=\().+?(?=\))',0))
    df_features = df_features.withColumn('userAgentPlatform', F.regexp_extract(col('userAgent'),'(?<=\)).+',0))
    df_features = df_features.drop('userAgent')
    # Intermediate DF to calculate counts of actions by page type, per user
    page_counts = df_features.groupBy('userId', 'page')\
        .agg({'page': 'count'})\
        .groupBy(col('userId')).pivot('page')\
        .agg(F.first('count(page)')).drop('page')\
        .fillna(0)
    # Intermediate DF to calculate average length of user sessions
    session_avg_length = df_features.groupby('userId', 'sessionId')\
        .agg(
            F.min(col('ts')).alias('startSession'),
            F.max(col('ts')).alias('endSession')
        )\
        .groupby('userId')\
        .agg(
            F.avg(col('endSession')-col('startSession')).alias('avgSessionLength')
        )
    # Intermediate DF to calculate percentage of interactions at specific location, per user
    location_percentage = df_features.groupBy('userId', 'location').agg({'location': 'count'})
    total_interations = df_features.groupBy('userId').agg(F.count('userId').alias('userTotalInterations'))
    location_percentage = location_percentage.groupBy(col('userId')).pivot('location')\
        .agg(F.first('count(location)'))\
        .fillna(0).join(total_interations,on='userId').drop('location')
    location_cols = location_percentage.columns
    location_cols.remove('userId')
    location_cols.remove('userTotalInterations')
    # Deal with bad column names
    location_mappings = get_mappings(location_cols, 'location_')
    location_percentage = calculate_perc(location_percentage , location_mappings)
    # Intermediate DF to calculate percentage of interactions using specific user-agent system information, per user
    countSysInfo = df_features.groupBy('userId', 'userAgentSystemInformation')\
        .agg(
            F.count('userAgentSystemInformation').alias('sysInfoCount')
        )
    total = df_features.groupBy('userId').agg(F.count('userId').alias('userTotalInterations'))
    countSysInfo = countSysInfo.groupBy(col('userId')).pivot('userAgentSystemInformation')\
        .agg(F.first('sysInfoCount'))\
        .fillna(0).join(total,on='userId').drop('userAgentSystemInformation')
    sys_cols = countSysInfo.columns
    sys_cols.remove('userId')
    sys_cols.remove('userTotalInterations')
    # Deal with bad column names
    sys_agent_mappings = get_mappings(sys_cols, 'sys_agent_')
    percentage_sys_info = calculate_perc(countSysInfo , sys_agent_mappings)
    # Intermediate DF to calculate percentage of interactions using specific user-agent platform information, per user
    countPlat = df_features.groupBy('userId', 'userAgentPlatform')\
        .agg(
            F.count('userAgentPlatform').alias('platformCount')
        )
    countPlat = countPlat.groupBy(col('userId')).pivot('userAgentPlatform')\
        .agg(F.first('platformCount'))\
        .fillna(0).join(total,on='userId').drop('userAgentPlatform')
    plat_cols = countPlat.columns
    plat_cols.remove('userId')
    plat_cols.remove('userTotalInterations')
    # Deal with bad column names
    plat_agent_mappings = get_mappings(plat_cols, 'plat_agent_')
    percentage_plat = calculate_perc(countPlat , plat_agent_mappings)
    # print('page_counts shape (row, cow)', page_counts.count(), len(page_counts.columns))
    # print('session_avg_length shape (row, cow)', session_avg_length.count(), len(session_avg_length.columns))
    # print('location_percentage shape (row, cow)', location_percentage.count(), len(location_percentage.columns))
    # print('percentageSysInfo shape (row, cow)', percentage_sys_info.count(), len(percentage_sys_info.columns))
    # print('percentagePlat shape (row, cow)', percentage_plat.count(), len(percentage_plat.columns))
    # Main per-user aggregation: listening variety, demographics, plan and HTTP-status counts.
    # NOTE(review): confirm the capitalization of `level` values in the data
    # ('paid'/'free' vs 'Paid'/'Free'); if the data is lowercase, both
    # interactionsPaid and interactionsFree below would always sum to 0.
    df_features = df_features.groupby('userId')\
        .agg(
            F.countDistinct('artist').alias('distinctArtistsListened'),
            F.countDistinct('song').alias('distinctSongsListened'), # simplification, disregarding songs with the same name and different artists
            F.avg('length').alias('avgLength'),
            F.first(F.when(col('gender') == 'M', 1).otherwise(0)).alias('isMale'),
            F.avg('itemInSession').alias('avgItemsInSession'),
            F.sum(F.when(col('level') == 'Paid', 1).otherwise(0)).alias('interactionsPaid'),
            F.sum(F.when(col('level') == 'Free', 1).otherwise(0)).alias('interactionsFree'),
            F.sum(F.when(col('status') == 307, 1).otherwise(0)).alias('statusCount307'),
            F.sum(F.when(col('status') == 404, 1).otherwise(0)).alias('statusCount404'),
            F.sum(F.when(col('status') == 200, 1).otherwise(0)).alias('statusCount200'),
            F.min('day').alias('firstInteraction'),
            F.max('day').alias('lastInteraction'),
        )
    # print('df_features after AGGs shape (row, cow)', df_features.count(), len(df_features.columns))
    df_features = df_features.join(page_counts, on='userId')\
        .join(session_avg_length, on='userId')\
        .join(location_percentage, on='userId')\
        .join(percentage_sys_info, on='userId')\
        .join(percentage_plat, on='userId')
    # print('df_features after JOINs shape (row, cow)', df_features.count(), len(df_features.columns))
    # Days between the user's first and last visible interaction.
    df_features = df_features.withColumn('accountLifeTime', F.datediff( col('lastInteraction'), col('firstInteraction') ) )
    df_features = df_features.drop('lastInteraction','firstInteraction')
    # print('df_features after NEW COL shape (row, cow)', df_features.count(), len(df_features.columns))
    # Handle NaNs, Nulls
    df_features = df_features.fillna(0)
    # print('df_features after FILLNA shape (row, cow)', df_features.count(), len(df_features.columns))
    return df_features, location_mappings, sys_agent_mappings, plat_agent_mappings
# -
# Time the full feature computation over the whole event log.
start = time.time()
df_features, location_mappings, sys_agent_mappings, plat_agent_mappings = compute_features(df)
end = time.time()
print(f'Spent {end-start}s in feature computation')
# Add churn column as label
churn = df.select('userId','churn').distinct()
df_features_label = df_features.join(churn, on='userId')
df_features_label = df_features_label.withColumnRenamed('churn', 'label')
# +
# %spark -o df_features_label -n 5
# Drop userId, as it's not a feature
df_features_label = df_features_label.drop('userId')
# -
# %local
df_features_label
print('df_features_label shape:', df_features_label.count(), len(df_features_label.columns))
# +
# Save features in my S3 bucket called emr-sparkify, partitioning by date
now = datetime.now()
s3_path = f's3n://emr-sparkify/processed/{now.year}/{now.month}/{now.day}/{now.hour}/{now.minute}'
print(f'Saving data in {s3_path}...')
df_features_label.write.csv(s3_path, mode='overwrite', header=True)
# -
# As seen, EMR has processed and saved the data partitioned by date in the S3 bucket and in multiple csv files:
#
# <img src="./media/s3.png" alt="s3">
# For simplifications (after several trials with different cluster configurations, number of instances, adding spot task instances amd timeouts from JupyterLab), I broke the work into 2 notebooks. The [second notebook](./Sparkify-cluster.ipynb) focus on modeling and evaluating the results.
#
# In the data pipeline, breaking the logic here into 3 Spark applications would make sense:
#
# - one for processing features
# - one for training (when desired)
# - another one for inference.
| spark-churn/Sparkify-cluster.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
s_p = [10, 7, 5, 8, 11, 9]

def get_max_profit(stock_prices):
    """Return the best profit from one buy followed by one later sell.

    Brute force O(n^2): tries every buy/sell pair with the sell strictly
    after the buy. A trade is mandatory, so an all-decreasing series
    returns the smallest (negative) loss rather than 0.
    Raises Exception for empty or single-price inputs.
    """
    if not stock_prices:
        raise Exception('empty')
    elif len(stock_prices) == 1:
        raise Exception('just one stock')
    # Seed with the first possible trade so a falling market reports the
    # smallest loss; the old 0 seed (combined with scanning unordered pairs,
    # including buy == sell) masked losses by reporting a fake 0 "profit".
    max_profit = stock_prices[1] - stock_prices[0]
    for buy_time in range(len(stock_prices)):
        # Only consider selling strictly after buying.
        for sell_time in range(buy_time + 1, len(stock_prices)):
            potential_profit = stock_prices[sell_time] - stock_prices[buy_time]
            max_profit = max(max_profit, potential_profit)
    return max_profit

get_max_profit(s_p)
def get_max_profit(stock_prices):
    """Brute-force max profit from one buy followed by one later sell.

    Returns 0 if every possible trade would lose money. O(n^2) time,
    O(1) space.

    Raises:
        Exception: for an empty list or a single price.
    """
    if not stock_prices:
        raise Exception('empty')
    elif len(stock_prices) == 1:
        raise Exception('just one stock')
    else:
        best = 0
        # Pair each buy price with every strictly later sell price.
        for buy_index, buy_price in enumerate(stock_prices):
            for sell_price in stock_prices[buy_index + 1:]:
                best = max(best, sell_price - buy_price)
        return best

get_max_profit([9, 7, 4, 1])
# +
def get_max_profit_greedy(stock_prices):
    """Greedy one-pass max profit from one buy followed by one later sell.

    Tracks the lowest price seen so far and the best sell-now profit.
    Unlike the brute-force versions above, this returns a negative
    number when every trade loses money (the least-bad trade).
    O(n) time, O(1) space.

    Raises:
        ValueError: when fewer than two prices are supplied. (The
            original's comment asked for this check but it was never
            written, so short inputs crashed with an IndexError.)
    """
    if stock_prices is None or len(stock_prices) < 2:
        raise ValueError('getting a profit requires at least 2 prices')
    max_profit = stock_prices[1] - stock_prices[0]
    low = stock_prices[0]
    for price in stock_prices[1:]:
        potential_profit = price - low
        max_profit = max(max_profit, potential_profit)
        low = min(low, price)
    return max_profit
# -

get_max_profit_greedy([9, 7, 4, 1])
# ## Merging Meeting times
#a=[(1, 3), (2, 4)]
a= [(1, 2), (2, 3)]
#a=[(1, 5), (2, 3)]
# Scratch/exploratory attempt at merging overlapping meeting ranges;
# superseded by mergeRanges() defined just below.
for i in range(len(a)-1):
    if a[i][i+1]>=a[i+1][i]:
        # NOTE(review): `[i+1][i]` below is a bare list literal indexed
        # by i -- for i=0 it evaluates to 1, and for i>=1 it would raise
        # IndexError. Almost certainly a typo for `a[i+1][i]`. Left
        # untouched because this cell is scratch work.
        if a[i][i+1]>[i+1][i]:
            result=a[i][i],a[i][i+1]
        else:
            result=a[i][i],a[i+1][i+1]
a[0][1]
a[1][1]
result
sorted(a)
## greedy
## time O(nlogn)
## space O(ns)
def mergeRanges(meetings):
    """Condense a list of (start, end) meeting ranges.

    Sorts by start time, then folds each meeting into the previous
    merged range whenever they touch or overlap. Returns a list of
    (start, end) tuples. O(n log n) time, O(n) space.
    """
    ordered = sorted(meetings)
    merged = [ordered[0]]
    for start, end in ordered[1:]:
        last_start, last_end = merged[-1]
        # Touching ranges (start == last_end) also merge.
        if start <= last_end:
            merged[-1] = (last_start, max(last_end, end))
        else:
            merged.append((start, end))
    return merged
mergeRanges(a)
# ## product of other numbers
list_of_ints=[1,7,3,4]

def get_products_of_all_ints_except_at_index(int_list):
    """Return a list whose slot i holds the product of every element of
    int_list except int_list[i].

    Brute force: O(n^2) time, O(n) space for the result.

    Raises:
        ValueError: if fewer than two elements are given. (The original
            raised the misspelled name `valueError`, which was itself a
            NameError at runtime.)
    """
    if len(int_list) < 2:
        raise ValueError('error')
    resultado = []
    for i in range(len(int_list)):
        product = 1
        for j in range(len(int_list)):
            if j != i:
                product *= int_list[j]
        resultado.append(product)
    return resultado

# Fixed: the original assigned the bare function object (missing call
# parentheses); actually invoke it on the sample input.
resultado = get_products_of_all_ints_except_at_index(list_of_ints)
# +
def products_of_all_ints_except_at_index(list_of_ints):
    """Return a list whose slot i is the product of every input element
    except list_of_ints[i], computed without division.

    Two sweeps: a forward pass stores the product of everything before
    each index, then a backward pass multiplies in the product of
    everything after it. O(n) time, O(n) space.

    Raises:
        IndexError: when fewer than two elements are supplied. (Message
            typo "this functios need" fixed.)
    """
    if len(list_of_ints) < 2:
        raise IndexError('this function needs at least two elements')
    product_of_all_except_at_index = [None] * len(list_of_ints)
    # Forward pass: product of all elements before index i.
    product_so_far = 1
    for i in range(len(list_of_ints)):
        product_of_all_except_at_index[i] = product_so_far
        product_so_far *= list_of_ints[i]
    # Backward pass: fold in the product of all elements after index i.
    product_so_far = 1
    for i in range(len(list_of_ints) - 1, -1, -1):
        product_of_all_except_at_index[i] *= product_so_far
        product_so_far *= list_of_ints[i]
    return product_of_all_except_at_index
# -
products_of_all_ints_except_at_index(list_of_ints)
list_of_ints
# ## find duplicate (arrays)
def find_repeat(numbers):
    """Return the first value that appears a second time while scanning
    `numbers` left to right.

    O(n) time, O(n) space for the seen-set.

    Raises:
        Exception: when all values are unique.
    """
    seen = set()
    for candidate in numbers:
        if candidate in seen:
            return candidate
        seen.add(candidate)
    raise Exception('no duplicate')

lista=[1,2,3,4,5,6,7,7,7,8,9]
find_repeat(lista)
def find_repeat_space(numbers):
    """Find a duplicated value in a list of n+1 integers drawn from
    1..n, using only O(1) extra space.

    Binary-searches the *range of values* (not the list itself):
    whichever half of 1..n holds more list items than it has distinct
    integers must contain a duplicate. The input is never modified.
    O(n log n) time, O(1) space.
    """
    floor, ceiling = 1, len(numbers) - 1
    while floor < ceiling:
        midpoint = floor + (ceiling - floor) // 2
        lower_floor, lower_ceiling = floor, midpoint
        upper_floor, upper_ceiling = midpoint + 1, ceiling
        # Count how many items land in the lower half of the value range.
        items_in_lower = sum(
            1 for item in numbers if lower_floor <= item <= lower_ceiling
        )
        distinct_in_lower = lower_ceiling - lower_floor + 1
        # Pigeonhole: more items than distinct values => duplicate here.
        if items_in_lower > distinct_in_lower:
            floor, ceiling = lower_floor, lower_ceiling
        else:
            floor, ceiling = upper_floor, upper_ceiling
    return floor
find_repeat_space(lista)
# Sample input: note [3,5] and [4,8] overlap and should merge to (3, 8).
meetings=[[0,1], [3,5] ,[4,8]]
# Quick check of pair unpacking in the loop header.
for currentMeetingStart,currentMeetingEnd in meetings:
    print(currentMeetingStart)
    print(currentMeetingEnd)
# +
# Scratch version of the merge logic, wrapped up as merge_ranges() below.
sortMeetings=sorted(meetings)
mergedMeetings=[sortMeetings[0]]
for currentMeetingStart,currentMeetingEnd in sortMeetings[1:]:
    lastMeetingMergedStart, lastMeetingMergedEnd=mergedMeetings[-1]
    # Overlap (or touch): extend the last merged range.
    if currentMeetingStart <=lastMeetingMergedEnd:
        mergedMeetings[-1]=(lastMeetingMergedStart,max(lastMeetingMergedEnd,currentMeetingEnd))
    else:
        mergedMeetings.append((currentMeetingStart,currentMeetingEnd))
# -
print(mergedMeetings)
# O(nlogn)
# O(n)
# +
def merge_ranges(meetings):
    """Merge overlapping or touching (start, end) meeting ranges.

    Sorts by start time and folds overlapping neighbours together.
    O(n log n) time, O(n) space.

    Raises:
        ValueError: on an empty input. (The original raised the
            misspelled name `indexError`, which was itself a NameError.)
    """
    if len(meetings) < 1:
        raise ValueError('need more data')
    sortMeetings = sorted(meetings)
    mergedMeetings = [sortMeetings[0]]
    for currentMeetingStart, currentMeetingEnd in sortMeetings[1:]:
        lastMeetingMergedStart, lastMeetingMergedEnd = mergedMeetings[-1]
        if currentMeetingStart <= lastMeetingMergedEnd:
            # Overlap (or touch): extend the previous merged range.
            mergedMeetings[-1] = (lastMeetingMergedStart,
                                  max(lastMeetingMergedEnd, currentMeetingEnd))
        else:
            mergedMeetings.append((currentMeetingStart, currentMeetingEnd))
    return mergedMeetings
# -
# ### is a balanced tree?
class binary_tree(object):
    """Minimal binary tree node: a value plus left/right child links."""

    def __init__(self, value):
        self.value = value  # payload stored at this node
        self.left = None    # left child (binary_tree or None)
        self.right = None   # right child (binary_tree or None)

    def insert_left(self, value):
        """Attach a new node as the left child and return it."""
        child = binary_tree(value)
        self.left = child
        return child

    def insert_right(self, value):
        """Attach a new node as the right child and return it."""
        child = binary_tree(value)
        self.right = child
        return child
def is_balanced(tree_root):
    """Return True when the tree is 'superbalanced': the depths of any
    two leaf nodes differ by no more than 1.

    Iterative depth-first walk with an explicit stack of (node, depth)
    pairs, bailing out as soon as the leaf depths seen so far rule
    balance out. O(n) time, O(n) space.
    """
    if tree_root is None:
        return True

    leaf_depths = []
    stack = [(tree_root, 0)]
    while stack:
        node, depth = stack.pop()
        if (not node.left) and (not node.right):
            # Leaf: remember its depth if we have not seen it before.
            if depth not in leaf_depths:
                leaf_depths.append(depth)
                # More than two distinct depths, or two depths more than
                # one apart, means the tree cannot be superbalanced.
                if (len(leaf_depths) > 2
                        or (len(leaf_depths) == 2
                            and abs(leaf_depths[0] - leaf_depths[1]) > 1)):
                    return False
        else:
            # Internal node: keep stepping down.
            if node.left:
                stack.append((node.left, depth + 1))
            if node.right:
                stack.append((node.right, depth + 1))
    return True
# O(n) time, O(n) space
# ### bst checker
# O(n) time
# O(log n) space
def BSTHelper(root, minimo, maximo):
    """Recursively check the BST invariant, constraining each subtree to
    the open interval (minimo, maximo); a None bound means unbounded.

    O(n) time, O(h) call-stack space for tree height h.
    """
    if root is None:
        return True
    # Strict bounds on BOTH sides so duplicates are rejected
    # consistently. The original used `> maximo` here, so a left child
    # equal to an ancestor slipped through while an equal right child
    # was (correctly) rejected via the minimo check.
    if ((minimo is not None and root.value <= minimo)
            or (maximo is not None and root.value >= maximo)):
        return False
    if not BSTHelper(root.left, minimo, root.value):
        return False
    if not BSTHelper(root.right, root.value, maximo):
        return False
    return True

def is_binary_search_tree(root):
    """Return True when every node satisfies the BST ordering property
    (left subtree strictly smaller, right subtree strictly larger)."""
    return BSTHelper(root, None, None)
# ### find the second largest node of a tree
# O(h) worst case time
# O(log h) best casem time
# O(1) space
def find_largest(root):
    """Return the largest value in a BST by walking to the rightmost node.

    O(h) time for tree height h, O(1) space.

    Raises:
        ValueError: on an empty tree.
    """
    if root is None:
        raise ValueError('at least 1 node is required')
    node = root
    while node.right:
        node = node.right
    return node.value
# Build a small sample BST and exercise find_largest (expected: 80).
tree = binary_tree(50)
left = tree.insert_left(30)
right = tree.insert_right(70)
left.insert_left(10)
left.insert_right(40)
right.insert_left(60)
right.insert_right(80)
find_largest(tree)
def find_second_largest(root):
    """Return the second-largest value in a BST.

    Walks toward the largest (rightmost) node, handling two cases on
    the way down:
    - current node has a left subtree but no right subtree: the answer
      is the largest value in that left subtree;
    - current node's right child is the rightmost leaf: the current
      node itself is the second largest.
    O(h) time, O(1) space.

    Raises:
        ValueError: with fewer than two nodes.
    """
    if root is None or (root.left is None and root.right is None):
        raise ValueError('at least 2 nodes are required')
    node = root
    while node:
        if node.left and not node.right:
            return find_largest(node.left)
        if (node.right
                and not node.right.left
                and not node.right.right):
            return node.value
        node = node.right
# ### find duplicate space optimization
# +
def find_duplicate(int_list):
    """Return a duplicated value from a list of n+1 integers in 1..n,
    in O(n) time and O(1) space, without modifying the input.

    Interprets each value v at position p as a pointer to position v.
    With n+1 positions but only n targets this walk must enter a cycle,
    and the cycle's entry position equals the duplicated value
    (Floyd-style three-phase search).
    """
    n = len(int_list) - 1
    # Phase 1: take n steps from position n+1 to guarantee we are
    # inside the cycle (nothing can point back to position n+1).
    position_in_cycle = n + 1
    for _ in range(n):
        position_in_cycle = int_list[position_in_cycle - 1]
    # Phase 2: measure the cycle length by walking until this position
    # comes around again.
    remembered_position_in_cycle = position_in_cycle
    current_position_in_cycle = int_list[position_in_cycle - 1] # 1 step ahead
    cycle_step_count = 1
    while current_position_in_cycle != remembered_position_in_cycle:
        current_position_in_cycle = int_list[current_position_in_cycle - 1]
        cycle_step_count += 1
    # Phase 3: two pointers kept cycle_step_count apart meet exactly at
    # the start of the cycle -- the duplicated value.
    pointer_start = n + 1
    pointer_ahead = n + 1
    for _ in range(cycle_step_count):
        pointer_ahead = int_list[pointer_ahead - 1]
    while pointer_start != pointer_ahead:
        pointer_start = int_list[pointer_start - 1]
        pointer_ahead = int_list[pointer_ahead - 1]
    return pointer_start
# -
# ### recursive String permutation
def get_permutations(string):
    """Return the set of all permutations of `string`.

    Recursive: permute everything except the last character, then
    splice the last character into every possible slot of each
    sub-permutation.
    """
    # Base case: the empty string has exactly one permutation (itself).
    if len(string) < 1:
        return set([string])

    head = string[:-1]
    last_char = string[-1]
    result = set()
    for sub in get_permutations(head):
        for slot in range(len(head) + 1):
            result.add(sub[:slot] + last_char + sub[slot:])
    return result
# ### nth fibonacci
def fib(n):
    """Return the n-th Fibonacci number (fib(0)=0, fib(1)=1), iteratively.

    O(n) time, O(1) space. The leftover debug print that ran on every
    loop iteration has been removed.

    Raises:
        ValueError: for negative n.
    """
    if n < 0:
        raise ValueError('non negative numbers')
    elif n in [0, 1]:
        return n
    prev_prev = 0
    prev = 1
    for _ in range(n - 1):
        current = prev_prev + prev
        prev_prev = prev
        prev = current
    return current

fib(9)
# ### coin? O(n*m) time, O(n)space
def change_possibilities(amount, denominations):
    """Count the ways to make `amount` cents from the given coin
    denominations (order of coins does not matter).

    Bottom-up dynamic programming over sub-amounts; iterating one coin
    at a time avoids counting the same combination in multiple orders.
    O(n*m) time, O(n) space for amount n and m denominations.
    """
    ways = [0] * (amount + 1)
    ways[0] = 1  # exactly one way to make zero: use no coins
    for coin in denominations:
        for subtotal in range(coin, amount + 1):
            ways[subtotal] += ways[subtotal - coin]
    return ways[amount]
# ### cake thief, O(n*k) time, O(k) space, n- number of cakes, k- capacity
def max_duffel_bag_value(cake_tuples, weight_capacity):
    """Return the maximum total value achievable from (weight, value)
    cakes in a bag of `weight_capacity`, with unlimited copies of each
    cake (unbounded knapsack).

    Returns float('inf') when any weightless cake has positive value.
    O(n*k) time, O(k) space for n cake types and capacity k.
    """
    best_at = [0] * (weight_capacity + 1)
    for capacity in range(weight_capacity + 1):
        best = 0
        for weight, value in cake_tuples:
            # A zero-weight, positive-value cake can be taken forever.
            if weight == 0 and value != 0:
                return float('inf')
            if weight <= capacity:
                best = max(best, value + best_at[capacity - weight])
        best_at[capacity] = best
    return best_at[weight_capacity]
# ### balanced binary tree
# "the difference between the depths of any two leaf nodes is no greater than 1"
# +
def is_balanced(tree_root):
    """Return True when the tree is 'superbalanced': the depths of any
    two leaf nodes differ by no more than 1. Iterative DFS; O(n) time,
    O(n) space for n nodes."""
    # A tree without nodes is superbalanced
    if tree_root is None:
        return True
    # distinct leaf depths seen so far (never needs more than 2 entries)
    depths=[]
    # explicit DFS stack holding (node, depth) tuples
    nodes=[]
    nodes.append((tree_root,0))
    while len(nodes):
        # pop a node and its depth from the top of the stack
        node,depth=nodes.pop()
        # leaf check: no children on either side
        if (not node.left) and (not node.right):
            # record only previously unseen depths
            if depth not in depths:
                depths.append(depth)
            # more than 2 distinct leaf depths, or 2 depths more than 1
            # apart, is enough to rule out superbalance
            if ((len(depths)>2)or (len(depths)==2 and abs(depths[0]-depths[1])>1 )):
                return False
        else:
            # not a leaf: keep stepping down
            if node.left:
                nodes.append((node.left,depth+1))
            if node.right:
                nodes.append((node.right,depth+1))
    return True
# -
# DFS because the use of Stacks
# O(n) time and space
# n--- nodes
# ### largest element in a stack
# O(1) time
# O(m) space because the second stack of max elements
# +
class Stack(object):
    """LIFO stack backed by a Python list; empty pops/peeks yield None."""

    def __init__(self):
        """Initialize an empty stack."""
        self.items = []

    def push(self, item):
        """Push a new item onto the stack."""
        self.items.append(item)

    def pop(self):
        """Remove and return the most recently pushed item.

        Returns None when the stack is empty (raising would also be a
        reasonable design choice).
        """
        if self.items:
            return self.items.pop()
        return None

    def peek(self):
        """Return the most recently pushed item without removing it,
        or None when the stack is empty."""
        if self.items:
            return self.items[-1]
        return None
class MaxStack(object):
    """Stack with O(1) get_max, tracked via a secondary stack of maxima."""

    def __init__(self):
        self.stack = Stack()        # all items
        self.maxes_stack = Stack()  # running maxima; top is the current max

    def push(self, item):
        """Push item; also record it as a max when it ties or beats the
        current top max (ties must be recorded so pops stay consistent)."""
        self.stack.push(item)
        top_max = self.maxes_stack.peek()
        if top_max is None or item >= top_max:
            self.maxes_stack.push(item)

    def pop(self):
        """Pop and return the top item, retiring it from the maxes
        stack when it was the current max."""
        item = self.stack.pop()
        if item == self.maxes_stack.peek():
            self.maxes_stack.pop()
        return item

    def get_max(self):
        """Return the largest item currently on the stack (None if empty)."""
        return self.maxes_stack.peek()
# -
# ### queue using two stacks
# O(m) time because the dequeue, O(1) queue
# O(1) Space
class QueueTwoStacks(object):
    """FIFO queue built from two LIFO stacks (plain lists).

    enqueue is O(1); dequeue is amortized O(1) because each item is
    moved from the in-stack to the out-stack at most once.
    """

    def __init__(self):
        self.in_stack = []
        self.out_stack = []

    def enqueue(self, item):
        """Add an item to the back of the queue."""
        self.in_stack.append(item)

    def dequeue(self):
        """Remove and return the front item.

        Raises:
            IndexError: when the queue is empty.
        """
        if not self.out_stack:
            # Flip the in-stack so the oldest item ends up on top.
            while self.in_stack:
                self.out_stack.append(self.in_stack.pop())
        if not self.out_stack:
            raise IndexError('cant dequeue a empty queue')
        return self.out_stack.pop()
# ### matching parenthesis
# O(n) time
# O(1) space
# +
def get_closing_paren(sentence, opening_paren_index):
    """Return the index of the ')' matching the '(' at
    opening_paren_index.

    Scans rightward, counting parentheses that open and close along the
    way. O(n) time, O(1) space.

    Raises:
        Exception: when no matching close parenthesis exists.
    """
    depth = 0
    for position in range(opening_paren_index + 1, len(sentence)):
        char = sentence[position]
        if char == '(':
            depth += 1
        elif char == ')':
            if depth == 0:
                return position
            depth -= 1
    raise Exception('No closing parenthesis')
# -
# ### Linked list cycle?
# O(n) time
# O(1) space
def contains_cycle(first_node):
    """Return True if the singly linked list starting at first_node
    loops back on itself.

    Floyd's tortoise-and-hare: a pointer advancing two nodes per step
    can only land on the one-step pointer again if the list cycles.
    O(n) time, O(1) space.
    """
    slow = fast = first_node
    while fast is not None and fast.next is not None:
        slow = slow.next
        fast = fast.next.next
        if fast is slow:
            return True
    return False
# ### delete node from linked list
# O(1) time
# O(1) Space
def delete_node(node_to_delete):
    """Delete a node from a singly linked list given only that node.

    Copies the successor's value and link into node_to_delete, which
    effectively removes the successor from the list. O(1) time/space.
    Caveat: any external reference to the successor now points at a
    node that has been spliced out.

    Raises:
        Exception: when asked to delete the last node (no successor).
    """
    successor = node_to_delete.next
    if successor is None:
        raise Exception('this method cannot delete the last element')
    node_to_delete.value = successor.value
    node_to_delete.next = successor.next
# ### Reverse Linked in place
# O(n) time
# O(1) space
def reverse_ll(head):
    """Reverse a singly linked list in place and return the new head.

    Walks the list once, flipping each node's next pointer back to the
    node behind it. O(n) time, O(1) space.
    """
    previous = None
    node = head
    while node:
        upcoming = node.next   # remember the rest of the list
        node.next = previous   # flip this link
        previous = node
        node = upcoming
    return previous
# ### kth to last node on singly linked list
# O(n) time
# O(1) space
def kth_to_last_node(k, head):
    """Return the k-th-to-last node of a singly linked list (k=1 is the
    tail).

    Two pointers kept k nodes apart advance together; when the lead
    pointer falls off the end, the trailing pointer is the answer.
    O(n) time, O(1) space.

    Raises:
        Exception: when k < 1.
    """
    if k < 1:
        raise Exception('k must be equal or greater than 1')
    lead = trail = head
    # Put the lead pointer k nodes ahead of the trailing one.
    for _ in range(k):
        lead = lead.next
    while lead:
        trail = trail.next
        lead = lead.next
    return trail
# ### find duplicate beast mode
# O(n) time
# O(1) space
# +
def find_duplicate(int_list):
    """Find a duplicated value in a list of n+1 integers from 1..n, in
    O(1) extra space and without modifying the input.

    Treat each value v at position p as a pointer to position v: with
    n+1 positions but only n targets, this walk must enter a cycle, and
    the cycle's entry position equals the duplicated value. Located
    Floyd-style: get inside the cycle, measure its length, then advance
    two pointers that distance apart until they meet. O(n) time.
    """
    n = len(int_list) - 1

    # Step 1: walk n steps from position n+1 to make sure we are inside
    # the cycle (nothing can ever point back to position n+1).
    position = n + 1
    for _ in range(n):
        position = int_list[position - 1]

    # Step 2: measure the cycle length by walking until this position
    # comes around again.
    anchor = position
    runner = int_list[position - 1]  # one step ahead of the anchor
    cycle_length = 1
    while runner != anchor:
        runner = int_list[runner - 1]
        cycle_length += 1

    # Step 3: two pointers cycle_length apart meet exactly at the start
    # of the cycle -- the duplicated value.
    behind = n + 1
    ahead = n + 1
    for _ in range(cycle_length):
        ahead = int_list[ahead - 1]
    while behind != ahead:
        behind = int_list[behind - 1]
        ahead = int_list[ahead - 1]
    return behind
# -
# this task already schedule this a new one
# * [12:00, 1min], [12:30, 5min]; <4:00, 5min>
#
def merge_lists(my_list, alices_list):
    """Merge two sorted lists into one sorted list.

    Merges in place into my_list, filling from the back so no separate
    merge buffer is needed, and returns the merged list. If exactly one
    argument is None the other is returned unchanged. O(n+m) time.

    Raises:
        ValueError: when both arguments are None. (In the original this
            check sat after the single-None short-circuits — making it
            unreachable — and raised the misspelled name `valueError`.)
    """
    if my_list is None and alices_list is None:
        raise ValueError('no empties arrays')
    if my_list is None:
        return alices_list
    if alices_list is None:
        return my_list

    p1 = len(my_list) - 1
    p2 = len(alices_list) - 1
    p = len(my_list) + len(alices_list) - 1

    # Grow my_list to its final size (placeholder zeros get overwritten).
    my_list.extend([0] * len(alices_list))

    # Fill from the back with the larger of the two current tails.
    while p1 >= 0 and p2 >= 0:
        if my_list[p1] < alices_list[p2]:
            my_list[p] = alices_list[p2]
            p2 -= 1
        else:
            my_list[p] = my_list[p1]
            p1 -= 1
        p -= 1

    # Any leftover items from alices_list belong at the front.
    my_list[:p2 + 1] = alices_list[:p2 + 1]
    return my_list
| hackerRank/IC.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# Load the tidyverse meta-package quietly (dplyr, ggplot2, ...).
suppressPackageStartupMessages(library(tidyverse))
# Peek at the built-in mtcars dataset and its row count.
head(mtcars)
nrow(mtcars)
# Scatter plot: horsepower vs. fuel economy, coloured by cylinder count.
mtcars %>%
ggplot(aes(hp, mpg, color = as.factor(cyl))) +
geom_point()
# Linear model of mpg on horsepower, weight, and their interaction.
mod = lm(mpg ~ hp * wt, data = mtcars)
summary(mod)
| nbs/dl1/homework/R Test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import bz2
import json
import pandas as pd
import numpy as np
import ijson
import matplotlib.pyplot as plt
from io import StringIO
import seaborn as sns
from sklearn.cluster import KMeans, AgglomerativeClustering, DBSCAN
from sklearn.preprocessing import StandardScaler
from sklearn.mixture import GaussianMixture
import scipy as spv
# -
# Stream-decompress the bz2 netflow dump and load it into pandas.
# NOTE(review): `header_list` is never defined anywhere in this notebook,
# so the read_csv call below raises NameError as written -- define the
# column names first.
filename="/home/aimed/sicurezza_informatica/network/netflow_day-02.bz2"
df=pd.DataFrame()
with bz2.open(filename) as f:
    data=f.read().decode('utf8')
    df = pd.read_csv(StringIO(data), header=None, names=header_list)
# Drop exact duplicate flow records.
df.drop_duplicates(inplace=True)
# Keep only flows with packets in both directions.
df=df[(df.DestPackets!=0) & (df.SrcPackets!=0)]
# Persist the cleaned frame as HDF5 for the downstream analysis notebook.
df.to_hdf("/home/aimed/sicurezza_informatica/network/day_02.h5",'df')
| Data_Import_and_Cleaning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Plotting with [cartopy](https://scitools.org.uk/cartopy/docs/latest/)
#
# From Cartopy website:
#
# * Cartopy is a Python package designed for geospatial data processing in order to produce maps and other geospatial data analyses.
#
# * Cartopy makes use of the powerful PROJ.4, NumPy and Shapely libraries and includes a programmatic interface built on top of Matplotlib for the creation of publication quality maps.
#
# * Key features of cartopy are its object oriented projection definitions, and its ability to transform points, lines, vectors, polygons and images between those projections.
#
# * You will find cartopy especially useful for large area / small scale data, where Cartesian assumptions of spherical data traditionally break down. If you’ve ever experienced a singularity at the pole or a cut-off at the dateline, it is likely you will appreciate cartopy’s unique features!
#
import numpy as np
import matplotlib.pyplot as plt
import xarray as xr
import cartopy.crs as ccrs
# # Read in data using xarray
# - Read in the Saildrone USV file either from a local disc `xr.open_dataset()
# - change latitude and longitude to lat and lon `.rename({'longitude':'lon','latitude':'lat'})`
#
# +
file = '../data/saildrone-gen_5-antarctica_circumnavigation_2019-sd1020-20190119T040000-20190803T043000-1440_minutes-v1.1564857794963.nc'
ds_usv =
# -
# # Open the dataset, mask land, plot result
# *`xr.open_dataset`
# * use `.where` to mask values equal to 1
# +
#If you are offline use the first url
#url = '../data/20111101120000-CMC-L4_GHRSST-SSTfnd-CMC0.2deg-GLOB-v02.0-fv02.0.nc'
url = 'https://podaac-opendap.jpl.nasa.gov/opendap/allData/ghrsst/data/GDS2/L4/GLOB/CMC/CMC0.2deg/v2/2011/305/20111101120000-CMC-L4_GHRSST-SSTfnd-CMC0.2deg-GLOB-v02.0-fv02.0.nc'
ds_sst =
ds_sst =
# -
#
# ## explore the in situ data and quickly plot using cartopy
#
# * first set up the axis with the projection you want: https://scitools.org.uk/cartopy/docs/latest/crs/projections.html
#
# * plot to that axis and tell the projection that your data is in
# * set a background image `ax.stock_img()`
# * draw coastlines `ax.coastlines(resolution='50m')`
# * add a colorbary and label it `cax = plt.colorbar(cs1)` `cax.set_label('SST (K)')`
# +
#for polar data, plot temperature
# South-polar stereographic map: the CMC L4 SST field (converted from
# Kelvin to Celsius) as the background, with the Saildrone track
# scattered on top using the same 0-12 colour scale.
ax = plt.axes(projection=ccrs.SouthPolarStereo())
(ds_sst.analysed_sst-273.15).plot(ax=ax,
                                  transform=ccrs.PlateCarree(),
                                  vmin=0,
                                  vmax=12)
cs1 = ax.scatter(ds_usv.lon, ds_usv.lat,
                 transform=ccrs.PlateCarree(),
                 s=10.0,
                 c=ds_usv.TEMP_CTD_MEAN,
                 edgecolor='none',
                 cmap='jet',
                 vmin=0,vmax=12)
# Limit the view to the Southern Ocean (south of 45S).
ax.set_extent([-180, 180, -90, -45], crs=ccrs.PlateCarree())
# -
# # Exercise!
# +
#now you try to plot plot salinity ds_usv.SAL_MEAN
# -
# # Let's plot some data off of California
# * `.rename({'longitude':'lon','latitude':'lat'})`
# +
#use the first URL if you are offline
#url = '../data/saildrone-gen_4-baja_2018-sd1002-20180411T180000-20180611T055959-1_minutes-v1.nc'
url = 'https://podaac-opendap.jpl.nasa.gov/opendap/hyrax/allData/insitu/L2/saildrone/Baja/saildrone-gen_4-baja_2018-sd1002-20180411T180000-20180611T055959-1_minutes-v1.nc'
ds_usv =
# -
# # Exercise!
# * for NON polar ds_usv data, use `ccrs.PlateCarree()` as your projection
# +
#for polar data, plot temperature
# -
# now add an extent to your figure
# Zoom the map to the USV track with a 2-degree margin on each side.
lonmin,lonmax = ds_usv.lon.min().data-2,ds_usv.lon.max().data+2
latmin,latmax = ds_usv.lat.min().data-2,ds_usv.lat.max().data+2
ax.set_extent([lonmin,lonmax,latmin,latmax], crs=ccrs.PlateCarree())
| notebooks/.ipynb_checkpoints/Intro_07_Xarray_and_plotting_with_cartopy-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %matplotlib inline
from discrete_shocklets import shocklets, kernel_functions
import matplotlib.pyplot as plt
import numpy as np
# -
# # Discrete Shocklet (cusplet?) Transform
#
# This provides a few examples of the discrete shocklet (affectionately referred to as a cusplet by some...) transform---or DST---in practice. We will show how the group action of $R_4$ on the kernel function affects the output of the transform and demonstrate the automated extraction of anomalous dynamics using the post-processing algorithms. You can find theoretical information about this collection of algorithms---and some applications---in the original paper: http://compstorylab.org/share/papers/dewhurst2019a/.
#
# First we will show what a cusplet transform actually looks like. We will use a pretty boring time series for this: just a random walk, $x_n - x_{n-1} = u_n$, where $u_n \sim \mathcal{U}(\{-1, 1\})$.
# Fix the RNG seed so the example walk is reproducible.
np.random.seed(50)
# Symmetric +/-1 increments -> a simple random walk of length 5000.
noise = np.random.choice([-1, 1], size=5000, replace=True)
x = np.cumsum(noise)
plt.plot(x, 'darkgray')
plt.xlabel('$n$')
plt.ylabel('$x_n$')
plt.xlim(0, 5000 - 1);
# At its core, the DST is just simple cross-correlation of a kernel function $\mathcal{K}$ with a signal such as $x_n$. A windowing parameter $W$ controls up- and down-sampling time so that anomalous dynamics can be found at all relevant timescales.
#
# Let's see what the immediate output of the DST looks like. We will use a kernel function that looks like $\mathcal{K}(n) \sim |n - n_0|^{-\theta}$.
# +
windows = np.linspace(10, 1000, 100) # 100 windows, equally spaced from width 10 to 1000
kernel = kernel_functions.power_cusp # a symmetric power-law type cusp
k_args = [3.] # arguments for the kernel; in this case, it's the parameter $\theta = 3$.
reflection = 2 # reflect the kernel over the horizontal axis
# Run the discrete shocklet transform of x against the reflected
# power-cusp kernel at every window width.
dst, largest_kernel = shocklets.cusplet(
    x,
    windows,
    kernel_func=kernel,
    kernel_args=k_args,
    reflection=reflection
)
# Plot the raw series above a heat map of the transform
# (rows = window widths, columns = time index).
fig, axes = plt.subplots(2, 1, figsize=(6, 6))
ax0, ax1 = axes
ax0.plot(x, 'darkgray')
ax0.set_xlim(0, len(x))
im = ax1.imshow(
    dst,
    aspect='auto',
    cmap=plt.cm.magma,
)
ax0.set_title('Figure 1', fontsize=15);
plt.tight_layout()
# -
# Lighter colors indicate large positive values while darker colors indicate large negative values. We see that the immediate output of the DST captures pieces of the time series that sort of "look" like the kernel, which in this case is an upside-down spike-y kind of thing (`shocklets.power_cusp` reflected over the horizontal axis).
# From Figure 1 we can also see that the DST captures this behavior over all timescales. There are similar spikes in DST intensity for the small-ish peak near $n = 250$ as for the much larger ones that peaks near $n = 2000$ and $n = 3500$.
#
# ### Thresholding procedures
#
# Now we can post-process. Let's find out in what actual windows of time $x_n$
# had this sort of cusp-y behavior.
# +
# extrema are the estimated extrema of the hypothesized underlying dynamics
# indicator is the cusplet indicator function, plotted below in a blue curve
# gepoints are indices of the cusplet indicator function where it exceeds geval
# b is a sensitivity parameter; higher b is less sensitive.
extrema, indicator, gepoints = shocklets.classify_cusps( dst, b=0.5, geval=0.5 )
# now we can get and plot contiguous windows of cusp-y behavior
# these are created from the gepoints
windows = shocklets.make_components(gepoints)
# Two panels: the series with detected windows shaded red (top) and the
# cusp indicator function with its thresholded maxima (bottom).
fig, axes = plt.subplots(2, 1, figsize=(10, 6))
ax, ax2 = axes
ax.plot(x, 'darkgray')
ax.vlines(extrema, min(x), max(x))
ax.set_xticks([])
ax.set_xticklabels([])
ax.set_xlim(0, len(x))
# annotate the windows
for window in windows:
    ax.vlines(window, min(x), max(x), alpha=0.01, color='r')
ax2.plot(indicator, 'b-')
ax2.vlines(extrema, min(indicator), max(indicator))
ax2.set_xlim(0, len(x))
ax.set_ylabel('$x_n$')
ax2.set_xlabel('$n$')
ax2.set_ylabel('$C(n)$')
ax.set_title('Figure 2', fontsize=15)
# -
# We'll work from the bottom up.
# The blue curve in the bottom panel displays the cusp indicator function,
# which (usually, unless you specified any custom weightings of the discrete
# shocklet transform) is roughly equivalent to `shocklets.zero_norm( np.sum(cc, axis=0) )`.
# The vertical bars denote relative maxima $C^*$ of the cusp indicator function
# that satisfy the additional condition
# $$
# C^* \geq \mu_C + b \sigma_C,
# $$
# where $\mu_C$ and $\sigma_C$ are the mean and standard deviation of the cusp
# indicator function and $b$ is a tunable parameter that adjusts the sensitivity
# of the thresholding.
# The top window again displays the $C^*$ in vertical bars and windows where the
# cusp indicator function exceeds some other threshold $b'$.
| example/example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Getting used to numpy: a mini review + some extra tips/tools
#
# For the first ~20 minutes of class, we'll be covering nump - you've already been introduced to numpy arrays, so this will be a refresher.
#
# In the window below, import numpy as np
# + slideshow={"slide_type": "slide"}
import numpy as np
# -
# Create a 1D numpy array "my_numbers" containing the numbers 1 through 10 in order. What different ways are there to do this?
# + slideshow={"slide_type": "slide"}
my_numbers = np.
# -
# Now, using numpy's reshape( ) method, reshape my_numbers into a 2 x 5 array
#There are also multiple ways to do this!
my_numbers =
# Using your new, reshaped my_numbers array, use array indexing to print out the second value of the second element (the number 8)
print()
# Using slicing techniques, print every alternating value from my_numbers's first element
print()
# Another convenient way of pulling values from a numpy array is numpy's where() function. If we wanted to pull __the indices__ of all values in my_numbers greater than 6, we could say: <code> np.where(my_numbers > 6) </code>
#go ahead and run this cell to see what the output looks like
np.where(my_numbers > 6)
# This output is actually two arrays containing an __index__ of matching rows and columns! e.g. <code>my_numbers[1,1]</code> = 7, <code>my_numbers[1,2]</code> = 8, and so on.
# <br> <br>
# How would we create a new array called new_array that contains the actual values? Give it a try now
# <br><br>
#
new_array = #fill in answer here
print(new_array)
# It's important to note that the __where__ function has a lot more capability than just returning an index- it actually can manipulate elements using the same logic as <code> [xv if c else yv
# for c, xv, yv in zip(condition, x, y)] </code> when passed <code> np.where(condition, [x,y])</code>
#
# See example below:
# +
#go ahead and run this cell to see output
example_arr = np.arange(1,20,1)
print('Before using np.where(): ','\n',example_arr)
# Vectorized conditional: keep values < 10 unchanged, subtract 10 from the rest.
example_arr = np.where(example_arr < 10, example_arr, example_arr-10)
print('After using np.where(), all indices where example_arr < 10 = False are subject to example_arr-10: ',
      '\n',example_arr)
# -
# Finally, while it was touched on last lecture, we'd like to make the point that one can also easily index arrays by creating boolean/mask arrays using conditional statements. In this case, the functionality would look like: <br> <br>
# <code> new_array = old_array[old_array <font color = red>CONDITIONAL STATEMENT</font>]</code>
#
# In the code below, we've creating an array __a__ that's equal the the numbers between 1 and 20. Use a conditional statement to create a mask array with index values, and reassign a to only equaling its values that are greater than or equal to 12.
a = np.arange(1,21,1)
a = #fill in answer here
print(a)
# # Random number generation in numpy
#
# Numpy comes with a "random" module (np.random) that contains a number of functions for producing random numbers. Some examples include:
# <br> <br>
# <code>np.random.random(tuple indicating output dimensions, e.g. (2,3) )</code> -> outputs random values from continuous distribuion over 0 to 1 in n-dimensions (specified by input tuple)
# <br><br>
# <code>np.random.randn(dimension1, dimension2, dimension 3...)</code> -> samples drawn from the standard normal distribution (mean 0, variance 1) in the specified dimensions
# <br><br>
# <code>np.random.randint(lower bound, higher bound, size)</code> -> array of the given size containing random integers drawn from [lower bound, higher bound)
# <br> <br><br>
# Check out further numpy rng features here:
#
# https://docs.scipy.org/doc/numpy-1.15.1/reference/routines.random.html
#
#
#
#
#
# Using <code>np.random.random</code>, create two arrays of random numbers from a continuous distribution: array a (100x5), and array b (5x10)
#hint: don't forget that np.random.random takes in a tuple!
a =
b =
# Next, using the __shape__ attribute of numpy arrays, print the shape of a and the shape of b.
# <br><br>
#
#hint: attributes, unlike methods, don't require the use of parentheses!
print()
print()
# # numpy arrays: operators and a few more useful attributes
#
#
# Rather than having a "len" like lists, numpy arrays have two attributes than can help users keep track of dimensions/elements: <code>size</code> and <code>shape</code>. <code> size </code> refers to the __number of elements__ within an array, while <code> shape </code> returns an array's dimensions.
# <br> <br>
# Below, print the size and shape of a and b. What do you get?
print() #a size
print() #a shape
print() #b size
print() #b shape
# In the last lecture, Jacob covered some operators (+, -, etc.) that can be used on numpy arrays. We just wanted to make the point that matrix multiplication can be quickly accomplished using the <code> @ </code>. Below, create a matrix c that is the product of a and b and print the shapes of a, b, and c to demonstrate matrix multiplication.
print() #a shape
print() #b shape
c =
print()#c shape
# Arrays also have lots of useful methods associated with them that perform various functions, including:
# <br><br>
# <code> np.min()</code> <- returns minimum value of array<br> <br>
# <code> np.max()</code> <-returns max value of array<br> <br>
# <code> np.mean()</code> <-returns mean value of array<br> <br>
# <code> ndarray.astype()</code> <- casts all data types contained in the array to the specified data type, e.g. <code>.astype(int)</code>
#
# For a complete list of attributes and methods associated with numpy arrays, check out this documentation:
# <br> <br>
# https://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html#array-methods
# <br> <br>
# Using this information, find the minimum value of a.
#
print()
# ## A note on axes in numpy arrays:
#
# We've just told you about some of the neat methods you can use with numpy arrays; however, we haven't covered situations in which you might want to find the max or min of a __single column__ or __row__ of an array (we refer to these as __axes__).
#
# In this case, you'll want to specify *within* the method the axis that you would like to operate on. Let's take our 100x5 array "a" as an example. Let's say that we actually want to take the mean across the __first value__ of each "column", and return a vector of length 5 containing each of those values. In this case, we would need to specify axis = 0 *as input to the function*, like so:
# <br><br>
# <code> np.mean(<font color=red>ARRAY</font>,axis=0)</code>
# <br><br> Conversely, specifying "axis = 1" would return a vector of length 100 with the mean value of each "row".
# Below, create an array "a_mean" that's equal to the mean of array "a" across axis 0.
a_mean =
print(a_mean)
# # Huzzah! You've made it through part 1 of the lecture! We're going to go through some slides for a bit, and then you'll be prompted to import __pandas__. Go ahead and pick up here whenever that happens :)
# <br><br><br><br><br>
# In the cell below, import <code> pandas </code> as <code> pd</code>
import pandas as pd
# Below, find the list "names_list", consisting of five random names.
names_list = ['Aragorn', 'Legolas', 'Gimli', 'Galadriel', 'Eowyn']
# Use panda's Series() method, with names_list as input, to create a series assigned to variable "names_series" Print output.
names_series =
print()
# Looking at our printed series, it's pretty clear that this no longer looks like a list. There are values all along the left side!! This is the series __index__. Because we didn't specify anything in particular, pandas automatically created an index for us beginning with 0 and continuing through the length of the list we passed it; however, we have control over the index! We can specify values *within* the input we provide the Series() function, like this:
# <br><br>
# <code> names_series = pd.Series(names_list, index = [5,6,7,8,9])</code>
# <br><br> __OR__ If we create a series from a __dictionary__, the index will automatically contain key values from the dict.
# <br><br><br>
# So how do we make a DataFrame? There are actually myriad ways!! This is __one of many__ - and not the most efficient!
# <br> Create a dictionary lotr_data, with the key 'Beings'assigned a list containing the strings 'human','elf','dwarf','elf', and 'human', and the key 'Age', assigned a list containing the values 87, 2931, 139, 7000, and 24
lotr_data =
# Now, create a dataframe lotr_df by passing lotr_data into the pandas function DataFrame()
# Example solution: build the dictionary, construct the DataFrame, then print it.
lotr_dc = {
    'Beings': ['human', 'elf', 'dwarf', 'elf', 'human'],
    # 7000, not 700 — the exercise text above specifies ages 87, 2931, 139, 7000, 24.
    'Age': [87, 2931, 139, 7000, 24]
}
# Fixed: the constructor is pd.DataFrame (was the typo pd.Dataframe), and the
# frame must be created *before* it is printed (the original printed an
# undefined lotr_df first).
lotr_df = pd.DataFrame(lotr_dc)
print(lotr_df)
# We've been using Python's built-in print function throughout this class to look at data - but DataFrames are unique in that they are actually nicer to look at as output! Try using the .head() method associated with DataFrames - head() will normally return the first five rows of a DataFrame, but can take any number as input. In this case, just show the first 3 rows:
# Fixed: the constructor is pd.DataFrame (capital F), not pd.Dataframe,
# which raises AttributeError.
lotr_df = pd.DataFrame(lotr_data)
print(lotr_df)
# Let's say we want to view just the 'Beings' column of our DataFrame - this can be accomplished via the simple command <code> DataFrame[<font color=red>'COLUMN NAME'<font>]</code>. Try looking at just Beings below.
# Interestingly, the columns of a DataFrame are actually also __attributes__, meaning they can be accessed using <code> DataFrame.<font color = red>COLUMNS</font></code> notation. Try pulling out the Age column this way below.
# Egads! It looks like we've forgotten to insert our character names from earlier into our DataFrame! Thankfully, using the same indexing that we see above, this is quite easy to accomplish in a DataFrame - simply create a *new* column with the command <code>DataFrame['NEW COLUMN NAME'] = ________ </code> Below, insert your names_series Series into lotr_df in a column called 'Character Name', and take a look at your updated DataFrame
# This is all well and good, but let's say we get our hands on a *slightly* more complete dataset, and want to import it. pandas has built in functions to read/import __many__ different data types, including (but not limited to) numpy arrays, .xlsx, and .csv files.
# <br><br> Use the pandas read_csv function, which will take a csv file at a given directory and import it into a DataFrame, to import the lotr_char_age.csv file that you should have put into the same folder as this notebook at the beginning of class. Re-assign your lotr_df DataFrame to the output of this function.
lotr_df =
# What a lovely DataFrame we've created! It doesn't have a *lot* more information, but it should be enough for us to quickly go over how to pull the data your want out of your DataFrame.
# <br><br>
# We've already learned that it's easy to pull out all of the contents of a column - but this technique can also be used with __conditionals__. First, try seeing what happens if you run a conditional asking for the Age column where age < 1000?
# We've seen booleans before, and covered how to use them in numpy arrays earlier in this lecture! Based on what we learned then, and knowing how to pull column data from DataFrames, can you think of a method to return all values of lotr_df that are associated with an age <1000?
# Great!
# <br><br>
# We can even slice columns once we've pulled them from our DataFrame. Let's say we only want to see the first 3 entries from what we pulled out in the last window. We'd use the notation <code>DataFrame['COLUMN NAME'][INDEX OR SLICE]</code>. Give it a try now!
# Everything we've done so far has been focused on pulling data out from DataFrame based on its column. But, when we introduced DataFrames, we also made a point that their indexing is valuable! How can we pull out rows based on index?
# <br> <br>
# This is where DataFrames differ significantly from arrays - in order to access data in particular rows, DataFrames require users to use .iloc[] and .loc[] methods.
# <br><br> iloc, or index-based selection, treats your DataFrame like a giant matrix, and pulls out data based on its location (corresponding to the value you provided as input). loc, on the other hand, takes into account the *labels of the data* in a DataFrame. In the case of lotr_df, index values are the same as their numerical location; however, you might see how we need to use loc if we instead want to specify <code>'Beings' == 'hobbit'</code>.
# <br> <br>
# Let's try iloc first - works quite similarly to how you've gotten used to indexing/slicing numpy arrays, except that it takes in inputs in the order of __rows, columns__ - e.g., <code>DataFrame.iloc[2:4, 0]</code> will return values from the 0th column, rows 2 to 4.
# <br> <br>
# Below, pull out the 2nd column, rows 1, 3, 5, and 7 of lotr_df
# Great! Let's move on to loc - in this case, we can start to use labels and conditionals! For example, what happens if we specify that we want only rows of lotr_df where <code>'Beings' == 'elf'</code> __and__ <code> age < 4000 </code>? We can use the "&" operator!!
#This is an example - feel free to run the code to see output
lotr_df.loc[(lotr_df['Beings']=='elf') & (lotr_df['Age'] < 4000)]
# Your turn! Pull out all hobbits under the age of 50 from lotr_df __and__ assign these values to a new DataFrame called young_hobbits
young_hobbits =
# As you can see, if we had a __very__ large data set, this would be an invaluable tool for segmenting our data. Next, we'll be covering one or two more powerful tools associated with DataFrames before moving onto a brief overview of plotting with seaborn and wrapping up!
# <br><br>
# But first, oh no! The Tolkien fan who made our dataset didn't know that it can be hard to work with spaces in code - to make things easier for future users, let's see if we can rename our 'Character Name' column to 'Character_Name'. This can easily be accomplished using the rename method, which takes in dict-like input that can be specified to __either__ columns or the index. In this case, the format would look like:
# <br><br> <code> DataFrame.rename( {<br> <font color=red> 'EXISTING COLUMN'</font>: <font color=blue> 'NEW NAME'</font>, etc. <br> }, axis = 'columns')
#
lotr_df =
# Now that we've got our column names in order, let's take a look at one of the most powerful tools we have with DataFrames : the groupby() function.
# <br><br>
# Simply put, <code>groupby(['CATEGORY'])</code> is a method of grouping categories, and applying a function to each group. Let's just test it out to see what's going on! Create a DataFrame called "being_stats" that is lotr_df grouped by beings, and run the "count()" method on it:
#Feel free to just run the code below to see output
being_stats = lotr_df.groupby(['Beings'])
being_stats.count()
# What the above output is telling us is that each label under "Beings" is a unique value in the lotr_df['Beings'] column. For elves, there are 5 entered character names, and 5 entered ages.
# <br><br> But how strange! It looks like we're seeing...two dwarfs (dwarves, I know...)?? Can anybody think of a reason for this?
# We can take a closer look by using the unique() function - run <code> lotr_df['Beings'].unique()</code>. What do you see?
# This is all part of cleaning data, folks! People make mistakes!
# <br><br> In this case, we want to go in and replace the error, 'dwarf ', with 'dwarf'. To do this, let's use the replace() function associated with DataFrames. replace() can take in dict-like formats just like rename() - it will look like this:<br><br>
# <code> DataFrame.replace({<font color = red>'COLUMN NAME' : VALUE IN COLUMN </font>},
# <font color = blue>VALUE TO BE INSERTED</font> )
#
# <br><br>
#
# Try replacing 'dwarf ' with the correct value below - can you check just the dwarf entries using .loc? :
#
lotr_df =
# Great work!!! Finally, let's return to groupby(). Now that we know that we've corrected the error in the DataFrame, see if you can use groupby() and the .mean() functions to pull out the mean age for each type of being.
#fill out what's missing!
lotr_df.groupby
# ## Let's give seaborn a shot
# below, import seaborn as sns
import seaborn as sns
# Simple boxplot code!!
#go ahead and run this window, just to see what happens
sns.boxplot(x='Beings',y='Age',data=lotr_df)
# I promised beauty...but there are a lot of things happening in that plot that aren't great. But based on how easy it is to change things around in seaborn...in the cell below, change data to equal lotr_df __without__ elves (don't forget loc combined with conditional statements!)
sns.boxplot(x='Beings',y='Age',data=lotr_df.loc[lotr_df['Beings']!='elf'])
# Okay, looking better! A few more improvements...
# <br>let's use seaborn's set() function to set the palette to something calming, like 'Greens'...
# <br>and the background to something weird, like 'whitegrid'...
# <br> And let's go ahead and use violinplot, not boxplot, just to be fancy...
# <br> And while we're at it, let's set our plot to an object "g" so that we can title it easily.
# +
# Style the plots with a green palette on a white grid, then draw a violin
# plot of Age by Beings — excluding elves, whose ages dwarf everyone else's.
sns.set(palette = 'Greens',style='whitegrid')
g = sns.violinplot(x='Beings',y='Age',data=lotr_df.loc[lotr_df['Beings']!='elf'])
# Keep a handle on the Axes so we can set the title and axis label.
g.set_title('Relative Longevity in Middle-Earth Subpopulations',fontsize=14)
g.set_xlabel('Subpopulation')# <- decided 'Beings' looked silly, so replaced with 'Subpopulation'
# -
| Homeworks/HW3/Notebook_Class3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.11 64-bit (''tf1'': conda)'
# name: python3
# ---
import os
import numpy as np
from skimage.draw import polygon
import cv2
import json
from shutil import copyfile
from pathlib import Path
from skimage.measure import find_contours
import glob
import base64
import skimage
def getFileNameInfor(filepath):
    """Split a file path into its pieces.

    Returns a tuple (directory, file name, stem):
      directory - absolute directory containing the file
      file name - base name including the extension
      stem      - base name with the last extension removed

    Fixed: the name was previously extracted with filepath.split('/'),
    which breaks on Windows-style separators; os.path handles both.
    """
    file_name = os.path.basename(filepath)
    file_base = os.path.splitext(file_name)[0]
    fdir = os.path.dirname(os.path.abspath(filepath))
    return (fdir, file_name, file_base)
def boundary2Mask(xs, ys, img):
    """Rasterise a polygon into a float mask the size of `img` (foreground
    pixels set to 255, background 0).

    xs, ys : polygon vertex coordinates, forwarded verbatim as the (r, c)
        arguments of skimage.draw.polygon.
    img : array whose first two dimensions give the mask shape.
    """
    canvas = np.zeros(img.shape[0:2])
    rows, cols = polygon(xs, ys, img.shape)
    canvas[rows, cols] = 255
    return canvas
# +
def mask2Boundary(mask_file, sample_ratio=5):
    """Read a mask image from disk and return its iso-value-200 contours as
    lists of [row, col] points, keeping only every `sample_ratio`-th vertex
    of each contour to thin out the polygons.
    """
    gray = cv2.imread(mask_file)[:, :, 0]
    return [
        [list(pt) for idx, pt in enumerate(contour) if idx % sample_ratio == 0]
        for contour in find_contours(gray, 200)
    ]
#masks = '/home/hqyone/mnt/2tb/github/MaskRCNN_TF1/samples/hpv_models/test/data/proj_outdir/1456/masks/0_yin.jpg'
#print(mask2Boundary(masks))
# +
def masks2json(mask_file_ls, img_file, out_dir):
    """Combine per-class binary mask images into one labelme-style JSON.

    Parameters
    ----------
    mask_file_ls : list of mask image paths; the class label is recovered
        from each file stem after the last underscore ("0_yin.jpg" -> "yin").
    img_file : path of the source image the masks belong to.
    out_dir : directory receiving "<image id>.json" and a "<image id>.png"
        copy of the source image.
    """
    img_dir, img_name, img_id = getFileNameInfor(img_file)
    # Fixed: the old code built a json path from the undefined name `image_id`
    # (NameError at runtime) and never used the result; the output path is
    # produced once at the bottom instead.
    image = cv2.imread(img_file)
    imageHeight = image.shape[0]
    imageWidth = image.shape[1]
    with open(img_file, 'rb') as f:  # embed the source image as base64, as labelme does
        imageData = base64.b64encode(f.read())
        imageData = str(imageData, encoding='utf-8')
    json_data = {
        'version': "4.5.6",
        "flags": {},
        'shapes': [],
        'imagePath': img_file,
        'imageData': imageData,
        'imageHeight': imageHeight,
        'imageWidth': imageWidth
    }
    for i, m in enumerate(mask_file_ls):
        m_dir, m_name, m_id = getFileNameInfor(m)
        className = m_id.split("_")[-1]
        # First (outer) contour only; mask2Boundary yields (row, col) points.
        # NOTE(review): labelme normally stores (x, y) = (col, row) — confirm
        # consumers expect this order before swapping.
        boundary = mask2Boundary(m)[0]
        obj = {
            'ID': i,
            'label': className,
            'points': [],
            'group_id': None,
            'shape_type': "polygon",
            'flags': {},
        }
        for point in boundary:
            obj["points"].append([float(point[0]), float(point[1])])
        json_data['shapes'].append(obj)
    json_str = json.dumps(json_data, indent=4)
    with open('{}/{}.json'.format(out_dir, img_id), 'w') as json_file:
        json_file.write(json_str)
    copyfile(img_file, '{}/{}.png'.format(out_dir, img_id))
# test
# img_file = "../../samples/hpv_models/test/data/proj_outdir/1456/1456.png"
# img_dir, img_name, img_id = getFileNameInfor(img_file)
# mask_dir= "{}/masks/".format(img_dir)
# mask_file_ls = glob.glob('{}/*.jpg'.format(mask_dir))
# out_dir = "/home/hqyone/mnt/2tb/github/MaskRCNN_TF1/samples/hpv_models/test/data/out_json"
# masks2json(mask_file_ls, img_file, out_dir)
# +
import sys
ROOT_DIR = os.path.abspath("../../../") # 指定根目录
sys.path.append(ROOT_DIR) # 查找库的本地版本
from mrcnn import utils
def get_iou(roi1, roi2):
    """Intersection over Union of two axis-aligned boxes.

    Each roi is a sequence (y1, x1, y2, x2) with (y1, x1) the top-left and
    (y2, x2) the bottom-right corner; both corners must be strictly ordered.

    Returns a float in [0, 1].
    """
    y1a, x1a, y2a, x2a = roi1[0], roi1[1], roi1[2], roi1[3]
    y1b, x1b, y2b, x2b = roi2[0], roi2[1], roi2[2], roi2[3]
    assert x1a < x2a
    assert y1a < y2a
    assert x1b < x2b
    assert y1b < y2b

    # Corners of the intersection rectangle (if it exists).
    left = max(x1a, x1b)
    top = max(y1a, y1b)
    right = min(x2a, x2b)
    bottom = min(y2a, y2b)
    if right < left or bottom < top:
        return 0.0

    # The intersection of two axis-aligned boxes is itself axis-aligned.
    inter_area = (right - left) * (bottom - top)
    area_a = (x2a - x1a) * (y2a - y1a)
    area_b = (x2b - x1b) * (y2b - y1b)

    # IoU = intersection / (union) where union = areaA + areaB - intersection.
    iou = inter_area / float(area_a + area_b - inter_area)
    assert iou >= 0.0
    assert iou <= 1.0
    return iou
def removeDuplicateObject(r, min_ratio=0.7):
    """Drop overlapping detections from a Mask R-CNN result dict `r`
    (keys 'class_ids', 'scores', 'masks', 'rois'): for every pair of boxes
    whose IoU exceeds 0.8, the detection with the *larger* class id is kept.

    Returns a dict with surviving 'class_ids' (list of int), 'scores' and
    'masks' (re-stacked along the last axis); 'rois' are not returned.

    NOTE(review): `min_ratio` is unused — the mask-overlap test it was meant
    for is commented out and a hard-coded box-IoU threshold of 0.8 is used
    instead.  Also, the outer loop stops at len-1, so the *last* detection is
    never appended to the output even when it is not a duplicate — confirm
    whether that is intended.
    """
    classid_ls = list(map(int, r['class_ids']))
    scores = r["scores"]
    masks = r["masks"]
    rois =r['rois']
    new_classid_ls = []
    new_scores = []
    new_masks = None
    removed_indexes = []
    for i in range(len(classid_ls)-1):
        # Compare detection i against every later detection k.
        for k in range(i+1, len(classid_ls)):
            if k in removed_indexes:
                continue
            # Per-detection masks reshaped to (H, W, 1) for the (disabled)
            # mask-overlap comparison below.
            mask_a = np.reshape(masks[:,:,i],(masks.shape[0],masks.shape[1],1))
            roi_a = rois[i]
            class_a = classid_ls[i]
            mask_b = np.reshape(masks[:,:,k],(masks.shape[0],masks.shape[1],1))
            roi_b = rois[k]
            class_b = classid_ls[k]
            #print(utils.compute_overlaps_masks(mask_a, mask_b))
            #print('as')
            if get_iou(roi_a,roi_b)>0.8:
                #if utils.compute_overlaps_masks(mask_a, mask_b)[0]>min_ratio:
                # Duplicate pair: discard the one with the smaller class id.
                if class_a<class_b:
                    removed_indexes.append(k)
                else:
                    removed_indexes.append(i)
                continue
        # Keep detection i unless some comparison above marked it removed.
        if i in removed_indexes:
            continue
        else:
            new_classid_ls.append(classid_ls[i])
            new_scores.append(scores[i])
            # Masks of the survivors are accumulated into an (H, W, n) stack.
            if new_masks is None:
                new_masks=np.reshape(masks[:,:, i],(masks.shape[0],masks.shape[1],1))
            else:
                mask=np.reshape(masks[:,:, i],(masks.shape[0],masks.shape[1],1))
                new_masks=np.concatenate([new_masks, mask], axis=-1)
    return {'class_ids':new_classid_ls, "scores":new_scores, "masks":new_masks}
# -
def predictToJson(image_path, model, out_dir):
    """Run Mask R-CNN `model` on the image at `image_path`, de-duplicate the
    detections, and write a labelme-style JSON (plus a copy of the image)
    into `out_dir`.
    """
    # Fixed: the original referenced the undefined name `img_file` here and
    # in the copyfile() call at the bottom (NameError at runtime).
    img_dir, img_name, img_id = getFileNameInfor(image_path)
    image = skimage.io.imread(image_path)
    imageHeight = image.shape[0]
    imageWidth = image.shape[1]
    rr = model.detect([image], verbose=1)[0]
    r = removeDuplicateObject(rr, 0.6)
    with open(image_path, 'rb') as f:  # embed the image as base64, as labelme does
        imageData = base64.b64encode(f.read())
        imageData = str(imageData, encoding='utf-8')
    json_data = {
        'version': "4.5.6",
        "flags": {},
        'shapes': [],
        'imagePath': image_path,  # fixed: previously stored the directory
        'imageData': imageData,
        'imageHeight': imageHeight,
        'imageWidth': imageWidth
    }
    className = ['BG', 'yin', 'yin-yang', 'yang']  # numeric class id -> label
    for i in range(0, len(r['class_ids'])):
        class_id = int(r['class_ids'][i])
        # Fixed: int() truncated every sub-1.0 confidence to 0.
        # (Currently unused, but kept for parity with the original.)
        score = float(r["scores"][i])
        mask = r["masks"][:, :, i]
        # Fixed: mask2Boundary() expects a file *path*; extract the contour of
        # the in-memory mask directly, sampling every 5th point as elsewhere,
        # and use only the first (outer) contour like masks2json() does.
        contours = find_contours(mask.astype(np.uint8) * 255, 200)
        if not contours:
            continue  # degenerate mask with no contour at this level
        boundary = [list(p) for k, p in enumerate(contours[0]) if k % 5 == 0]
        obj = {
            'ID': i,
            'label': className[class_id],
            'points': [],
            'group_id': None,
            'shape_type': "polygon",
            'flags': {},
        }
        for point in boundary:
            obj["points"].append([float(point[0]), float(point[1])])
        json_data['shapes'].append(obj)
    json_str = json.dumps(json_data, indent=4)
    with open('{}/{}.json'.format(out_dir, img_id), 'w') as json_file:
        json_file.write(json_str)
    copyfile(image_path, '{}/{}.png'.format(out_dir, img_id))
# +
from skimage import data
a = data.camera()
b = data.camera()*2
# Fixed: np.concatenate along axis=-1 of two 2-D images produces a 2-D
# (512, 1024) array, so masks[:, :, 0] raised IndexError.  np.stack creates
# the intended new trailing axis, giving a (512, 512, 2) stack.
masks = np.stack([a, b], axis=-1)
print(masks[:,:,0])
k=np.array([1,2])
p=np.array([3,4])
#np.append(k, p)
#a[:,:,1]
# -
#
# +
def json2masks(json_file, img_file, out_dir):
    """Expand a labelme-style JSON annotation into one binary mask image per
    shape, written to "<out_dir>/<id>/masks/<i>_<label>.png", alongside a
    copy of the source image.  Spaces and parentheses in the image id are
    sanitised so the id is filesystem-friendly.
    """
    img_dir, img_name, image_id = getFileNameInfor(img_file)
    img = cv2.imread(img_file)
    with open(json_file) as JSON:
        data = json.load(JSON)
        image_name = data['imagePath']
        image_height = data['imageHeight']
        image_width = data['imageWidth']
        shapes = data["shapes"]
        if len(shapes) > 0:
            mod_image_id = image_id.replace(" ", "").replace("(", "_").replace(")", "")
            os.makedirs('{}/{}'.format(out_dir, mod_image_id), exist_ok=True)
            os.makedirs('{}/{}/masks'.format(out_dir, mod_image_id), exist_ok=True)
            copyfile(img_file, '{}/{}/{}.png'.format(out_dir, mod_image_id, mod_image_id))
        for i, s in enumerate(shapes):
            label = s['label']
            # Points are stored (x, y); the first coordinate goes to `ys` and
            # the second to `xs`, mirroring the original behaviour.
            ys = [p[0] for p in s['points']]
            xs = [p[1] for p in s['points']]
            if len(xs) > 0 and len(ys) > 0:
                mask = boundary2Mask(xs, ys, img)
                cv2.imwrite("{}/{}/masks/{}_{}.png".format(out_dir, mod_image_id,i,label), mask)
# +
def json2cell_infor(json_file):
    """Tally the number of labelme shapes per label in `json_file`.

    Returns a dict mapping label -> count.  Raises KeyError if the file
    lacks the 'imagePath' or 'shapes' keys (matching the original behaviour).
    """
    with open(json_file) as JSON:
        data = json.load(JSON)
        image_name = data['imagePath']  # accessed (though unused) so a missing key still raises
        cell_dic = {}
        for s in data["shapes"]:
            lbl = s['label']
            cell_dic[lbl] = cell_dic.get(lbl, 0) + 1
        return cell_dic
# +
test_img = '/home/hqyone/mnt/2tb/github/MaskRCNN_TF1/samples/hpv_models/test/data/datasource/1456.png'
image_id = '1456'
test_json = '/home/hqyone/mnt/2tb/github/MaskRCNN_TF1/samples/hpv_models/test/data/datasource/1459.json'
out_dir = '/home/hqyone/mnt/2tb/github/MaskRCNN_TF1/samples/hpv_models/data/stage1_train'
#
# json2masks(test_json,test_img, out_dir)
# print(json2cell_infor(test_json))
import os
# Root folder of labelled pictures: every .png that has a sibling labelme
# .json (same stem, same folder) gets its masks extracted into out_dir.
pictures_dir ="/home/hqyone/mnt/2tb/github/MaskRCNN_TF1/samples/hpv_models/data/500"
for folder, dirs, files in os.walk(pictures_dir):
    for file in files:
        if file.endswith('.png'):
            png_file=os.path.join(folder,file)
            # The annotation is expected next to the image with a .json suffix.
            json_file = os.path.join(folder,file.replace(".png",'.json'))
            if os.path.isfile(json_file):
                # NOTE(review): os.makedirs would also create missing parents.
                if not os.path.isdir(out_dir):
                    os.mkdir(out_dir)
                print(f'{png_file}:{json_file}')
                json2masks(json_file,png_file, out_dir)
# -
#
#
#
#
# Scratch checks of path manipulation for a per-image masks folder.
infor_path = "/home/hqyone/mnt/2tb/github/MaskRCNN_TF1/samples/hpv_models/data/stage1_train/xx_best_696_39_33/xx_best_696_39_33.png"
print(os.path.dirname(infor_path))
# Note: this prints two separate arguments — it does not join them into a path.
print(os.path.dirname(infor_path),"/masks")
print(os.path.join(os.path.dirname(infor_path),"masks"))
print(os.path.join(os.path.dirname(os.path.dirname(infor_path)),"masks"))
import torch
| samples/hpv_models/code/image_utilies.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: thesis-venv
# language: python
# name: thesis-venv
# ---
from ipfml import metrics, processing, utils
from ipfml.filters import noise
from PIL import Image
# Reference calibration image used for the noise experiments.
calibration_image_path = "images/calibration.png"
calib_img = Image.open(calibration_image_path)
# Display the clean image (notebook cell output).
calib_img
# Apply ipfml salt-and-pepper noise and display the result.
# NOTE(review): the meanings of (999, True, 0.1, 0.5) are not evident from
# this call site — confirm against the ipfml.filters.noise documentation.
Image.fromarray(noise.salt_pepper_noise(calib_img, 999, True, 0.1, 0.5))
| analysis/noise_analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Rashmiray211/Songsdata-1953-1983/blob/main/Songsdata%2C1953_1983.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="2Spj-0keGQuA"
# Data scraping consisting of 'Year','song_name','song_lyrics','Genre','composer','Lyricist','movie','actors','singers' from "geetmala" for 1983 and also for 1953-1983.
# + id="VJnJsro6JH4i"
import urllib.request
from bs4 import BeautifulSoup
import requests
import csv
# + [markdown] id="A0mnPkFtUL9W"
# # For songs of 1983.
# + colab={"base_uri": "https://localhost:8080/"} id="bpyRWq9PI4pm" outputId="bd0bb7ba-e654-45fe-a4f2-80fc75165064"
# Scrape the Binaca Geetmala countdown pages for 1983 and collect, per song:
# year, name, lyrics, genre, composer, lyricist, movie, actors, singers.
# NOTE(review): this cell is duplicated (minus the prints) by the 1953-1983
# cell below; the shared logic would be better factored into functions.
all_data=[]
for year in range(1983,1984):
    year_link=[]
    all_links=[]
    # Find the number of result pages for this year: when page i and page i+1
    # contain a different number of links, page i+1 is taken to be the last.
    for i in range(1,6):
        url1 = ('https://www.hindigeetmala.net/geetmala/binaca_geetmala_'+str(year)+'.php?page='+str(i))
        url2 = ('https://www.hindigeetmala.net/geetmala/binaca_geetmala_'+str(year)+'.php?page='+str(i+1))
        page1 = urllib.request.urlopen(url1)
        soup1 = BeautifulSoup(page1)
        href1 = [i1['href'] for i1 in soup1.find_all('a', href=True)]
        page2 = urllib.request.urlopen(url2)
        soup2 = BeautifulSoup(page2)
        href2 = [i2['href'] for i2 in soup2.find_all('a', href=True)]
        if len(href1)!=len(href2):
            s=i+1
    # Collect the de-duplicated song-page links from every chart page.
    for j in range(1,s+1):
        url=('https://www.hindigeetmala.net/geetmala/binaca_geetmala_'+str(year)+'.php?page='+str(j))
        year_link.append(url)
        theurl =url
        thepage = urllib.request.urlopen(theurl)
        soup = BeautifulSoup(thepage)
        all_href = [k['href'] for k in soup.find_all('a', href=True)]
        res = []
        songs_link=[]
        # Keep hrefs whose 3rd/4th characters are "on" — i.e. "/song/..." links.
        F="o"
        K = "n"
        for sub in all_href:
            if sub[2].lower() == F.lower():
                if sub[3].lower() == K.lower():
                    res.append('https://www.hindigeetmala.net'+sub)
        # De-duplicate while preserving order.
        [songs_link.append(x) for x in res if x not in songs_link]
        all_links.append(songs_link)
        print(songs_link)
    # Visit each song page and pull out the metadata fields.
    # NOTE(review): every .find() below re-downloads the page; fetching once
    # and reusing the soup would cut the request count several-fold.
    for m in range(0,s):
        for link in all_links[m]:
            data=[]
            data1=[]
            data.append(str(year))
            data1.append(str(year))
            data.append((BeautifulSoup(requests.get(link).content).find("h2")).get_text())
            data.append((BeautifulSoup(requests.get(link).content).find("div", attrs={"class":"song"})).get_text())
            try:
                data.append((BeautifulSoup(requests.get(link).content).find("span", attrs={"itemprop":"genre"})).get_text())
            except:
                data.append('Nan')
            data.append((BeautifulSoup(requests.get(link).content).find("span",attrs={"itemprop":"composer"})).get_text())
            data.append((BeautifulSoup(requests.get(link).content).find("span",attrs={"itemprop":"lyricist"})).get_text())
            data.append((BeautifulSoup(requests.get(link).content).find( itemprop='inAlbum').getText()))
            thepage1 = urllib.request.urlopen(theurl)
            theurl1 =link
            b=[]
            thepage1 = urllib.request.urlopen(theurl1)
            soup1 = BeautifulSoup(thepage1)
            # Collect links inside the w150 table cells (cast / singer lists).
            for data1 in soup1.find_all('td', class_='w150'):
                for a in data1.find_all('a'):
                    b.append(a.get('href'))
            res1 = []
            songs_link1=[]
            all_href1=b
            # "/actor/..." links (chars 2-3 are "ct") -> actor names.
            F="c"
            K = "t"
            for sub1 in all_href1:
                if sub1[2].lower() == F.lower():
                    if sub1[3].lower() == K.lower():
                        res1.append(sub1.replace('.php','').replace('/actor/',''))
            [songs_link1.append(x) for x in res1 if x not in songs_link1]
            print(res1)
            data.append(res1)
            theurl2 =link
            thepage2 = urllib.request.urlopen(theurl2)
            soup2 = BeautifulSoup(thepage2)
            c=[]
            for data2 in soup2.find_all('td', class_='w150'):
                for a in data2.find_all('a'):
                    c.append(a.get('href'))
            res2 = []
            songs_link2=[]
            all_href2=c
            res2 = []
            songs_link2=[]
            # "/singer/..." links (chars 2-3 are "in") -> singer names.
            F="i"
            K = "n"
            for sub2 in all_href2:
                if sub2[2].lower() == F.lower():
                    if sub2[3].lower() == K.lower():
                        res2.append(sub2.replace('.php','').replace('/singer/',''))
            data.append(res2)
            print(data)
            all_data.append(data)
# + [markdown] id="FrN_FA9XUaeH"
# # For songs of 1953-1983.
# + id="BVLv4GUUu4bm"
# Same scrape as the 1983 cell above (minus the progress prints), run for
# every chart year from 1953 through 1983.
# NOTE(review): near-verbatim duplicate of the previous cell — refactor into
# shared functions.
all_data=[]
for year in range(1953,1984):
    year_link=[]
    all_links=[]
    # Probe successive pages; a change in link count marks the last page.
    for i in range(1,6):
        url1 = ('https://www.hindigeetmala.net/geetmala/binaca_geetmala_'+str(year)+'.php?page='+str(i))
        url2 = ('https://www.hindigeetmala.net/geetmala/binaca_geetmala_'+str(year)+'.php?page='+str(i+1))
        page1 = urllib.request.urlopen(url1)
        soup1 = BeautifulSoup(page1)
        href1 = [i1['href'] for i1 in soup1.find_all('a', href=True)]
        page2 = urllib.request.urlopen(url2)
        soup2 = BeautifulSoup(page2)
        href2 = [i2['href'] for i2 in soup2.find_all('a', href=True)]
        if len(href1)!=len(href2):
            s=i+1
    # Collect de-duplicated "/song/..." links from every chart page.
    for j in range(1,s+1):
        url=('https://www.hindigeetmala.net/geetmala/binaca_geetmala_'+str(year)+'.php?page='+str(j))
        year_link.append(url)
        theurl =url
        thepage = urllib.request.urlopen(theurl)
        soup = BeautifulSoup(thepage)
        all_href = [k['href'] for k in soup.find_all('a', href=True)]
        res = []
        songs_link=[]
        # Keep hrefs whose 3rd/4th characters are "on" — i.e. "/song/..." links.
        F="o"
        K = "n"
        for sub in all_href:
            if sub[2].lower() == F.lower():
                if sub[3].lower() == K.lower():
                    res.append('https://www.hindigeetmala.net'+sub)
        [songs_link.append(x) for x in res if x not in songs_link]
        all_links.append(songs_link)
    # Visit each song page and extract the metadata fields.
    for m in range(0,s):
        for link in all_links[m]:
            data=[]
            data1=[]
            data.append(str(year))
            data1.append(str(year))
            data.append((BeautifulSoup(requests.get(link).content).find("h2")).get_text())
            data.append((BeautifulSoup(requests.get(link).content).find("div", attrs={"class":"song"})).get_text())
            try:
                data.append((BeautifulSoup(requests.get(link).content).find("span", attrs={"itemprop":"genre"})).get_text())
            except:
                data.append('Nan')
            data.append((BeautifulSoup(requests.get(link).content).find("span",attrs={"itemprop":"composer"})).get_text())
            data.append((BeautifulSoup(requests.get(link).content).find("span",attrs={"itemprop":"lyricist"})).get_text())
            data.append((BeautifulSoup(requests.get(link).content).find( itemprop='inAlbum').getText()))
            thepage1 = urllib.request.urlopen(theurl)
            theurl1 =link
            b=[]
            thepage1 = urllib.request.urlopen(theurl1)
            soup1 = BeautifulSoup(thepage1)
            # Links inside w150 table cells hold the cast / singer lists.
            for data1 in soup1.find_all('td', class_='w150'):
                for a in data1.find_all('a'):
                    b.append(a.get('href'))
            res1 = []
            songs_link1=[]
            all_href1=b
            # "/actor/..." links (chars 2-3 are "ct") -> actor names.
            F="c"
            K = "t"
            for sub1 in all_href1:
                if sub1[2].lower() == F.lower():
                    if sub1[3].lower() == K.lower():
                        res1.append(sub1.replace('.php','').replace('/actor/',''))
            [songs_link1.append(x) for x in res1 if x not in songs_link1]
            data.append(res1)
            theurl2 =link
            thepage2 = urllib.request.urlopen(theurl2)
            soup2 = BeautifulSoup(thepage2)
            c=[]
            for data2 in soup2.find_all('td', class_='w150'):
                for a in data2.find_all('a'):
                    c.append(a.get('href'))
            res2 = []
            songs_link2=[]
            all_href2=c
            res2 = []
            songs_link2=[]
            # "/singer/..." links (chars 2-3 are "in") -> singer names.
            F="i"
            K = "n"
            for sub2 in all_href2:
                if sub2[2].lower() == F.lower():
                    if sub2[3].lower() == K.lower():
                        res2.append(sub2.replace('.php','').replace('/singer/',''))
            data.append(res2)
            all_data.append(data)
# + id="1E730fd3i4oE"
fields = ['Year','song_name','song_lyrics','Genre','composer','Lyricist','movie','actors','singers']
# Fixed: newline='' is required when handing a file to csv.writer (otherwise
# blank rows appear on Windows, per the csv module docs), and an explicit
# UTF-8 encoding keeps Devanagari lyrics from failing under a non-UTF-8
# locale default.
with open('songs_lyrics.csv', 'w', newline='', encoding='utf-8') as song:
    write = csv.writer(song)
    write.writerow(fields)
    write.writerows(all_data)
# + [markdown] id="MHgTDndcCmty"
# Some other work
# + id="hPXdR1UcqfOd"
# Probe the movie index pages for the 1980s decade and collect the chart
# page URLs.  NOTE(review): `ll_data` is likely a typo for `all_data`, and
# the cell ends before the collected links are used — looks unfinished.
ll_data=[]
for year in range(1980,1990,10):
    year_link=[]
    all_links=[]
    # Probe successive pages; a change in link count marks the last page.
    for i in range(1,21):
        url1 = ('https://www.hindigeetmala.net/movie/'+str(year)+'s.php?page='+str(i))
        url2 = ('https://www.hindigeetmala.net/movie/'+str(year)+'s.php?page='+str(i+1))
        page1 = urllib.request.urlopen(url1)
        soup1 = BeautifulSoup(page1)
        href1 = [i1['href'] for i1 in soup1.find_all('a', href=True)]
        page2 = urllib.request.urlopen(url2)
        soup2 = BeautifulSoup(page2)
        href2 = [i2['href'] for i2 in soup2.find_all('a', href=True)]
        if len(href1)!=len(href2):
            print(url1)
            s=i+1
    for j in range(1,s+1):
        url=('https://www.hindigeetmala.net/movie/'+str(year)+'s.php?page='+str(j))
        print(url)
        year_link.append(url)
        theurl =url
        thepage = urllib.request.urlopen(theurl)
        soup = BeautifulSoup(thepage)
        all_href = [k['href'] for k in soup.find_all('a', href=True)]
        res = []
        songs_link=[]
| Songsdata,1953_1983.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # "The Blahut-Arimoto Algorithms and their Use in the Information Bottleneck Problem"
# > "This article discusses the Blahut-Arimoto algorithms. We will begin with a geometric and functional interpretation of the algorithm. We will then discuss the capacity maximization and the Rate-Distortion minimization algorithms. We will dedicate more time to the latter, as we will see that it can be used to compute the Information Bottleneck problem."
#
# - toc: true
# - branch: master
# - bibliography: true
# - math: true
# - badges: true
# - comments: true
# - categories: [Tutorial, Blahut-Arimoto, Rate Distortion Theory, Channel Capacity, Optimization]
# - image: images/camacho.png
# - hide: false
# - search_exclude: true
# - metadata_key1: metadata_value1
# - metadata_key2: metadata_value2
# ## Introduction
# ## Mutual Information
# ## Channel Capacity Maximization
# ## Rate Distortion Minimization
# ## Computing the Information Bottleneck
# ## References
| _notebooks/2020-11-15-blahut_arimoto.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import xgboost
from sklearn.model_selection import GridSearchCV, TimeSeriesSplit
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import explained_variance_score
from sklearn.metrics import mean_absolute_error
from sklearn.linear_model import LinearRegression
from datetime import timedelta, date
from sklearn.neural_network import MLPRegressor
import matplotlib as mpl
# %matplotlib inline
# Load the training data (a date column, a target column, then features)
# and sort chronologically so the walk-forward splits below see strictly
# increasing dates.
data = pd.read_csv('/Users/Rohil/Documents/iGEM/yemen/prelim_training_data.csv')
data.date = pd.to_datetime(data.date, format = '%Y-%m-%d')
data = data.sort_values('date').reset_index(drop=True)
# Inspect rows before the cutoff (notebook display only; result is unused).
data[data.date<'2017-08-15']
# X: date plus every column from position 2 onward; y: date plus the column
# at position 1 — presumably the prediction target; confirm against the CSV.
X = data.set_index('date').iloc[:,2:].reset_index()
y = data.set_index('date').iloc[:,1].reset_index()
X.head()
y.tail()
# The Mondays the walk-forward loop below will iterate over (display only).
pd.date_range(start='2017-06-05', end='2018-02-11', freq = 'W-MON')
def flatten(lst):
    """Flatten one level of nesting: [[1, 2], [3]] -> [1, 2, 3]."""
    flat = []
    for sub in lst:
        flat.extend(sub)
    return flat
def scale_features(scaler, X, categorical_cols):
    """Apply `scaler` to the numeric columns of `X` and re-attach the
    categorical columns untouched.

    categorical_cols : pair (start, end) — columns [0, start) are numeric,
        columns [start, end) categorical.  Any columns at or beyond `end`
        are dropped from the result.
    """
    num_end, cat_end = categorical_cols[0], categorical_cols[1]
    numeric_part = X.iloc[:, :num_end]
    categorical_part = X.iloc[:, num_end:cat_end]
    scaled_numeric = pd.DataFrame(data=scaler.transform(numeric_part),
                                  columns=numeric_part.columns)
    return pd.concat([scaled_numeric, categorical_part.reset_index(drop=True)], axis=1)
def train_model(model_type, model_params, X_train, y_train):
    """Fit and return a regressor of the requested kind.

    Parameters
    ----------
    model_type : str
        One of 'xgboost', 'mlp', 'linear_reg'.
    model_params : dict
        Keyword arguments for the model constructor.  Currently only the
        'xgboost' branch uses them.
        NOTE(review): the MLP branch hard-codes hidden_layer_sizes=(6,7,5)
        and silently ignores model_params — confirm whether that is intended.
    X_train, y_train
        Training features / target, as accepted by the estimator's fit().

    Raises
    ------
    ValueError
        If model_type is not one of the supported names.  (Previously an
        unknown type fell through and returned None, causing a confusing
        AttributeError at the call site.)
    """
    if model_type == 'xgboost':
        xgb = xgboost.XGBRegressor(**model_params)
        xgb.fit(X_train, y_train)
        return xgb
    if model_type == 'mlp':
        mlp = MLPRegressor(hidden_layer_sizes=(6,7,5))
        mlp.fit(X_train, y_train)
        return mlp
    if model_type == 'linear_reg':
        regr = LinearRegression()
        regr.fit(X_train, y_train)
        return regr
    raise ValueError("unknown model_type: {!r}".format(model_type))
def walk_forward_validation(X, y, model_type, model_params, min_train_date, categorical_cols):
    """Walk-forward (expanding-window) evaluation: for each Monday from
    `min_train_date` through 2018-02-11, train on every week strictly before
    that date and predict the following 7-day window.

    Returns (prediction_list, y_total_test): a list of per-week prediction
    arrays, and the stacked true targets from `min_train_date` onward.

    NOTE(review): the end date '2018-02-11' is hard-coded, and the scaler is
    fit once on the pre-`min_train_date` numeric features and then reused
    for every fold.
    """
    prediction_list = []
    y_total_test = y[y.date>=min_train_date].drop('date', axis=1)
    # Fit the scaler on the base training window's numeric columns only.
    X_base_numerical = X[(X.date<min_train_date)].drop('date', axis=1).iloc[:,:(categorical_cols[0])]
    scaler = StandardScaler()
    scaler.fit(X_base_numerical)
    for date in pd.date_range(start=min_train_date, end='2018-02-11', freq = 'W-MON'):
        # Train on everything strictly before `date`; test on the next week.
        X_train, X_test = X[(X.date<date)].drop('date', axis=1), X[(X.date>=date) & (X.date < (date + timedelta(7)))].drop('date', axis=1)
        y_train, y_test = y[(y.date<date)].drop('date', axis=1), y[(y.date>=date) & (y.date < (date + timedelta(7)))].drop('date', axis=1)
        #print('train=%d, test=%d' % (X_train.shape[0], X_test.shape[0]))
        X_train, X_test = scale_features(scaler, X_train, categorical_cols), scale_features(scaler, X_test, categorical_cols)
        print('train=%d, test=%d' % (X_train.shape[0], X_test.shape[0]))
        model = train_model(model_type, model_params, X_train, y_train)
        predictions = model.predict(X_test)
        prediction_list.append(predictions)
    return(prediction_list, y_total_test)
def walk_forward_grid_search(X, y, model_params, min_train_date, categorical_cols):
    """Build a list of (train_idx, test_idx) splits for time-ordered CV.

    NOTE(review): despite its name, this performs no grid search, and the
    `y`, `model_params` and `categorical_cols` arguments are unused.
    """
    # NOTE(review): `data` is a module-level global, and this first entry is a
    # bare Index rather than a (train, test) tuple like the ones appended
    # below — presumably a leftover; confirm before passing the result to a
    # scikit-learn `cv=` argument.
    my_cv = [data[data.date<'2017-07-31'].index, ]
    # Two-week, Monday-anchored expanding-window splits up to 2018-02-11.
    for date in pd.date_range(start=min_train_date, end='2018-02-11', freq = '2W-MON'):
        train_test_idx_tup = (X[X.date<date].index, X[(X.date>=date) & (X.date < (date + timedelta(14)))].index)
        my_cv.append(train_test_idx_tup)
    return(my_cv)
def time_series_split_grid_search(X, y, split_date, model_type, param_grid, categorical_cols):
    """Grid-search hyper-parameters using time-ordered cross-validation.

    The scaler is fit on the numerical features from before `split_date`
    only, then applied to the whole frame; GridSearchCV is driven by a
    3-fold TimeSeriesSplit over the scaled data.

    Returns
    -------
    (best_score, best_params) from the fitted GridSearchCV.

    Raises
    ------
    ValueError
        For any `model_type` other than 'xgboost'. The old 'mlp' branch
        referenced an undefined `model_params` and then fell through to an
        unbound `grid_search` (UnboundLocalError); fail fast instead.
    """
    X_train, X_test = X[(X.date<split_date)].drop('date', axis=1), X[(X.date>=split_date)].drop('date', axis=1)
    y_train, y_test = y[(y.date<split_date)].drop('date', axis=1), y[(y.date>=split_date)].drop('date', axis=1)
    train_indices, test_indices = y_train.index, y_test.index
    # Fit the scaler on pre-split numerical columns only (avoids leakage),
    # then scale the full feature frame with it.
    scaler = StandardScaler()
    scaler.fit(X_train.iloc[:, :categorical_cols[0]])
    X_full = scale_features(scaler, X.drop('date', axis=1), categorical_cols)
    y_full = y.drop('date', axis=1)
    # NOTE: printed for inspection only — the search below uses TimeSeriesSplit,
    # not this manual train/test split.
    my_cv = [train_indices, test_indices]
    print(my_cv)
    if model_type != 'xgboost':
        raise ValueError("unsupported model_type: %r" % (model_type,))
    xgb = xgboost.XGBRegressor()
    grid_search = GridSearchCV(xgb,
                               param_grid=param_grid,
                               cv=TimeSeriesSplit(n_splits=3).split(X_full),
                               n_jobs=-1,
                               scoring='explained_variance',
                               verbose=True)
    grid_search.fit(X_full, y_full)
    return (grid_search.best_score_, grid_search.best_params_)
# +
# Best hyper-parameters found for the XGBoost regressor (see grid search below).
xgb_params = {
    'colsample_bytree': 0.5,
    'learning_rate': 0.01,
    'max_depth': 13,
    'min_child_weight': 20,
    'n_estimators': 250,
    'nthread': 4,
    'objective': 'reg:linear',
    'silent': 1,
    'subsample': 0.5,
}
# {'colsample_bytree': 0.65,
# 'learning_rate': 0.01,
# 'max_depth': 16,
# 'min_child_weight': 30,
# 'n_estimators': 300,
# 'nthread': 4,
# 'objective': 'reg:linear',
# 'silent': 1,
# 'subsample': 0.65}
# {'colsample_bytree': 0.6,
# 'learning_rate': 0.01,
# 'max_depth': 7,
# 'min_child_weight': 15,
# 'n_estimators': 400,
# 'nthread': 4,
# 'objective': 'reg:linear',
# 'silent': 1,
# 'subsample': 0.6}
# {'colsample_bytree': 0.6,
# 'learning_rate': 0.01,
# 'max_depth': 5,
# 'min_child_weight': 2,
# 'n_estimators': 1,
# 'nthread': 4,
# 'objective': 'reg:linear',
# 'silent': 1,
# 'subsample': 0.6}
# {'colsample_bytree': 0.75,
# 'learning_rate': 0.01,
# 'max_depth': 3,
# 'min_child_weight': 12,
# 'n_estimators': 200,
# 'nthread': 4,
# 'objective': 'reg:linear',
# 'silent': 1,
# 'subsample': 0.3}
# -
# Hyper-parameter search space for the XGBoost grid search.
xgb_param_grid = {
    'nthread': [4],  # with hyperthreading, xgboost may become slower
    'objective': ['reg:linear'],
    'learning_rate': [0.01],  # the so-called `eta` value
    'max_depth': [13, 15, 17],
    'min_child_weight': [15, 20, 25],
    'silent': [1],
    'subsample': [0.5, 0.6],
    'colsample_bytree': [0.5, 0.6],
    'n_estimators': [200, 250, 300],
}
# Grid-search on data up to 2017-09-25, with the train/test split at 2017-08-14.
time_series_split_grid_search(X[X.date<'2017-09-25'], y[y.date<'2017-09-25'], '2017-08-14', 'xgboost', xgb_param_grid, (12,33))
# Walk-forward predictions for each model family, starting 2017-08-07.
mlp_predictions, y_test = walk_forward_validation(X, y, 'mlp', None, '2017-08-07', (12,33))
xgboost_predictions, y_test = walk_forward_validation(X, y, 'xgboost', xgb_params, '2017-08-07', (12,33))
linreg_predictions, y_test = walk_forward_validation(X, y, 'linear_reg', None, '2017-08-07', (12,33))
# Flatten the per-week prediction arrays into single Series.
mlp_preds = pd.Series(flatten(mlp_predictions))
xgb_preds = pd.Series(flatten(xgboost_predictions))
# Double flatten — presumably the linear-regression predictions come back 2-D;
# TODO(review) confirm against the model's predict output.
lr_preds = pd.Series(flatten(flatten(linreg_predictions)))
pred_dict = {'mlp': mlp_preds, 'xgb': xgb_preds, 'lr' : lr_preds}
# Overall out-of-sample metrics for each model.
for key, value in pred_dict.items():
    mean_error = mean_absolute_error(y_test, value)
    explained_variance = explained_variance_score(y_test, value)
    print ('%s: mean abs error = %s, explained variance score = %s' % (key, mean_error, explained_variance))
# Pivot each model's predictions into a (date x governorate) table.
pred_crosstab_dict = {}
for key, value in pred_dict.items():
    val = value
    # Align the predictions with the out-of-sample rows before merging.
    val.index = y_test.index
    prediction_df = pd.DataFrame(val).merge(pd.DataFrame(data[['gov_iso', 'date']]), left_index = True, right_index = True)
    prediction_df.columns = ['pred', 'gov_iso' ,'date']
    pred_crosstab_dict[key] = prediction_df.pivot_table(index = 'date', columns = 'gov_iso', values = 'pred')
# Same pivot for the observed weekly case counts.
y_test_df = y.drop('date', axis=1).merge(pd.DataFrame(data[['gov_iso' ,'date']]), left_index = True, right_index = True)
y_test_crosstab = y_test_df.pivot_table(index = 'date', columns = 'gov_iso', values = 'weekly_cases')
y_test_crosstab.head()
# Reset any styling left over from earlier cells.
mpl.rcParams.update(mpl.rcParamsDefault)
def set_style(color):
    # NOTE(review): 'seaborn-*' style names were renamed to 'seaborn-v0_8-*'
    # in matplotlib 3.6 — confirm the installed version still accepts these.
    plt.style.use(['seaborn-' + color, 'seaborn-paper'])
def plot_pred_against_actual(pred_crosstab_dict, test_crosstab):
    """Plot predicted vs. actual weekly cases, one panel per governorate.

    Parameters
    ----------
    pred_crosstab_dict : dict
        Model name -> (date x governorate) DataFrame of predictions.
    test_crosstab : DataFrame
        (date x governorate) observed values; drawn in red in each panel.

    The figure is saved to disk and closed; nothing is returned.
    """
    set_style('white')
    cols = test_crosstab.columns
    # One stacked panel per governorate column (the original hard-coded 21).
    n_panels = len(cols)
    fig, ax = plt.subplots(n_panels, 1, figsize=(6, 15), sharex=True)
    for i in range(n_panels):
        test_crosstab[cols[i]].plot(kind='line', ax=ax[i], label='true_val', legend=True, color='red')
        # BUG FIX: Axes.set_color_cycle was removed from matplotlib;
        # set_prop_cycle is the supported way to cycle line colours.
        ax[i].set_prop_cycle(color=['seagreen', 'blue', 'plum'])
        for key in pred_crosstab_dict:
            pred_crosstab_dict[key][cols[i]].plot(kind='line', ax=ax[i], label=key, legend=True)
        # Hide per-panel legends; a single shared legend is added below.
        ax[i].legend().set_visible(False)
        ax[i].set_ylabel(cols[i])
        ax[i].yaxis.set_label_position('right')
        ax[i].spines['right'].set_visible(False)
        ax[i].spines['top'].set_visible(False)
        ax[i].spines['bottom'].set_visible(True)
    # Anchor the shared legend beside the middle panel (index 10 when n=21).
    mid = n_panels // 2
    ax[mid].legend().set_visible(True)
    ax[mid].legend(fontsize=10, loc='center left', bbox_to_anchor=(1.05, 0.5))
    fig.subplots_adjust(hspace=.2)
    # NOTE(review): hard-coded output path from the original author's machine.
    fig.savefig('/Users/Rohil/Documents/iGEM/yemen/plot_xgb_tuned.png', dpi=500, bbox_inches='tight')
    plt.close('all')
plot_pred_against_actual(pred_crosstab_dict, y_test_crosstab)
# +
def calculate_validation_by_gov(pred_crosstab_dict, test_crosstab):
    """Compute per-governorate error metrics for each model.

    Returns
    -------
    dict mapping model name -> {governorate -> (MAE, explained variance)}.
    The original computed these values and silently discarded them.
    """
    scores = {}
    for key, pred_crosstab in pred_crosstab_dict.items():
        per_gov = {}
        for col in test_crosstab.columns:
            test = test_crosstab[col]
            # BUG FIX: the original indexed with `cols[i]`, but `i` is never
            # defined in this function (NameError at runtime); use `col`.
            pred = pred_crosstab[col]
            per_gov[col] = (mean_absolute_error(test, pred),
                            explained_variance_score(test, pred))
        scores[key] = per_gov
    return scores
# -
# Baseline statistics of the held-out target, for judging the error magnitudes.
y_test.mean()
y_test.std()
| DEPRECATED/yemen_time_series_forecasting.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Making a broadband telescope point spread function
#
# We will introduce the basic elements in HCIPy and produce a broadband point spread function for the Magellan telescope.
# +
import numpy as np
import matplotlib.pyplot as plt
from hcipy import *
# -
# We'll start by making a telescope pupil, then explain the code below.
# +
# Sample the pupil plane with 256x256 points (normalized units).
pupil_grid = make_pupil_grid(256)
telescope_pupil_generator = make_magellan_aperture(normalized=True)
# Evaluate the aperture generator on the grid to obtain a Field.
telescope_pupil = telescope_pupil_generator(pupil_grid)
im = imshow_field(telescope_pupil, cmap='gray')
plt.colorbar()
plt.xlabel('x / D')
plt.ylabel('y / D')
plt.show()
# -
# A lot happened in these few lines. Let's break it down.
#
# We first made a grid, specifically a pupil grid. A `Grid` defines the sampling in some space, in essence providing the positions in $x$ and $y$ for each pixel in the telescope pupil. A `Grid` consists of `Coords`, which provide a list of coordinates, and a coordinate system, that tells the `Grid` how to interpret these numbers, for example `cartesian` or `polar`. You can also think of a `Grid` as a mapping between a one-dimensional index and a corresponding position in space.
#
# The function `make_pupil_grid()` did two things. It made a `Coords` object with regularly-spaced coordinates, then made a `CartesianGrid` object out of them. A pupil grid will always be symmetric around the origin, and will by default be normalized to have a diameter of one.
#
# Finally, a `Field` is a combination of a one-dimensional array of numbers with an associated `Grid`. A `Field` can be thought of as a sampled physical field, such as temperature, potential, electric field, intensity, etc... Many functions in HCIPy use `Field`s to correctly handle their sampling requirements. For example, when plotting a `Field`, you do not have to supply an extent, as that is already given by the `Field` itself. Another example is a Fast Fourier Transform (FFT). Taking an FFT of a `Field` will return a `Field` for which its `Grid` is in frequency units. Scaling the `Grid` of the input `Field` will correspondingly change the `Grid` of the returned `Field`. As HCIPy keeps track of the sampling throughout your code, it makes it extremely hard to introduce human errors in sampling.
#
# So how do we now make a `Field` object? We can of course manually supply the values for each pixel, but here we take an easier approach. There are many `Field` generators in HCIPy. These are functions that can be evaluated on a `Grid`. Here we create a `Field` generator using `make_magellan_aperture()`, which returns the telescope pupil for the [Magellan 6.5m telescope](https://obs.carnegiescience.edu/Magellan) at Las Campanas Observatory in Chile. You can think of `Field` generators as a mathematical description of some function, which can be sampled on a `Grid` by evaluating it.
#
# The next line evaluates the telescope pupil on our previously generated `Grid`. The last few lines then display the `Field` as an image using the function `imshow_field()`. This function mimics the standard `pyplot.imshow()` in `matplotlib`, and they can be largely considered interchangeable. Note that `imshow_field()` used the `Grid` of the telescope aperture and correctly displays the numbers on the axes.
# Now that we have a telescope pupil, we can start creating the point spread function (PSF) for the telescope. Again, we'll explain the code below.
# +
# Treat the pupil field as a (monochromatic, normalized-units) wavefront.
wavefront = Wavefront(telescope_pupil)
# Focal grid: 8 pixels per diffraction width, out to 16 diffraction widths.
focal_grid = make_focal_grid(q=8, num_airy=16)
prop = FraunhoferPropagator(pupil_grid, focal_grid)
focal_image = prop.forward(wavefront)
# Display the PSF on a log scale, normalised to its peak.
# FIX: raw strings for the TeX labels — '\l' is an invalid escape sequence in
# a normal string literal (SyntaxWarning on newer Pythons); the text is unchanged.
imshow_field(np.log10(focal_image.intensity / focal_image.intensity.max()), vmin=-5)
plt.xlabel(r'Focal plane distance [$\lambda/D$]')
plt.ylabel(r'Focal plane distance [$\lambda/D$]')
plt.colorbar()
plt.show()
# -
# We want to see what happens when we image a point source (such as a distant star) with a telescope that has this particular telescope pupil geometry. We first create a `Wavefront` object, and pass it the telescope pupil as the corresponding electric field. This `Wavefront` is the light just after it reflected off the Magellan primary mirror. `Wavefront`s are used for all light in HCIPy, and they can be modified by propagating the light through `OpticalElement`s, but more on that below.
#
# We now need to define the sampling of the PSF that we want to obtain. This is done with the `make_focal_grid()` function. This function takes `q`, which is the number of pixels per diffraction width, and `num_airy`, which is the half size (i.e. the radius) of the image in the number of diffraction widths.
#
# We now define our first `OpticalElement`. The `FraunhoferPropagator` object is a propagator that simulates the propagation through a telecentric reimaging system, from a pupil plane to a focal plane. It requires the sampling of the input pupil and the sampling of the output focal plane as arguments.
#
# We can use this `Propagator` to propagate our `Wavefront` to the focal plane of the telescope. This is done by calling the `forward()` function with the `Wavefront` as an argument. This returns a new `Wavefront`, now in the focal plane of the telescope.
#
# Again, we use `imshow_field` to show the intensity of the focal-plane image on a logarithmic scale.
#
# Next, we want to take a cut across this image to see how the flux changes as a function of angular separation from the on-axis position in units of diffraction widths $(\lambda/D)$. `focal_image` is a `Wavefront` object, which has several properties including `intensity`, so we use that:
psf = focal_image.intensity
# Next we want to know the size and shape of the `psf`, but it's stored as a 1D list of values. In some cases, including now, it is still useful to have a two-dimensional image instead. We can use the `shaped` attribute to get a reshaped version of `psf`, which has the shape according to its grid. We can then cut out the middle row from the image using `[:,psf_shape[0] // 2]` remembering that we need to have shaped the `psf` first before doing the slicing, and then we normalise the slice by the peak value of the `psf` image.
# +
# 2-D image shape implied by the PSF's grid.
psf_shape = psf.grid.shape
# Cut the central column and normalise it by the PSF peak value.
slicefoc = psf.shaped[:, psf_shape[0] // 2]
slicefoc_normalised = slicefoc / psf.max()
# -
# Finally we plot out the normalised slice. Note that HCIPy keeps track of the units and coordinates so that you don't have to propagate them yourself and risk making an error in the process - we get the units by taking the `x` values from the `focal_grid`, remembering to `reshape` them to a 2D array, and then slicing out one of the rows and using these values for the x axis of our plot:
# Plot the central cut of the PSF against focal-plane position.
# FIX: raw string for the x-label — '\l' is an invalid escape sequence in a
# normal string literal (SyntaxWarning on newer Pythons); the text is unchanged.
plt.plot(focal_grid.x.reshape(psf_shape)[0, :], slicefoc_normalised)
plt.xlabel(r'Focal plane distance [$\lambda/D$]')
plt.ylabel('Normalised intensity [I]')
plt.yscale('log')
plt.title('Magellan telescope PSF in diffraction units')
plt.xlim(-10, 10)
plt.ylim(5e-6, 2)
plt.show()
# We've plotted up the monochromatic case, but now let's see the effect for broadening the range of wavelengths through our telescope - we adjust the wavelength in the `wavefront`, then calculate the intensity image and add them together for several different wavelengths. We pick 11 monochromatic PSFs over the fractional bandwidth:
# +
# Fractional bandwidth of the simulated filter.
bandwidth = 0.2
# Accumulate the intensity of 11 monochromatic PSFs spanning the bandwidth.
focal_total = 0
for wlen in np.linspace(1 - bandwidth / 2., 1 + bandwidth / 2., 11):
    wavefront = Wavefront(telescope_pupil, wlen)
    focal_total += prop(wavefront).intensity
imshow_field(np.log10(focal_total / focal_total.max()), vmin=-5)
plt.title('Magellan PSF with a bandwidth of {:.1f} %'.format(bandwidth * 100))
plt.colorbar()
# FIX: raw strings for the TeX labels — '\l' is an invalid escape sequence in
# a normal string literal (SyntaxWarning on newer Pythons); the text is unchanged.
plt.xlabel(r'Focal plane distance [$\lambda/D$]')
plt.ylabel(r'Focal plane distance [$\lambda/D$]')
plt.show()
# -
# Until now, we've used normalized units. That is, all distances in the pupil have been in fractions of the pupil diameter, all distances in the focal plane have been in diffraction widths, the wavelength was one, and the focal length was one. While HCIPy will commonly assume normalized units as default arguments, we can also use physical units. We will now recreate the monochromatic PSF with physical units.
# +
# Physical parameters for the Magellan telescope.
pupil_diameter = 6.5 # m
effective_focal_length = 71.5 # m
wavelength = 750e-9 # m
pupil_grid = make_pupil_grid(256, diameter=pupil_diameter)
# Aperture in physical units this time (no `normalized=True`).
telescope_pupil_generator = make_magellan_aperture()
telescope_pupil = telescope_pupil_generator(pupil_grid)
imshow_field(telescope_pupil, cmap='gray')
plt.colorbar()
plt.xlabel('x [m]')
plt.ylabel('y [m]')
plt.show()
# Monochromatic wavefront at 750 nm, propagated through the physical system.
wavefront = Wavefront(telescope_pupil, wavelength)
focal_grid = make_focal_grid(q=4, num_airy=16, pupil_diameter=pupil_diameter, focal_length=effective_focal_length, reference_wavelength=wavelength)
prop = FraunhoferPropagator(pupil_grid, focal_grid, focal_length=effective_focal_length)
focal_image = prop.forward(wavefront)
# grid_units=1e-6 renders the axes in micrometres.
imshow_field(np.log10(focal_image.intensity / focal_image.intensity.max()), vmin=-5, grid_units=1e-6)
plt.xlabel('Focal plane distance [um]')
plt.ylabel('Focal plane distance [um]')
plt.colorbar()
plt.show()
# -
# The changes from this code to that for normalized units, is that the diameter of the telescope pupil was given to `make_pupil_grid()`, that a wavelength was given to the `Wavefront`, that the pupil diameter, focal length and wavelength were given to `make_focal_grid()` and that the focal length was given to the `FraunhoferPropagator`. Also note that we have used the parameter `grid_units` in the call to `imshow_field()` to give the numbers on the axes in micron, instead of meters.
#
# The function `make_focal_grid()` can actually be called in many different ways. We can either supply:
#
# * nothing, in which case normalized units are assumed;
# * a spatial resolution, which is the size of diffraction width;
# * a F-number and reference wavelength;
# * a pupil diameter, focal length and reference wavelength.
#
# This allows for much flexibility in defining your sampling at the focal plane.
| doc/tutorial_notebooks/BroadbandTelescopePSF/BroadbandTelescopePSF.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_python3
# language: python
# name: conda_python3
# ---
# # Managed Data Processing with SageMaker Processing in Python
# <img align="left" width="130" src="https://raw.githubusercontent.com/PacktPublishing/Amazon-SageMaker-Cookbook/master/Extra/cover-small-padded.png"/>
#
# This notebook contains the code to help readers work through one of the recipes of the book [Machine Learning with Amazon SageMaker Cookbook: 80 proven recipes for data scientists and developers to perform ML experiments and deployments](https://www.amazon.com/Machine-Learning-Amazon-SageMaker-Cookbook/dp/1800567030)
# ### How to do it...
import boto3
import sagemaker
from sagemaker import get_execution_role
from sagemaker.sklearn.processing import SKLearnProcessor
# +
# Resolve the notebook's execution role and configure a scikit-learn
# processor: one ml.m5.large instance running the sklearn 0.20.0 image.
role = get_execution_role()

sklearn_processor = SKLearnProcessor(
    framework_version='0.20.0',
    role=role,
    instance_count=1,
    instance_type='ml.m5.large',
)
# +
from sagemaker.processing import ProcessingInput, ProcessingOutput

# Local dataset to stage into the processing container.
source = 'tmp/dataset.processing.csv'

pinput1 = ProcessingInput(
    source=source,
    destination='/opt/ml/processing/input',
)

poutput1 = ProcessingOutput(source='/opt/ml/processing/output')
# -
# Run the processing script with a sample command-line argument.
sklearn_processor.run(
    code='processing.py',
    arguments=['--sample-argument', '3'],
    inputs=[pinput1],
    outputs=[poutput1],
)
# Inspect the processor and its most recent job (notebook display cells).
sklearn_processor.__dict__
sklearn_processor.latest_job.__dict__
latest_job = sklearn_processor.latest_job
# S3 URI where the processing output was written.
destination = latest_job.outputs[0].destination
destination
# !aws s3 cp "{destination}/output.csv" tmp/output.processing.csv
# !cat tmp/output.processing.csv
| Chapter04/11 - Managed Data Processing with SageMaker Processing in Python.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import sys, os, time, shutil
from IPython.core.display import display, HTML
# display(HTML("<style>.container { width:98% !important; }</style>"))
os.environ['FOUNDATION_RUN_MODE'] = 'jupyter'
# # %load_ext autoreload
# # %autoreload 2
# # %pdb
from tqdm import tqdm_notebook as tqdm
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributions as distrib
import torch.multiprocessing as mp
import torchvision.models
from torch.utils.data import Dataset, DataLoader, TensorDataset
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from sklearn.preprocessing import normalize
import gym
import inspect
import numpy as np
import pickle
# #%matplotlib tk
import matplotlib.pyplot as plt
#plt.switch_backend('Qt5Agg') #('Qt5Agg')
import foundation as fd
from foundation import models
from foundation import util
from foundation import train as trn
#from foundation.util import replicate, Cloner
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import matplotlib.cm
from matplotlib import animation
import matplotlib as mpl
# mpl.rc('image', cmap='gray')
# import gpumap
# import umap, shap
# import umap.plot
from sklearn.decomposition import PCA
import sklearn.datasets
# import gpumap
# %matplotlib notebook
import project as proj
# import pointnets as ptn
np.set_printoptions(linewidth=120)
# +
# dataset = trn.get_dataset('3dshapes', train=None, labeled=True, device='cpu')
# len(dataset)
# -
root = os.environ['FOUNDATION_SAVE_DIR'] # should be set to "trained_nets"
# Candidate runs; only the last assignment takes effect.
name = '3ds-ae-conv_0002-6337051-00_200508-232722'
name = '3ds-vae-b4-conv_0002-6337051-05_200508-232713'
name = '3ds-ae-12b1_0052-6286534-00_200429-030431'
# Load the saved evaluation results for the selected run.
results = torch.load(os.path.join(root, name, 'results.pth.tar'))
print(results.keys())
print(results['out'].keys())
A = trn.get_config()
A.din = (3, 64, 64)
A.dout = A.din
model, = trn.load(name, config=A, get_data=None, update_config=True, load_last=True) # for loading only the model
print(model)
# Original input images saved with the results.
X = results['out']['original']
X.shape
# Re-encode/decode the saved originals with the restored model (no gradients).
with torch.no_grad():
    raw_Q = model.encode(X)
    # Distribution-valued encodings are reduced to their mean.
    if isinstance(raw_Q, distrib.Distribution):
        Q = raw_Q.mean
    else:
        Q = raw_Q
    rec = model.decode(raw_Q)
Q.shape, rec.shape
util.show_imgs(rec[:64])
pass
util.plot_distribs(Q)
pass
# comparing to saved results
save_Q = results['out']['latent']
if isinstance(save_Q, distrib.Distribution):
    save_Q = save_Q.mean
util.show_imgs(results['out']['reconstruction'][:64])
util.plot_distribs(save_Q)
# Should be ~0 if the reload reproduces the saved outputs exactly.
F.mse_loss(rec, results['out']['reconstruction']), F.mse_loss(Q, save_Q)
| notebooks/old/loading_example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Bar chart
#
# Bar charts visualize data that is organized according to categories as a series of bars, where the height of each bar represents the values of the data in this category.
#
# For example, in this exercise, you will visualize the number of gold medals won by each country in the provided `medals` DataFrame. The DataFrame contains the countries as the index, and a column called `"Gold"` that contains the number of gold medals won by each country, according to their rows.
#
# Instructions
#
# - Call the `ax.bar` method to plot the `"Gold"` column as a function of the country.
# - Use the `ax.set_xticklabels` to set the x-axis tick labels to be the country names.
# - In the call to `ax.set_xticklabels` rotate the x-axis tick labels by 90 degrees by using the `rotation` key-word argument.
# - Set the y-axis label to `"Number of medals"`.
# +
# Import libraries
import pandas as pd
import matplotlib.pyplot as plt
# Import the DataFrame (country names as the index)
medals = pd.read_csv('medals_by_country_2016.csv', index_col=0)
fig, ax = plt.subplots()
# Plot a bar-chart of gold medals as a function of country
ax.bar(medals.index, medals["Gold"])
# Set the x-axis tick labels to the country names
# ax.set_xticklabels(medals.index, rotation=90)
# Rotating via plt.xticks keeps the automatically generated labels.
plt.xticks(rotation=90)
# Set the y-axis label
ax.set_ylabel('Number of medals')
plt.show();
# -
# ## Stacked bar chart
#
# A stacked bar chart contains bars, where the height of each bar represents values. In addition, stacked on top of the first variable may be another variable. The _additional_ height of this bar represents the value of this variable. And you can add more bars on top of that.
#
# In this exercise, you will have access to a DataFrame called `medals` that contains an index that holds the names of different countries, and three columns: `"Gold"`, `"Silver"` and `"Bronze"`. You will also have a Figure, `fig`, and Axes, `ax`, that you can add data to.
#
# You will create a stacked bar chart that shows the number of gold, silver, and bronze medals won by each country, and you will add labels and create a legend that indicates which bars represent which medals.
#
# Instructions
#
# - Call the `ax.bar` method to add the `"Gold"` medals. Call it with the label set to `"Gold"`.
# - Call the `ax.bar` method to stack `"Silver"` bars on top of that, using the `bottom` key-word argument so the bottom of the bars will be on top of the gold medal bars, and `label` to add the label `"Silver"`.
# - Use `ax.bar` to add `"Bronze"` bars on top of that, using the bottom key-word and `label` it as `"Bronze"`.
# +
# NOTE: ax.bar didn't work
# Add bars for "Gold" with the label "Gold"
plt.bar(medals.index, medals['Gold'], label='Gold')
# Stack bars for "Silver" on top with label "Silver"
plt.bar(medals.index, medals['Silver'], bottom=medals['Gold'], label='Silver')
# Stack bars for "Bronze" on top of that with label "Bronze"
# (each bar's bottom is the running total of the bars beneath it)
plt.bar(medals.index, medals['Bronze'], bottom=medals['Gold'] + medals['Silver'], label='Bronze')
# Rotate the x-axis tick labels by 90 degrees
plt.xticks(rotation=90)
# Display the legend
plt.legend()
plt.show();
# -
# ## Creating histograms
#
# Histograms show the full distribution of a variable. In this exercise, we will display the distribution of weights of medalists in gymnastics and in rowing in the 2016 Olympic games for a comparison between them.
#
# You will have two DataFrames to use. The first is called `mens_rowing` and includes information about the medalists in the men's rowing events. The other is called `mens_gymnastics` and includes information about medalists in all of the Gymnastics events.
#
# Instructions
#
# - Use the `ax.hist` method to add a histogram of the `"Weight"` column from the `mens_rowing` DataFrame.
# - Use `ax.hist` to add a histogram of `"Weight"` for the `mens_gymnastics` DataFrame.
# - Set the x-axis label to `"Weight (kg)"` and the y-axis label to `"# of observations"`.
# +
# Import the DataFrames
mens_rowing = pd.read_csv('mens_rowing.csv')
mens_gymnastics = pd.read_csv('mens_gymnastics.csv')
fig, ax = plt.subplots()
# Plot a histogram of "Weight" for mens_rowing
ax.hist(mens_rowing['Weight'])
# Compare to histogram of "Weight" for mens_gymnastics
# (drawn on the same Axes, so the two distributions overlap)
ax.hist(mens_gymnastics['Weight'])
# Set the x-axis label to "Weight (kg)"
ax.set_xlabel('Weight (kg)')
# Set the y-axis label to "# of observations"
ax.set_ylabel('# of observations')
plt.show()
# -
# ## "Step" histogram
#
# Histograms allow us to see the distributions of the data in different groups in our data. In this exercise, you will select groups from the Summer 2016 Olympic Games medalist dataset to compare the height of medalist athletes in two different sports.
#
# The data is stored in a Pandas DataFrame object called `summer_2016_medals` that has a column "Height". In addition, you are provided a Pandas GroupBy object that has been grouped by the sport.
#
# In the exercise below, you will visualize and label the histograms of two sports: "Gymnastics" and "Rowing" and see the marked difference between medalists in these two sports.
#
# Instructions
#
# - Use the `hist` method to display a histogram of the `"Weight"` column from the `mens_rowing` DataFrame, label this as `"Rowing"`.
# - Use `hist` to display a histogram of the `"Weight"` column from the `mens_gymnastics` DataFrame, and label this as `"Gymnastics"`.
# - For both histograms, use the `'histtype'` argument to visualize the data using the `'step'` type and set the number of bins to use to 5.
# - Add a legend to the figure, before it is displayed.
# +
fig, ax = plt.subplots()

# Overlay unfilled "step" histograms (5 bins each) so the two weight
# distributions can be compared without occluding one another.
ax.hist(mens_rowing['Weight'], label='Rowing', histtype='step', bins=5)
ax.hist(mens_gymnastics['Weight'], label='Gymnastics', histtype='step', bins=5)

ax.set_xlabel("Weight (kg)")
ax.set_ylabel("# of observations")

# Add the legend and show the Figure
plt.legend()
plt.show()
# -
# ## Adding error-bars to a bar chart
#
# Statistical plotting techniques add quantitative information for comparisons into the visualization. For example, in this exercise, we will add error bars that quantify not only the difference in the means of the height of medalists in the 2016 Olympic Games, but also the standard deviation of each of these groups, as a way to assess whether the difference is substantial relative to the variability within each group.
#
# For the purpose of this exercise, you will have two DataFrames: `mens_rowing` holds data about the medalists in the rowing events and `mens_gymnastics` will hold information about the medalists in the gymnastics events.
#
# Instructions
#
# - Add a bar with size equal to the mean of the `"Height"` column in the `mens_rowing` DataFrame and an error-bar of its standard deviation.
# - Add another bar for the mean of the `"Height"` column in `mens_gymnastics` with an error-bar of its standard deviation.
# - Add a label to the the y-axis: `"Height (cm)"`.
# +
fig, ax = plt.subplots()
# Add a bar for the rowing "Height" column mean/std
# (bar height = mean, error bar = one standard deviation)
ax.bar('Rowing', mens_rowing['Height'].mean(), yerr=mens_rowing['Height'].std())
# Add a bar for the gymnastics "Height" column mean/std
ax.bar('Gymnastics', mens_gymnastics['Height'].mean(), yerr=mens_gymnastics['Height'].std())
# Label the y-axis
ax.set_ylabel("Height (cm)")
plt.show()
# -
# ## Adding error-bars to a plot
#
# Adding error-bars to a plot is done by using the `errorbar` method of the `Axes` object.
#
# Here, you have two DataFrames loaded: `seattle_weather` has data about the weather in Seattle and `austin_weather` has data about the weather in Austin. Each DataFrame has a column `"MONTH"` that has the names of the months, a column `"MLY-TAVG-NORMAL"` that has the average temperature in each month and a column `"MLY-TAVG-STDDEV"` that has the standard deviation of the temperatures across years.
#
# In the exercise, you will plot the mean temperature across months and add the standard deviation at each point as y errorbars.
#
# Instructions
#
# - Use the `ax.errorbar` method to add the Seattle data: the `"MONTH"` column as x values, the `"MLY-TAVG-NORMAL"` as y values and `"MLY-TAVG-STDDEV"` as `yerr` values.
# - Add the Austin data: the `"MONTH"` column as x values, the `"MLY-TAVG-NORMAL"` as y values and `"MLY-TAVG-STDDEV"` as yerr values.
# - Set the y-axis label as `"Temperature (Fahrenheit)"`.
# +
# Load the two weather DataFrames from disk.
seattle_weather = pd.read_csv('seattle_weather.csv')
austin_weather = pd.read_csv('austin_weather.csv')

fig, ax = plt.subplots()

# Monthly normal temperature for Seattle, with its standard deviation
# across years rendered as y error bars.
ax.errorbar(seattle_weather['MONTH'], seattle_weather['MLY-TAVG-NORMAL'],
            yerr=seattle_weather['MLY-TAVG-STDDEV'])

# The same for Austin, on the same Axes for comparison.
ax.errorbar(austin_weather['MONTH'], austin_weather['MLY-TAVG-NORMAL'],
            yerr=austin_weather['MLY-TAVG-STDDEV'])

ax.set_ylabel('Temperature (Fahrenheit)')
plt.show()
# -
# ## Creating boxplots
#
# Boxplots provide additional information about the distribution of the data that they represent. They tell us what the median of the distribution is, what the inter-quartile range is and also what the expected range of approximately 99% of the data should be. Outliers beyond this range are particularly highlighted.
#
# In this exercise, you will use the data about medalist heights that you previously visualized as histograms, and as bar charts with error bars, and you will visualize it as boxplots.
#
# Again, you will have the `mens_rowing` and `mens_gymnastics` DataFrames available to you, and both of these DataFrames have columns called `"Height"` that you will compare.
#
# Instructions
#
# - Create a boxplot that contains the `"Height"` column for `mens_rowing` on the left and `mens_gymnastics` on the right.
# - Add x-axis tick labels: `"Rowing"` and `"Gymnastics"`.
# - Add a y-axis label: `"Height (cm)"`.
# +
fig, ax = plt.subplots()

# Boxplots of medalist heights: rowing on the left, gymnastics on the right.
ax.boxplot([mens_rowing['Height'], mens_gymnastics['Height']])

# Label the two boxes and the value axis.
ax.set_xticklabels(['Rowing', 'Gymnastics'])
ax.set_ylabel('Height (cm)')

plt.show()
# -
# ## Simple scatter plot
#
# Scatter plots are a bi-variate visualization technique. They plot each record in the data as a point. The location of each point is determined by the value of two variables: the first variable determines the distance along the x-axis and the second variable determines the height along the y-axis.
#
# In this exercise, you will create a scatter plot of the `climate_change` data. This DataFrame, which is already loaded, has a column `"co2"` that indicates the measurements of carbon dioxide every month and another column, `"relative_temp"` that indicates the temperature measured at the same time.
#
# Instructions
#
# - Using the `ax.scatter method`, add the data to the plot: `"co2"` on the x-axis and `"relative_temp"` on the y-axis.
# - Set the x-axis label to `"CO2 (ppm)"`.
# - Set the y-axis label to `"Relative temperature (C)"`.
# +
# Import the DataFrame
climate_change = pd.read_csv('climate_change.csv')
fig, ax = plt.subplots()
# Add data: "co2" on x-axis, "relative_temp" on y-axis
# (one point per row of the DataFrame)
ax.scatter(climate_change['co2'], climate_change['relative_temp'])
# Set the x-axis label to "CO2 (ppm)"
ax.set_xlabel('CO2 (ppm)')
# Set the y-axis label to "Relative temperature (C)"
ax.set_ylabel('Relative temperature (C)')
plt.show()
# -
# ## Encoding time by color
#
# The screen only has two dimensions, but we can encode another dimension in the scatter plot using color. Here, we will visualize the `climate_change` dataset, plotting a scatter plot of the `"co2"` column, on the x-axis, against the `"relative_temp"` column, on the y-axis. We will encode time using the color dimension, with earlier times appearing as darker shades of blue and later times appearing as brighter shades of yellow.
#
# Instructions
#
# - Using the `ax.scatter` method add a scatter plot of the `"co2"` column (x-axis) against the `"relative_temp"` column.
# - Use the `c` key-word argument to pass in the index of the DataFrame as input to color each point according to its date.
# - Set the x-axis label to `"CO2 (ppm)"` and the y-axis label to `"Relative temperature (C)"`.
# +
fig, ax = plt.subplots()

# Scatter CO2 against relative temperature, colouring each point by its
# position in the DataFrame index so earlier rows map to darker shades.
ax.scatter(climate_change['co2'], climate_change['relative_temp'],
           c=climate_change.index)

ax.set_xlabel('CO2 (ppm)')
ax.set_ylabel('Relative temperature (C)')

plt.show()
| introduction_to_data_visualization_with_matplotlib/3_quantitative_comparisons_and_statistical_visualizations.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.5 64-bit
# language: python
# name: python3
# ---
# # Tweepy Learning Process, begin tweet input
# ### Import Statements
# os, dotenv help to load in api keys/ tokens saved to a .env file. Tweepy provides API for tweet scraping
from os import getenv
from dotenv import load_dotenv
import tweepy
# +
# Load dot env takes a filepath as argument.
# r allows us to read the filepath as a string. Python can be weird about filepaths.
load_dotenv(r"C:\Users\<NAME>\OneDrive\Documents\GitHub\congressional_sentiment_NLP\app\.env")
# Read in and instantiate keys/tokens
twitter_api_key = getenv("API_KEY")
twitter_api_key_secret = getenv("API_KEY_SECRET")
access_token = getenv("ACCESS_TOKEN")
access_token_secret = getenv("ACCESS_TOKEN_SECRET")
# -
# Create authorization and access to scraping api
auth = tweepy.OAuthHandler(twitter_api_key, twitter_api_key_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
print(api)
# +
# Test case to ensure proper access is granted.
congress_member = 'PattyMurray'
public_tweets = api.user_timeline(screen_name=congress_member)
for tweet in public_tweets:
print(tweet.text)
# -
| alex_production/tweepy_learn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from cbayes import sample
from cbayes import distributions
from cbayes import solve
import matplotlib.pyplot as plt
import numpy as np
from scipy.integrate import quad
import scipy.stats as sstats
import ipywidgets as wid
plt.rcParams['font.size'] = 18
# +
def make_model(skew):
    """Return a forward model implementing a fixed 2x2 linear map.

    The map's first component is the x-unit vector and its second row is
    chosen by ``skewmat`` so the resulting 2-to-2 map has skewness ``skew``.
    NOTE: currently this map only works for a 2-D input space.
    """
    def my_model(parameter_samples):
        # Samples are rows, so apply the map by right-multiplying with Q^T.
        Q_map = skewmat(skew)
        QoI_samples = np.dot(parameter_samples, np.transpose(Q_map))
        # QoI_samples = Q_map@parameter_samples.T
        return QoI_samples
    return my_model
def skewmat(skew):
    """Build the 2x2 map [[1, 0], [sqrt(skew^2 - 1), 1]].

    Both rows pair with the unit first row to give a map of skewness `skew`;
    requires skew >= 1 (sqrt of a negative is nan otherwise).
    """
    off_axis = np.sqrt(skew ** 2 - 1)
    return np.array([[1.0, 0.0], [off_axis, 1]])
def gauss_sol(prior_mean, prior_std, data_std, A, data):
    """Closed-form linear-Gaussian posterior via the Kalman update.

    Parameters accept scalars or length-2 lists for the mean/std arguments;
    they are normalized to (2,1) numpy columns. `A` is the 2x2 forward map
    and `data` the observed (2,1) data vector.

    Returns (prior_mean, prior_cov, post_mean, post_cov).
    """
    # Normalize scalar / list inputs to numpy columns.
    if type(prior_mean) is int:
        prior_mean = [prior_mean, prior_mean]
    if type(prior_mean) is float:
        prior_mean = [prior_mean, prior_mean]
    if type(prior_mean) is list:
        prior_mean = np.array(prior_mean).reshape(-1, 1)
    if type(prior_std) is list:
        prior_std = np.array(prior_std).reshape(-1, 1)
    if type(data_std) is list:
        # BUG FIX: was `.reshapea(-1,1)` — an AttributeError whenever
        # data_std was passed as a list.
        data_std = np.array(data_std).reshape(-1, 1)
    # Diagonal covariances built from the (possibly per-component) stds.
    prior_cov = prior_std * prior_std * np.eye(2)
    data_cov = data_std * data_std * np.eye(2)
    # Kalman gain: prior_cov A^T (A prior_cov A^T + data_cov)^{-1}
    ASA = A @ prior_cov @ A.T
    precision = np.linalg.inv(ASA + data_cov)
    kalman_update = prior_cov @ A.T @ precision
    post_mean = prior_mean + kalman_update @ (data - A @ prior_mean)
    post_cov = prior_cov - kalman_update @ A @ prior_cov
    return prior_mean, prior_cov, post_mean, post_cov
def id_model(input_samples):
    """Map each (lambda, epsilon) row to the column vector lambda - epsilon."""
    lam_col = input_samples[:, 0:1]   # lambda is first entry
    eps_col = input_samples[:, 1:2]
    return lam_col - eps_col
# +
def makemodel(t):
    """Return an exponential-decay forward model sampled at times `t`.

    The returned `model` maps rows (a, b) of `lam` to a*exp(-t/2) - b
    evaluated at every time in `t`.
    """
    def model(lam=np.array([[0.5, 0.0]])):
        amplitude = lam[:, 0].reshape(-1, 1)
        offset = lam[:, 1].reshape(-1, 1)
        QoI = amplitude * np.exp(-0.5 * t) - offset
        # A single sample comes back as a column vector for simpler 1-D plotting.
        return QoI.reshape(-1, 1) if QoI.shape[0] == 1 else QoI
    return model
##### FIXED PARAMETERS - DEFINE YOUR EXPERIMENT #####
num_observations = 1 # how many observations are you able to make?
start_time = 1
end_time = 5
####
t = np.linspace(start_time, end_time, num_observations)
ed_model = makemodel(t)
u = ed_model()
# +
n = 50
sd_test_n = 1E-2
# sd_test_u = 0.05
lam = np.random.random(size=(n,2))
lam[:,1]*= sd_test_n
lam[:,1]*= 0.25*0
mintime = 0
maxtime = 5
tt = np.linspace(mintime, maxtime, 1000)
m = makemodel(tt)
ui = m(lam)
um = m()
plt.figure(figsize=(20,10))
plt.cla()
show_obs = True
# Plotting the default IR results from the epidemic_IR function.
for i in range(n):
d = ui[i,:]
if i == n-1:
plt.plot(tt, d, c='k', alpha=5/n, label='Potential Signals')
else:
plt.plot(tt, d, c='k', alpha=5/n) # I
# plt.scatter(t, u + np.random.randn(num_observations)*sd_test_n, s=50, c='k', label='Normal', alpha=0.5)
# plt.scatter(t, u + (np.random.rand(num_observations)-0.5)*sd_test_u,c='b', s=5, label='Uniform')
# plt.scatter(t, u + (np.random.standard_cauchy(num_observations))*sd_test_c,c='g', s=50, marker='x',label='Cauchy')
plt.plot(tt, um, c='k', lw=3, ls='-', label='True Signal')
if show_obs:
plt.scatter(t, u + np.random.randn(num_observations)*sd_test_n, marker='x', s=50, c='k', label='{} Observations'.format(num_observations), alpha=1)
plt.legend(fontsize=18,loc='upper left')
plt.ylabel('Magnitude', fontsize=18)
plt.xlabel('Time (s)', fontsize=18)
plt.ylim([-.05,1.05])
plt.xlim([0,5])
ftype = 'png'
if show_obs:
summary_save_str = "saveimgs/HO/HO_%d_r-%d_obs-%de-3_sd-t_%d_%d_st%d_%d.%s"%(n, num_observations, 1000*sd_test_n, mintime, maxtime, start_time, end_time, ftype)
else:
summary_save_str = "saveimgs/HO/HO_signal-%d_t_%d_%d_st%d_%d.%s"%(n, mintime, maxtime, start_time, end_time, ftype)
# print("saving " + summary_save_str)
# plt.savefig(summary_save_str)
plt.show()
# +
data_std = 1E-2
prior_std = 0.25
N = 10000
M = 1
lam = 0.25
ns = 100
np.random.seed(1865) #7146
noise = np.random.randn()*data_std
lam_true = np.array([lam, 0.0]).reshape(1,-1)
model = id_model
obs_data = model(lam_true) + 0*noise
center = noise # noise | obs_data | lam # FOR Y AXIS
initial_dist = sstats.distributions.norm(scale=[prior_std, data_std], loc=[0,0])
input_samples = initial_dist.rvs(size=(N,2))
print('noise:', noise)
print('observed data:', obs_data)
def loss_fun(output_samples):
    # return (1./M)*np.sum( np.power(np.divide(output_samples - obs_data, data_std), 2), axis=1)
    # return (1./np.sqrt(2*M))*np.sum( np.power(np.divide(output_samples - observed_data, data_std) , 2) - 1.0, axis=1)
    # Standardized misfit: sum the (model - data) residuals over the M
    # observation columns and scale by 1/(data_std*sqrt(M)); depends on the
    # notebook globals M, obs_data, data_std defined in this cell.
    return (1./data_std)*(1./np.sqrt(M))*np.sum( output_samples - obs_data, axis=1)
output_samps = model(input_samples)
output_samples = loss_fun(output_samps)
# obs_dist = sstats.distributions.gamma(a=M/2.0, scale=2.0/M)
obs_dist = sstats.distributions.norm()
def pf_initial_dist_PDF(x):
pf_dist = sstats.gaussian_kde(output_samples)
return pf_dist.evaluate(x)
# a, l, s = sstats.distributions.gamma.fit(output_samples,floc=0)
# fit = sstats.distributions.gamma(a=a,loc=l,scale=s)
# return fit.pdf(x)
pf_eval = pf_initial_dist_PDF(output_samples)
obs_eval = obs_dist.pdf(output_samples)
ratio = np.divide(obs_eval, pf_eval)
print('ratio mean', ratio.mean())
print('center:', center)
# eval_pts = np.zeros((ns,2))
# eval_pts[:,0] = obs_data[0]*.99999
eval_L = np.linspace(-2*prior_std, 2*prior_std, ns)
eval_E = np.linspace(-2*data_std, 2*data_std, ns)
eval_pts = np.meshgrid(eval_L, eval_E)
eval_points = np.concatenate([eval_pts[0].ravel().reshape(-1,1), eval_pts[1].ravel().reshape(-1,1)],axis=1)
# eval_pts.reshape(ns,ns)
L = eval_pts[0].reshape(ns,ns)
E = eval_pts[1].reshape(ns,ns)
def eval_updated(x):
y = loss_fun(model(x))
return np.product(initial_dist.pdf(x),axis=1)*np.divide(obs_dist.pdf(y), pf_initial_dist_PDF(y))
post_eval = eval_updated(eval_points).reshape(ns,ns)
# eval_points
plt.figure(figsize=(10,10))
plt.contourf(L,E,post_eval, vmin=0, vmax=None)
# plt.vlines(obs_data[0],-.5, .5,alpha=0.5)
# plt.vlines(0,-.5, .5,alpha=0.5)
# plt.hlines(noise, -prior_std + obs_data, prior_std + obs_data, label='data $\pm 0.05\sigma_\lambda$')
plt.scatter([lam],[0*noise], color='black', s=150)
plt.scatter([lam],[0*noise], color='white', s=50, label='true lambda')
# plt.scatter(accepted_inputs[:,0], accepted_inputs[:,1])
plt.legend()
# plt.scatter(o[:,0], o[:,1])
try:
plt.plot([-2*data_std+obs_data[0][0],2*data_std+obs_data[0][0]], [-2*data_std,2*data_std], 'white', alpha=0.3)
except IndexError:
plt.plot([-2*data_std+obs_data,2*data_std+obs_data], [-2*data_std,2*data_std], 'white', alpha=0.3)
# plt.axis('equal')
# plt.savefig('ZZtestpost.png')]
def conditionalY(x, fixy = noise):
    # Slice of the updated density along the lambda axis, with the noise
    # coordinate held at `fixy` (default: the notebook-global realized noise).
    return eval_updated(np.array([[x, fixy]]))

def conditionalX(y, fixx = lam):
    # Slice of the updated density along the noise axis, with lambda held
    # at `fixx` (default: the notebook-global true lambda).
    return eval_updated(np.array([[fixx, y]]))
I = quad(conditionalY, -1, 1, args=noise)
# I = quad(conditionalX, -2*data_std, 2*data_std, args=lam-1E-8)
print('integral of conditional on noise:', I[0], 'error:', I[1])
plt.show()
# -
input_samples_fix_noise = input_samples.copy()
input_samples_fix_noise[:,1] = noise
output_samps_fix_noise = model(input_samples_fix_noise)
output_samples_fix_noise = loss_fun(output_samps_fix_noise)
# +
def see_difference(eps):
input_samples_fix_noise = input_samples.copy()
input_samples_fix_noise[:,1] = eps
output_samps_fix_noise = model(input_samples_fix_noise)
output_samples_fix_noise = loss_fun(output_samps_fix_noise)
w = 5 # window size (to the left and right of zero)
plt.figure(figsize=(20,10))
plt.hist(output_samples, bins=40, density=True, color='b', alpha = 0.5, label='histogram of loss function for variable noise')
plt.hist(output_samples_fix_noise, bins=40, density=True, alpha = 0.5, color='r', label='histogram of loss function for fixed noise')
xmesh = np.linspace(-w, w, 1000)
full_eval = sstats.gaussian_kde(output_samples).evaluate(xmesh)
part_eval = sstats.gaussian_kde(output_samples_fix_noise).evaluate(xmesh)
plt.plot(xmesh, full_eval, c='b', label='estimate of density for full problem', lw=3)
plt.plot(xmesh, part_eval, c='r', label='estimate of density for approx problem', lw=3)
plt.plot(xmesh, obs_dist.pdf(xmesh), 'k:', label='observed')
plt.title('Data Space')
plt.xlim([-w, w])
plt.ylim([0, 0.025])
plt.legend()
plt.show()
ratio_diff = np.abs( (full_eval - part_eval)/full_eval )
print(ratio_diff.min(), ratio_diff.max())
print(100*ratio_diff.mean(), ratio_diff.std())
print(output_samples_fix_noise.min(), output_samples_fix_noise.max())
print(output_samples.min(), output_samples.max())
eps_choice = list(np.linspace(-0.025, 0.025, 11))
wid.interact(see_difference, eps=wid.SelectionSlider(value=0, options=eps_choice, continuous_update=False),
regularize=wid.Checkbox(value=True))
# -
# # Observed Noise
obs_data[0][0] + noise
noise
# +
def noise_vary(eps, regularize=False):
test_mesh = np.linspace(0,0.5,500)
I = quad(conditionalY, -1, 1, args=eps)[0]
test_eval = np.array([conditionalY(xi, fixy=eps) for xi in test_mesh])
test_eval = test_eval/(1+regularize*(I-1))
plt.figure(figsize=(20,10))
# plt.plot(test_mesh, sstats.norm(scale=data_std).pdf(test_mesh))
plt.vlines(obs_data[0][0] + noise, 0, 2, label='$d$')
plt.vlines(lam, 0, test_eval.max(), color='blue', label='$\lambda_0$')
plt.plot(test_mesh, sstats.norm(loc=lam, scale=data_std).pdf(test_mesh), label='N(0,$\sigma_d$)')
plt.plot(test_mesh, test_eval, c='r', label='conditional')
plt.xlabel('$\lambda$')
if regularize:
plt.ylim([0,75])
else:
plt.ylim([0,1000])
plt.legend()
plt.show()
return I
def lam_vary(lam0, regularize=False):
ww = 0.05
test_mesh = np.linspace(-ww, ww, 500)
I = quad(conditionalX, -ww, ww, args=lam0)[0]
test_eval = np.array([conditionalX(xi, fixx=lam0) for xi in test_mesh])
test_eval = test_eval/(1+regularize*(I-1))
plt.figure(figsize=(20,10))
plt.plot(test_mesh, sstats.norm(scale=data_std).pdf(test_mesh), label='N(0,$\sigma_d$)')
# plt.vlines(noise, 0, test_eval.max(), label='noise')
plt.vlines(0, 0, test_eval.max(), color='blue', label='0')
plt.plot(test_mesh, test_eval, c='r', label='conditional')
plt.xlabel('$\epsilon$')
if regularize:
plt.ylim([0,75])
else:
plt.ylim([0,1000])
plt.legend()
plt.show()
return I
# -
# # Slice through a particular noise value.
eps_choice = list(np.linspace(-0.025, 0.025, 21))
wid.interact(noise_vary, eps=wid.SelectionSlider(value=0, options=eps_choice, continuous_update=False),
regularize=wid.Checkbox(value=True))
# # Slice through a particular input parameter value.
lam_choice = list(np.linspace(0.2, 0.3, 21))
wid.interact(lam_vary, lam0=wid.SelectionSlider(value=lam, options=lam_choice, continuous_update=False),
regularize=wid.Checkbox(value=True))
test_mesh = np.linspace(-1,1,500)
test_eval = [conditionalX(xi) for xi in test_mesh]
plt.plot(test_mesh, sstats.norm(scale=data_std).pdf(test_mesh))
plt.plot(test_mesh, test_eval)
# plt.contourf(L, E, post_eval)
post_eval.shape
plt.figure(figsize=(20,10))
marg_L = np.sum(post_eval,axis=0)
plt.plot(eval_L.ravel(), marg_L, label='marginal_posterior')
plt.xlabel('$\lambda$')
plt.title('Marginal of Noise Space')
# plt.vlines(obs_data,0,100,'r', label='obs data')
plt.vlines(lam,0,100,'k', label='lam true')
# plt.ylim([0,1])
plt.legend()
plt.show()
plt.figure(figsize=(20,10))
marg_E = np.sum(post_eval,axis=1)
marg_E = marg_E
plt.plot(eval_E.ravel(), marg_E, label='marginal_posterior')
# plt.plot(eval_E, sstats.distributions.norm.pdf(eval_E, loc=0,scale=data_std))
plt.xlabel('$\epsilon$')
plt.title('Marginal of Data Space')
plt.vlines(noise, 0, 200, 'r', label='noise')
plt.vlines(0, 0, 200, label='mean zero')
plt.legend()
plt.show()
# # Old Code
| examples/1D_example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Navigation
#
# ---
#
# You are welcome to use this coding environment to train your agent for the project. Follow the instructions below to get started!
#
# ## 0. Install the prerequisites
# ---
#
# Run the next code cell to install a few packages. This line will take a few minutes to run!
# !pip -q install ./python
# ## 1. Defining Agent, Env, Training Process
# ------
# ### Defining a Q Network
#
# A Q network that will approximate the state-action value function
# +
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import random
from collections import namedtuple, deque
import matplotlib.pyplot as plt
from unityagents import UnityEnvironment
import numpy as np
# -
class QNetwork(nn.Module):
    """Actor (Policy) Model.

    Fully connected state-action value approximator:
    state_size -> fc1_units -> fc2_units -> action_size,
    with ReLU activations on both hidden layers.
    """

    def __init__(self, state_size, action_size, seed, fc1_units=74, fc2_units=74):
        """Initialize parameters and build model.
        Params
        ======
            state_size (int): Dimension of each state
            action_size (int): Dimension of each action
            seed (int): Random seed
            fc1_units (int): Number of nodes in first hidden layer
            fc2_units (int): Number of nodes in second hidden layer
        """
        super(QNetwork, self).__init__()
        # Seed before layer construction so weight init is reproducible.
        self.seed = torch.manual_seed(seed)
        self.fc1 = nn.Linear(state_size, fc1_units)
        self.fc2 = nn.Linear(fc1_units, fc2_units)
        self.fc3 = nn.Linear(fc2_units, action_size)

    def forward(self, state):
        """Map a batch of states to per-action value estimates."""
        hidden = F.relu(self.fc1(state))
        hidden = F.relu(self.fc2(hidden))
        return self.fc3(hidden)
# ### Defining the Agent
# +
BUFFER_SIZE = int(1e5) # replay buffer size
BATCH_SIZE = 64 # minibatch size
GAMMA = 0.99 # discount factor
TAU = 1e-3 # for soft update of target parameters
LR = 5e-4 # learning rate
UPDATE_EVERY = 4 # how often to update the network
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
class Agent():
    """Interacts with and learns from the environment.

    Standard DQN agent: epsilon-greedy policy over a local Q-network,
    trained from uniformly sampled replay memory, with a target network
    blended toward the local one (soft update) for stable TD targets.
    """

    def __init__(self, state_size, action_size, seed):
        """Initialize an Agent object.

        Params
        ======
            state_size (int): dimension of each state
            action_size (int): dimension of each action
            seed (int): random seed
        """
        self.state_size = state_size
        self.action_size = action_size
        self.seed = random.seed(seed)

        # Q-Network: `local` is optimized each learning step; `target`
        # is nudged toward `local` via soft_update after each learn().
        self.qnetwork_local = QNetwork(state_size, action_size, seed).to(device)
        self.qnetwork_target = QNetwork(state_size, action_size, seed).to(device)
        self.optimizer = optim.Adam(self.qnetwork_local.parameters(), lr=LR)

        # Replay memory
        self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, seed)
        # Initialize time step (for updating every UPDATE_EVERY steps)
        self.t_step = 0

    def step(self, state, action, reward, next_state, done):
        """Record one transition and trigger learning every UPDATE_EVERY calls."""
        # Save experience in replay memory
        self.memory.add(state, action, reward, next_state, done)

        # Learn every UPDATE_EVERY time steps.
        self.t_step = (self.t_step + 1) % UPDATE_EVERY
        if self.t_step == 0:
            # If enough samples are available in memory, get random subset and learn
            if len(self.memory) > BATCH_SIZE:
                experiences = self.memory.sample()
                self.learn(experiences, GAMMA)

    def get_action(self, state, eps=0.):
        """Returns actions for given state as per current policy.

        Params
        ======
            state (array_like): current state
            eps (float): epsilon, for epsilon-greedy action selection
        """
        state = torch.from_numpy(state).float().unsqueeze(0).to(device)
        # Evaluate in inference mode, then restore training mode.
        self.qnetwork_local.eval()
        with torch.no_grad():
            action_values = self.qnetwork_local(state)
        self.qnetwork_local.train()

        # Epsilon-greedy action selection
        if random.random() > eps:
            return np.argmax(action_values.cpu().data.numpy())
        else:
            return random.choice(np.arange(self.action_size))

    def learn(self, experiences, gamma):
        """Update value parameters using given batch of experience tuples.

        Params
        ======
            experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples
            gamma (float): discount factor
        """
        states, actions, rewards, next_states, dones = experiences
        # print("next_state shapes: ", next_states.shape)

        # Get max predicted Q values (for next states) from target model
        Q_targets_next = self.qnetwork_target(next_states).detach().max(1)[0].unsqueeze(1)
        # Compute Q targets for current states; (1 - dones) zeroes the
        # bootstrap term for terminal transitions.
        Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))

        # Get expected Q values from local model
        ## gather() picks each row's Q value for the action actually taken,
        ## more here https://stackoverflow.com/a/54706716/6054066
        ## effectively Q_expected will have the expected reward value of each state-action pair
        Q_expected = self.qnetwork_local(states).gather(1, actions)

        # Compute loss
        loss = F.mse_loss(Q_expected, Q_targets)
        # Minimize the loss
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

        # ------------------- update target network ------------------- #
        self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU)

    def soft_update(self, local_model, target_model, tau):
        """Soft update model parameters.
        θ_target = τ*θ_local + (1 - τ)*θ_target

        Params
        ======
            local_model (PyTorch model): weights will be copied from
            target_model (PyTorch model): weights will be copied to
            tau (float): interpolation parameter
        """
        for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
            target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)
# -
# ### Defining a ReplayBuffer (to store memories to)
class ReplayBuffer:
    """Fixed-size store of experience tuples with uniform random sampling.

    Oldest entries are evicted automatically once `buffer_size` is reached.
    """

    def __init__(self, action_size, buffer_size, batch_size, seed):
        """Initialize a ReplayBuffer object.

        Params
        ======
            action_size (int): dimension of each action
            buffer_size (int): maximum size of buffer
            batch_size (int): size of each training batch
            seed (int): random seed
        """
        self.action_size = action_size
        self.memory = deque(maxlen=buffer_size)
        self.batch_size = batch_size
        self.experience = namedtuple("Experience", field_names=["state", "action", "reward", "next_state", "done"])
        self.seed = random.seed(seed)

    def add(self, state, action, reward, next_state, done):
        """Append one transition, evicting the oldest when the buffer is full."""
        self.memory.append(self.experience(state, action, reward, next_state, done))

    def sample(self):
        """Randomly sample a batch of experiences and stack each field into a tensor."""
        batch = random.sample(self.memory, k=self.batch_size)

        def as_tensor(field, dtype=None):
            # Stack one named field across the batch into an (batch, ...) array.
            stacked = np.vstack([getattr(e, field) for e in batch if e is not None])
            if dtype is not None:
                stacked = stacked.astype(dtype)
            return torch.from_numpy(stacked)

        states = as_tensor("state").float().to(device)
        actions = as_tensor("action").long().to(device)
        rewards = as_tensor("reward").float().to(device)
        next_states = as_tensor("next_state").float().to(device)
        dones = as_tensor("done", np.uint8).float().to(device)
        return (states, actions, rewards, next_states, dones)

    def __len__(self):
        """Return the current size of internal memory."""
        return len(self.memory)
# ### Defining the network training process
# 0. Define the monitoring of the process
# 1. Define first one episode
# 2. Define a training loop for multiple episodes
# 3. Define a testing loop for a single episode with no training
def monitor(i_episode, scores_window, score_batch):
    """Print an in-place progress line every `score_batch` episodes."""
    if i_episode % score_batch != 0:
        return
    avg = np.mean(scores_window)
    print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, avg), end="")
def one_episode(env, agent, brain_name, max_t=10, eps=1.0):
    """Run one training episode of at most `max_t` steps and return its score.

    Params
    ======
        env: Unity environment
        agent: agent with get_action(state, eps) and step(...) methods
        brain_name (str): name of the brain to control
        max_t (int): maximum number of steps per episode
        eps (float): epsilon for epsilon-greedy action selection
    """
    env_info = env.reset(train_mode=True)[brain_name]  # reset the environment
    state = env_info.vector_observations[0]            # get the current state
    score = 0                                          # initialize the score
    for t in range(max_t):
        # BUG FIX: act and learn with the `agent` argument, not the notebook
        # global `dqn_agent` — otherwise every caller trains the same agent
        # regardless of which one it passes in.
        action = agent.get_action(state, eps)          # select an action
        env_info = env.step(action)[brain_name]        # send the action to the environment
        next_state = env_info.vector_observations[0]   # get the next state
        reward = env_info.rewards[0]                   # get the reward
        done = env_info.local_done[0]                  # see if episode has finished
        agent.step(state, action, reward, next_state, done)
        score += reward                                # update the score
        state = next_state                             # roll over the state to next time step
        if done:                                       # exit loop if episode finished
            break
    return score
def run_training(env, agent, brain_name, score_batch = 100, n_episodes=2000, max_t=1000, eps_start=1.0, eps_end=0.01, eps_decay=0.995, ):
    """Train `agent` for `n_episodes` episodes with decaying epsilon.

    Params
    ======
        env: Unity environment
        agent: agent trained in-place via one_episode
        brain_name (str): brain to control
        score_batch (int): window length for the running-average report
        n_episodes (int): number of episodes to run
        max_t (int): maximum steps per episode
        eps_start/eps_end/eps_decay (float): epsilon schedule parameters

    Returns the list of per-episode scores.
    """
    scores = []
    # Sliding window of the most recent `score_batch` scores for reporting.
    scores_window = deque(maxlen=score_batch)
    eps = eps_start
    for i_episode in range(1, n_episodes+1):
        score = one_episode(env, agent, brain_name, max_t, eps)
        scores.append(score)
        scores_window.append(score)
        monitor(i_episode, scores_window, score_batch)
        # Multiplicative epsilon decay, floored at eps_end.
        eps = max(eps_end, eps_decay*eps)
    return scores
def one_episode_test(env, agent, brain_name, max_t=10):
    """Run one evaluation episode (no learning, greedy policy) and return its score.

    Params
    ======
        env: Unity environment
        agent: agent with a get_action(state, eps) method
        brain_name (str): name of the brain to control
        max_t (int): maximum number of steps per episode
    """
    env_info = env.reset(train_mode=False)[brain_name]  # reset the environment
    state = env_info.vector_observations[0]             # get the current state
    score = 0                                           # initialize the score
    for t in range(max_t):
        # BUG FIX: use the passed-in `agent` (not the global `dqn_agent`) and
        # an explicit greedy epsilon of 0.0 — `eps` was previously an
        # undefined name inside this function.
        action = agent.get_action(state, 0.0)           # select an action greedily
        env_info = env.step(action)[brain_name]         # send the action to the environment
        next_state = env_info.vector_observations[0]    # get the next state
        reward = env_info.rewards[0]                    # get the reward
        done = env_info.local_done[0]                   # see if episode has finished
        score += reward                                 # update the score
        state = next_state                              # roll over the state to next time step
        if done:                                        # exit loop if episode finished
            break
    return score
# ## 2. Creating Agent, Environment, then Training the network and plotting it
# --------
# ### Creating the environment and training the network
def create_env(file_name):
    """Launch the Unity environment at `file_name` and report its dimensions.

    Returns (env, state_size, action_size, brain_name).
    """
    # please do not modify the line below
    env = UnityEnvironment(file_name=file_name)

    # get the default brain (the controller we send actions to)
    brain_name = env.brain_names[0]
    brain = env.brains[brain_name]

    # reset the environment
    env_info = env.reset(train_mode=True)[brain_name]

    # number of agents in the environment
    print('Number of agents:', len(env_info.agents))

    # number of actions
    action_size = brain.vector_action_space_size
    print('Number of actions:', action_size)

    # examine the state space
    state = env_info.vector_observations[0]
    print('States look like:', state)
    state_size = len(state)
    print('States have length:', state_size)
    return env, state_size, action_size, brain_name
env, state_size, action_size, brain_name = create_env("/data/Banana_Linux_NoVis/Banana.x86_64")
# ### Check how long an episode terminates
env_info = env.reset(train_mode=False)[brain_name] # reset the environment
state = env_info.vector_observations[0] # get the current state
score = 0 # initialize the score
num_term_steps=0
while True:
action = np.random.randint(action_size) # select an action
env_info = env.step(action)[brain_name] # send the action to the environment
next_state = env_info.vector_observations[0] # get the next state
reward = env_info.rewards[0] # get the reward
done = env_info.local_done[0] # see if episode has finished
score += reward # update the score
state = next_state # roll over the state to next time step
num_term_steps+=1
if done: # exit loop if episode finished
break
print("Max steps: ", num_term_steps)
# ## 3. Training the agent with the base DQN Network
# ---
# ### Create the agent, train it and save the trained network into a .pth file that can be loaded later.
# +
seed = 0
dqn_agent = Agent(state_size, action_size, seed)
score_batch = 100
n_episodes=2000
max_t=300
scores = run_training(env, dqn_agent, brain_name, score_batch = score_batch, n_episodes= n_episodes, max_t=max_t)
name = "checkpoint_" + str(score_batch) + "_"+str(n_episodes) + "_" + str(max_t) + "_dqn_base.pth"
torch.save(dqn_agent.qnetwork_local.state_dict(), name)
# -
# ### Plot the network performance
fig = plt.figure()
ax = fig.add_subplot(111)
plt.plot(np.arange(len(scores)), scores)
plt.ylabel('Score')
plt.xlabel('Episode #')
plt.show()
# ### Watch the trained agent interact
score = one_episode_test(env, dqn_agent, brain_name, 300)
print(score)
# ## 4. Train a DQN Agent with Prioritized Learning
# ---
# ### Define a new Replay Buffer with Prioritized Replay sampling
class PL_ReplayBuffer:
    """Fixed-size buffer to store experience tuples, sampled by priority.

    Each experience carries a priority (|TD error| + e) ** a; sampling is
    proportional to normalized priorities and importance-sampling weights
    (1/(N*P(i))) ** beta are returned to correct the induced bias.
    """

    def __init__(self, action_size, buffer_size, batch_size, seed, e=0.01, a=0.70, beta=0.4):
        """Initialize a ReplayBuffer object.

        Params
        ======
            action_size (int): dimension of each action
            buffer_size (int): maximum size of buffer
            batch_size (int): size of each training batch
            seed (int): random seed
            e (float): floor added to |error| so zero-error samples keep nonzero priority
            a (float): priority exponent (0 = uniform sampling, 1 = fully prioritized)
            beta (float): importance-sampling exponent
        """
        self.action_size = action_size
        self.memory = deque(maxlen=buffer_size)
        self.batch_size = batch_size
        self.experience = namedtuple("Experience", field_names=["state", "action", "reward", "next_state", "done", "priority"])
        self.seed = random.seed(seed)
        self.e = e
        self.a = a
        self.beta = beta

    def add(self, state, action, reward, next_state, done, error):
        """Add a new experience to memory with priority derived from `error`."""
        priority = self.get_priority(error)
        e = self.experience(state, action, reward, next_state, done, priority)
        self.memory.append(e)

    def update(self, errors, ids):
        """Overwrite the priorities of the experiences at indices `ids` with
        priorities derived from their freshly computed TD `errors`.

        NOTE(review): `ids` are positions captured at sample() time; if the
        deque has evicted/appended entries since, they may point at different
        experiences — confirm sample/update are always called back-to-back.
        """
        for ii, _id in enumerate(ids):
            state, action, reward, next_state, done, priority = self.memory[_id]
            # `[0]` assumes each errors[ii] is a length-1 array (one TD error
            # per transition, shape (batch, 1) from PL_Agent.learn).
            new_priority = self.get_priority(errors[ii])[0]
            e = self.experience(state, action, reward, next_state, done, new_priority)
            self.memory[_id] = e

    def get_priority(self, error):
        """Map a TD error to a sampling priority: (|error| + e) ** a."""
        return (abs(error) + self.e) ** self.a

    def sample(self):
        """Sample a batch of experiences from memory according to their normalized errors -> probabilities."""
        # Normalize stored priorities into a probability distribution.
        priorities = np.array([e.priority for e in self.memory if e is not None])
        sum_errors = np.array(np.sum(priorities))
        probabilities = priorities/sum_errors

        # Select experiences according to probabilities
        weighted_random_indices = np.random.choice(len(probabilities), size=self.batch_size, p=probabilities)
        experiences = [self.memory[i] for i in weighted_random_indices]
        probabilities_of_chosen = np.array([probabilities[i] for i in weighted_random_indices])

        states = torch.from_numpy(np.vstack([e.state for e in experiences if e is not None])).float().to(device)
        actions = torch.from_numpy(np.vstack([e.action for e in experiences if e is not None])).long().to(device)
        rewards = torch.from_numpy(np.vstack([e.reward for e in experiences if e is not None])).float().to(device)
        next_states = torch.from_numpy(np.vstack([e.next_state for e in experiences if e is not None])).float().to(device)
        dones = torch.from_numpy(np.vstack([e.done for e in experiences if e is not None]).astype(np.uint8)).float().to(device)
        # Importance-sampling weights correcting the non-uniform sampling bias.
        imp_samp = torch.from_numpy(np.power(((1/len(self.memory)) * (1/probabilities_of_chosen)), self.beta)).float().to(device)

        return (states, actions, rewards, next_states, dones), imp_samp, weighted_random_indices

    def __len__(self):
        """Return the current size of internal memory."""
        return len(self.memory)
# ### Define a new agent which uses a Prioritized Replay buffer and Importance Sampling
# +
BUFFER_SIZE = int(1e5) # replay buffer size
BATCH_SIZE = 64 # minibatch size
GAMMA = 0.99 # discount factor
TAU = 1e-3 # for soft update of target parameters
LR = 5e-4 # learning rate
UPDATE_EVERY = 4 # how often to update the network
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
class PL_Agent():
"""Interacts with and learns from the environment."""
def __init__(self, state_size, action_size, seed):
"""Initialize an Agent object.
Params
======
state_size (int): dimension of each state
action_size (int): dimension of each action
seed (int): random seed
"""
self.state_size = state_size
self.action_size = action_size
self.seed = random.seed(seed)
# Q-Network
self.qnetwork_local = QNetwork(state_size, action_size, seed).to(device)
self.qnetwork_target = QNetwork(state_size, action_size, seed).to(device)
self.optimizer = optim.Adam(self.qnetwork_local.parameters(), lr=LR)
# Replay memory
self.memory = PL_ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, seed)
# Initialize time step (for updating every UPDATE_EVERY steps)
self.t_step = 0
def get_action(self, state, eps=0.):
"""Returns actions for given state as per current policy.
Params
======
state (array_like): current state
eps (float): epsilon, for epsilon-greedy action selection
"""
state = torch.from_numpy(state).float().unsqueeze(0).to(device)
self.qnetwork_local.eval()
with torch.no_grad():
action_values = self.qnetwork_local(state)
self.qnetwork_local.train()
# Epsilon-greedy action selection
if random.random() > eps:
return np.argmax(action_values.cpu().data.numpy())
else:
return random.choice(np.arange(self.action_size))
def step(self, state, action, reward, next_state, done):
# Save experience in replay memory
experience=(state, action, reward, next_state, done)
# error = self.calc_Q_targets(experience, GAMMA)
error = 0.0
self.memory.add(state, action, reward, next_state, done, error)
# Learn every UPDATE_EVERY time steps.
self.t_step = (self.t_step + 1) % UPDATE_EVERY
if self.t_step == 0:
# If enough samples are available in memory, get random subset and learn
if len(self.memory) > BATCH_SIZE:
experiences, imp_samp, ids = self.memory.sample()
self.learn(experiences, imp_samp, ids, GAMMA)
# def calc_Q_targets(self, experience, gamma):
# state, action, reward, next_state, done = experience
# conv_state = torch.from_numpy(state).float().unsqueeze(0).to(device)
# conv_next_state = torch.from_numpy(next_state).float().unsqueeze(0).to(device)
# # Get max predicted Q values (for next states) from target model
# Q_targets_next = self.qnetwork_target(conv_next_state).detach().max(1)[0].unsqueeze(1)
# # Compute Q targets for current states
# Q_targets_target = reward + (gamma * Q_targets_next * (1 - done))
# Q_target_expected = self.qnetwork_target(conv_state).detach().max(1)[0].unsqueeze(1)
# error = Q_targets_target - Q_target_expected
# return error
def learn(self, experiences, imp_samp, ids, gamma):
"""Update value parameters using given batch of experience tuples.
Params
======
experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples
gamma (float): discount factor
"""
states, actions, rewards, next_states, dones = experiences
# Get max predicted Q values (for next states) from target model
Q_targets_next = self.qnetwork_target(next_states).detach().max(1)[0].unsqueeze(1)
# Compute Q targets for current states
Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))
# Get expected Q values from local model
Q_expected = self.qnetwork_local(states).gather(1, actions)
# calculate TD Errors and update priority
errors = Q_expected - Q_targets
self.memory.update(errors.cpu().detach().numpy(), ids)
# Compute loss
# print("IMP SAMP")
# print("shape: ", imp_samp.shape)
# print(imp_samp)
# print("IDS")
# print("shape: ", ids.shape)
# print(ids)
# print("MSE LOSS")
mse_loss = F.mse_loss(Q_expected, Q_targets, reduce =False).view(-1)
# print("shape: ", mse_loss.shape)
# print(mse_loss)
# print("Simple loss")
delta_w = imp_samp * mse_loss
# print(delta_w)
loss = torch.sum(delta_w)
# print("LOSS Mean")
# print(loss)
# print("==============")
# print("==============")
# Minimize the loss
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
def soft_update(self, local_model, target_model, tau):
    """Soft-update model parameters: θ_target = τ*θ_local + (1 - τ)*θ_target.

    Params
    ======
        local_model (PyTorch model): weights will be copied from
        target_model (PyTorch model): weights will be copied to
        tau (float): interpolation parameter
    """
    for src, dst in zip(local_model.parameters(), target_model.parameters()):
        blended = tau * src.data + (1.0 - tau) * dst.data
        dst.data.copy_(blended)
# -
# ### Create agent and run training with Prioritized Replay
# +
seed = 0
# Train the prioritized-replay agent and checkpoint its online network.
dqn_pl_agent = PL_Agent(state_size, action_size, seed)
score_batch = 100
n_episodes = 2000
max_t = 300
scores = run_training(env, dqn_pl_agent, brain_name, score_batch = score_batch, n_episodes=n_episodes, max_t=max_t)
name = "checkpoint_" + str(score_batch) + "_" + str(n_episodes) + "_" + str(max_t) + "_dqn_pl.pth"
# BUG FIX: save the weights of the agent that was just trained (dqn_pl_agent),
# not the earlier `dqn_agent` instance.
torch.save(dqn_pl_agent.qnetwork_local.state_dict(), name)
# -
# ### Plot the performance
# Plot the per-episode scores accumulated during training.
fig = plt.figure()
ax = fig.add_subplot(111)
plt.plot(np.arange(len(scores)), scores)
plt.ylabel('Score')
plt.xlabel('Episode #')
plt.show()
# ### Watch the trained agent interact
# NOTE(review): `one_episode_test` is defined earlier in the notebook;
# presumably it runs a single greedy rollout of up to 300 steps — confirm.
score = one_episode_test(env, dqn_pl_agent, brain_name, 300)
print(score)
# ## 5. End the session
# ---
env.close()
| DQN Agent - Navigation Environment/Navigation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import keras
import pandas
from keras_tqdm import TQDMNotebookCallback
from sklearn import preprocessing
# Load the training data as a raw numpy array (pandas consumes the header row).
data = np.array(pandas.read_csv("~/trainingdata.csv", header=0))
print(data.shape)
# +
from sympy import *
init_printing(use_latex=True)
import matplotlib.pyplot as plt
# %matplotlib inline
# First 6 columns are features; standardize to zero mean / unit variance.
X = data[:,0:6]
X = preprocessing.scale(X)
print(X.shape)
display(X)
# Column 6 holds the class labels; one-hot encode them for the softmax output.
labels = data[:,6]
print(labels.shape)
display(labels)
Y = keras.utils.to_categorical(labels, len(np.unique(labels)))
# -
input_size = X.shape[1]
output_size = Y.shape[1]
display(X.shape[1])
# +
# Three hidden layers of 100 ReLU units; `input_dim` is only meaningful on the
# first layer (Keras ignores it on subsequent layers, so it is dropped there).
model = keras.models.Sequential()
model.add(keras.layers.Dense(100, input_dim=6, activation='relu', bias_initializer=keras.initializers.Constant(value=0.01)))
model.add(keras.layers.Dense(100, activation='relu', bias_initializer=keras.initializers.Constant(value=0.01)))
model.add(keras.layers.Dense(100, activation='relu', bias_initializer=keras.initializers.Constant(value=0.01)))
model.add(keras.layers.Dense(3, activation='softmax'))
# BUG FIX: with a 3-class softmax output and one-hot targets, the correct loss
# is categorical_crossentropy; binary_crossentropy treats each output unit
# independently and reports misleadingly high accuracy.
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
print(model.summary())
# -
# Train for 100 epochs, holding out 25% of the data for validation.
history = model.fit(X, Y,
                    batch_size=56,
                    epochs=100,
                    verbose=0,
                    callbacks=[TQDMNotebookCallback()],
                    validation_split = 0.25)
# +
plt.figure(1)
plt.subplot(211)
# NOTE(review): the 'acc'/'val_acc' history keys belong to older Keras
# releases; newer versions use 'accuracy'/'val_accuracy' — confirm against
# the installed version.
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.subplot(212)
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.tight_layout()
plt.show()
# NOTE(review): this evaluates on the full training set (X, Y), so the printed
# "Test" numbers are not held-out test metrics.
score = model.evaluate(X, Y, verbose=1)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# -
| .ipynb_checkpoints/Project_Multilayer_Net-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # CS7290 Causal Modeling in Machine Learning: Homework 3
# ## <NAME>
# ### Topics
#
# • Recognizing valid adjustment sets
#
# • Covariate adjustment with parent and back-door criterion
#
# • Front-door criterion
#
# • Propensity matching and inverse probability weighting
#
# • Intro to structural causal models
# +
from IPython.display import Image
#Pyro imports
import matplotlib.pyplot as plt
import numpy as np
import torch
import pyro
import pyro.distributions as dist
import pandas as pd
import random
pyro.set_rng_seed(100)
# -
# ### Question 1: Valid adjustment sets
# #### 1.1 The following DAG represents a causal model of user behavior in an app.
Image(filename='1.png')
# #### U represents the user specific preferences. X represents the introduction of a feature designed to make users make certain in-app purchases, Y was whether or not the user made the purchase, W represents app usage after the feature is introduced.
# #### 1.1.a You are interested in estimating the causal effect of X on Y. What is the valid adjustment set? A valid adjustment set is a set of variables that, when adjusted for, yields an unbiased estimate of the causal effect. (3 points)
# The valid adjustment set here is the null set.
#
# This is because there is no other backdoor path.
# #### 1.1.b What would happen if you adjusted for W? Be specific. (2 points)
# If we adjusted for $W$, this will activate the path $U\leftarrow W\rightarrow X$ and therefore there would be a backdoor path to $Y$.
# #### 1.1.c Suppose you want to assess the effect of X on Y for users who go on to have a high amount of app usage. Fill in the blanks on the right-hand-side and left-hand-side for the adjustment formula of interest:
#
# $$P(Y=y|do(X=x), W=high) = \sum_{?}P(Y=y|?)P(?|?)$$
# $$P(Y=y|do(X=x),W=high) = \sum_{u}P(Y=y|X=x, W=high, U=u)P(U=u|W=high)$$
# #### 1.2 Consider the following DAG.
Image(filename='12.png')
# #### You are interest in estimating the causal effect of X on Y.
# #### 1.2.a Is the set containing only Z a valid adjustment set? Why or why not? (2 points)
# No. Conditioning on $Z$ alone makes the collider $A\rightarrow Z \leftarrow E$ active, so we need to condition either on $A$ or $E$ as well.
# #### 1.2.b List all of the adjustment sets that blocks all the back doors(there are three) and write the adjustment formula for each adjustment set. (3 points)
# So the adjustment sets are $\{Z,E\}, \{Z,A\}, \{A,E,Z\}$
#
# Formulas are $\sum_{z,e}P(Y=y|X=x,Z=z,E=e)P(Z=z|E=e)P(E=e)$
#
# $\sum_{z,a}P(Y=y|X=x,A=a,Z=z)P(Z=z|A=a)P(A=a)$
#
# and $\sum_{a,e,z}P(Y=y|X=x,A=a,E=e,Z=z)P(Z=z|A=a,E=e)P(A=a)P(E=e)$
# #### 1.2.c Suppose that E and A are both observable, but observing E costs \\$10 per data point and observing A costs \\$5 per data point. Which conditioning set do you go with? (1 point)
# It's best to go with the set $\{Z,A\}$.
# #### 1.3 Consider the following DAG:
Image(filename='13.png')
# #### 1.3.a List all of the sets of variables that satisfy the backdoor criterion to determine the causal effect of X on Y. (3 points)
# $$
# \{A,Z\}\\
# \{B,Z\}\\
# \{C,Z\}\\
# \{D,Z\}\\
# \{A,B,Z\}\\
# \{A,C,Z\}\\
# \{A,D,Z\}\\
# \{B,C,Z\}\\
# \{B,D,Z\}\\
# \{C,D,Z\}\\
# \{A,B,C,Z\}\\
# \{A,B,D,Z\}\\
# \{B,C,D,Z\}\\
# \{A,C,D,Z\}\\
# \{A,B,C,D,Z\}
# $$
# #### 1.3.b List all of the minimal sets of variables that satisfy the backdoor criterion to determine the causal effect of X on Y (i.e., any set of variables such that, if you removed any one of the variables from the set, it would no longer meet the criterion). (3 points)
# $$
# \{A,Z\}\\
# \{B,Z\}\\
# \{C,Z\}\\
# \{D,Z\}\\
# $$
# #### 1.3.c List all the minimal sets of variables that need to be measured in order to identify the effect of D on Y. (3 points)
# The sets are $$
# \{C\}\\
# \{A,Z\}\\
# \{B,Z\}\\
# \{X,Z\}\\
# \{W,Z\}\\
# $$
# #### 1.3.d Now suppose we want to know the causal effect of intervening on 2 variables. List all the minimal sets of variables that need to be measured in order to identify the effect of set {D, W} on Y, i.e., P (Y = y|do(D = d), do(W = w)). (3 points)
# The sets are
# $$
# \{Z\}\\
# \{C,X\}
# $$
# ### Question 2: Covariate adjustment
# #### 2.1.a Build the model with Pyro using the values in the table. Use pyro.condition to calculate the causal effect by adjusting for happiness. (5 points)
# +
#Creating model
def bn():
    """Bayesian network for Q2.1: Happiness (Z) is a parent of both
    Promotion (X) and contract Renewal (Y); X is also a parent of Y."""
    #Z = [unhappy, happy]
    Z = pyro.sample("Happiness", dist.Categorical(torch.tensor([0.509,0.491])))
    #X = [promotion 0, promotion 1] -> Z
    # NOTE(review): rows of X_probs do not sum to 1 (0.246+0.762), while the
    # columns do — X_probs[Z] may read the CPT in the wrong orientation.
    # torch/pyro Categorical silently renormalizes, so this would not error.
    # TODO confirm against the assignment's probability table.
    X_probs = torch.tensor([[0.246,0.762],[0.754,0.237]])
    X = pyro.sample("Promotion", dist.Categorical(X_probs[Z]))
    #Y = [not renewed, renewed] -> X, Z
    Y_probs = torch.tensor([[[0.068,0.932],[0.267,0.733]],[[0.131,0.869],[0.313,0.687]]])
    Y = pyro.sample("Renewed", dist.Categorical(Y_probs[X][Z]))
# +
# Condition the model on every (Promotion, Happiness) combination.
conditioned_bn_00 = pyro.condition(bn, data={'Promotion':torch.tensor(0), 'Happiness':torch.tensor(0)})
conditioned_bn_01 = pyro.condition(bn, data={'Promotion':torch.tensor(0), 'Happiness':torch.tensor(1)})
conditioned_bn_10 = pyro.condition(bn, data={'Promotion':torch.tensor(1), 'Happiness':torch.tensor(0)})
conditioned_bn_11 = pyro.condition(bn, data={'Promotion':torch.tensor(1), 'Happiness':torch.tensor(1)})
# +
#P(Y|X=0,Z=0), estimated by importance sampling (1000 traces, 1000 draws).
posterior_00 = pyro.infer.Importance(conditioned_bn_00, num_samples=1000).run()
marginal_00 = pyro.infer.EmpiricalMarginal(posterior_00, "Renewed")
Y_samples_00 = np.array([marginal_00().item() for _ in range(1000)])
p_00 = Y_samples_00.mean()
# +
#P(Y|X=0,Z=1)
posterior_01 = pyro.infer.Importance(conditioned_bn_01, num_samples=1000).run()
marginal_01 = pyro.infer.EmpiricalMarginal(posterior_01, "Renewed")
Y_samples_01 = np.array([marginal_01().item() for _ in range(1000)])
p_01 = Y_samples_01.mean()
# +
# P(Y=1|do(X=0)) via back-door adjustment over Happiness (Z):
# sum_z P(Y=1|X=0, Z=z) P(Z=z)
p_y_x0 = p_00*0.509 + p_01*0.491
# +
# P(Y=1|X=1, Z=0), estimated by importance sampling.
posterior_10 = pyro.infer.Importance(conditioned_bn_10, num_samples=1000).run()
marginal_10 = pyro.infer.EmpiricalMarginal(posterior_10, "Renewed")
Y_samples_10 = np.array([marginal_10().item() for _ in range(1000)])
p_10 = Y_samples_10.mean()
# +
# P(Y=1|X=1, Z=1), estimated by importance sampling.
posterior_11 = pyro.infer.Importance(conditioned_bn_11, num_samples=1000).run()
marginal_11 = pyro.infer.EmpiricalMarginal(posterior_11, "Renewed")
Y_samples_11 = np.array([marginal_11().item() for _ in range(1000)])
p_11 = Y_samples_11.mean()
# +
# P(Y=1|do(X=1)) via the same adjustment.
p_1 = p_10*0.509 + p_11*0.491
# -
# BUG FIX: the causal effect is p_y_x0 - p_1; the original `p_0 - p_1` raised
# a NameError because `p_0` was never defined.
p_y_x0 - p_1
# #### 2.1.b Suppose you could not observe happiness. Use pyro.do to calculate the causal effect with do-calculus. (5 points)
# +
#intervention model
# pyro.do severs the edges into Promotion, implementing do(X=x).
intervened_bn_0 = pyro.do(bn, data={'Promotion':torch.tensor(0)})
intervened_bn_1 = pyro.do(bn, data={'Promotion':torch.tensor(1)})
# +
#P(Y|do(X=0)), estimated by importance sampling.
posterior_0 = pyro.infer.Importance(intervened_bn_0, num_samples=1000).run()
marginal_0 = pyro.infer.EmpiricalMarginal(posterior_0, "Renewed")
Y_samples_0 = np.array([marginal_0().item() for _ in range(1000)])
p_0_int = Y_samples_0.mean()
# +
#P(Y|do(X=1))
posterior_1 = pyro.infer.Importance(intervened_bn_1, num_samples=1000).run()
marginal_1 = pyro.infer.EmpiricalMarginal(posterior_1, "Renewed")
Y_samples_1 = np.array([marginal_1().item() for _ in range(1000)])
p_1_int = Y_samples_1.mean()
# -
# Average causal effect: P(Y=1|do(X=0)) - P(Y=1|do(X=1)).
p_0_int - p_1_int
# #### 2.2 You are a data scientist investigating the effects of social media use on purchasing a product. You assume the dag shown below. User info here is unobserved. One of the team members argues that social media usage does not drive purchase based on Table 1. Only 15% social media user made the purchase, while 90.25% non social media users made the purchase. Moreover, within each group, no-adblock and adblock, social media users show a much lower rate of purchase than non social media users. However, another team member argues that social media usage increases purchases. When we look at each group, social media user and non social media user as show in Table 2 (Table 1 and Table 2 both represent the same dataset), advertisement increases purchases in both groups. Among social media users, purchases increases from 10% to 15% for people who have seen advertisement. Among non social media users, purchases increases from 90% to 95% for people who have seen advertisement. Which view is right?
Image(filename='22.png')
# #### 2.2.a User info is unobserved. Use pyro.condition to calculate the causal effect of social media on product purchase using front-door adjustment (Section 3.4 in Front Door Criterion).(5 points)
# +
#Creating model
def bn2():
    """Front-door model for Q2.2: Social media use (X) -> Advertisement (Z) ->
    Purchase (Y); the effect of X on Y flows entirely through the mediator Z.
    The unobserved confounder U (user info) from the DAG is not modeled."""
    #X = [no social,social]
    X = pyro.sample("Social", dist.Categorical(torch.tensor([0.5,0.5])))
    #Z = [no ad,ad] -> X
    Z_probs = torch.tensor([[0.95,0.05],[0.05,0.95]])
    Z = pyro.sample("Advertisement", dist.Categorical(Z_probs[X]))
    #Y = [no purchase,purchase] -> Z
    Y_probs = torch.tensor([[0.19,0.81],[0.86,0.14]])
    Y = pyro.sample("Purchase", dist.Categorical(Y_probs[Z]))
# -
# Condition bn2 on each (Social, Advertisement) combination.
conditioned_bn2_00 = pyro.condition(bn2, data={"Social":torch.tensor(0), "Advertisement":torch.tensor(0)})
conditioned_bn2_01 = pyro.condition(bn2, data={"Social":torch.tensor(0), "Advertisement":torch.tensor(1)})
conditioned_bn2_10 = pyro.condition(bn2, data={"Social":torch.tensor(1), "Advertisement":torch.tensor(0)})
conditioned_bn2_11 = pyro.condition(bn2, data={"Social":torch.tensor(1), "Advertisement":torch.tensor(1)})
# +
#P(Y|X=0, Z=0)
posterior2_00 = pyro.infer.Importance(conditioned_bn2_00, num_samples=1000).run()
marginal2_00 = pyro.infer.EmpiricalMarginal(posterior2_00, "Purchase")
Y_samples2_00 = np.array([marginal2_00().item() for _ in range(1000)])
p2_00 = Y_samples2_00.mean()
#P(Y|X=1, Z=0)
posterior2_10 = pyro.infer.Importance(conditioned_bn2_10, num_samples=1000).run()
marginal2_10 = pyro.infer.EmpiricalMarginal(posterior2_10, "Purchase")
Y_samples2_10 = np.array([marginal2_10().item() for _ in range(1000)])
p2_10 = Y_samples2_10.mean()
#P(Y|X=0, Z=1)
posterior2_01 = pyro.infer.Importance(conditioned_bn2_01, num_samples=1000).run()
marginal2_01 = pyro.infer.EmpiricalMarginal(posterior2_01, "Purchase")
Y_samples2_01 = np.array([marginal2_01().item() for _ in range(1000)])
p2_01 = Y_samples2_01.mean()
#P(Y|X=1,Z=1)
posterior2_11 = pyro.infer.Importance(conditioned_bn2_11, num_samples=1000).run()
marginal2_11 = pyro.infer.EmpiricalMarginal(posterior2_11, "Purchase")
Y_samples2_11 = np.array([marginal2_11().item() for _ in range(1000)])
p2_11 = Y_samples2_11.mean()
# +
# Front-door adjustment: inner sum over x' with P(X=x') = 0.5 each.
#P(Y|X=0) = sum_z(P(Z=z|X=0)*sum_x'(P(Y|X=x',Z=z)*P(X=x')))
sum_x_z0 = p2_00*0.5 + p2_10*0.5
sum_x_z1 = p2_01*0.5 + p2_11*0.5
# Outer sum weights by P(Z=z|X=0) = (0.95, 0.05).
sum_z0_x0 = 0.95*(sum_x_z0)
sum_z1_x0 = 0.05*(sum_x_z1)
p_y_x0 = sum_z0_x0 + sum_z1_x0
# +
#P(Y|X=1) = sum_z(P(Z=z|X=1)*sum_x(P(Y|X=x,Z=z)*P(X=x)))
sum_z0_x1 = 0.05*(sum_x_z0)
sum_z1_x1 = 0.95*(sum_x_z1)
p_y_x1 = sum_z0_x1 + sum_z1_x1
# -
# Causal effect P(Y=1|do(X=0)) - P(Y=1|do(X=1)) via the front door.
p_y_x0 - p_y_x1
# #### 2.2.b Verify your result using do-calculus with pyro.do.(P (Y = 1|do(X = 0)) − P (Y = 1|do(X = 1))) (5 points)
# +
#intervention models - do(X=0), do(X=1)
intervened_bn2_0 = pyro.do(bn2, data={"Social":torch.tensor(0)})
intervened_bn2_1 = pyro.do(bn2, data={"Social":torch.tensor(1)})
# -
posterior2_0 = pyro.infer.Importance(intervened_bn2_0, num_samples=1000).run()
marginal2_0 = pyro.infer.EmpiricalMarginal(posterior2_0, "Purchase")
Y_samples2_0 = np.array([marginal2_0().item() for _ in range(1000)])
p2_0 = Y_samples2_0.mean()
posterior2_1 = pyro.infer.Importance(intervened_bn2_1, num_samples=1000).run()
marginal2_1 = pyro.infer.EmpiricalMarginal(posterior2_1, "Purchase")
Y_samples2_1 = np.array([marginal2_1().item() for _ in range(1000)])
p2_1 = Y_samples2_1.mean()
p2_0 - p2_1
# As we can see here, social media usage does not drive purchases. Absence of social media has marginally higher purchase percentage than presence.
# ### Question 3: Inverse probability weighting with a propensity score.
# #### 3.1 Use the data in Question 2.1 to create the following propensity score function. (3 points)
#
# ##### def propensity(x, z):
# returns P(X = x | Z = z)
def propensity(z, x):
    """Return the propensity score P(X = x | Z = z) from the Q2.1 table."""
    table = torch.tensor([[0.246, 0.762], [0.754, 0.237]])
    return table[x, z]
# #### 3.2 Use the model from Question 2.1 to generate 1000 samples, along with the sample probabilities. Print the first 10 samples. (3 points)
# Record 1000 forward traces of bn; each trace stores the sampled values
# and their log-probabilities.
trace_handler = pyro.poutine.trace(bn)
samples = []
probs = []
for i in range(1000):
    trace = trace_handler.get_trace()
    z = trace.nodes['Happiness']['value']
    x = trace.nodes['Promotion']['value']
    y = trace.nodes['Renewed']['value']
    # log_prob_sum is the log joint probability of the fully-observed trace.
    log_prob = trace.log_prob_sum()
    p = np.exp(log_prob)
    samples.append((int(z),int(x),int(y)))
    probs.append(p)
samples[:10]
# #### 3.3 Compute weighted joint probabilities for each possible combinations of X, Y, Z. Hint: Use your propensity function to create a list of weights for each combination, and multiplying original joint probability of each combination by this weight. Normalize the weighted probabilities if they don’t sum up to 1. (3 points) (Refer to Section 3.6 :Inverse Probability Weighting)
# Propensity scores P(X=x|Z=z) for every (z, x) pair.
prop_score = {}
for z_val in range(0,2):
    for x_val in range(0,2):
        prop_score[(z_val,x_val)] = propensity(z_val,x_val)
prop_score
# Enumerate all 8 possible (z, x, y) outcomes.
poss_combs = []
for z_val in range(0,2):
    for x_val in range(0,2):
        for y_val in range(0,2):
            poss_combs.append((z_val,x_val,y_val))
poss_combs
# Joint probability of each combination, taken from the first matching trace.
# NOTE(review): any combination never observed among the 1000 samples is
# absent from probs_combs, which makes the weights loop below raise KeyError —
# this relies on all 8 outcomes appearing; confirm that assumption.
probs_combs = {}
for i in poss_combs:
    if i in samples:
        probs_combs[i] = probs[samples.index(i)]
probs_combs
# Inverse probability weighting: divide each joint probability by the
# propensity score of its (z, x) pair.
weights = {}
for i in poss_combs:
    weights[i] = float(probs_combs[i]/prop_score[(i[0],i[1])])
weights
# Normalize the weights into a probability distribution.
tot = sum(np.array([k[1] for k in weights.items()]))
weights = {k:v/tot for k,v in weights.items()}
# #### 3.4 Sample with replacement 1000 samples from the weighted probabilty distribution obtained in Question 3.3. (1 point)
omega = random.choices(poss_combs, list(weights.values()), k=1000)
# #### 3.5 Call this new set of samples $\Omega$. Let $p^{\Omega}(X =x)$ be the proportion of times $X == x$ in $\Omega$ and $p^{\Omega}(X = x|Y = y)$ be the proportion of the $\Omega$ samples where $X == x$ after filtering for samples where $Y == y$. If you performed the above inverse probability weighting procedure correctly, then $P^{model}(Y = y|do(X = x)) ≈ p^{\Omega}(Y = y|X = x)$ (the LHS and RHS are equal as the sample size goes to infinity). Confirm this by recalculating the causal effect from Question 2.1 using this method. (3 points)
# +
# Tally the resampled set omega; each sample is a (z, x, y) triple.
x0z0 = sum(1 for z, x, y in omega if z == 0 and x == 0)
x0z1 = sum(1 for z, x, y in omega if z == 1 and x == 0)
x1z0 = sum(1 for z, x, y in omega if z == 0 and x == 1)
x1z1 = sum(1 for z, x, y in omega if z == 1 and x == 1)
y1_x0z0 = sum(1 for z, x, y in omega if z == 0 and x == 0 and y == 1)
y1_x0z1 = sum(1 for z, x, y in omega if z == 1 and x == 0 and y == 1)
y1_x1z0 = sum(1 for z, x, y in omega if z == 0 and x == 1 and y == 1)
y1_x1z1 = sum(1 for z, x, y in omega if z == 1 and x == 1 and y == 1)
# -
# Back-door adjustment over Z with P(Z=0)=0.509, P(Z=1)=0.491, then the
# causal effect p^Omega(Y=1|X=0) - p^Omega(Y=1|X=1).
p_y1_x0 = ((y1_x0z0/x0z0)*0.509) + ((y1_x0z1/x0z1)*0.491)
p_y1_x1 = ((y1_x1z0/x1z0)*0.509) + ((y1_x1z1/x1z1)*0.491)
p_y1_x0-p_y1_x1
# ### Question 4: Structural causal models
# #### 4.1 Consider the SCM M:
#
# $$X := N_X\\
# Y := X^2 + N_Y\\
# N_X, N_Y \sim^{iid} N(0,1)$$
#
# #### Write this model in Pyro and generate 10 samples of X and Y. (3 points)
def model():
    """SCM 4.1: X := N_X, Y := X^2 + N_Y with N_X, N_Y ~iid N(0, 1).

    dist.Delta(nx) makes the assignment X := N_X deterministic, and the
    Normal(y, 0.01) site is a narrow-noise stand-in for the deterministic
    assignment (pyro needs a sample site to expose 'y' to inference).
    """
    nx = pyro.sample("Nx", dist.Normal(0.0,1.0))
    ny = pyro.sample("Ny", dist.Normal(0.0,1.0))
    x = pyro.sample("x", dist.Delta(nx))
    y = torch.pow(x,2) + ny
    y = pyro.sample('y', dist.Normal(y, 0.01))
posterior4 = pyro.infer.Importance(model, num_samples=1000).run()
marginal4x = pyro.infer.EmpiricalMarginal(posterior4, "x")
marginal4y = pyro.infer.EmpiricalMarginal(posterior4, "y")
# Draw 10 (x, y) pairs from the empirical marginals.
x_samples = np.array([marginal4x().item() for _ in range(10)])
y_samples = np.array([marginal4y().item() for _ in range(10)])
x_samples, y_samples
# ### 4.2 Consider the SCM $M$:
# $$X := N_X\\
# Y := 4X + N_Y\\
# N_X, N_Y ∼^{iid} N(0, 1)$$
#
# #### 4.2.a Draw a picture of the model’s DAG.(1 point)
Image(filename='Q4graph.png')
# #### 4.2.b $P^{M}_Y$ is a normal distribution with what mean and variance? (2 points)
# $P^{M}_Y \sim N(0, 17)$
# #### 4.2.c $P^{M:do(X=2)}_{Y}$ is a normal distribution with what mean and variance? (2 points)
#
# $P^{M:do(X=2)}_Y \sim N(8, 1)$
# #### 4.2.d How and why does $P_{Y}^{M:X=2}$ differ or not differ from $P_{Y}^{M:do(X=2)}$? (2 points)
# Both will follow $N(8,1)$.
# #### 4.2.e $P_{X}^{M:Y =2}$ is a normal distribution with what mean and variance? Note: Need explanation (2 Points)
# $$\sigma_{XY} = E(XY) - E(X)E(Y)\\
# = E(4X^2 + N_Y) + E(X)E(Y)\\
# = 4E(X^2) + E(N_Y) + E(X)E(Y)\\
# = 4$$
# Mean of conditional dist:
# $$\mu_X + \frac{\sigma_{XY}}{\sigma_Y}(y-\mu_Y)\\
# = 0 + \frac{4}{17}(2)\\
# = 8/17$$
# Variance of conditional dist:
# $$\sigma_X - \frac{\sigma_{XY}^2}{\sigma_{Y}}\\
# 1 - \frac{16}{17}\\
# \frac{1}{17}
# $$
# #### 4.2.f $P_{X}^{M:do(Y =2)}$ is a normal distribution with what mean and variance? (2 points)
# We have removed the causal edge from $X$ and $N_Y$ to $Y$. So $X$ will not be affected by any value $Y$ takes (they're independent), and follows $N(0,1)$.
# #### 4.2.g Write model $P^M_{X,Y}$ in code and generate 10 samples. (3 points)
def model2():
    """SCM 4.2: X := N_X, Y := 4X + N_Y with N_X, N_Y ~iid N(0, 1).

    As in model(), Delta and the narrow Normal(y, 0.01) site implement
    the deterministic structural assignments as pyro sample sites.
    """
    nx = pyro.sample("Nx", dist.Normal(0.0,1.0))
    ny = pyro.sample("Ny", dist.Normal(0.0,1.0))
    x = pyro.sample("x", dist.Delta(nx))
    y = 4*x + ny
    y = pyro.sample("y", dist.Normal(y, 0.01))
posterior42 = pyro.infer.Importance(model2, num_samples=1000).run()
marginal4x2 = pyro.infer.EmpiricalMarginal(posterior42, "x")
marginal4y2 = pyro.infer.EmpiricalMarginal(posterior42, "y")
# Draw 10 (x, y) pairs from the empirical marginals.
x_samples2 = np.array([marginal4x2().item() for _ in range(10)])
y_samples2 = np.array([marginal4y2().item() for _ in range(10)])
x_samples2, y_samples2
# #### 4.2.h Use the do operator to generate 100 samples from model $P_{Y}^{M:do(X=2)}$ and visualize the results in a histogram. (3 points)
# Interventional distribution P(Y | do(X=2)).
# NOTE(review): the question asks for 100 samples but the code draws 1000;
# also torch.tensor(2) is an integer tensor while x is continuous elsewhere —
# confirm both against the assignment's intent.
int_model = pyro.do(model2, data={'x':torch.tensor(2)})
posterior_int = pyro.infer.Importance(int_model, num_samples=1000).run()
marginal_y_int = pyro.infer.EmpiricalMarginal(posterior_int, "y")
y_int_samples = np.array([marginal_y_int().item() for _ in range(1000)])
plt.hist(y_int_samples)
plt.title("P(Y|do(X=2))")
plt.xlabel("Y values")
plt.ylabel("Frequencies");
# #### 4.2.i Use the condition operator and a Pyro inference algorithm to generate 10 samples from $P_X^{M:Y=2}$. Use one of the Bayesian inference procedures described in the lecture notes. (3 points)
# Observational (conditional) distribution P(X | Y=2) via importance sampling.
cond_model = pyro.condition(model2, data={"y":torch.tensor(2.)})
posterior_cond = pyro.infer.Importance(cond_model, num_samples=1000).run()
marginal_x_cond = pyro.infer.EmpiricalMarginal(posterior_cond, "x")
x_cond_samples = np.array([marginal_x_cond().item() for _ in range(10)])
x_cond_samples
| HW/hw3_answers/hw3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Gaussian Process Regression
# **COMP9418-17s2, W09 Tutorial**
#
# *School of Computer Science and Engineering, UNSW Sydney*
#
# - **Instructor**: <NAME>
# - **Teaching Assistant**: <NAME>
# We will be using Gaussian process (GP) models for regression. A Gaussian process defines a prior distribution over functions, which we use to obtain a posterior distribution over functions after having observed some data. As such, the predictions from a GP model take the form of a full predictive distribution.
#
# In this week's lab, we will focus on gaining a practical understanding of these concepts - how to sample from a prior over functions, how to obtain and visualize the full predictive distribution, how the characteristics of a GP model change as we vary some underlying hyperparameters, and so on. Finally, we will apply a GP regression model to a real-world dataset, and select a covariance function and its parameters to obtain automatic relevance determination (ARD), which can help us discard the irrelevant features. For a more complete treatment of Gaussian processes for regression, please refer to [GPML](http://www.gaussianprocess.org/gpml/) $\S$2.2 (Rasmussen & Williams, 2006) or [MLaPP](https://www.cs.ubc.ca/~murphyk/MLbook/) $\S$15.2 (Murphy, 2012)
#
# This lab makes extensive use of [GPFlow](https://github.com/GPflow/GPflow), with portions derived directly from their tutorial [GP Regression with GPflow](http://gpflow.readthedocs.io/en/latest/notebooks/regression.html).
# ### Preamble
#
# The following Python packages are required for this exercise:
#
# - `numpy`
# - `pandas`
# - `scikit-learn`
# - `tensorflow`
# - `GPflow`
# - `matplotlib`
#
# Most of these may be installed with `pip`:
#
# pip install numpy pandas scikit-learn matplotlib tensorflow
#
# #### Tensorflow Installation
#
# The recommended way to install TensorFlow is in a `virtualenv` with `pip`. If your computer satisfies the [requirements](https://www.tensorflow.org/install/install_linux#nvidia_requirements_to_run_tensorflow_with_gpu_support) to run TensorFlow with GPU support, you should definitely take advantage of it. You can install TensorFlow with GPU support
#
# pip install tensorflow-gpu
#
# You can also take advantage of specialized CPU instruction sets for numerical computing by compiling TensorFlow from sources. This is outside the scope of this lab and we refer you to the [install guide](https://www.tensorflow.org/install/install_sources) for this.
#
# See the [full install guide](https://www.tensorflow.org/install/) for comprehensive instructions on installing TensorFlow for different environments with different options.
#
# #### GPflow Installation
#
# GPflow has not been released on PyPI. You can either download the package from the [official Github repo](https://github.com/GPflow/GPflow) and run `python setup.py install`, or install directly from Github via `pip` (recommended):
#
# pip install git+https://github.com/GPflow/GPflow.git
#
# See the [install guide](http://gpflow.readthedocs.io/en/latest/intro.html#install) for further information.
#
# Here we import the required modules and set up our notebook environment
# %matplotlib notebook
# +
from __future__ import division
from __future__ import print_function
# Required modules
import numpy as np
import pandas as pd
import tensorflow as tf
import gpflow
import matplotlib.pyplot as plt
from sklearn.datasets import load_boston
from sklearn.preprocessing import normalize
from sklearn.utils import shuffle
from IPython.display import Markdown
# -
plt.style.use('seaborn-notebook')
np.set_printoptions(precision=3,
edgeitems=5,
suppress=False)
# #### Constant Definitions
n_train = 12 # nbr. training points in synthetic dataset
n_query = 100 # nbr. query points
seed = 23 # set random seed for reproducibility
rng = np.random.RandomState(seed)
# ### Synthetic Dataset
# First let's create a small synthetic dataset with `n_train` datapoints.
# Noisy sum of two sinusoids (noise std 0.1), shifted up by 3.
X = rng.rand(n_train, 1)
Y = np.sin(12.*X) + .66*np.cos(25.*X) + rng.randn(n_train, 1)*.1 + 3.
# +
fig, ax = plt.subplots(figsize=(6, 4))
ax.scatter(X, Y, marker='x', color='k')
ax.set_xlabel('$x$')
ax.set_ylabel('$y$')
plt.show()
# -
# ## Model specification
# A Gaussian process model is specified in terms of a mean function $m(\mathbf{x})$ and kernel or covariance function $\kappa(\mathbf{x}, \mathbf{x}')$. The Gaussian process prior on regression functions is denoted by
#
# $$
# f(\mathbf{x}) \sim GP(m(\mathbf{x}), \kappa(\mathbf{x}, \mathbf{x}'))
# $$
#
# It is common to simply specify $m(x)=0$ since the GP is flexible enough to also model the mean arbitrarily well, so let's go ahead and specify a covariance function.
# ### Kernels (covariance functions)
# GPFlow offers a number of commonly-used kernels, as well as an interface to define your own. See the tutorial [Using kernels in GPflow](http://gpflow.readthedocs.io/en/latest/notebooks/kernels.html) for more information. Let's take a look at the Radial Basis Function (RBF, also known as squared-exponential) kernel.
# RBF (squared-exponential) kernel with input_dim=1 and lengthscale=1.
# (The original comment said input_dim=2, but the code constructs it with 1.)
k = gpflow.kernels.RBF(1, lengthscales=1.)
# For any finite set of $N$ points $\mathbf{X}$, the GP prior above defines a joint Gaussian
#
# $$
# p(\mathbf{f} \mid \mathbf{X}) = \mathcal{N}(\mathbf{f} \mid \boldsymbol{\mu}, \mathbf{K})
# $$
#
# where $K_{ij} = \kappa(\mathbf{x}_i, \mathbf{x}_j)$ and $\boldsymbol{\mu} = (m(\mathbf{x}_1, \dotsc, \mathbf{x}_N))$.
# Let's define a set of `n_query` points.
# Query grid of n_query points, shaped (n_query, 1) as GPflow requires.
Xq = np.linspace(-5., 5., n_query).reshape(-1, 1)
# With one input fixed at 0, we can see how the kernel changes as the other input varies and moves away from 0. The method `compute_K` takes 2 arrays as arguments and gives the kernel computed pairwise between the points in the arrays.
# +
fig, ax = plt.subplots(figsize=(5, 4.5))
# k(x, 0) for every x in the query grid.
ax.plot(Xq, k.compute_K(Xq, np.zeros(1).reshape(-1, 1)))
ax.set_xlabel('$x$')
ax.set_ylabel('$k(x, 0)$')
ax.set_title('RBF Kernel ($\ell={{{[0]:.2f}}}$)'
             .format(k.lengthscales.value))
plt.show()
# -
# Now let's call `compute_K_symm` on $\mathbf{X}$, which gives the covariance matrix $\mathbf{K}$ described above. We can then sample functions $\mathbf{f}$ from the joint Gaussian with covariance $\mathbf{K}$. This is what it means to sample functions from a GP prior. Note that $\mathbf{f}$ is a collection of function values evaluated at the finite set of points in $\mathbf{X}$.
#
# ### Exercise
#
# Implement the function `prior_samples`, which takes a 2d array of points, a GPFlow kernel and integer argument `n_samples`, and returns `n_samples` samples of function $\mathbf{f}$ from the GP prior with zero mean function $m(\mathbf{x})=0$ and the specified kernel.
#
# Note that while the set of `n_query` finite points `Xq` we're working with is 1-dimensional, we defined it to be a 2d array of shape `(n_query, 1)`. This is because GPFlow only accepts 2d arrays.
### EXERCISE ###
# Intentionally incomplete exercise stub: students replace `...` with
# n_samples draws from N(mu, K). The completed SOLUTION follows below.
def prior_samples(x, kernel, n_samples=3):
    # mean vector
    mu = np.squeeze(np.zeros_like(x))
    # covariance matrix
    K = kernel.compute_K_symm(x)
    # TODO: return n_samples from a joint Gaussian distribution
    return ...
### SOLUTION ###
def prior_samples(x, kernel, n_samples=3):
    """Draw `n_samples` functions from a zero-mean GP prior with `kernel`,
    evaluated at the points `x` of shape (n, 1); returns shape (n, n_samples)."""
    zero_mean = np.squeeze(np.zeros_like(x))
    cov = kernel.compute_K_symm(x)
    draws = rng.multivariate_normal(zero_mean, cov, n_samples)
    return draws.T
# Now we can visualize draws from the GP prior.
# +
fig, ax = plt.subplots(figsize=(6, 4))
# Each column of prior_samples(Xq, k) is one function drawn from the GP prior.
ax.plot(Xq, prior_samples(Xq, k))
ax.set_xlabel('$x$')
ax.set_ylabel('$f(x)$')
ax.set_title('Draws of $f(x)$ from GP prior')
plt.show()
# -
# #### Effects of kernel parameters
#
# The kernel variance parameter $\sigma_f$ (`k.variance`) controls the vertical scale of the function, while the length scale $\ell$ (`k.lengthscales`) is the horizontal scale over which the function changes. The sampled functions look smoother as we increase the length scale. This makes sense as the effective distance between query points is inversely proportional to the length scale. Increasing the length scale causes the function values to become less correlated.
#
#
# +
fig, axes = plt.subplots(nrows=3, ncols=3, figsize=(7, 7),
                         subplot_kw=dict(xticks=[], yticks=[]))
fig.tight_layout()
# 3x3 grid: rows sweep the kernel variance, columns sweep the length scale.
for i, ax_row in enumerate(axes):
    variance = .05 * (i + 1)
    for j, ax in enumerate(ax_row):
        len_scale = .1 + .7 * j
        k = gpflow.kernels.RBF(1, variance=variance,
                               lengthscales=len_scale)
        ax.plot(Xq, prior_samples(Xq, k))
        ax.set_ylim(-1.5, 1.5)
        # Label only the bottom row and the first column to keep the grid clean.
        if i == len(axes) - 1:
            ax.set_xlabel('$\ell={{{:.2f}}}$'.format(len_scale))
        if not j:
            ax.set_ylabel('$\sigma_f={{{:.2f}}}$'.format(variance))
plt.show()
# -
# ### Posterior predictive distribution
# Now suppose we observe the training set $(\mathbf{X}, \mathbf{y})$ we created earlier, where $y_i = f(\mathbf{x}_i) + \epsilon$ is the noisy observation of the function evaluated at $\mathbf{x}_i$ and noise $\epsilon \sim \mathcal{N}(0, \sigma_y^2)$. Given a query set $\mathbf{X}_*$, we can obtain a full posterior distribution over the function outputs $\mathbf{f}_*$. The joint distribution has the following form
#
# $$
# \begin{pmatrix}
# \mathbf{y}\\
# \mathbf{f}_*
# \end{pmatrix}
# \sim
# \mathcal{N} \left (
# 0,
# \begin{pmatrix}
# \mathbf{K}_y & \mathbf{K}_* \\
# \mathbf{K}_*^T & \mathbf{K}_{**}
# \end{pmatrix}
# \right )
# $$
#
# where
#
# $$
# \begin{align}
# \mathbf{K}_y &= \mathbf{K} + \sigma_y^2\mathbf{I} \\
# \mathbf{K} &= \kappa(\mathbf{X}, \mathbf{X}) \\
# \mathbf{K}_* &= \kappa(\mathbf{X}, \mathbf{X}_*) \\
# \mathbf{K}_{**} &= \kappa(\mathbf{X}_*, \mathbf{X}_*)
# \end{align}
# $$
#
# Then, using the standard method for conditioning Gaussians, the posterior predictive density is given by
#
# $$
# \begin{align}
# p(\mathbf{f}_* \mid \mathbf{X}_*, \mathbf{X}, \mathbf{y})
# &= \mathcal{N}(\mathbf{f}_*, \mu_*, \Sigma_*) \\
# \mu_* &= \mathbf{K}_*^T \mathbf{K}_y^{-1} \mathbf{y} \\
# \Sigma_* &= \mathbf{K}_{**} - \mathbf{K}_*^T \mathbf{K}_y^{-1} \mathbf{K}_*
# \end{align}
# $$
# Let's instantiate a GP regression model, conditioned on the synthetic training set (`X`, `Y`), with a squared-exponential kernel, and a sensible initial setting of the variance and length scale hyperparameters ($\sigma_y, \sigma_f, \ell$).
# GP regression model on the synthetic data: RBF kernel with initial length
# scale 0.2 and observation-noise variance sigma_y^2 = 0.01.
k = gpflow.kernels.RBF(1, lengthscales=.2)
m = gpflow.gpr.GPR(X, Y, kern=k)
m.likelihood.variance = 0.01
# For a given query set $\mathbf{X}_*$, `m.predict_y` returns the means and variances $\mu_*, \Sigma_*$. Additionally, `m.predict_f_samples` yields samples from our posterior. We can use these to visualize the full posterior predictive distribution.
# #### Exercise
#
# Complete the indicated portions of the function `plot_posterior_predictive` below.
### EXERCISE ###
# Intentionally incomplete exercise stub — the `...` placeholder and the
# fill_between call (missing its y1/y2 arguments) must be completed by the
# student, so this version will not run as written.
def plot_posterior_predictive(model, start=0., stop=1., n_query=100,
                              n_samples=3, ax=None):
    if ax is None:
        ax = plt.gca()
    Xq = np.linspace(start, stop, n_query).reshape(-1, 1)
    # COMPLETE ME #
    # Compute the mean and variances of the predictive
    # distribution, given the query points `Xq`
    mean, var = ...
    # scatter plot of the training points
    ax.scatter(model.X.value, model.Y.value, marker='x', color='k')
    # predictive mean mu_*
    ax.plot(Xq, mean, 'b', lw=2., label='$m(x)$')
    # COMPLETE ME #
    # Plot a shaded region that represents mu_* +- 2 std(f_*)
    ax.fill_between(np.squeeze(Xq),
                    # mu_* - 2 std(f_*)
                    # mu_* + 2 std(f_*)
                    color='blue', alpha=.2)
    # samples from the posterior distribution
    ax.plot(Xq, np.squeeze(model.predict_f_samples(Xq, n_samples)).T,
            '--', alpha=.8)
    ax.legend()
### SOLUTION ###
def plot_posterior_predictive(model, start=0., stop=1., n_query=100,
                              n_samples=3, ax=None):
    """Plot the GP posterior predictive distribution of `model`.

    Draws the training points, the predictive mean, a +/- 2 standard
    deviation band, and `n_samples` posterior function draws, all
    evaluated on `n_query` equally spaced query points in [start, stop].
    """
    if ax is None:
        ax = plt.gca()
    # Column vector of query locations X_*.
    Xq = np.linspace(start, stop, n_query).reshape(-1, 1)
    # Predictive mean and variance of y at the query locations.
    mean, var = model.predict_y(Xq)
    # Training data as black crosses.
    ax.scatter(model.X.value, model.Y.value, marker='x', color='k')
    # Posterior predictive mean mu_*.
    ax.plot(Xq, mean, 'b', lw=2., label='$m(x)$')
    # Shaded region: mu_* +/- 2 std(f_*).
    band = 2 * np.sqrt(var)
    lower = np.squeeze(mean - band)
    upper = np.squeeze(mean + band)
    ax.fill_between(np.squeeze(Xq), lower, upper, color='blue', alpha=.2)
    # A few sample paths drawn from the posterior over f.
    samples = model.predict_f_samples(Xq, n_samples)
    ax.plot(Xq, np.squeeze(samples).T, '--', alpha=.8)
    ax.legend()
# This is what the posterior predictive distribution looks like under our GP model. It may seem that our model doesn't yield a great fit. The function is smooth and doesn't interpolate the observed values very well. Furthermore, there is a high degree of uncertainty at the observed points. If we believe our prior and our likelihood models then this is the right answer. However, as we shall see below, we can also estimate the model hyperparameters from the data.
# +
fig, ax = plt.subplots(figsize=(6, 4))
plot_posterior_predictive(m, ax=ax)
plt.show()
# -
# Similar to what we did with the prior distribution, we can visualize the effects of varying the length scale parameter $\ell$.
# +
fig, axes = plt.subplots(nrows=3, figsize=(6, 8))
# subplot_kw=dict(xticks=[], yticks=[]))
fig.tight_layout()
for i, ax in enumerate(axes):
len_scale = .05 * (i + 1)
k = gpflow.kernels.RBF(1, lengthscales=len_scale)
m = gpflow.gpr.GPR(X, Y, kern=k)
m.likelihood.variance = .1
ax.set_title('$\ell={{{:.2f}}}$'.format(len_scale))
plot_posterior_predictive(m, ax=ax)
plt.show()
# -
# ### Hyperparameter Estimation
# To find the kernel parameters that result in the best fit, it is prohibitively slow to exhaustively search over a discrete grid of values to minimize some validation loss. Instead, we resort to an empirical Bayes approach (also known as type II maximum likelihood) where we maximize the *marginal likelihood*. This is much faster and amenable to gradient-based optimization methods.
#
# GPflow implements this in the method `m.optimize` and uses L-BFGS-B from SciPy as the underlying gradient-based optimization procedure. First, we initialize our GP model again and display its parameters:
k = gpflow.kernels.RBF(1, lengthscales=.3)
m = gpflow.gpr.GPR(X, Y, kern=k)
m.likelihood.variance = 0.01
m
# Now let us optimize these parameters and display the full predictive distribution again.
m.optimize()
m
# We see that this model fits the data much better than our initial model. It interpolates the observed values well and has the highest degree of uncertainty at points furthest away from the observed datapoints.
# +
fig, ax = plt.subplots(figsize=(6, 4))
plot_posterior_predictive(m, ax=ax)
plt.show()
# -
# ### Other covariance functions
# As we mentioned earlier, GPflow implements a number of common covariance functions. Let us experiment with the following subset of them.
kernels = [
gpflow.kernels.Matern12(1),
gpflow.kernels.Matern32(1),
gpflow.kernels.Matern52(1),
gpflow.kernels.RBF(1),
]
# #### Exercise
#
# As we did for the squared-exponential (RBF) kernel, draw and visualize 3 samples from the GP priors specified by the kernels in the list defined above.
# +
### EXERCISE ###
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(6, 6),
subplot_kw=dict(xticks=[], yticks=[]))
fig.tight_layout()
for ax, k in zip(axes.flat, kernels):
### COMPLETE ME ###
ax.set_title(k.__class__.__name__)
plt.show()
# +
### SOLUTION ###
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(6, 6),
subplot_kw=dict(xticks=[], yticks=[]))
fig.tight_layout()
for ax, k in zip(axes.flat, kernels):
ax.set_title(k.__class__.__name__)
ax.plot(Xq, prior_samples(Xq, k))
plt.show()
# -
# #### Exercise
#
# For each of the kernels in the list defined above, fit the hyperparameters to the training set and plot the full posterior predictive distribution as we did before for the squared-exponential kernel.
#
# Note that the log marginal likelihood is non-convex so a sensible initialization of the hyperparameters is crucial to avoid getting stuck in a local optimum.
# +
### EXERCISE ###
fig, axes = plt.subplots(nrows=4, figsize=(6, 10))
fig.tight_layout()
for ax, k in zip(axes, kernels):
m = gpflow.gpr.GPR(X, Y, kern=k)
### COMPLETE ME ###
ax.set_title(
('{k} ($\ell={{{l[0]:.2f}}},'
' \sigma_f={{{sf[0]:.2f}}},'
' \sigma_y={{{sy[0]:.2f}}}$)').format(k=k.__class__.__name__,
l=m.kern.lengthscales.value,
sf=m.kern.variance.value,
sy=m.likelihood.variance.value))
plot_posterior_predictive(m, ax=ax)
plt.show()
# +
### SOLUTION ###
fig, axes = plt.subplots(nrows=4, figsize=(6, 10))
fig.tight_layout()
for ax, k in zip(axes, kernels):
m = gpflow.gpr.GPR(X, Y, kern=k)
m.likelihood.variance = .01
m.kern.lengthscales = .1
m.optimize()
ax.set_title(
('{k} ($\ell={{{l[0]:.2f}}},'
' \sigma_f={{{sf[0]:.2f}}},'
' \sigma_y={{{sy[0]:.2f}}}$)').format(k=k.__class__.__name__,
l=m.kern.lengthscales.value,
sf=m.kern.variance.value,
sy=m.likelihood.variance.value))
plot_posterior_predictive(m, ax=ax)
plt.show()
# -
# ### Real-world Dataset: Boston Housing Prices
# Finally, we apply our GP regression model to the Boston housing prices dataset.
boston = load_boston()
# The dataset has 506 datapoints, with 13 continuous/categorical features and a single target, the median property value. You can view a full description of the dataset and its features:
Markdown(boston.DESCR)
# We can load this dataset into a Pandas DataFrame and view some of its summary statistics.
boston_df = pd.DataFrame(boston.data, columns=boston.feature_names)
boston_df['MEDV'] = pd.Series(boston.target)
boston_df.describe()
# For example, we can see how the median property value (`MEDV`) varies with the average number of rooms per dwelling (`RM`).
# +
fig, ax = plt.subplots()
boston_df.plot.scatter(x='RM', y='MEDV', alpha=.8, ax=ax)
plt.show()
# -
# ### Automatic Relevance Determination (ARD)
# Now let us fit a GP regression model to this dataset. We consider the squared-exponential covariance function as before, except now, we use the *anisotropic* variant of the kernel. That is, we consider a length scale *vector* of 13 positive values.
#
# These hyperparameter values determine how far you need to move along a particular axis in the input space for the function values to become uncorrelated. By estimating these values we effectively implement automatic relevance determination, as the inverse of the length scale determines the relevance of the dimension. If the length scale is very large, the covariance practically becomes independent of that input, effectively removing it from the inference (GPML $\S$5.1 Rasmussen & Williams, 2006).
X, y = shuffle(normalize(boston.data), # scaling individual samples to have unit norm
np.atleast_2d(boston.target).T, # gpflow labels must be 2D
random_state=rng)
# In GPFlow, we can easily obtain an anisotropic kernel by specifying `ARD=True`.
k = gpflow.kernels.RBF(13, ARD=True)
m = gpflow.gpr.GPR(X, y, k)
m.likelihood.variance = .1
m.optimize()
m
# Let's create a Pandas Series to hold the length scales for ease of visualization and manipulation.
s = pd.Series(m.kern.lengthscales.value, index=boston.feature_names)
s
# We display the bar chart of the length scales corresponding to each dimension.
# +
fig, ax = plt.subplots()
s.plot.bar(ax=ax)
ax.set_ylabel('$\ell_i$')
plt.show()
# -
# Generating the scatter plot with respect to the feature that has the smallest length scale, we find that it is indeed highly correlated with the median property value.
# +
fig, ax = plt.subplots()
boston_df.plot.scatter(x=s.argmin(), y='MEDV', alpha=.8, ax=ax)
plt.show()
# -
# And vice versa for the feature with the largest length scale.
# +
fig, ax = plt.subplots()
boston_df.plot.scatter(x=s.argmax(), y='MEDV', alpha=.8, ax=ax)
plt.show()
# -
| scratch/COMP9418_W09_Gaussian_Process_Regression_Questions_with_Answers.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.9.5 64-bit (''nassy-hasler-vBIYXc2u'': pipenv)'
# name: python3
# ---
# This notebook splits the entire recording into tasks and excludes all data that is not baseline or task.
#
# Required data can be found smb://nas-weber01.unisg.ch/data/Nassy/03_Online_Model/data
#
# Make sure to create the folder `split`.
# +
import pandas as pd
from os import walk
pd.set_option('display.max_rows', 1000)
# +
_, _, file_names = next(walk('./data'))
files = [(name.split('_')[0], name) for name in file_names]
files
# +
tasks = [
('t1', 't1_gsr', 't1_eclipse (drawing_a_1.java)'),
('t2', 't2_gsr', 't2_eclipse (drawing_a_2.java)'),
('t3', 't3_gsr', 't3_eclipse (drawing_b_1.java)'),
('t4', 't4_gsr', 't4_eclipse (drawing_b_2.java)'),
('t5', 't5_gsr', 't5_eclipse (drawing_b_3.java)'),
('t6', 't6_gsr', 't6_eclipse (drawing_b_6.java)'),
('t7', 't7_gsr', 't7_eclipse (drawing_b_9.java)'),
('t8', 't8_gsr', 't8_eclipse (drawing_b_11.java)'),
]
tasks
# +
# Columns where the eye tracker writes -1 as an "invalid sample" sentinel.
SENTINEL_COLS = ['ET_PupilRight', 'ET_PupilLeft',
                 'ET_GazeLeftx', 'ET_GazeLefty',
                 'ET_GazeRightx', 'ET_GazeRighty']


def _extract_segment(et, stimuli, start_name, end_name, segment_type):
    """Slice `et` between the StartMedia event of `start_name` and the
    EndMedia event of `end_name`, replace -1 sentinels with NaN, rescale
    the Timestamp column to 100 ns ticks, and tag the rows with
    `segment_type` ('B' = baseline, 'M' = main task)."""
    start_idx = stimuli[stimuli['SlideEvent'] == 'StartMedia']['SourceStimuliName'].eq(start_name).idxmax()
    end_idx = stimuli[stimuli['SlideEvent'] == 'EndMedia']['SourceStimuliName'].eq(end_name).idxmax()
    # .copy() avoids SettingWithCopyWarning on the assignments below.
    seg = et[(et.index > start_idx) & (et.index < end_idx)].copy()
    # float('nan') replaces the `pd.np.nan` alias removed in pandas >= 1.0.
    seg[SENTINEL_COLS] = seg[SENTINEL_COLS].replace(-1, float('nan'))
    # Vectorized rescale (replaces a slow per-row apply with int()).
    seg['Timestamp'] = (seg['Timestamp'] * 10000000).astype('int64')
    seg['type'] = segment_type
    return seg


for file_name in files:
    print(file_name)
    df = pd.read_csv(f'./data/{file_name[1]}', comment='#')
    df.set_index(pd.TimedeltaIndex(df['Timestamp'].values, unit="ms"), inplace=True)
    # Keep only the eye-tracking / stimulus columns used downstream.
    et = df[['SourceStimuliName', 'SlideEvent', 'ET_PupilRight', 'ET_PupilLeft', 'ET_DistanceRight', 'ET_DistanceLeft', 'ET_ValidityLeft', 'ET_ValidityRight', 'Path', 'Line', 'Col', 'ET_GazeLeftx', 'ET_GazeLefty', 'ET_GazeRightx', 'ET_GazeRighty', "Timestamp", "ET_TimeSignal"]]
    stimuli = et[['SourceStimuliName', 'SlideEvent']].fillna('')
    for task in tasks:
        print(task)
        # Baseline: between the StartMedia and EndMedia of the GSR stimulus.
        t_gsr = _extract_segment(et, stimuli, task[1], task[1], 'B')
        # Task segment: NOTE(review) the original also anchors this slice's
        # start on the StartMedia of task[1] (the GSR stimulus) rather than
        # task[2] (the eclipse stimulus); behavior kept as-is -- confirm
        # whether task[2] was intended for the start event.
        t_eclipse = _extract_segment(et, stimuli, task[1], task[2], 'M')
        concat = pd.concat([t_gsr, t_eclipse])
        concat['subject'] = file_name[0]
        concat['task'] = task[0]
        concat.to_csv(f'split/{file_name[0]}_{task[0]}.csv', index=False, header=True)
# -
| model/split.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (oat-use)
# language: python
# name: oat-use
# ---
# %run "Common setup.ipynb"
# %reload_ext autoreload
# %autoreload 2
from SALib.analyze.delta import analyze
def incremental_delta_analysis(lower, upper, step=1):
    """Run the SALib delta (DMIM) analysis on growing prefixes of the
    loaded samples/results.

    For each sample count in range(lower, upper, step), analyzes the
    first `n_reps` rows of the module-level `numeric_vals`/`np_res` and
    records the first-order index S1 of `tgt_param` (NaN when the
    analysis is singular for that prefix).

    Returns a pair (sensitivities, sample_sizes).
    """
    sensitivities = []
    sample_sizes = []
    for n_reps in range(lower, upper, step):
        try:
            outcome = analyze(CIM_SPEC, numeric_vals[:n_reps], np_res[:n_reps], seed=101)
        except np.linalg.LinAlgError:
            # Too few rows can make the analysis singular; record a gap.
            s1 = np.nan
        else:
            s1 = outcome.to_df().loc[tgt_param, 'S1']
        sensitivities.append(s1)
        sample_sizes.append(n_reps)
    return sensitivities, sample_sizes
numeric_samples = pd.read_csv(f'{DATA_DIR}moat_10_samples.csv', index_col=0)
numeric_samples = numeric_samples[perturbed_cols]
numeric_vals = numeric_samples.values
# +
# Coupling disabled
# DMIM does not work when there is no change in parameter values
moat_no_irrigation_results = pd.read_csv(f'{DATA_DIR}moat_no_irrigation_10_results.csv', index_col=0)
moat_no_irrigation_results['Avg. $/ML'].fillna(moat_no_irrigation_results["Avg. Annual Profit ($M)"], inplace=True)
np_res = moat_no_irrigation_results.loc[:, tgt_metric].values
runs = np_res.shape[0]
res, idx = incremental_delta_analysis(54, runs+1, 54)
# -
disabled = pd.DataFrame({'S1': res}, index=idx)
# +
# Coupling enabled
moat_with_irrigation_results = pd.read_csv(f'{DATA_DIR}moat_with_irrigation_10_results.csv', index_col=0)
moat_with_irrigation_results['Avg. $/ML'].fillna(moat_with_irrigation_results["Avg. Annual Profit ($M)"], inplace=True)
np_res = moat_with_irrigation_results.loc[:, tgt_metric].values
runs = np_res.shape[0]
res, idx = incremental_delta_analysis(54, runs+1, 54)
# -
enabled = pd.DataFrame({'S1': res}, index=idx)
# +
fig, axes = plt.subplots(1,2, figsize=(12,4), sharey=True, sharex=True)
disabled.loc[:, 'S1'].plot(kind='bar',
legend=None,
title='Disabled',
ax=axes[0],
use_index=True,
rot=45,
width=0.8,
edgecolor='C0')
enabled.loc[:, 'S1'].plot(kind='bar',
legend=None,
title='Enabled',
ax=axes[1],
use_index=True,
rot=45,
width=0.8,
edgecolor='C0').legend(
bbox_to_anchor=(1.35, 0.65)
)
fig.suptitle("DMIM Analysis\non Morris Samples", x=0.5, y=1.05, fontsize=22)
plt.xlabel("$N$", x=-0.1, labelpad=15);
# -
fig.savefig(FIG_DIR+'DMIM_morris_larger_sample.png', dpi=300, bbox_inches='tight')
| notebooks/6b DMIM Analysis on Morris Samples.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# In this lab, we will
# - read our project data into a Pandas DataFrame
# - write a function to compute simple features for each row of the data frame
# - fit a LogisticRegression model to the data
# - print the top coefficients
# - compute measures of accuracy
#
# I've given you starter code below. You should:
# - First, try to get it to work with your data. It may require changing the load_data file to match the requirements of your data (e.g., what is the object you are classifying -- a tweet, a user, a news article?)
# - Second, you should add additional features to the make_features function:
# - Be creative. It could be additional word features, or other meta data about the user, date, etc.
# - As you try out different feature combinations, print out the coefficients and accuracy scores
# - List any features that seem to improve accuracy. Why do you think that is?
from collections import Counter
import numpy as np
import pandas as pd
import re
from sklearn.linear_model import LogisticRegression
from sklearn.feature_extraction import DictVectorizer
# +
def load_data(datafile, checkfacts):
    """
    Read the Facebook comment data and fact-check rulings into a single
    pandas DataFrame where
    - each row is an instance to be classified (a fact-checked post with
      its comment tokens)
    - the column `label` stores the normalized fact-check ruling

    Parameters
    ----------
    datafile : path or buffer for the comments CSV
        (columns include social_id, comment_tokens)
    checkfacts : path or buffer for the fact-check CSV
        (columns include site, social_id, ruling)
    """
    # Read social_id as str so the merge keys compare exactly.
    # BUG FIX: the original re-read both CSVs a second time *without*
    # dtype=str, silently discarding these typed frames and losing the
    # string dtype on the merge key; the redundant reads are removed.
    df = pd.read_csv(datafile, dtype={'social_id': str})[['social_id', 'comment_tokens']]
    cf = pd.read_csv(checkfacts, dtype={'social_id': str})[['site', 'social_id', 'ruling']]
    # Keep only Facebook fact-checks.
    cf = cf[cf['site'] == 'facebook']
    df.columns = ['item_id', 'content']
    cf.columns = ['site', 'item_id', 'label']
    df = df.drop_duplicates(['item_id', 'content'])
    # Inner join: keep only posts that have a fact-check ruling.
    df = pd.merge(cf, df, on=['item_id'], how='inner')
    # Normalize mixed-case rulings to a canonical upper-case label set.
    df['label'] = df['label'].replace({'False': 'FALSE',
                                       'True': 'TRUE',
                                       'Mostly False': 'MOSTLY FALSE',
                                       'Mostly True': 'MOSTLY TRUE'})
    return df
df = load_data('..\\..\\training_data\\facebook.csv.gz', '..\\..\\training_data\\factchecks.csv')
df.head()
# -
# what is the distribution over class labels?
print(df.label.value_counts())
print()
# Percentage statistics
print(df.label.value_counts(normalize=True))
# +
def make_features(df):
    """Build a sparse bag-of-words feature matrix from df['content'].

    Only a small fixed vocabulary is counted for now; returns the
    feature matrix X and the fitted DictVectorizer.
    """
    vec = DictVectorizer()
    # Small initial vocabulary; extend this list to add word features.
    words_to_track = ['certainly', 'impossible']
    feature_dicts = []
    for _, row in df.iterrows():
        # Strip non-word characters, lower-case, and count tokens.
        tokens = re.sub(r'\W+', ' ', row['content'].lower()).split()
        counts = Counter(tokens)
        feature_dicts.append({w: counts[w] for w in words_to_track})
    X = vec.fit_transform(feature_dicts)
    return X, vec
df['content'] = df['content'].astype(str)
X, vec = make_features(df)
# -
# what are dimensions of the feature matrix?
X.shape
# what are the feature names?
# vocabulary_ is a dict from feature name to column index
vec.vocabulary_
# how often does each word occur?
for word, idx in vec.vocabulary_.items():
print('%20s\t%d' % (word, X[:,idx].sum()))
# can also get a simple list of feature names:
vec.get_feature_names()
# e.g., first column is 'hate', second is 'love', etc.
# we'll first store the classes separately in a numpy array
y = np.array(df.label)
Counter(y)
# to find the row indices with FALSE label
np.where(y=='FALSE')[0]
# store the class names
class_names = set(df.label)
# how often does each word appear in each class?
for word, idx in vec.vocabulary_.items():
for class_name in class_names:
class_idx = np.where(y==class_name)[0]
print('%20s\t%20s\t%d' % (word, class_name, X[class_idx, idx].sum()))
# fit a LogisticRegression classifier.
clf = LogisticRegression(solver='lbfgs', multi_class='multinomial')
clf.fit(X, y)
# for binary classification, LogisticRegression stores a single coefficient vector
clf.coef_
# this would be a matrix for a multi-class probem.
# for binary classification, the coefficients for the negative class is just the negative of the positive class.
coef = -clf.coef_
print(coef)
for ci, class_name in enumerate(clf.classes_):
print('coefficients for %s' % class_name)
display(pd.DataFrame([coef[ci]], columns=vec.get_feature_names()))
# sort coefficients by class.
features = vec.get_feature_names()
for ci, class_name in enumerate(clf.classes_):
print('top features for class %s' % class_name)
for fi in coef[ci].argsort()[::-1]: # descending order.
print('%20s\t%.2f' % (features[fi], coef[ci][fi]))
# +
from sklearn.model_selection import KFold
from sklearn.metrics import accuracy_score
kf = KFold(n_splits=5, shuffle=True, random_state=42)
accuracies = []
for train, test in kf.split(X):
clf.fit(X[train], y[train])
pred = clf.predict(X[test])
accuracies.append(accuracy_score(y[test], pred))
print('accuracy over all cross-validation folds: %s' % str(accuracies))
print('mean=%.2f std=%.2f' % (np.mean(accuracies), np.std(accuracies)))
# -
| notebooks/W2L1_Zhicheng_Liu.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="view-in-github"
# <a href="https://colab.research.google.com/github/frahlg/MECH550N/blob/master/import_data.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={} colab_type="code" id="I4sphozHkPWB"
import pandas as pd
# + colab={} colab_type="code" id="NYrl50P3pZEm"
df = pd.read_excel('https://github.com/frahlg/MECH550N/blob/master/Data_analysis/data/dataset1/out_0.xlsx?raw=true', index_col=0)
# + colab={"base_uri": "https://localhost:8080/", "height": 878} colab_type="code" id="m7zmdEPUpZMk" outputId="b6a5c1d4-748c-4cbd-fea2-08b02f29e57b"
df
# + colab={} colab_type="code" id="88_r8uJqpZPP"
# + [markdown] colab_type="text" id="l_7MQLRwkp9Y"
#
| notebooks/create_db/import_data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
# ### Ideas:
# 1. initializing the guess:
#     A. rejection sampling relative to the pdf
#     B. convolution prior to rejection sampling
# 2. issues:
# A. x space should extend beyond possible means
# B. extension should be by some sigma
# +
# Mixture-of-normals density function
# 'v' : array of floats: mu, the centers of the component normals
# 's' : float: sigma, the shared standard deviation
def pdf(x, v, s):
    """Equal-weight mixture of normal densities.

    Evaluates, at every position in `x`, the average of normal pdfs
    centered at each value of `v`, all sharing standard deviation `s`.
    """
    coeff = 1 / (np.sqrt(2 * np.pi) * s)
    total = 0
    for center in v:
        total = total + coeff * np.exp(-.5 * ((x - center) / s) ** 2)
    return total / len(v)
def Pdf(x, m, s):
    """Normal probability density with mean `m` and standard deviation `s`."""
    z = (x - m) / s
    return np.exp(-.5 * z ** 2) / (np.sqrt(2 * np.pi) * s)
# +
#Expectation Maximization Algorithm 1D using pdf
#Assumption: variance of all normals is the same
#INPUTS:
# 'x' array : positions for probability density values
# 'p': array : probabilty density values over 'x'
# 'guess': array :initial 'k' guesses of centers of normals
# 'ite': int : number of iterations for EM
#OUTPUTS:
# 'mu' : array : values of centers of normal
def EM(x, p, guess, ite, s=1.0):
    """Expectation-maximization for the centers of a 1-D equal-variance
    normal mixture, fit to a discretized density.

    Parameters:
        x : array -- positions at which the density is sampled
        p : array -- probability density values over `x`
        guess : array -- k initial guesses for the component centers
        ite : int -- number of EM iterations
        s : float, default 1.0 -- standard deviation assumed for the
            components in the E-step (generalizes the previously
            hard-coded value of 1; the default preserves old behavior)

    Returns:
        mu : array -- estimated component centers
    """
    k = len(guess)
    n = len(x)
    likeli = np.zeros((k, n))
    # Normalizing constant of the component normal (loop-invariant).
    coeff = 1.0 / (np.sqrt(2.0 * np.pi) * s)
    for _ in range(ite):
        # E-step: prior-weighted component likelihoods at each position
        # (the normal pdf is inlined so the function is self-contained).
        for i in range(k):
            likeli[i, :] = p * coeff * np.exp(-0.5 * ((x - guess[i]) / s) ** 2)
        norm = np.sum(likeli, axis=0)
        likeli = likeli / norm
        # M-step: density-weighted center of mass per component; nansum
        # skips positions where norm == 0 produced NaNs.
        guess = np.nansum(likeli * x * p, axis=1) / np.nansum(likeli * p, axis=1)
    return guess
#Includes graphical output
def EM_graph(x,p,guess,ite):
    """Same EM iteration as `EM`, with diagnostic plots.

    Shows the input density with the initial guesses, runs `ite` EM
    updates with a hard-coded E-step sigma of 1, then plots the
    per-component (prior-weighted) responsibility curves over the
    density, and returns the fitted centers.

    NOTE(review): the trailing `for l` plotting loop is assumed to run
    once, after the EM iterations, plotting the final responsibilities
    -- confirm against the original notebook layout.
    """
    k = len(guess)
    n = len(x)
    # Initial state: the density with the guessed centers marked.
    plt.title('Intial distibution with guesses')
    plt.plot(x,p)
    plt.vlines(guess, 0, np.max(p)*1.02, 'b')
    plt.show()
    #likelihood function (includes prior)
    #alt: prior = could np.sum(p*likeli)/np.sum(p)
    likeli = np.zeros((k,n))
    plt.title('Probability within set and sequence of centers')
    plt.plot(x,p)
    plt.vlines(guess, 0, np.max(p)*1.02, 'b')
    for j in range(ite):
        # E-step: prior-weighted component likelihoods, normalized per position.
        for i in range(k):
            likeli[i,:] = p*Pdf(x,guess[i],1)
        norm = np.sum(likeli, axis = 0)
        likeli = likeli/norm
        #weighted center of mass given likeli with prior
        guess = np.nansum(likeli*x*p, axis = 1)/np.nansum(likeli*p, axis = 1)
        #plt.vlines(guess, 0, np.max(p)*1.02, 'r')
    # Plot the final responsibility curve of each component.
    for l in range(k):
        plt.plot(x,likeli[l])
    plt.show()
    mu = guess
    return mu
# +
#Chooses BEST of a number of randomly intialized
#Expectation Maximization Algorithm (1D using pdf)
#Assumption: variance of all normals is the same
#INPUTS:
# 'x' array : positions for probability density values
# 'p': array : probabilty density values over 'x'
# 'k': int : number of centers to find
# 'ite': int : number of subiterations for EM
# 'num': int : number of EM runs in set to choose best from
#OUTPUTS:
# 'm' : array : best from set of values for centers of normal
# 'l' : float : L_2 loss on returned solution
def best_EM(x,p,k,ite,num,dist = False, B = 0):
    """Choose the best of `num` randomly initialized EM runs.

    Parameters:
        x : array -- positions for the probability density values
        p : array -- probability density values over `x`
        k : int -- number of centers to find
        ite : int -- number of EM sub-iterations per run
        num : int -- number of independent EM runs to choose from
        dist : bool -- if True, return only the array of losses `L`
            (note the inconsistent return type versus the default path)
        B : float -- lower bound of the uniform range [B, 100) used to
            draw the random initial center guesses

    Returns:
        (M[ind], L[ind]) -- the best centers and their L2 loss, or the
        whole loss array `L` when `dist` is True.

    NOTE(review): `s` below is read from module scope (the simulated
    sigma) rather than passed as a parameter, so results silently depend
    on the notebook's current global `s` -- confirm this is intended.
    NOTE(review): np.random is unseeded; results differ run to run.
    """
    #matrix with rows of centers after each individual EM run
    M = np.empty((num,k))
    #L_2 loss of output
    L = np.empty(num)
    for j in range(num):
        # Random initial centers drawn uniformly from [B, 100).
        g = B + np.random.rand(k)*(100-B)
        m = EM(x,p,g,ite)
        # Reconstruct the mixture implied by the fitted centers.
        P = pdf(x,m,s)
        M[j,:] = m
        # Mean squared error between target and reconstructed density.
        L[j] = np.mean((p-P)**2)
    ind = np.argmin(L)
    if dist == True:
        return L
    else:
        return M[ind], L[ind]
#OUTPUTS:
# 'M' : matrix (num,k) : num many soltions of size k
# 'L' : array : L_2 loss of each row of solutions
def list_EM(x,p,k,ite,num):
    """Run `num` randomly initialized EM fits and return all of them.

    Returns:
        M : (num, k) matrix -- one row of fitted centers per run
        L : array of length num -- L2 loss of each row of `M`

    NOTE(review): like `best_EM`, this reads the mixture sigma `s` from
    module scope and draws unseeded random initial guesses in [0, 100).
    """
    #matrix with rows of centers after each individual EM run
    M = np.empty((num,k))
    #L_2 loss of output
    L = np.empty(num)
    for j in range(num):
        # Random initial centers drawn uniformly from [0, 100).
        g = np.random.rand(k)*100
        m = EM(x,p,g,ite)
        # Reconstruct the mixture implied by the fitted centers.
        P = pdf(x,m,s)
        M[j,:] = m
        # Mean squared error between target and reconstructed density.
        L[j] = np.mean((p-P)**2)
    return M, L
# +
#number of normals
k = 10
#lower bound for possible centers
L = 0
#simulating true centers (uniform in [L, 100))
grades = L + np.random.rand(k)*(100-L)
#number of points to simulate distribution
n = 10000
#standard deviation of the simulated normals
s = 1.8
#discrete positions
x = np.linspace(0,100,n)
#pdf values at positions
p = pdf(x,grades,s)

plt.title('Simulated Distribution')
plt.plot(x,p)
plt.show()

#number of EM sub steps per run
I = 20
#number of independently initialized solutions
N = 1000

#Pull a list of solutions and losses
M, L = list_EM(x,p,k,I,N)

plt.title('Distribution of Quality of Solutions')
plt.hist(L, bins = 3*int(np.sqrt(N)))
plt.show()

#Best Solution Graph: overlay the reconstructed density on the target
ind = np.argmin(L)
m = M[ind]
P = pdf(x,m,s)
plt.title('Best Solution Graph')
plt.plot(x,p)
plt.plot(x,P)
plt.show()

# BUG FIX: the original printed the undefined lowercase name `l`
# (a NameError on linear execution); the best loss is L[ind].
print('Best Loss:', L[ind])
# #Simulation of solutions
# for q in range(10):
# g = np.random.rand(k)*100
# #print(np.sort(EM_graph(x,p,g,200)))
# #print(np.sort(grades))
# m = EM(x,p,g,800)
# P = pdf(x,m,s)
# plt.plot(x,p)
# plt.plot(x,P)
# plt.scatter(grades, np.zeros(k))
# plt.scatter(g, np.zeros(k))
# plt.scatter(m, np.zeros(k), color = 'r')
# plt.show()
# print(np.mean((p-P)**2))
# #print(np.sum(pdf(x,EM(x,p,g,200),s))*dx,np.sum(p)*dx)
# +
#Convolution
g_student = 100*np.array([0.66413662, 0.76660342, 0.86622391, 0.88045541, 0.82163188,
0.9259962 , 0.91461101, 0.88614801, 0.85768501, 1.00948767,
0.79696395, 0.74003795, 0.83586338, 0.86907021])
k = len(g_student)
y = pdf(x,g_student,1.5)
#Deconvolution
m, l = best_EM(x,y,k,20,400, False, B = 60)
Y = pdf(x,m,1.5)
#Graph
plt.plot(x,y)
plt.plot(x,Y)
plt.title('Best Solution Graph')
plt.show()
print(l)
# -
print(np.sort(m))
print(np.sort(g_student))
dx = 100/n
dx
np.sum(y)*dx
| EM_algorithm.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# [View in Colaboratory](https://colab.research.google.com/github/sonukick/backtests/blob/master/gap_research_metrics.ipynb)
# + [markdown] id="gfaHkGOKah0j" colab_type="text"
# ### Back test results
# ### Strategy: Gap Strategy
#
# Market: NSE-India
#
# Data: Qunadl
#
# Duration: 2012-01-01 to 2018-08-17
#
# Segment: Stocks in NIFTY50 as of 2018-08-17
#
# ---
#
#
#
# Rules:
#
# 1. If Gap Down more than 2% buy at opening
# 2. If Gap up more than 2% short at opening
# Returns: If gap is covered assumed day return = gap
# If gap is not covered wait till EOD
# **Prices are taken at Open/Close
#
#
# ---
#
#
# * Maximum drawdown : 13.6%
# * maximum intraday day loss: 9%
# * maximum intraday profit : 19.8%
#
#
# ***Total returns : 591.21%**
#
# CAGR: 47.20%
#
#
# ***Future developments:
# extending to F&O stocks
#
# changing the exit strategy: instead of holding till EOD, exiting at predetermined stoploss***
#
#
#
#
# + id="vNFrvx8HCMVR" colab_type="code" colab={}
# !pip install openpyxl
# !pip install pydrive
# !pip install quandl
import quandl
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials
import io
import seaborn as sns
import matplotlib.pyplot as plt
# 1. Authenticate and create the PyDrive client.
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)
file_list = drive.ListFile({'q': "'1P1c2VCcaHquJC0Fp5yfeiZjgqwNZlBEb' in parents and trashed=false"}).GetList()
for file1 in file_list:
print('title: %s, id: %s' % (file1['title'], file1['id']))
data = drive.CreateFile({'id': '1cqSAgTq8Q7a-YLDoEwW312aAxZHY3MuG'})
data.GetContentFile('ind_nifty50list.csv')
api_key = '<KEY>'
quandl.ApiConfig.api_key = api_key
#from google.colab import files
#uploaded = files.upload()
#symbols = pd.read_csv(io.StringIO(uploaded['ind_nifty50list.csv'].decode('utf-8')))
symbols = pd.read_csv('ind_nifty50list.csv')
symbols = symbols['Symbol']
symbols[0]
gaps_list = []
gaps_list.append(('Date','symbol','Open','prv_Close','Close','High','Low','Gap'))
for j in symbols:
print(j)
try:
data = quandl.get('NSE/'+str(j), start_date='2012-01-1', end_date='2018-08-20')
except:
continue
for i in range(len(data.Close)-1):
if data.Open[i+1]<data.Close[i]*0.98:
gaps_list.append((data.index[i+1],j,data.Open[i+1],data.Close[i],data.Close[i+1],data.High[i+1],data.Low[i+1],np.round((data.Open[i+1]/data.Close[i])-1,3)))
if data.Open[i+1]>data.Close[i]*1.02:
gaps_list.append((data.index[i+1],j,data.Open[i+1],data.Close[i],data.Close[i+1],data.High[i+1],data.Low[i+1],np.round((data.Open[i+1]/data.Close[i])-1,3)))
# + id="y3XmIxGHgKDZ" colab_type="code" colab={}
df = pd.DataFrame(gaps_list)
# + id="HshwFR8PKpKy" colab_type="code" colab={}
df.columns = df.iloc[0]
df = df.iloc[1:]
covered_list = []
gap_left = 0
for i in range(1,len(df.Gap)+1):
if df.Gap[i]<0:
if df.High[i] >= df.prv_Close[i]:
gap_left = 0
covered_list.append(gap_left)
else:
gap_left = np.round(df.High[i]/df.prv_Close[i]-1,3)
covered_list.append(gap_left)
if df.Gap[i]>0:
if df.Low[i] <= df.prv_Close[i]:
gap_left = 0
covered_list.append(gap_left)
else:
gap_left = np.round(df.Low[i]/df.prv_Close[i]-1,3)
covered_list.append(gap_left)
# + id="gC-yysJ_T54i" colab_type="code" colab={}
return_list = []
return1 = 0
for i in range(1,len(df.Gap)+1):
if df.Gap[i]<0:
if df.High[i] >= df.prv_Close[i]:
return1 = abs(df.Gap[i])
return_list.append(return1)
else:
return1 = np.round(df.Close[i]/df.Open[i]-1,3)
return_list.append(return1)
if df.Gap[i]>0:
if df.Low[i] <= df.prv_Close[i]:
return1 = abs(df.Gap[i])
return_list.append(return1)
else:
return1 = np.round(1-(df.Close[i]/df.Open[i]),3)
return_list.append(return1)
# + id="TfSpieqxDMYX" colab_type="code" colab={}
df['gap_left'] = covered_list
df.Gap = df.Gap*100
df.gap_left = df.gap_left*100
df['percent_left'] = 100*df.gap_left/df.Gap
df['returns'] = return_list
df.returns = df.returns*100
# + id="x29LTNGKVo4f" colab_type="code" colab={}
frequency = pd.cut(df.percent_left,bins = range(-1,110,11))
sns.countplot(frequency)
#total gaps
print(len(df))
#no of gaps with 100% covered
gap_stats = df.percent_left.value_counts()
# + id="8dgw07blf_Eq" colab_type="code" colab={}
#returns
dates = list(set(df.Date))
returns = 0
dates[1]
returns_data= []
daily_ret = 0
for i in dates:
x = df[df.Date==i]
daily_ret = np.round(np.average(x['returns']),2)
returns_data.append((i,daily_ret))
returns_by_date = pd.DataFrame(returns_data)
returns_by_date.index = returns_by_date[0]
returns_by_date.drop(columns=0,inplace=True)
returns_by_date['year'] = returns_by_date.index.year
# + id="3u2bQTJDcEGL" colab_type="code" colab={}
cumulative = np.cumsum(returns_by_date[1])
cumulative = list(cumulative)
plt.title('Cumulative Returns')
plt.plot(cumulative)
plt.show()
xs = cumulative
i = np.argmax(np.maximum.accumulate(xs) - xs) # end of the period
j = np.argmax(xs[:i]) # start of period
print(cumulative[i],cumulative[j])
plt.plot(xs)
plt.plot([i, j], [xs[i], xs[j]], 'o', color='Red', markersize=5)
# + id="VKuwCaTeYAJK" colab_type="code" colab={}
'''df.to_excel('gaps.xlsx')
from google.colab import files
files.download('gaps.xlsx')'''
# + id="T7mJ1nm2alFY" colab_type="code" colab={}
'''returns_by_date.to_excel('returns_by_date.xlsx')
from google.colab import files
files.download('returns_by_date.xlsx')'''
# + id="Q4srswesF7ZN" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 5010} outputId="51fcb9f4-7e10-477b-c1af-cf882fe1a02b"
import pyfolio as pf
pf.create_full_tear_sheet(returns_by_date[1]/100)
# + id="wMv_HC4JFnH_" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1109} outputId="f73573dc-b9e6-47de-b9cb-634f852f8428"
returns_by_date[1] =
# + id="TBVbzS5YbOUl" colab_type="code" colab={}
| gap_research_metrics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + pycharm={"is_executing": false}
'''
For SLP visualization.
'''
from data.SLP_RD import SLP_RD
import opt
from utils import vis
# + pycharm={"is_executing": false, "name": "#%%\n"}
# Build options and load the SLP test split via the project's data reader.
opts = opt.parseArgs()
opts = opt.aug_opts(opts)
SLP_rd = SLP_RD(opts, phase='test')

# Fetch one sample: image array, joints, and bounding box for sample 0.
idx = 0
arr, jt, bb = SLP_rd.get_array_joints(idx_smpl=idx)
ptc = SLP_rd.get_ptc(idx=idx) # all pixel
# Re-project IR and PM modalities into the raw-depth frame for overlay.
arr_IR2depth = SLP_rd.get_array_A2B(idx=idx, modA='IR', modB='depthRaw')
arr_PM2depth = SLP_rd.get_array_A2B(idx=idx, modA='PM', modB='depthRaw')
# + pycharm={"is_executing": false, "name": "#%%\n"}
# test the plotly
# Camera eye position for the 3D view (x, y, z).
eye = [1.0, -1.25, 0.7]
# vis.vis_IR_D_PM(arr, arr_IR2depth, arr_PM2depth, eye=eye, pth='tmp/demo_SLP.png') #for save out
vis.vis_IR_D_PM(arr, arr_IR2depth, arr_PM2depth, eye=eye)
| eg_vis_SLP.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# + [markdown] colab_type="text" id="g-atf3gekcgR"
# # Assessment 1: I can train and deploy a neural network
# + [markdown] colab_type="text" id="_7wkT17FkmU6"
# At this point, you've worked through a full deep learning workflow. You've loaded a dataset, trained a model, and deployed your model into a simple application. Validate your learning by attempting to replicate that workflow with a new problem.
#
# We've included a dataset which consists of two classes:
#
# 1) Face: Contains images which include the face of a whale
# 2) Not Face: Contains images which do not include the face of a whale.
#
# The dataset is located at ```/dli/data/whale/data/train```.
#
# Your challenge is:
#
# 1) Use [DIGITS](/digits) to train a model to identify *new* whale faces with an accuracy of more than 80%.
#
# 2) Deploy your model by modifying and saving the python application [submission.py](../../../../edit/tasks/task-assessment/task/submission.py) to return the word "whale" if the image contains a whale's face and "not whale" if the image does not.
#
# Resources:
#
# 1) [Train a model](../../task1/task/Train%20a%20Model.ipynb)
# 2) [New Data as a goal](../../task2/task/New%20Data%20as%20a%20Goal.ipynb)
# 3) [Deployment](../../task3/task/Deployment.ipynb)
#
# Suggestions:
#
# - Use empty code blocks to find out any information necessary to solve this problem: eg: ```!ls [directorypath] prints the files in a given directory```
# - Executing the first two cells below will run your python script with test images, the first should return "whale" and the second should return "not whale"
# + [markdown] colab_type="text" id="YaaY1Vb3o3mC"
# Start in [DIGITS](/digits/).
# -
# !python submission.py '/dli/data/whale/data/train/face/w_8519.jpg' #This should return "whale" at the very bottom
# !python submission.py '/dli/data/whale/data/train/not_face/w_8207.jpg' #This should return "not whale" at the very bottom
# !ls /dli/data/whale/data/train/face/
# !ls /dli/data/whale/data/train/not_face/
# !ls /dli/data/digits/20200325-141910-d2f4/
# !ls /dli/data/digits/20200325-141910-d2f4/dep*
# !ls /dli/data/digits/20200325-141910-d2f4/*.caffemodel
# 100 epochs
# !ls /dli/data/digits/20200325-142619-1525/
# !ls /dli/data/digits/20200325-142619-1525/deploy.prototxt
# !ls /dli/data/digits/20200325-142619-1525/snapshot_iter_5400.caffemodel
# !ls /dli/data/digits/20200325-141614-588c/
# !ls /dli/data/digits/20200325-141614-588c/mean*
| NVIDIA Training/final_project_whale_classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from konlpy.tag import Okt
from gensim.models.word2vec import Word2Vec
okt = Okt()
# Tokenize the corpus line by line, keeping only nouns and verbs.
# Each element of `result` is the token list for one line that produced tokens.
result = []
# FIX (resource safety): use a context manager so the file is closed even if
# okt.pos raises partway through; iterate the file object instead of the
# manual readline/break loop.
with open(r'C:\dataset.txt', encoding = "utf8") as f:
    for i, line in enumerate(f, start=1):
        if i % 5000 == 0:
            print("%d번째 줄 진행 중."%i)  # progress message every 5000 lines
        # POS-tag with stemming and normalization (KoNLPy Okt)
        tokenlist = okt.pos(line, stem = True, norm = True)
        temp = [word[0] for word in tokenlist if word[1] in ["Noun", "Verb"]]
        if temp:
            result.append(temp)
# -
# Report corpus size, then train CBOW word2vec (sg=0) on the token lists.
print('추출된 데이터: {}'.format(len(result)))
# NOTE(review): `size` and `iter` are gensim 3.x parameter names (renamed to
# vector_size / epochs in gensim 4) — pin gensim < 4 or update accordingly.
model = Word2Vec(result, size = 100, window=5, min_count=2, workers=4, sg=0, iter = 200)
model_result = model.wv.most_similar("한국")
print(model_result)
| word2vec.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
from os import listdir, makedirs
from os.path import join, exists, expanduser
from tqdm import tqdm
import random
import numpy as np
import pandas as pd
import datetime as dt
import time
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import seaborn as sns
# %matplotlib inline
import cv2
import keras
from keras.callbacks import TensorBoard
from keras.preprocessing import image
from keras.preprocessing.image import ImageDataGenerator
from mpl_toolkits.axes_grid1 import ImageGrid
from keras.utils.np_utils import to_categorical
from keras.models import Sequential
from keras.models import Model
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D
from keras.layers import Dense, GlobalAveragePooling2D
from keras import optimizers
from keras.optimizers import RMSprop
from keras.callbacks import ReduceLROnPlateau
from keras import backend as K
# from keras.applications import vgg16, resnet50, mobilenet
# from keras.applications.vgg16 import VGG16
# from keras.applications.resnet50 import ResNet50
# from keras.applications import xception
# from keras.applications import inception_v3
from keras.applications.inception_v3 import InceptionV3
from keras.applications.vgg16 import preprocess_input, decode_predictions
from keras.optimizers import SGD
np.random.seed(2)
sns.set(style='white', context='notebook', palette='deep')
# +
def show_pred(preds, Y, val_breed, index, seq, ran):
    """Show predicted vs. actual breed for images in the global X_train.

    preds: model output array; Y: true label indices; val_breed: index->name.
    If seq is truthy, shows `index` images (random ones when `ran` is truthy);
    otherwise shows the single image at `index`.
    """
    leng = len(preds)
    # Hoisted: decode_predictions is identical for every iteration, so call once.
    decoded = decode_predictions(preds, top=1)
    if seq:
        for i in range(index):
            if ran:
                # BUG FIX: random.randint's upper bound is INCLUSIVE, so the
                # original randint(0, leng) could produce the out-of-range
                # index `leng` and raise IndexError.
                index = random.randint(0, leng - 1)
            _, imagenet_class_name, prob = decoded[index][0]
            plt.title("Original: " + val_breed[Y[index]] + "\nPrediction: " + imagenet_class_name)
            plt.imshow(X_train[index])  # assumes global X_train aligns with preds — TODO confirm
            plt.show()
    else:
        _, imagenet_class_name, prob = decoded[index][0]
        plt.title("Original: " + val_breed[Y[index]] + "\nPrediction: " + imagenet_class_name)
        plt.imshow(X_train[index])
        plt.show()
def accuracy_func(preds, Y, val_breed):
    """Percentage of samples whose top-1 ImageNet label matches the true breed.

    Prints the accuracy and returns it as a float in [0, 100].
    """
    total = len(preds)
    hits = 0
    for idx in range(total):
        # top-1 tuple is (class_id, class_name, probability)
        _, predicted_name, _ = decode_predictions(preds, top=1)[idx][0]
        if predicted_name == val_breed[Y[idx]]:
            hits += 1
    accuracy = (hits / total) * 100
    print("Accuracy: ", accuracy)
    return accuracy
# -
# Ensure the Keras cache directories (~/.keras and ~/.keras/models) exist,
# creating the parent before the child.
cache_dir = expanduser(join('~', '.keras'))
models_dir = join(cache_dir, 'models')
for directory in (cache_dir, models_dir):
    if not exists(directory):
        makedirs(directory)
# +
# #Load the VGG model
# vgg_model = vgg16.VGG16(weights='imagenet')
# #Load the Inception_V3 model
# inception_model = inception_v3.InceptionV3(weights='imagenet')
# #Load the ResNet50 model
# resnet_model = resnet50.ResNet50(weights='imagenet')
# #Load the MobileNet model
# mobilenet_model = mobilenet.MobileNet(weights='imagenet')
# -
# Dataset locations (one subfolder per class).
training_path = 'data/training'
validation_path = 'data/validation'
testing_path = 'data/test'
# Shared data-pipeline settings.
batch_size = 32
target_size=(224, 224)  # input resolution fed to the network
norm = 255.0            # pixel normalization divisor (uint8 -> [0, 1])
class_mode='categorical'
# +
# Training generator applies augmentation; validation/test only rescale.
train_datagen = ImageDataGenerator(
    rescale=1./norm,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)

validation_datagen = ImageDataGenerator(rescale=1./norm)
test_datagen = ImageDataGenerator(rescale=1./norm)

# Stream batches straight from the class-per-subfolder directory layout.
train_generator = train_datagen.flow_from_directory(
    training_path,
    target_size=target_size,
    batch_size=batch_size,
    class_mode=class_mode)

validation_generator = validation_datagen.flow_from_directory(
    validation_path,
    target_size=target_size,
    batch_size=batch_size,
    class_mode=class_mode)

# test_generator = test_datagen.flow_from_directory(
#     testing_path,
#     target_size=target_size,
#     batch_size=batch_size,
#     class_mode=class_mode)
# -
# create the base pre-trained model (ImageNet weights, classifier head removed)
base_model = InceptionV3(weights='imagenet', include_top=False)

# add a global spatial average pooling layer
x = base_model.output
x = GlobalAveragePooling2D()(x)
# add a fully-connected layer
x = Dense(1024, activation='relu')(x)
# softmax output layer for the 10 monkey-species classes
predictions = Dense(10, activation='softmax')(x)
# +
# this is the model we will train
model = Model(inputs=base_model.input, outputs=predictions)

# first: train only the top layers (which were randomly initialized)
# i.e. freeze all convolutional InceptionV3 layers
for layer in base_model.layers:
    layer.trainable = False

# compile the model (should be done *after* setting layers to non-trainable)
model.compile(optimizer='rmsprop', loss='categorical_crossentropy')
# +
# at this point, the top layers are well trained and we can start fine-tuning
# convolutional layers from inception V3. We will freeze the bottom N layers
# and train the remaining top layers.

# let's visualize layer names and layer indices to see how many layers
# we should freeze:
for i, layer in enumerate(base_model.layers):
    print(i, layer.name)

# we chose to train the top 2 inception blocks, i.e. we will freeze
# the first 249 layers and unfreeze the rest:
for layer in model.layers[:249]:
    layer.trainable = False
for layer in model.layers[249:]:
    layer.trainable = True
# +
# we need to recompile the model for these modifications to take effect
# NOTE(review): despite the variable name, this is Adam, not SGD — the SGD
# variants below were kept commented for comparison.
sgd = optimizers.Adam()
# sgd = optimizers.SGD()
# sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
# sgd = optimizer=SGD(lr=0.0001, momentum=0.9)
model.compile(sgd, loss='categorical_crossentropy', metrics=["accuracy"])
# model.compile(loss='mean_squared_error', optimizer='sgd')
# +
# Output locations; checkpoint filename embeds epoch number and val accuracy.
model_dir = 'outputs/models/'
log_file = "outputs/logs"
model_file = model_dir+"weights-improvement-{epoch:02d}-{val_acc:.2f}.hdf5"
# -
# Save a checkpoint every epoch (save_best_only=False keeps them all).
checkpoint = keras.callbacks.ModelCheckpoint(model_file, monitor='val_acc', verbose=0, save_best_only=False, save_weights_only=False, mode='auto', period=1)
# NOTE(review): patience=0 stops at the FIRST epoch without val_acc improvement
# — confirm this aggressive setting is intended.
early_stopping = keras.callbacks.EarlyStopping(monitor='val_acc', min_delta=0, patience=0, verbose=0, mode='auto', baseline=None)
tensorboard = keras.callbacks.TensorBoard(log_dir=log_file, histogram_freq=0, batch_size=32, write_graph=True, write_grads=False, write_images=False, embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None, embeddings_data=None)
tensorboard.set_model(model)
callbacks_list = [early_stopping, checkpoint, tensorboard]
# callbacks_list = [checkpoint, tensorboard]
# we train our model again (this time fine-tuning the top 2 inception blocks
# alongside the top Dense layers)
history = model.fit_generator(
    train_generator,
    epochs=15,
    validation_data=validation_generator,
    verbose=1,
    callbacks=callbacks_list)
# +
# Plot training & validation accuracy values
# NOTE(review): the 'acc'/'val_acc' history keys are Keras 1.x/2.2-era names
# (renamed 'accuracy'/'val_accuracy' in later TF-Keras) — confirm version.
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()

# Plot training & validation loss values
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
# -
# Reload the epoch-2 checkpoint (84% val accuracy) for evaluation below.
filepath = 'outputs/models/weights-improvement-02-0.84.hdf5'
model = keras.models.load_model(filepath)
# +
# preds = model.predict_generator(test_generator, verbose=1)
# +
# categorical_accuracy = keras.metrics.categorical_accuracy(test_generator.classes, preds)
# +
# result = model.evaluate_generator(generator=test_generator, verbose=1)
# +
# print("%s%.2f%s"% ("Accuracy: ", result[1]*100, "%"))
# print("%s%.2f"% ("Loss: ", result[0]))
# +
def get_class_values(gen):
    """Return the values of mapping `gen` as a list, in insertion order.

    Idiom fix: the original manual key-loop is exactly `list(gen.values())`.
    """
    return list(gen.values())
def get_classes(gen):
    """Invert mapping `gen`, returning a dict of value -> key.

    Used to turn Keras `class_indices` ({label: index}) into {index: label}.
    """
    return {value: key for key, value in gen.items()}
def find_max(preds_i):
    """Return the index of the largest value in `preds_i` (first on ties),
    or -1 when the sequence is empty.

    BUG FIX: the original started the running max at -1 and returned index -1
    whenever every value was <= -1; an argmax must not depend on a sentinel.
    """
    if len(preds_i) == 0:
        return -1
    # max over indices keyed by value returns the first index achieving the max,
    # matching the original's strict `>` comparison on ties.
    return max(range(len(preds_i)), key=lambda i: preds_i[i])
def get_label(cls, val):
    """Look up the class label stored under index `val` in mapping `cls`."""
    label = cls[val]
    return label
# -
# Index <-> label lookups derived from the training generator's class mapping.
vals = get_class_values(train_generator.class_indices)  # class index values
cls = get_classes(train_generator.class_indices)        # index -> folder id ("n0".."n9")
# Mapping from class folder id to [latin_name, common_name].
monkeys = {"n0" : ["alouatta_palliata", "mantled_howler"],
           "n1" : ["erythrocebus_patas", "patas_monkey"],
           "n2" : ["cacajao_calvus", "bald_uakari"],
           "n3" : ["macaca_fuscata", "japanese_macaque"],
           "n4" : ["cebuella_pygmea", "pygmy_marmoset"],
           "n5" : ["cebus_capucinus", "white_headed_capuchin"] ,
           "n6" : ["mico_argentatus", "silvery_marmoset"],
           "n7" : ["saimiri_sciureus", "common_squirrel_monkey"] ,
           "n8" : ["aotus_nigriceps", "black_headed_night_monkey"],
           "n9" : ["trachypithecus_johnii", "nilgiri_langur"]}
# +
import re

# Human-readable display names: underscores -> spaces, Title Case.
latin_name = {sp: re.sub(r'_', ' ', names[0]).title() for sp, names in monkeys.items()}
original_name = {sp: re.sub(r'_', ' ', names[1]).title() for sp, names in monkeys.items()}
print(latin_name)
print(original_name)
# -
# Predict the species of a single validation image.
f="data\\validation\\n0\\n000.jpg"
img1 = cv2.imread(f, 3)
# BUG FIX: the generators above trained the model on 224x224 inputs
# (target_size), but this cell resized to 255x255 — a train/inference
# preprocessing mismatch. Use the training resolution.
img1 = cv2.resize(img1, (224, 224))
img1 = np.array(img1).reshape((1, 224, 224, 3))#do not miss the order in tuple
img1 = img1/norm  # same 1/255 rescaling as the ImageDataGenerators
p = model.predict(img1)
pp=find_max(p.flatten())
pp = cls[pp]  # class index -> folder id (e.g. "n0")
lat = latin_name[pp]
org = original_name[pp]
print(pp)
print(lat)
print(org)
# +
# total = 0
# acc = 0
# direc = "data\\test"
# files = test_generator.filenames
# for i in range(len(preds)):
# cls_index = find_max(preds[i])
# pred_label = cls[cls_index]
# org_label = cls[test_generator.classes[i]]
# file_name = os.path.join(direc, files[i])
# if org_label==pred_label:
# acc +=1
# if total%10==0:
# result = "Wrong!!!\n"
# if org_label==pred_label:
# result = "Right!!!\n"
# title_text = "Original: "+ org_label+"\nPredicted: "+ pred_label+"\nFile: "+ file_name
# title_text=result+ title_text
# img = cv2.imread(file_name, 3)
# if org_label==pred_label:
# plt.title(title_text, color='blue')
# else:
# plt.title(title_text, color='red')
# plt.imshow(img)
# plt.show()
# total+=1
# accuracy = (acc/total)*100
# print("Accuracy: ", accuracy)
# -
| Monkey Recognition/code/obsolete/Monkey Identification-Fine - Tuning Predefined Models.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# +
if __name__ == '__main__':
    # Read n students, each given as "name score score ...", then print the
    # average of the queried student's scores with two decimal places.
    record_count = int(input())
    student_marks = {}
    for _ in range(record_count):
        tokens = input().split()
        student_marks[tokens[0]] = [float(score) for score in tokens[1:]]
    query_name = input()
    marks = student_marks[query_name]
    average = sum(marks) / len(marks)
    print("{0:.2f}".format(average))
# -
| Finding the percentage.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # LAB0 - Podstawy
# Przedmiot: _Sztuczna Inteligencja w Systemach Informatycznych_
#
# Autor: <NAME>
#
# kontakt: <EMAIL>
#
# # Wprowadzenie
#
# Celem zajęć jest zaznajomienie się z podstawowymi technikami sotosowanymi w uczeniu maszynowym. W trakcie zajęć laboratoryjnych zostanie wykorzystany zbiór danych Irys, a następnie dla wybranych algorytmów uczenia maszynowego zostanie zidentyfikowany jeden, który cechuje się najwyższą jakością klasyfikacji.
#
# Środowisko pracy: Anaconda, Jupyter Notebook
#
# Narzędzia: Scikit Learn, Python
# ## Zbiór _Iris_
#
# Zbiór danych *iris* zawiera 150 instancji opisujących kwiaty Irysa. Kwiaty są określone przy pomocy 4 atrybutów numerycznych opisujących długości i szerokości płatków kwiatu *sepal* i *petal*. Ostatni atrybut jakościowy definiuje gatunek opisywanego Irysa (species). Jednym z problemów możliwych do rozwiązania przy użyciu tego zbioru uczącego jest określenie gatunku dla nowego kwiatu Irysa w zależności od wielkości jego płatków. Jest to przykład zadania klasyfikacji.
#
# Zbiór uczący *iris* jest zbiorem etykietowanym ponieważ klasa, czyli konkretny gatunek irysa jest znany dla każdej instancji (obiektu) w zbiorze uczącym:
#
# https://archive.ics.uci.edu/ml/datasets/iris
#
# ```
# Source:
#
# Creator:
#
# <NAME>
#
# Donor:
#
# <NAME> (MARSHALL%PLU '@' io.arc.nasa.gov)
#
# Data Set Information:
#
# This is perhaps the best known database to be found in the pattern recognition literature. Fisher's paper is a classic in the field and is referenced frequently to this day. (See Duda & Hart, for example.) The data set contains 3 classes of 50 instances each, where each class refers to a type of iris plant. One class is linearly separable from the other 2; the latter are NOT linearly separable from each other.
#
# Predicted attribute: class of iris plant.
#
# This is an exceedingly simple domain.
#
# This data differs from the data presented in Fishers article (identified by <NAME>, spchadwick '@' espeedaz.net ). The 35th sample should be: 4.9,3.1,1.5,0.2,"Iris-setosa" where the error is in the fourth feature. The 38th sample: 4.9,3.6,1.4,0.1,"Iris-setosa" where the errors are in the second and third features.
#
# Attribute Information:
#
# 1. sepal length in cm
# 2. sepal width in cm
# 3. petal length in cm
# 4. petal width in cm
# 5. class:
# -- Iris Setosa
# -- Iris Versicolour
# -- Iris Virginica
# ```
# 
# # Konfiguracja wstępna
# +
# Konfiguracja interakcji z wykresami pakietu matplotlib
# Standardowe rozwiązanie
# %matplotlib inline
# Interaktywne wykresy - może powodować błędy
# # %matplotlib notebook
# +
# Załaduj biblioteki
import pandas as pd
from pandas import read_csv
from matplotlib import pyplot
from pandas.plotting import scatter_matrix
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
import numpy as np
# -
# # Operacje na danych tablicowych - przykłady dla _NumPy_
# +
# NumPy - creating arrays (1-D, 2-D, and 3-D examples)
a = np.array([1,2,3])
b = np.array([(1.5,2,3), (4,5,6)], dtype = float)
c = np.array([[(1.5,2,3), (4,5,6)], [(3,2,1), (4,5,6)]], dtype = float)
print(a)
print(b)
print(c)
# +
# NumPy - inspecting arrays: shape, length, rank, total size, dtype, casting
print(a.shape)
print(len(a))
print(b.ndim)
print(c.size)
print(b.dtype)
print(b.dtype.name)
print(b.astype(int))
# +
# NumPy - subsetting
print(a[2]) # element at index 2
print(b[1,2]) # element at row 1, column 2 (0-based indexing)

# NumPy - slicing
print(a[0:2]) # elements with indices 0 and 1
print(b[0:2,1]) # column 1 of rows 0 and 1
print(b[:1]) # row 0
print(b[0:1,:]) # row 0 (explicit all-columns form)
# -
# # Wczytywanie danych - biblioteka _Pandas_
# Przeanalizuj w edytorze teksotwym format i zawartośc pliku wejściowego ze zbiorem - plik _iris.csv_.
#
# _UWAGA! W przypadku błędów z odczytem danych popraw pliki zawierające dane wejściowe._
# Load the Iris dataset from the local CSV file.
filename = 'iris.csv'
dataset = read_csv(filename)
# Wypisz rozmiar danych wejściowych.
# NOTE: DataFrame.size is rows * columns (total cells), not the row count.
print(dataset.size)
# Sprawdź poprawność odczytania danych poprzez wypisanie pierwszych 20 wierszy.
dataset[:20]
# # Analiza statystyczna
# Sprawdź jaki jest przedział wartości, czy nie ma elementów znacznie odbiegających od wartości oczekiwanej. Mogłoby to sugerować błędy w danych wejściowych.
# descriptions
# Summary statistics: check value ranges and spot outliers / data errors.
print(dataset.describe())
# Sprawdź czy licznośc klas wynikowych jest zbliżona. Występowanie znacznych dysproporcji może skutkować błędnym wyuczeniem modelu.
# class distribution
# Class balance check: strongly skewed classes would bias training.
print(dataset.groupby('variety').size())
# # Wizualizacja danych
# Narysuj wykresy przedstawiające dane wejściowe.
# Histograms, box plots, and a pairwise scatter matrix of the four features.
dataset.hist()
pyplot.show()
dataset.plot(kind='box', subplots=True, layout=(2,2), sharex=False, sharey=False)
pyplot.show()
scatter_matrix(dataset)
pyplot.show()
# # Ewaluacja wybranego algorytmu ML
# Na potrzeby klasyfikacji irysów zdecydowano się wykorzystać model ML bazujący ba regresji logistycznej. W procesie uczenia wykrozystamy dwa zbiory danych - treningowy oraz testowy z podziałem 20%/80%.
# Stwórz dwie tablice:
# - X składającą się z 4 kolumn - cechy wejściowe
# - Y składającą się z 1 kolumny - etykiety klasy dla zbioru uczącego
# +
# X = the four numeric feature columns; Y = the species label column.
array = dataset.values
X = array[:,0:4]
Y = array[:,4]
# -
# Wypisz tablice aby sprawdzić ich poprawność.
# +
print(X.shape)
print(X)
print(Y.shape)
print(Y)
# -
# Stwórz zbiór uczący i testowy z podziałem 80%/20%.
# +
# 80/20 train/test split with a fixed seed for reproducibility.
validation_size = 0.20
seed = 7
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=validation_size, random_state=seed)
# -
# Naucz model, a następnie sprawdź jego skuteczność.
# +
# Baseline: logistic regression, evaluated on the held-out test split.
klr = LogisticRegression(max_iter=2000)
klr.fit(X_train, Y_train)
predictions = klr.predict(X_test)
print(accuracy_score(Y_test, predictions))
print(confusion_matrix(Y_test, predictions))
print(classification_report(Y_test, predictions))
# -
# # Wybór najlepszego algorytmu ML dla zadanego problemu
# Sprawdź skuteczność innych modeli ML w problemie klasyfikacji Irysów. Jako miarę jakości klasyfikacji wybierz _accuracy_score_. Wyniki skuteczności ich działania przedstaw na wykresie.
#
# Przeanalizuj metody:
# - LogisticRegression()
# - KNeighborsClassifier()
# - DecisionTreeClassifier()
# - SVC()
# - GaussianNB()
#
# Lista dostępnych algorytmów w bibliotece _scikit-learn_:
# https://scikit-learn.org/stable/modules/multiclass.html#multiclass
#
# Dla wybranej metody wylicz wskaźniki jakościowe dotyczące predykcji.
# +
# Train each candidate classifier, record its name and test accuracy, and
# remember the predictions of the best-scoring model seen so far.
names = []
results = []
#TODO zaimplementuj uczenie i ocenianie wybranych algorytmów
#TODO wyniki zawrzyj w tablicach names i results
models = [LogisticRegression(max_iter=2000), KNeighborsClassifier(), DecisionTreeClassifier(), SVC(), GaussianNB()]
best_predictions = []
for model in models:
    model.fit(X_train, Y_train)
    predictions = model.predict(X_test)
    names.append(model.__class__.__name__)
    accuracy = accuracy_score(Y_test, predictions)
    # keep predictions of the best model so far (first model always qualifies)
    if not len(results) or accuracy > max(results):
        best_predictions = predictions
    results.append(accuracy)
# -
# Bar chart of per-model accuracies (names rotated for readability).
print(names)
print(results)
pyplot.xticks(rotation=45)
pyplot.bar(names,results)
pyplot.show()
# Policz skuteczność najlepszego modelu.
# Quality metrics for the best-performing model.
# BUG FIX: the loop above stored the winner's outputs in `best_predictions`;
# the original cell evaluated `predictions`, i.e. whichever model happened to
# run LAST (GaussianNB), contradicting the "best model" intent stated above.
print(accuracy_score(Y_test, best_predictions))
print(confusion_matrix(Y_test, best_predictions))
print(classification_report(Y_test, best_predictions))
# # Podsumowanie
#
# ## jaki problem ML występował w zadaniu?
#
# W zadaniu występuje problem klasyfikacji irysów.
#
# ## jak się rozkłada liczność klas wynikowych
#
# Zbiory są równoliczne. Zarówno zbiór treningowy jak i testowy zawierają zbliżoną
# liczbę elementów w każdej z klas.
#
# ## jakie algorytmy ML były rozważane?
#
# Były rozważane algorytmy regresji logistycznej, klasyfikacji k-najbliższych sąsiadów, drzewa decyzyjne, algorytm SVC oraz algorytm Gaussa.
#
# ## który z algorytmów cechował się najwyższą skutecznością?
#
# Najwyższą skuteczność miał algorytm klasyfikacji k-najbliższych sąsiadów i drzewa decyzyjne.
| Lab0-student/Lab0-student.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Runtime Prediction via QC
#
# ## About
#
# This notebook is designed to 1) create a dataframe that brings together runtime information with the [information](https://docs.google.com/spreadsheets/d/1zSs_hRQrCIBlUmPpDvyEwpJv3X4t8Y73s6ZRdkBkeI8/edit#gid=983410225) provided by the QC group. It then 2) calculates the correlation of QC variables to runtime for a core variant calling workflows. Based on those correlations, it 3) generates a linear regression model to predict runtime from QC variables and then 4) evaluates the performance of this model on ~20% of the dataset that was excluded from the training.
# +
# imports
import pandas as pd
import matplotlib.pyplot as plt
# this allows plots to appear directly in the notebook
# %matplotlib inline
# +
import math

# Map "PROJECT::DONOR" -> wall-clock runtime, read from the timing TSV.
# FIX (resource safety): context manager guarantees the file is closed;
# `iter(f)` was redundant — a file object is already an iterator.
runtimes = {}
with open("dkfz_embl_timing.tsv", "r") as f:
    for line in f:
        a = line.split("\t")
        runtimes[a[0]] = a[1]

# read data into a DataFrame
data = pd.read_csv('PCAWG-QC_Summary-of-Measures_QC_Measures.tsv', delimiter='\t')
data['runtime'] = 0.0

# loop over and annotate with runtime (log-transformed to tame the long tail)
for index, row in data.iterrows():
    key = row['Project_code'] + "::" + row['Submitter_donor_ID']
    try:
        curr_runtime = math.log(float(runtimes[key]))
        #curr_runtime = float(runtimes[key])
        # FIX: .at replaces DataFrame.set_value, deprecated since pandas 0.21
        # and removed in pandas 1.0.
        data.at[index, 'runtime'] = curr_runtime
    except (KeyError, ValueError, TypeError):
        # donor missing from the timing file, unparsable runtime, or a
        # non-positive value rejected by math.log — leave runtime at 0.0
        continue
data.head()
# -
# now I have a merged dataframe that has all QC values along with the runtime for the workflow
# Rows x columns of the merged QC + runtime frame.
print(data.shape)
# +
# Import matplotlib
import matplotlib.pyplot as plt

# Distribution of (log) runtimes across all donors.
plt.hist(data["runtime"])

# Show the plot.
plt.show()
# -
# remove 0 runtimes (donors with no timing entry kept the 0.0 placeholder)
data = data[data["runtime"] > 0.0]
plt.hist(data["runtime"])
plt.show()

# remove any NAN rows so the regression models below get complete cases
data = data.dropna()
data.isnull().values.any()

# showing zeros and nan have been removed
print(data.shape)

# general stats
data.describe()

# look at correlation of every QC measure with runtime
data.corr()["runtime"]
# visualize the relationship between the features and the response using scatterplots
# (4x3 grid: each QC measure against runtime; last panel is runtime vs itself)
import matplotlib.pyplot as plt
fig, axs = plt.subplots(4, 3, sharey=True)
fig.subplots_adjust(hspace=.5, wspace=.5)
data.plot(kind='scatter', x='Stars', y='runtime', ax=axs[0, 0], figsize=(16, 16))
data.plot(kind='scatter', x='Mean_Coverage_Normal', y='runtime', ax=axs[0, 1])
data.plot(kind='scatter', x='Mean_Coverage_Tumour', y='runtime', ax=axs[0, 2])
data.plot(kind='scatter', x='FWHM_Normal', y='runtime', ax=axs[1, 0])
data.plot(kind='scatter', x='Median/Mean_Coverage_Tumour', y='runtime', ax=axs[1, 1])
data.plot(kind='scatter', x='FWHM_Tumour', y='runtime', ax=axs[1, 2])
data.plot(kind='scatter', x='Somatic_Mutation_Calling_Coverage', y='runtime', ax=axs[2, 0])
data.plot(kind='scatter', x='%_of_paired_reads_mapping_to_different_chromosomes_Normal', y='runtime', ax=axs[2, 1])
data.plot(kind='scatter', x='%_of_paired_reads_mapping_to_different_chromosomes_Tumour', y='runtime', ax=axs[2, 2])
data.plot(kind='scatter', x='Ratio_of_difference_in_edits_between_paired_reads_Normal', y='runtime', ax=axs[3, 0])
data.plot(kind='scatter', x='Ratio_of_difference_in_edits_between_paired_reads_Tumour', y='runtime', ax=axs[3, 1])
data.plot(kind='scatter', x='runtime', y='runtime', ax=axs[3, 2])
# +
# now clear out the columns that we don't want to use

# Get all the columns from the dataframe.
columns = data.columns.tolist()
# Filter the columns to remove ones we don't want: the target itself plus
# identifier columns that carry no predictive signal.
columns = [c for c in columns if c not in ["runtime", "Project_code", "Submitter_donor_ID", "Normal_WGS_aliquot_ID", "Tumour_WGS_aliquot_ID"]]
#columns = [c for c in columns if c not in ["runtime", "Project_code", "Submitter_donor_ID", "Normal_WGS_aliquot_ID", "Tumour_WGS_aliquot_ID", "Median/Mean_Coverage_Normal", "FWHM_Normal", "Median/Mean_Coverage_Tumour", "FWHM_Tumour", "Somatic_Mutation_Calling_Coverage", "%_of_paired_reads_mapping_to_different_chromosomes_Normal", "Ratio_of_difference_in_edits_between_paired_reads_Normal", "Ratio_of_difference_in_edits_between_paired_reads_Tumour"]]
# Store the variable we'll be predicting on.
target = "runtime"
# +
# Import a convenience function to split the sets.
from sklearn.model_selection import train_test_split

# Generate the training set (~80%). Set random_state to be able to replicate results.
train = data.sample(frac=0.8, random_state=1)
# Select anything not in the training set and put it in the testing set.
test = data.loc[~data.index.isin(train.index)]
# Print the shapes of both sets.
print(train.shape)
print(test.shape)
# +
# Import the linear regression model.
from sklearn.linear_model import LinearRegression
# Initialize the model class.
model = LinearRegression()
train.head()
# Fit the model to the training data (QC measures -> log runtime).
model.fit(train[columns], train[target])
# +
# now test
# Import the scikit-learn function to compute error.
from sklearn.metrics import mean_squared_error
# Generate our predictions for the held-out test set.
predictions = model.predict(test[columns])
# Compute error between our test predictions and the actual values.
mean_squared_error(predictions, test[target])
# +
# try random forest
# Import the random forest model.
from sklearn.ensemble import RandomForestRegressor
# Initialize the model with some parameters (min_samples_leaf limits overfitting).
model = RandomForestRegressor(n_estimators=100, min_samples_leaf=10, random_state=1)
# Fit the model to the data.
model.fit(train[columns], train[target])
# Make predictions.
predictions = model.predict(test[columns])
# Compute the error against the held-out test set.
mean_squared_error(predictions, test[target])
# +
# look at predicted vs. actual
import statsmodels.api as sm
import numpy as np
# NOTE(review): predictions here are made on ALL rows, including the training
# set, so the reported fit is optimistic relative to the held-out MSE above.
data['predicted_runtime'] = model.predict(data[columns])
#data.plot(kind='scatter', x='runtime', y='predicted_runtime')
# OLS of predicted on actual to quantify agreement (R^2 in the summary).
results = sm.OLS(data['predicted_runtime'],sm.add_constant(data['runtime'])).fit()
print(results.summary())
plt.scatter(data['runtime'],data['predicted_runtime'])
#X_plot = np.linspace(0,1,100)
#plt.plot(X_plot, X_plot*results.params[0] + results.params[1])

# fit with np.polyfit and overlay the regression line in red
m, b = np.polyfit(data['runtime'], data['predicted_runtime'], 1)
#plt.plot(x, y, '.')
plt.plot(data['runtime'], m*data['runtime'] + b, '-', color='r')
plt.ylabel('predicted runtime (log)')
plt.xlabel('runtime (log)')
plt.show()
# -
# ## Summary
# Seems like the coverage dominates the runtime. The model agrees with actual runtime fairly well, R^2 of 0.734
| DKFZ EMBL Runtime Analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # <NAME>
#
# This is the solution for the Milestone Project! A two player game made within a Jupyter Notebook. Feel free to download the notebook to understand how it works!
# First some imports we'll need to use for displaying output and set the global variables
# +
# Specifically for the iPython Notebook environment for clearing output.
from IPython.display import clear_output
# Global variables
board = [' '] * 10
game_state = True
announce = ''
# -
# Next make a function that will reset the board, in this case we'll store values as a list.
# Note: Game will ignore the 0 index
def reset_board():
    ''' Restore the board to 10 blank cells (index 0 unused) and mark the game active '''
    global board, game_state
    board = [" " for _ in range(10)]
    game_state = True
# Now create a function to display the board, I'll use the num pad as the board reference.
# Note: Should probably just make board and player classes later....
def display_board():
    ''' This function prints out the board so the numpad can be used as a reference '''
    # Clear current cell output (IPython-only helper; no-op outside a notebook kernel)
    clear_output()
    # Print board row by row, top row (7 8 9) first, matching the numpad layout.
    # NOTE: Python 2 print statements -- this notebook targets a Python 2 kernel.
    print " "+board[7]+" |"+board[8]+" | "+board[9]+" "
    print "------------"
    print " "+board[4]+" |"+board[5]+" | "+board[6]+" "
    print "------------"
    print " "+board[1]+" |"+board[2]+" | "+board[3]+" "
# Define a function to check for a win by comparing inputs in the board list. Note: Maybe should just have a list of winning combos and cycle through them?
def win_check(board, player):
    """Return True when `player` occupies any of the eight winning lines.

    `board` is the 10-element list where index 0 is unused and cells 1-9
    follow the numeric-keypad layout.
    """
    winning_lines = (
        (7, 8, 9), (4, 5, 6), (1, 2, 3),   # horizontals (top to bottom)
        (7, 4, 1), (8, 5, 2), (9, 6, 3),   # verticals (left to right)
        (1, 5, 9), (3, 5, 7),              # diagonals
    )
    return any(board[a] == board[b] == board[c] == player
               for a, b, c in winning_lines)
# Define function to check if the board is already full in case of a tie. (This is straightforward with our board stored as a list)
# Just remember index 0 is always empty.
def full_board_check(board):
    """Return True when no blank cells remain (index 0 is always ignored)."""
    return " " not in board[1:]
# Now define a function to get player input and do various checks on it.
# +
def ask_player(mark):
    ''' Asks player where to place X or O mark, checks validity '''
    # Mutates the module-level board in place once a valid empty cell is chosen.
    global board
    req = 'Choose where to place your: ' + mark
    while True:
        try:
            # Python 2 input function; blocks until the user types something.
            choice = int(raw_input(req))
        except ValueError:
            # Non-numeric input: re-prompt.
            print("Sorry, please input a number between 1-9.")
            continue
        if choice not in range(1,10):
            # Out-of-range number (0 or >9): re-prompt.
            print("Sorry, please input a number between 1-9.")
            continue
        if board[choice] == " ":
            # Valid and empty: place the mark and stop asking.
            board[choice] = mark
            break
        else:
            # Cell already taken: re-prompt (Python 2 print statement).
            print "That space isn't empty!"
            continue
# -
# Now have a function that takes in the player's choice (via the ask_player function) then returns the game_state.
def player_choice(mark):
    """Run one turn for `mark` ('X' or 'O') and return (game_state, announce).

    Side effects: mutates the global board (via ask_player), redraws it, and
    sets game_state to False on a win or a tie.
    """
    global board,game_state,announce
    #Set game blank game announcement
    announce = ''
    #Get Player Input
    mark = str(mark)
    # Validate input
    ask_player(mark)
    #Check for player win
    if win_check(board,mark):
        clear_output()
        display_board()
        announce = mark +" wins! Congratulations"
        game_state = False
    #Show board
    clear_output()
    display_board()
    #Check for a tie (only reached as a tie if no one has won yet)
    if full_board_check(board):
        announce = "Tie!"
        game_state = False
    return game_state,announce
# Finally put it all together in a function to play the game.
def play_game():
    """Main interactive loop: alternate X and O turns until a win or tie,
    then offer a rematch.

    NOTE(review): the rematch restarts via recursion (play_game() calling
    itself), so a very long session of rematches would grow the call stack.
    """
    reset_board()
    global announce
    # Set marks
    X='X'
    O='O'
    while True:
        # Show board
        clear_output()
        display_board()
        # Player X turn (Python 2 print statements below)
        game_state,announce = player_choice(X)
        print announce
        if game_state == False:
            break
        # Player O turn
        game_state,announce = player_choice(O)
        print announce
        if game_state == False:
            break
    # Ask player for a rematch
    rematch = raw_input('Would you like to play again? y/n')
    if rematch == 'y':
        play_game()
    else:
        print "Thanks for playing!"
# Let's play!
play_game()  # entry point: starts the interactive loop (blocks on raw_input)
| Complete-Python-Bootcamp-master/.ipynb_checkpoints/Milestone Project 1 - Advanced Solution-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# +
# following tutorial at https://www.dataquest.io/blog/web-scraping-tutorial-python/
# + nbpresent={"id": "c45f8f95-0806-4ef1-a900-e26483bf5a5c"}
import requests
# + nbpresent={"id": "d48dfe8a-0a8c-49af-a92f-52d32814295b"}
# Fetch a static demo page provided by the tutorial.
page = requests.get("http://dataquestio.github.io/web-scraping-pages/simple.html")
# + nbpresent={"id": "f064519a-bb1f-49a5-9be2-00b7a96a0770"}
# 200 expected on success.
page.status_code
# + nbpresent={"id": "9031bf2e-1269-4162-bbba-b2bd1c32d56c"}
# Raw response body (bytes).
page.content
# + nbpresent={"id": "aa8b4196-c349-422e-a7d5-00a43f3b4f2f"}
from bs4 import BeautifulSoup
# + nbpresent={"id": "6c2384e7-f58c-4629-a1e8-cd6b3a4bbd66"}
# Parse the HTML with the stdlib parser backend.
soup = BeautifulSoup(page.content, 'html.parser')
# + nbpresent={"id": "b990aff5-c9f8-48c2-99bb-735a5095968d"}
print(soup.prettify())
# + nbpresent={"id": "beada69b-15ee-4d41-9ff0-dc0e33028071"}
# Top-level nodes of the parse tree (doctype, whitespace, <html>).
list(soup.children)
# + nbpresent={"id": "f1e73ed7-e8eb-4a3c-9b5f-3af5459c1858"}
[type(item) for item in list(soup.children)]
# + nbpresent={"id": "b70640ba-9fcc-461c-84bc-b9ccb77b28e1"}
# NOTE(review): the hard-coded indices [2] and [3] below depend on this exact
# page's whitespace nodes; they will break on other documents.
html= list(soup.children)[2]
list(html.children)
# + nbpresent={"id": "1006775c-e411-4688-840a-fff1b492f9f7"}
body = list(html.children)[3]
print(body.prettify())
# + nbpresent={"id": "f558480c-a799-4e28-8524-4f8b8745e7dd"}
body = list(html.children)[3]
list(body.children)
# + nbpresent={"id": "36fce7da-847c-479a-9646-4bee83aea9c4"}
# Extract the text of the single <p> element.
p = list(body.children)[1]
p.get_text()
# -
# find_all/find are the robust alternatives to manual child indexing above.
soup.find_all('p')
soup.find_all('p')[0].get_text()
soup.find('p')
# Second demo page: selecting by ids and classes.
page = requests.get("http://dataquestio.github.io/web-scraping-pages/ids_and_classes.html")
soup = BeautifulSoup(page.content, 'html.parser')
print(soup.prettify())
soup.find_all('p', class_='outer-text')
soup.find_all(class_='outer-text')
soup.find_all(id="first")
soup.select("div p") # using CSS selectors
| Web_Scrapping_1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %load_ext autoreload
# %autoreload 2
# %cd ..
# +
import json
import random
import torch
import speech.loader as loader
#import speech.models as models
# -
# ## Config
# +
# Paths to the two experiment configs; config2 (librispeech) is only kept
# around for the commented-out "toy data" swap below.
config_fp = "examples/timit/transducer_config.json"
config2 = "examples/librispeech/config.json"
with open(config_fp, "r") as f:
    config = json.load(f)
# NOTE(review): config2 is rebound here from a path string to the parsed dict.
with open(config2, "r") as f:
    config2 = json.load(f)
#config["data"] = config2["data"] #use toy data
#print(config2)
config
# -
# Override dataset locations and the checkpoint directory for this machine.
train_set = "/home/ubuntu/Data/LibriSpeech/train-clean-100.json"
dev_set = "/home/ubuntu/Data/LibriSpeech/dev-clean.json"
save_path = "/home/ubuntu/persistent/experiments/rnnt/baseline/first/"
config["save_path"] = save_path
config["data"]["train_set"] = train_set
config["data"]["dev_set"] = dev_set
config
# Seed both Python's and torch's RNGs for reproducibility.
random.seed(config["seed"])
torch.manual_seed(config["seed"])
def nparams(model):
    """Count the total number of scalar parameters in a torch module."""
    total = 0
    for param in model.parameters():
        total += param.numel()
    return total
# ## Model discussion
# * The implementation combines the joint and prediction networks into the 'decoder' network
# * SO the ```decoder["layers"]``` is actually the number of layers in the prediction network
#
#
# +
#encoder definition
# Pull out sub-configs by reference; mutations below write through to `config`.
opt_cfg = config["optimizer"]
data_cfg = config["data"]
model_cfg = config["model"]
enc = model_cfg["encoder"]
dec = model_cfg["decoder"]
# Architecture knobs for this experiment.
HIDDEN_SIZE = 2048
CONV_SIZE = 8 #convolutional channel size
BIDIRECTIONAL = False
DEC_LAYERS = 2
#conv
convs = enc["conv"]
enc["rnn"]["dim"] = HIDDEN_SIZE
enc["rnn"]["bidirectional"] = BIDIRECTIONAL
dec["embedding_dim"] = HIDDEN_SIZE #NOTE: in strawperson this is 1024.
                                   #BUT in awni's implementation we add the encoder and
                                   #Prediction network output
# NOTE(review): DEC_LAYERS is defined above but the literal 2 is used here.
dec["layers"] = 2
# Rewrite every conv layer's output-channel count to CONV_SIZE, keeping
# kernel height/width and stride untouched.
out_convs = []
for conv in convs:
    out_c, h, w, s = conv
    out_c = CONV_SIZE
    out_convs.append([out_c, h, w, s])
enc["conv"] = out_convs
# Echo the ORIGINAL conv specs (from `convs`, pre-rewrite) for inspection.
for conv in convs:
    out_c, h, w, s = conv
    print("out_c ={}, h={}, w={}, s={}".format(out_c, h, w, s))
print(enc)
# Persist the mutated config for later runs.
with open("configs/strawperson.json", "w") as f:
    json.dump(config, f, indent=2)
config
# +
#START_AND_END = True #Not sure what this does
# NOTE(review): START_AND_END is referenced on the next line but its
# definition above is commented out -- this cell raises NameError as-is.
batch_size = opt_cfg["batch_size"]
preproc = loader.Preprocessor(data_cfg["train_set"], start_and_end=START_AND_END)
# NOTE(review): the `speech.models` import at the top of the notebook is also
# commented out, so this eval("models. ...") lookup will fail too.
model_class = eval("models." + model_cfg["class"])
# Two instantiations to compare parameter counts: a 26-class (character)
# output vs. a 1000-class output.
model1 = model_class(1024, 26,
                     model_cfg)
model2 = model_class(1024,1000,
                     model_cfg)
# train_ldr = loader.make_loader(data_cfg["train_set"],
#                        preproc, batch_size)
# dev_ldr = loader.make_loader(data_cfg["dev_set"],
#                        preproc, batch_size)
print(model1)
print(nparams(model1))
print()
print(model2)
print(model2.conv)
print(nparams(model2))
# -
# Scratch cells: manual parameter count and an arithmetic aside.
sum([p.numel() for p in model2.parameters()])
38 / 4
| notebooks/model_explore.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.5.3
# language: julia
# name: julia-1.5
# ---
# +
using HTTP
# Create the "data" folder if it does not exist yet
data_dir = "./data/"
if !isdir(data_dir)
    mkdir(data_dir)
end
# Download the ImageNet class_index mapping
## Reuses the file distributed with Keras
## https://github.com/fchollet/deep-learning-models/blob/master/imagenet_utils.py
url = "https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json"
save_path = joinpath(data_dir, "imagenet_class_index.json")
# Skip the download when the file is already cached locally.
if !isfile(save_path)
    HTTP.open(:GET, url) do http
        open(save_path, "w") do file
            # Stream the HTTP body straight into the file.
            write(file, http)
        end
    end
end
# +
using PyCall
# NOTE(review): @pyimport is the legacy PyCall macro; pyimport("zipfile")
# (as used later in this notebook) is the current style.
@pyimport zipfile

# Download and extract the ants-and-bees image dataset
## Reuses the archive provided by the PyTorch transfer-learning tutorial
## https://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html
url = "https://download.pytorch.org/tutorial/hymenoptera_data.zip"
save_path = joinpath(data_dir, "hymenoptera_data.zip")
if !isfile(save_path)
    HTTP.open(:GET, url) do http
        open(save_path, "w") do file
            write(file, http)
        end
    end
    # Open the downloaded ZIP file
    # NOTE(review): `const zip` shadows Base.zip in this notebook's scope.
    const zip = zipfile.ZipFile(save_path)
    zip.extractall(data_dir) # extract the archive
    zip.close()              # close the ZIP handle
    # Remove the archive once extracted
    rm(save_path)
end
# +
using PyCall
using PyPlot
# 必要パッケージのimport
const np = pyimport("numpy")
const json = pyimport("json")
const Image = pyimport("PIL.Image")
# PyTorch関連パッケージのimport
const torch = pyimport("torch")
const torchvision = pyimport("torchvision")
const models = torchvision.models
const transforms = torchvision.transforms
# -
# Print the PyTorch / torchvision versions (read through PyCall)
println("PyTorch Version: ", torch.__version__)
println("Torchvision Version: ", torchvision.__version__)
# +
# Load a pretrained VGG-16 model
# The first run downloads the pretrained weights, so it takes a while
# Create the VGG-16 model instance
use_pretrained = true # use the pretrained parameters
net = models.vgg16(pretrained=use_pretrained)
net.eval() # switch to inference mode (disables dropout / batch-norm updates)
# Print the network architecture
println(net)
# -
#= Input-image preprocessing factory.

Returns a torchvision transform pipeline (via PyCall) that resizes,
center-crops, tensorizes and channel-normalizes a PIL image.

@param resize::Int64 target image edge length
@param mean::Tuple{Float64,Float64,Float64} per-channel mean
@param std::Tuple{Float64,Float64,Float64} per-channel standard deviation
=#
BaseTransform(resize::Int64, mean::Tuple{Float64,Float64,Float64}, std::Tuple{Float64,Float64,Float64}) = transforms.Compose([
    transforms.Resize(resize),      # shorter edge becomes `resize` pixels
    transforms.CenterCrop(resize),  # crop the central resize x resize patch
    transforms.ToTensor(),          # convert to a torch tensor (C,H,W in [0,1])
    transforms.Normalize(mean, std) # standardize each color channel
])
# +
# Check that the image preprocessing works
# 1. Load the image
image_file_path = "./pytorch_advanced/1_image_classification/data/goldenretriever-3724972_640.jpg"
img = Image.open(image_file_path) # [height][width][RGB]
# 2. Show the original image
imshow(img)
# +
# 3. Preprocess the image and display the result
# Standard ImageNet normalization constants.
resize = 224
mean = (0.485, 0.456, 0.406)
std = (0.229, 0.224, 0.225)
transform = BaseTransform(resize, mean, std)
img_transformed = transform(img) # torch.Size([3, 224, 224])
# Convert (channel, height, width) to (height, width, channel) for plotting
## permutedims(array, dim_order): generalized transpose
img_transformed = permutedims(img_transformed.numpy(), [2, 3, 1])
# Clamp values into [0, 1] before display (normalization pushed some outside)
## clamp(x, low, high): if x < low then x = low, if x > high then x = high
## the `.` operator broadcasts the function over the array
img_transformed = clamp.(img_transformed, 0, 1)
imshow(img_transformed)
# -
# Load the ILSVRC label metadata into a Dict (keys are class-index strings)
# NOTE(review): this same load is repeated in a later cell.
ILSVRC_class_index = json.load(open("./data/imagenet_class_index.json", "r"))
ILSVRC_class_index
#= Post-processing helper that maps network output to an ILSVRC label.

Returns a Dict of callables; `:predict_max` converts a logits tensor into
the human-readable name of the most probable class.

@param class_index::Dict{Any, Any} parsed imagenet_class_index.json, keyed by
       the STRINGS "0"–"999" (0-based class ids from the JSON, i.e. data keys,
       not Julia indices)
=#
ILSVRCPredictor(class_index::Dict{Any, Any}) = Dict(
    #= Return the label name with the highest score.
    @param out::torch.Size([1, 1000]) network output
    @return predicted_label_name::String name of the most probable class
    =#
    :predict_max => out -> begin
        # np.argmax is 0-based and the JSON keys are the 0-based strings
        # "0"–"999", so the id is used as-is. (Bug fix: the previous code
        # looked up string(maxid + 1), shifting every prediction by one
        # class.) The inner Python list IS converted to a 1-based Julia
        # array by PyCall, so [2] correctly selects the label name.
        maxid = np.argmax(out.detach().numpy())
        predicted_label_name = class_index[string(maxid)][2]
        return predicted_label_name
    end
)
# +
# Load the ILSVRC label metadata into a Dict
ILSVRC_class_index = json.load(open("./data/imagenet_class_index.json", "r"))

# Create the ILSVRCPredictor instance
predictor = ILSVRCPredictor(ILSVRC_class_index)

# Load the input image
image_file_path = "./pytorch_advanced/1_image_classification/data/goldenretriever-3724972_640.jpg"
img = Image.open(image_file_path) # [height][width][RGB]

# Preprocess, then add the batch dimension
# (resize/mean/std are defined in the preprocessing cell above)
transform = BaseTransform(resize, mean, std) # build the preprocessing pipeline
img_transformed = transform(img) # torch.Size([3, 224, 224])
inputs = img_transformed.unsqueeze_(0) # torch.Size([1, 3, 224, 224])

# Run the model and convert its output to a label
out = net(inputs) # torch.Size([1, 1000])
result = predictor[:predict_max](out)

# Print the prediction
println("入力画像の予測結果:", result)
# -
| 05-algorithm/ml-dl/notebook/01-01_load_vgg-julia.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "-"}
# # Recurrent Neural Networks I
#
# Classical neural networks, including convolutional ones, suffer from two severe limitations:
#
# # + They only accept a fixed-sized vector as input and produce a fixed-sized vector as output.
# # + They do not consider the sequential nature of some data (language, video frames, time series, etc.)
#
# Recurrent neural networks overcome these limitations by allowing to operate over sequences of vectors (in the input, in the output, or both).
# -
# ## Vanilla Recurrent Neural Network
#
# <img src="https://github.com/DataScienceUB/DeepLearningfromScratch2018/blob/master/images/vanilla.png?raw=true" alt="" style="width: 400px;"/>
#
# ## Unrolling in time of a RNN
#
# By unrolling we mean that we write out the network for the complete sequence.
#
# $$ s_t = \mbox{tanh }(Ux_t + W s_{t-1}) $$
# $$ y_t = \mbox{softmax }(V s_t) $$
#
# <img src="https://github.com/DataScienceUB/DeepLearningfromScratch2018/blob/master/images/unrolling.png?raw=true" alt="" style="width: 600px;"/>
# <img src="https://github.com/DataScienceUB/DeepLearningfromScratch2018/blob/master/images/TanhReal.gif?raw=true" alt="" style="width: 200px;"/>
#
# ## Vanilla Recurrent Neural Network (minibatch version)
#
# <img src="https://github.com/DataScienceUB/DeepLearningfromScratch2018/blob/master/images/minibatch.png?raw=true" alt="" style="width: 400px;"/>
#
# + We can think of the **hidden state** $s_t$ as a memory of the network that captures information about the previous steps.
# + The RNN **shares the parameters** $U,V,W$ across all time steps.
# + It is not necessary to have outputs $y_t$ at each time step.
# <img src="https://github.com/DataScienceUB/DeepLearningfromScratch2018/blob/master/images/kar.png?raw=true" alt="" style="width: 600px;"/>
#
# <center>
# Source: http://karpathy.github.io/2015/05/21/rnn-effectiveness/
# </center>
# RNN have shown success in:
#
# + Language modeling and generation.
# + Machine Translation.
# + Speech Recognition.
# + Image Description.
# + Question Answering.
# + Etc.
# ## RNN Computation
#
# ```python
# class RNN:
# #...
# def step(self,x):
# self.h = np.tanh(np.dot(self.W_hh, self.h) +
# np.dot(self.W_xh, self.x))
# y = np.dot(self.W_hy, self.h)
# return y
# #...
# ```
#
# We can go deep by stacking RNNs:
# ```python
# y1 = rnn1.step(x)
# y2 = rnn2.step(y1)
# ```
# Training a RNN is similar to training a traditional NN, but with some modifications. The main reason is that parameters are shared by all time steps: in order to compute the gradient at $t=4$, we need to propagate 3 steps and sum up the gradients. This is called **Backpropagation through time (BPTT)**.
#
# The inputs of a recurrent network are always vectors, but we can process sequences of symbols/words by representing these symbols by numerical vectors.
#
# Let's suppose we are classifying a series of words: $x_1, ..., x_{t-1}, x_t, x_{t+1}, ... x_{T}$ are the word vectors corresponding to a corpus with T symbols. Then, the relationship to compute the hidden layer output features at each time-step $t$ is $h_t = \sigma(W^{(hh)} h_{t-1} + W^{(hx)} x_{t})$, where:
#
# + $x_{t} \in \mathbb{R}^{d}$ is input word vector at time $t$.
# + $W^{hx} \in \mathbb{R}^{D_h \times d}$ is the weights matrix used to condition the input word vector, $x_t$.
# + $W^{hh} \in \mathbb{R}^{D_h \times D_h}$ is the weights matrix used to condition the output of the previous time-step, $h_{t-1}$.
# + $h_{t-1} \in \mathbb{R}^{D_h}$ is the output of the non-linear function at the previous time-step, $t-1$.
# + $h_0 \in \mathbb{R}^{D_h}$ is an initialization vector for the hidden layer at time-step $t = 0$.
# + $\sigma ()$ is the non-linearity function (normally, ``tanh``).
# + $\hat{y}_t = softmax (W^{(S)}h_t)$ is the output probability distribution over the vocabulary at each time-step $t$. Essentially, $\hat{y}_t$ is the next predicted word given the document context score so far (i.e. $h_{t-1}$) and the last observed word vector $x^{(t)}$. Here, $W^{(S)} \in \mathbb{R}^{|V| \times D_h}$ and $\hat{y} \in \mathbb{R}^{|V|}$ where $|V|$ is the vocabulary.
#
# The loss function used in RNNs is often the cross entropy error:
#
# $$
# L^{(t)}(W) = - \sum_{j=1}^{|V|} y_{t,j} \times log (\hat{y}_{t,j})
# $$
#
# The cross entropy error over a corpus of size $T$ is:
#
# $$
# L = \dfrac{1}{T} \sum_{t=1}^{T} L^{(t)}(W) = - \dfrac{1}{T} \sum_{t=1}^{T} \sum_{j=1}^{|V|} y_{t,j} \times log (\hat{y}_{t,j})
# $$
#
# In the case of classifying a series of symbols/words, the *perplexity* measure can be used to assess the goodness of our model. It is basically 2 to the power of the negative log probability of the cross entropy error function:
#
# $$
# Perplexity = 2^{L}
# $$
#
# Perplexity is a measure of confusion where lower values imply more confidence in predicting the next word in the sequence (compared to the ground truth outcome).
# ## RNN Training
#
# Recurrent neural networks propagate weight matrices from one time-step to the next. Recall the goal of a RNN implementation is to enable propagating context information through faraway time-steps. When these propagation results in a long series of matrix multiplications, weights can vanish or explode.
#
# Once the gradient value grows extremely large, it causes an overflow (i.e. ``NaN``) which is easily detectable at runtime; this issue is called the *Gradient Explosion Problem*.
#
# When the gradient value goes to zero, however, it can go undetected while drastically reducing the learning quality of the model for far-away words in the corpus; this issue is called the *Vanishing Gradient Problem*.
# ### Gradient Clipping
#
# To solve the problem of exploding gradients, <NAME> first introduced a simple heuristic solution that *clips* gradients to a small number whenever they explode. That is, whenever they reach a certain threshold, they are set back to a small number.
#
# <img src="https://github.com/DataScienceUB/DeepLearningfromScratch2018/blob/master/images/exploding.png?raw=true" alt="" style="width: 400px;"/>
#
# ### Better initialization
#
# To solve the problem of vanishing gradients, instead of initializing $W^{hh}$ randomly, starting off from **random orthogonal matrices** works better, i.e., a square matrix $W$ for which $W^T W=I$.
#
# There are two properties of orthogonal matrices that are useful for training deep neural networks:
# + they are norm-preserving, i.e., $ ||Wx||^2=||x||^2$, and
# + their columns (and rows) are all orthonormal to one another.
#
# At least at the start of training, the first of these should help to keep the norm of the input constant throughout the network, which can help with the problem of exploding/vanishing gradients.
#
# Similarly, an intuitive understanding of the second is that having orthonormal weight vectors encourages the weights to learn different input features.
#
# You can obtain a random $n \times n$ orthogonal matrix $W$, (uniformly distributed) by performing a QR factorization of an $n \times n$ matrix with elements i.i.d. Gaussian random variables of mean $0$ and variance $1$. Here is an example:
# +
import numpy as np
from scipy.linalg import qr
# Demo: build a random orthogonal matrix via QR factorization of a
# Gaussian matrix (output differs per run -- the RNG is unseeded).
n = 3
H = np.random.randn(n, n)
print(H)
print('\n')
Q, R = qr(H)
# Q.dot(Q.T) should print (numerically) the identity, confirming orthogonality.
print (Q.dot(Q.T))
print(Q)
# -
# ### Steeper Gates
#
# We can make the "gates steeper" so they change more repidly from 0 to 1 and the model is learnt quicker.
#
# <img src="https://github.com/DataScienceUB/DeepLearningfromScratch2018/blob/master/images/steeper.png?raw=true" alt="" style="width: 600px;"/>
#
# ### Gated Units
#
# The most important types of gated RNNs are:
#
# + Long Short Term Memories (LSTM). It was introduced by S.Hochreiter and J.Schmidhuber in 1997 and is widely used. LSTM is very good in the long run due to its high complexity.
# + Gated Recurrent Units (GRU). It was recently introduced by K.Cho. It is simpler than LSTM, fasters and optimizes quicker.
#
# #### LSTM
#
# The key idea of LSTMs is the cell state $C$, the horizontal line running through the top of the diagram.
#
# The cell state is kind of like a conveyor belt. It runs straight down the entire chain, with only some minor linear interactions. It’s very easy for information to just flow along it unchanged.
#
# <img src="https://github.com/DataScienceUB/DeepLearningfromScratch2018/blob/master/images/lstm.png?raw=true" alt="Source: http://colah.github.io/posts/2015-08-Understanding-LSTMs/" style="width: 600px;"/>
#
# LSTM has the ability to remove or add information to the cell state, carefully regulated by structures called gates.
#
# Gates are a way to optionally let information through. They are composed out of a *sigmoid* neural net layer and a pointwise multiplication operation.
#
# Let us see how a LSTM uses $h_{t-1}, C_{t-1}$ and $x_{t}$ to generate the next hidden states $C_t, h_{t}$:
#
# $$ f_t = \sigma(W_f \cdot [h_{t-1}, x_t]) \mbox{ (Forget gate)} $$
# $$ i_t = \sigma(W_i \cdot [h_{t-1}, x_t]) \mbox{ (Input gate)} $$
# $$ \tilde C_t = \operatorname{tanh}(W_C \cdot [h_{t-1}, x_t]) $$
# $$ C_t = f_t * C_{t-1} + i_t * \tilde C_t \mbox{ (Update gate)} $$
# $$ o_t = \sigma(W_o \cdot [h_{t-1}, x_t]) $$
# $$ h_t = o_t * \operatorname{tanh}(C_t) \mbox{ (Output gate)} $$
#
# There are other variants of LSTM (f.e. LSTM with peephole connections of Gers & Schmidhuber (2000))
# #### GRU
#
# The transition from hidden state $h_{t-1}$ to $h_{t}$ in vanilla RNN is defined by using an affine transformation and a point-wise nonlinearity.
#
# What motivates the use of gated units? Although RNNs can theoretically capture long-term dependencies, they are very hard to actually train to do this. Gated recurrent units are designed in a manner to have more persistent memory thereby making it easier for RNNs to capture long-term dependencies.
#
# <img src="https://github.com/DataScienceUB/DeepLearningfromScratch2018/blob/master/images/gru.png?raw=true" alt="" style="width: 300px;"/>
# <center>
# Source: http://colah.github.io/posts/2015-08-Understanding-LSTMs/
# </center>
#
# Let us see how a GRU uses $h_{t-1}$ and $x_{t}$ to generate the next hidden state $h_{t}$.
#
# $$ z_{t} = \sigma(W_z \cdot [x_{t}, h_{t-1}]) \mbox{ (Update gate)}$$
# $$ r_{t} = \sigma(W_r \cdot [x_{t}, h_{t-1}]) \mbox{ (Reset gate)}$$
# $$ \tilde{h}_{t} = \operatorname{tanh}(r_{t} \cdot [x_{t}, r_t \circ h_{t-1}] ) \mbox{ (New memory)}$$
# $$ h_{t} = (1 - z_{t}) \circ \tilde{h}_{t-1} + z_{t} \circ h_{t} \mbox{ (Hidden state)}$$
#
# It combines the forget and input gates into a single “update gate.” It also merges the cell state and hidden state, and makes some other changes. The resulting model is simpler than standard LSTM models.
# ## `keras`
#
# > Keras is a high-level neural networks library, written in Python and capable of running on top of either TensorFlow or Theano. It was developed with a focus on enabling fast experimentation.
#
# The core data structure of Keras is a model, a way to organize layers. The main type of model is the ``Sequential model``, a linear stack of layers.
#
# ```Python
# from keras.models import Sequential
# model = Sequential()
# ```
# Stacking layers is as easy as ``.add()``:
#
# ```Python
# from keras.layers import Dense, Activation
#
# model.add(Dense(output_dim=64, input_dim=100))
# model.add(Activation("relu"))
# model.add(Dense(output_dim=10))
# model.add(Activation("softmax"))
# ```
#
# Once your model looks good, configure its learning process with
# ``.compile()``:
#
# ```Python
# model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
# ```
# If you need to, you can further configure your optimizer.
#
# ```Python
# from keras.optimizers import SGD
# model.compile(loss='categorical_crossentropy', optimizer=SGD(lr=0.01, momentum=0.9, nesterov=True))
# ```
# You can now iterate on your training data in batches:
#
# ```Python
# model.fit(X_train, Y_train, nb_epoch=5, batch_size=32)
# ```
#
# Evaluate your performance in one line:
# ```Python
# loss_and_metrics = model.evaluate(X_test, Y_test, batch_size=32)
# ```
# Or generate predictions on new data:
#
# ```Python
# classes = model.predict_classes(X_test, batch_size=32)
# proba = model.predict_proba(X_test, batch_size=32)
# ```
# ### RNN in Keras
#
# Whenever you train or test your LSTM/GRU, you first have to build your input matrix $X$ of shape ``nb_samples``, ``timesteps``, ``input_dim`` where your batch size divides ``nb_samples``.
#
# For instance, if ``nb_samples=1024`` and ``batch_size=64``, it means that your model will receive blocks of 64 samples, compute each output (whatever the number of timesteps is for every sample), average the gradients and propagate it to update the parameters vector.
#
# > By default, **Keras shuffles (permutes) the samples in $X$** and the dependencies between $X_i$ and $X_{i+1}$ are lost.
#
# With the stateful model, all the states are propagated to the next batch. It means that the state of the sample located at index $i$, $X_i$, will be used in the computation of the sample $X_{i+bs}$ in the next batch, where $bs$ is the batch size (no shuffling).
#
# > Keras requires the batch size in ``stateful`` mode and ``shuffle=False``.
# +
'''Example script showing how to use stateful RNNs
to model long sequences efficiently.
'''
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers.core import Dense
from keras.layers.recurrent import LSTM, GRU
# %matplotlib inline
# since we are using stateful rnn tsteps can be set to 1
tsteps = 1
batch_size = 25   # must evenly divide the sample count for stateful Keras RNNs
# number of elements ahead that are used to make the prediction
lahead = 1
def gen_cosine_amp(amp=100, period=25, x0=0, xn=50000, step=1, k=0.0001):
    """Generate a cosine time series with exponentially decaying amplitude.

    Arguments:
        amp: amplitude of the cosine function
        period: period of the cosine function
        x0: initial x of the time series
        xn: final x of the time series
        step: step of the time series discretization
        k: exponential decay rate

    Returns a float array of shape ((xn - x0) * step, 1, 1).
    """
    # Sample positions x0, x0+step, ... — same count as the original loop.
    idx = x0 + step * np.arange((xn - x0) * step)
    values = amp * np.cos(idx / (2 * np.pi * period)) * np.exp(-k * idx)
    # Trailing singleton dims match the (samples, timesteps, features)
    # layout expected by the stateful-RNN demo below.
    return values.reshape(-1, 1, 1)
print('Generating Data')
cos = gen_cosine_amp()
print('Input shape:', cos.shape)
# Target at step i is the mean of the next `lahead` samples; with lahead=1
# this is simply the next sample. The final `lahead` targets stay zero.
expected_output = np.zeros((len(cos), 1))
for i in range(len(cos) - lahead):
    expected_output[i, 0] = np.mean(cos[i + 1:i + lahead + 1])
print('Output shape')
print(expected_output.shape)
print("Sample: ",cos[0], expected_output[0])
plt.subplot(2, 1, 1)
plt.plot(expected_output)
plt.title('Expected')
plt.show()
# +
epochs = 25
print('Creating Model')
# Two stacked stateful LSTMs: batch_input_shape is mandatory in stateful
# mode, and the LSTM state carries over between batches within an epoch.
model = Sequential()
model.add(LSTM(50,
               batch_input_shape=(batch_size, tsteps, 1),
               return_sequences=True,
               stateful=True))
model.add(LSTM(50,
               batch_input_shape=(batch_size, tsteps, 1),
               return_sequences=False,
               stateful=True))
model.add(Dense(1))
model.compile(loss='mse', optimizer='rmsprop')
print('Training')
# One fit() call per epoch with shuffle=False (required for stateful RNNs),
# resetting the carried LSTM state between epochs.
# NOTE(review): nb_epoch is the Keras 1 keyword; Keras 2 renamed it `epochs`.
for i in range(epochs):
    print('Epoch', i, '/', epochs)
    model.fit(cos,
              expected_output,
              batch_size=batch_size,
              verbose=1,
              nb_epoch=1,
              shuffle=False)
    model.reset_states()
print('Predicting')
predicted_output = model.predict(cos, batch_size=batch_size)
print('Ploting Results')
plt.subplot(2, 1, 1)
plt.plot(predicted_output)
plt.title('Predicted')
plt.show()
# -
# ## Keras online demo
#
# https://transcranial.github.io/keras-js/#/
| 5.1 Recurrent Neural Networks.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Setup
# +
# import packages
# %run ../../global_packages.py
# get the global parameters
# %run ../../global_pars.py
# import your local functions
sys.path.insert(1, '../../')
from global_functions import *
# make sure the figures plot inline rather than at the end
# %matplotlib inline
# -
# # Paths and Parameters
# +
# Output written to the current working directory.
outfn = 'chl_processed.nc'
# get data from tigress
inpath = 'raw_data/concatenated/'
infn = 'ESACCI-OC-L3S-CHLOR_A-MERGED-1M_MONTHLY_4km_GEO_PML_OCx-fv5.0_19970904_20200601.nc'
# -
# # Get Data and Subset
# +
ds = xr.open_dataset(inpath + infn)
# Subset ------------------------------------------------#
# lat/lon/time bounds come from global_pars.py loaded at the top.
lat_slice = slice(lat_bounds[0], lat_bounds[1])
lon_slice = slice(lon_bounds[0], lon_bounds[1])
time_slice = slice(ts,te)
ds = ds.sel(lat=lat_slice,lon=lon_slice, time = time_slice)
# Get data, selecting lat/lon slice
# NOTE(review): ds was already subset above, so this second .sel with the
# same slices is redundant (harmless, but can be dropped).
mon_chl = ds['chlor_a'].sel(lat=lat_slice,lon=lon_slice, time = time_slice)
lat = mon_chl.lat.values
lon = mon_chl.lon.values
# -
# # Find Climatologies
# Resources: [link](http://xarray.pydata.org/en/stable/examples/monthly-means.html)
# Monthly climatology: mean over all years for each calendar month.
mon_chl_mon_clim = mon_chl.groupby('time.month').mean('time')
# # Find Anomalies
# %%time
# monthly avg data - monthly climatology
mon_chl_mon_anom = mon_chl.groupby('time.month') - mon_chl_mon_clim
# +
# convert to xarray dataset
ds=xr.Dataset(coords={'lon': mon_chl.lon,
                      'lat': mon_chl.lat,
                      'time': mon_chl.time})
# add variables to dataset
# monthly means (time, lat, lon)
ds["mon_chl"]=xr.DataArray(mon_chl,dims = ['time','lat', 'lon'],
                           coords =[mon_chl.time,mon_chl.lat,mon_chl.lon])
# clim -- note this variable is indexed by calendar month, not time
ds["mon_chl_mon_clim"]=xr.DataArray(mon_chl_mon_clim,dims = ['month','lat', 'lon'],
                                    coords =[mon_chl_mon_clim.month,mon_chl.lat,mon_chl.lon])
# anom
ds["mon_chl_mon_anom"]=xr.DataArray(mon_chl_mon_anom,dims = ['time','lat', 'lon'],
                                    coords =[mon_chl_mon_anom.time,mon_chl.lat,mon_chl.lon])
# +
# delete if already present (to_netcdf cannot overwrite an open file)
if os.path.isfile(outfn):
    os.remove(outfn)
ds.to_netcdf(outfn,mode='w',format = "NETCDF4")
ds
# -
| data_processing/4_CHL/0_process_chl.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Homework 2 - BLP Estimation
# Solutions to ECO 384k Problem set 2 at the University of Texas by <NAME>
#
# ## Question 1 - Estimation
import pandas as pd
import math
import numpy as np
from scipy.optimize import minimize
from scipy.optimize import fsolve
import statsmodels.api as sm
from scipy.stats import norm
from statsmodels.sandbox.regression.gmm import GMM
# ### Data setup
#
# Below I write functions to set up the data. The most important is `setup_hausman`. This function calculates the instruments for the estimation. I use 2. I use the mean price for product $i$ in other markets following Hausman. I also use the mean characteristics of the other products $j \neq i$ in market $m$ following BLP.
# +
def comp_outside_good(data,name):
    """Append each market's outside-good share to `data`.

    The outside share is 1 minus the sum of the inside-good shares
    (column `name`) across all plans in the market.
    """
    totals = data.groupby('Market_ID')[[name]].sum()
    totals['Outside Good Share'] = 1 - totals[name]
    merged = pd.merge(data, totals[['Outside Good Share']],
                      right_index=True, left_on='Market_ID')
    return merged
# Load the market data and attach the outside-good share column.
data = pd.read_csv('data.csv')
data = comp_outside_good(data,'Inside Good Share')
# +
def setup_data(data):
    """Split the frame into regressors x and market shares y.

    Returns (x, y): x holds product characteristics plus price,
    y holds the inside/outside good shares.
    """
    feature_cols = ['Network Score','Satisfaction Score','PPO','Premium']
    share_cols = ['Inside Good Share','Outside Good Share']
    return data[feature_cols], data[share_cols]
def setup_hausman(data):
#calculate hausmann insturments
price = data['Premium']
mkt_dum = pd.get_dummies(data['Market_ID'],prefix='mkt',drop_first=True)
plan_dum = pd.get_dummies(data['Plan_ID'],prefix='plan',drop_first=True)
exog = np.array( data[['Network Score','Satisfaction Score', 'PPO']])
# number of other products (in different mkts)
hausman_instr = data[['Market_ID','Plan_ID']].groupby('Plan_ID').count()
hausman_instr = pd.merge(data[['Market_ID','Plan_ID']],
hausman_instr[['Market_ID']], right_index=True, left_on = 'Plan_ID')
hausman_instr = np.array([hausman_instr['Market_ID_y']]).transpose()
#calc avg price in other markets
hausman_instr2 = data[['Plan_ID','Premium']].groupby('Plan_ID').mean()
hausman_instr2 = pd.merge(data[['Plan_ID']],
hausman_instr2[['Premium']], right_index=True, left_on = 'Plan_ID')
hausman_instr2 = np.array(hausman_instr2)[:,-1:]
hausman_instr2 = (hausman_instr2*hausman_instr - exog[:,-1:])/(hausman_instr-1)
#no of competitors
BLP_instr = data[['Market_ID','Plan_ID']].groupby('Market_ID').count()
BLP_instr = pd.merge(data[['Market_ID','Plan_ID']],
BLP_instr[['Plan_ID']], right_index=True, left_on = 'Market_ID')
BLP_instr = np.array([BLP_instr['Plan_ID_y']]).transpose()
#average characteristics among competititors
BLP_instr2 = data[['Market_ID','Network Score','Satisfaction Score']].groupby('Market_ID').mean()
BLP_instr2 = pd.merge(data[['Market_ID']],
BLP_instr2[['Network Score','Satisfaction Score']], right_index=True, left_on = 'Market_ID')
BLP_instr2 = (np.array(BLP_instr2)[:,1:]*BLP_instr - exog[:,:-1])/(BLP_instr-1)
#sum the characteristics together?
BLP_instr2 = BLP_instr2.sum(axis=1).reshape((3300,1))
#concat hausman instr with exog variables
instr = np.concatenate( (exog, hausman_instr2, BLP_instr2), axis =1 )
return instr
#pre process for testing
x,y = setup_data(data)
z = setup_hausman(data)
X,Z = np.array(x), np.array(z)
V = np.linalg.inv( z.transpose().dot(z) ) #set up initial weight matrix (2SLS weight (Z'Z)^-1)
# +
#set up useful global variables (used throughout the estimation functions below)
NMKTS = data['Market_ID'].nunique()   # number of markets
NPLANS = data['Plan_ID'].nunique()    # number of distinct plans
NOBS = data['Plan_ID'].count()        # number of market-plan observations
NSIM = 50                             # simulated consumers per market

theta1 = np.array([4,1.5,.7,-1.5])
theta2 = np.array([2,2,1]) # initialize theta2 for testing purposes
delta = np.ones(NOBS)*(-2)

#print global variables
print NMKTS,NPLANS,NOBS
# -
# I set up my draws at the market level, so if observation $i$ and $j$ are in the same
# market, they have the same random draw.
#
# If they are in different markets, their draws will be different as well.
# +
#set up random draws v
def gen_newsim():
    """Draw NSIM standard-normal 3-vectors for every market; all observations
    in the same market share identical draws. Returns shape (NSIM, 3, NOBS).
    Relies on module-level `data`, `NOBS`, `NSIM`."""
    mkt_ids = np.array(data['Market_ID'])
    # one (3, NSIM) draw per market, generated in market order 1..600
    draws = {}
    for mkt in range(1, 601):
        draws[mkt] = np.random.normal(size=(3, NSIM))
    sims = [draws[mkt] for mkt in mkt_ids]
    return np.array(sims).transpose()
v = gen_newsim()
#np.savetxt("simulations.csv", v.reshape(3*NSIM,3300), delimiter=",")
# -

# use the same saved simulations each run so results are reproducible
# (NOTE: this overwrites the fresh draws generated just above)
v = np.genfromtxt('simulations.csv', delimiter=',').reshape(NSIM,3,3300)
# ### Estimating coefficients
# #### Calculating $\delta_{jt}$, $\xi_{jt}$
#
# The first part of the estimation involves calculating the mean utility with the BLP inversion and the mean unobservable. I follow Nevo (2000) for the calculation
# +
def cal_sim_s(data, v, delta, theta2):
    """calculate market share for each simulated consumer

    Returns an (NSIM, NOBS) array of logit choice probabilities
    exp(delta_j + sum_k theta2_k * v_ik * x_jk) normalized by
    1 + sum of numerators over plans in the same market (the +1 is the
    outside good).
    """
    #copy x and delta for simulations using tiling
    x = np.array(data.copy()[['Network Score','Satisfaction Score','PPO']]).transpose()
    x = np.tile(x,(NSIM,1,1))
    # NOTE(review): the hard-coded 3300 ties this function to the full sample size
    theta2 = np.tile( np.array([theta2]).transpose() ,(NSIM,1,3300))
    delta = np.tile( delta ,(NSIM,1))

    # Compute the numerator for each market
    sim_exp = pd.DataFrame( np.exp(delta + (theta2*v*x).sum(axis=1)).transpose() )

    #sum up between markets
    sim_exp['mkt_id'] = data['Market_ID']
    sum_exp = sim_exp.groupby('mkt_id').sum()
    sum_exp = pd.merge(data.copy()[['Market_ID']], sum_exp,
                right_index=True, left_on = 'Market_ID')

    #format so I can broadcast (drop the mkt_id column / key column)
    sim_exp = np.array(sim_exp).transpose()[:-1]
    sum_exp = np.array(sum_exp).transpose()[1:] + 1   # +1 for the outside good

    return sim_exp/sum_exp
def cal_s(data, v, delta, theta2):
    """Predicted market shares: individual simulated choice probabilities
    averaged over the NSIM consumers."""
    sim_probs = cal_sim_s(data, v, delta, theta2)
    return (1./NSIM) * sim_probs.sum(axis=0)
# -
def cal_delta(data, v, theta2, error = 1e-3, maxiter = 500):
    """Calculate mean utility via contraction mapping
    described in BLP 1995

    Iterates delta <- delta + log(observed share) - log(predicted share)
    until the update is small or maxiter is reached. Relies on module-level
    NOBS for the initial delta.
    """
    niter = 0
    #initialize loop parameters
    delta = np.zeros(NOBS)
    s = cal_s(data, v, delta, theta2)
    diff = np.log(data['Inside Good Share']) - np.log(s)

    # continue while BOTH criteria exceed tolerance: max update > 1e-10 AND
    # mean update > `error` (either one alone stops the contraction)
    while ((abs(diff).max() > 1e-10) #this is easier to converge
            and (abs(diff).mean() > error)
            and niter < maxiter):
        s = cal_s(data, v, delta, theta2)
        diff = np.log(data['Inside Good Share']) - np.log(s)
        if np.isnan(diff).sum():
            raise Exception('nan in diffs')
        delta += diff
        niter += 1
    return delta
def cal_xi(data, delta, theta1):
    """Mean 'unexplained' utility: the part of delta not captured by the
    linear index X @ theta1."""
    x, _ = setup_data(data)
    fitted = np.matmul(np.array(x), theta1)
    return delta - fitted
# #### Calculating $\theta_1,\theta_2$
#
# Here the linear parameter $\theta_1 = (\alpha, \beta)$
#
# I only solve GMM over $\theta_2$, the non-linear parameters. $\theta_1$ is calculated as a function of $\delta$ using the formula from Nevo 2000
#
# $$\hat{\theta_1} = (X'Z V^{-1} Z'X)^{-1} X'Z V^{-1} Z' \delta(\hat{\theta}_2) $$
#
#
# I calculate the covariance matrix for GMM following chapter 12 in Hansen's Econometrics textbook.
#
# Specifically, I used the formula:
#
# $$\hat{V} = \dfrac{1}{n} \sum_n z_i z_i' \hat{\xi}_i^2 - \overline{g}_n \overline{g}_n'$$
#
# Where $$\bar{g}_n = \dfrac{1}{n} \sum_n z_i \hat{\xi_i} $$
def calc_var(data, xi):
    """calculate optimal covariance matrix
    for GMM

    Computes (1/n) * [ sum_i z_i z_i' xi_i^2 - (Z'xi)(Z'xi)' ].
    NOTE(review): gn here is the SUM Z'xi, not the mean g-bar, so the
    second term equals n * gbar gbar' rather than gbar gbar' as in the
    formula quoted above -- verify the intended scaling.
    """
    x,y = setup_data(data)
    z = setup_hausman(data)
    X,Z = np.array(x), np.array(z)

    # ensure xi is a column vector
    if xi.shape == (3300,):
        xi = np.array([xi]).transpose()

    gn = Z.transpose().dot(xi)
    gn = gn.dot(gn.transpose())

    gn2 = np.zeros((5,5))   # 5 instruments -> 5x5 outer products
    for i in range(NOBS):
        Zi = np.array([Z[i]])
        gn2 = gn2 + Zi.transpose().dot(Zi) *(xi[i])**2

    return (1./NOBS)*(gn2 - gn)
def cal_theta1(data, delta, z, V):
    """ calculate theta_1 using FOCs
    (X1'Z T Z'X )^-1 X1'Z T Z' delta """
    x, _ = setup_data(data)
    X = np.array(x)
    Z = np.array(z)
    # X'Z V appears in both factors, so compute it once
    XtZV = X.transpose().dot(Z).dot(V)
    bread = np.linalg.inv(XtZV.dot(Z.transpose().dot(X)))
    meat = XtZV.dot(Z.transpose()).dot(delta)
    return bread.dot(meat)
# +
def gmm_objective(theta2_init, data, v, z, V):
    """GMM objective xi' Z V Z' xi (Nevo 2000) as a function of theta2.
    theta1 is concentrated out via its closed form; delta comes from the
    BLP contraction."""
    Z = np.array(z)
    delta = cal_delta(data, v, theta2_init)
    theta1 = cal_theta1(data, delta, z, V)
    xi = cal_xi(data, delta, theta1)
    moments = Z.transpose().dot(xi)
    return moments.dot(V).dot(moments)
def calc_theta2(data, v, z, T, theta2_init,NM=True):
    """Minimize the GMM objective over theta2 with scipy.
    NM=True uses Nelder-Mead (loose tolerances); otherwise BFGS."""
    if NM:
        method = 'Nelder-Mead'
        opts = {'xatol': 0.001, 'fatol': 0.1, 'maxiter':100, 'disp': True}
    else:
        method = 'BFGS'
        opts = {'maxiter':100, 'disp': True}
    result = minimize(gmm_objective, theta2_init, args=(data, v, z, T),
                      method=method, options=opts)
    # random-coefficient scales are identified only up to sign
    return abs(result.x)
# +
theta2_init = np.array([2,2,1])

def calc_theta(data, v, theta2_init, stages=2):
    """put everything together to calculate theta1 and theta2

    Two-stage GMM: stage one uses the 2SLS weight (Z'Z)^-1, stage two
    re-weights with the inverse of the estimated moment covariance.
    """
    #initialize theta
    x,y = setup_data(data)
    z = setup_hausman(data)
    X,Z = np.array(x), np.array(z)
    theta2 = theta2_init

    #on first step, use consistent approximation of V
    V = np.linalg.inv( Z.transpose().dot(Z) )
    for i in range(stages):
        #on second use V using estimated xi
        # NOTE(review): delta/theta1 referenced in this branch are the values
        # set at the END of the previous stage -- valid only because the
        # i==1 iteration always follows i==0
        if i==1:
            xi = cal_xi(data, delta, theta1)
            xi =np.array([xi]).transpose()
            V = np.linalg.inv( calc_var(data, xi) )
        theta2 = calc_theta2(data, v, z, V, theta2)
        delta = cal_delta(data, v, theta2)
        theta1 = cal_theta1(data, delta, z, V)
    return theta1, theta2

theta = calc_theta(data, v, theta2_init, stages=2)
# +
print '------------------------------------------------------------------'
print 'Mean Coefficients \n------------------------------------------------------------------'
labels1 = np.array(['Network Score','Satisfaction Score','PPO','Premium'])
print pd.DataFrame([labels1, theta[0]])
print '------------------------------------------------------------------'
print 'Variance Coefficients'
print '------------------------------------------------------------------'
print pd.DataFrame([labels1, theta[1]])
print '------------------------------------------------------------------'
# +
#save xi and write to array for counterfactuals
theta1_est, theta2_est = theta
# recover the mean utilities and unobservables at the estimated parameters
delta_est = cal_delta(data, v, theta2_est)
xi_est = cal_xi(data, delta_est, theta1_est)
np.savetxt("xi.csv", xi_est, delimiter=",")

print theta
# -
# ### Calculating Standard Errors
#
# In order to calculate standard errors I used a numeric gradient.
#
# I tried following Nevo to calculate $\dfrac{\partial \delta_{jt}}{\partial \theta_l}$ using the implicit function theorem. However, I was unable to complete this due to time constraints.
# +
def gradient_helper(theta1, theta2, data, v, z):
    """Evaluate the moment vector xi' Z at the supplied (theta1, theta2);
    used by the numeric differentiation below."""
    Z = np.array(z)
    delta = cal_delta(data, v, theta2)
    xi = cal_xi(data, delta, theta1)
    return xi.transpose().dot(Z)
def gradient_numer(theta, data, v, z, h=1e-8):
    """Forward-difference numeric gradient of the moment vector with
    respect to each coefficient.

    Bug fix: the original `theta_perturb = theta` aliased the array, so
    every iteration permanently added h to theta[i] and later columns were
    differentiated around a drifting point. Perturb a copy instead; the
    baseline fx is then loop-invariant and computed once.
    """
    gamma = []
    theta = np.concatenate(theta)
    theta1 = theta[0:4]   # linear (mean) coefficients
    theta2 = theta[4:]    # non-linear (variance) coefficients
    # baseline moment vector -- theta is never mutated now
    fx = gradient_helper(theta1, theta2, data, v, z)
    for i in range(len(theta)):
        # perturb coefficient i on a fresh copy of theta
        theta_perturb = theta.copy()
        theta_perturb[i] = theta_perturb[i] + h
        theta1_perturb = theta_perturb[0:4]
        theta2_perturb = theta_perturb[4:]
        fx_plush = gradient_helper(theta1_perturb, theta2_perturb, data, v, z)
        #calculate gradient
        gamma_i = (fx_plush - fx)/h
        gamma.append(gamma_i)
    return np.array(gamma).transpose()
# -
# Below I have calculated standard errors using the formula $$(\Gamma' A \Gamma)^{-1}(\Gamma' A V A \Gamma)^{-1} (\Gamma' A \Gamma)^{-1}$$
#
# Where $\Gamma$ is a numeric approximation of the gradient $A$ is the initial weighting matrix and $V$ is the covaraince matrix (also the optimal weight matrix)
# +
def cal_standard_errors(theta, xi, data, v):
    """Put everything together to compute standard
    errors

    Sandwich formula (G'AG)^-1 (G'AVAG) (G'AG)^-1 / NOBS, with
    A = (Z'Z)^-1 the initial weight, V the moment covariance, and
    G the numeric gradient of the moments.
    """
    #setup variables
    xi =np.array([xi]).transpose()
    x,y = setup_data(data)
    z = setup_hausman(data)

    #set up weight matrices
    X,Z = np.array(x), np.array(z)
    V = calc_var(data, xi)
    A = np.linalg.inv( Z.transpose().dot(Z) )

    G = gradient_numer(theta, data, v, Z)
    GAG_inv = np.linalg.inv( G.transpose().dot(A).dot(G) )
    GAVAG = G.transpose().dot(A).dot(V).dot(A).dot(G)
    return GAG_inv.dot(GAVAG).dot(GAG_inv)/NOBS

# NOTE(review): the covariance is already divided by NOBS inside the
# function; dividing the square root by NOBS again here may double-scale
# the reported standard errors -- verify.
se = np.sqrt ( abs (cal_standard_errors(theta, xi_est, data, v) ) )/NOBS
se1 = np.diagonal(se)[:4]   # linear coefficients
se2 = np.diagonal(se)[4:]   # random-coefficient (variance) parameters
# -
# Below we can see the standard errors calculated using the formula.
# +
print '------------------------------------------------------------------'
print 'Mean Coefficients (Standard Error) \n------------------------------------------------------------------'
labels1 = np.array(['Network Score','Satisfaction Score','PPO','Premium'])
print pd.DataFrame([labels1, se1])
print '------------------------------------------------------------------'
print 'Coefficients Variance (Standard Error)'
print '------------------------------------------------------------------'
print pd.DataFrame([labels1,se2])
print '------------------------------------------------------------------'
# -
| hw2_io/BLP.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="J5Q1gSRtIPol"
# # 中文字手寫圖片合成,基礎教學
# 將印刷體或開源手寫圖片,與任意背景圖片合成
# + colab={"base_uri": "https://localhost:8080/"} id="ock70pfiedzI" executionInfo={"status": "ok", "timestamp": 1627106423771, "user_tz": -480, "elapsed": 10456, "user": {"displayName": "\u9ec3\u51a0\u8c6a", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjjS6kuYns9eyll_83s_PqybWSQ75IviqV86JQU=s64", "userId": "16324273688313666997"}} outputId="7c429d96-1756-46c4-fb14-f9ec1e3dec06"
#@title 下載開源手寫影像資料
# !git clone https://github.com/AI-FREE-Team/Traditional-Chinese-Handwriting-Dataset.git
# !cat /content/Traditional-Chinese-Handwriting-Dataset/data/cleaned_data*.zip* > /content/Traditional-Chinese-Handwriting-Dataset/data/all_data.zip
# !unzip -q /content/Traditional-Chinese-Handwriting-Dataset/data/all_data.zip -d "/content"
# folder: "cleaned_data(50_50)"
# + id="TeBnGCurnWrr" executionInfo={"status": "ok", "timestamp": 1627106425005, "user_tz": -480, "elapsed": 1240, "user": {"displayName": "\u9ec3\u51a0\u8c6a", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjjS6kuYns9eyll_83s_PqybWSQ75IviqV86JQU=s64", "userId": "16324273688313666997"}}
#@title 下載開源印刷體字型
# !wget -q https://github.com/googlefonts/noto-cjk/raw/main/Sans/OTF/TraditionalChinese/NotoSansCJKtc-Medium.otf
# !wget -q https://github.com/googlefonts/noto-cjk/raw/main/Sans/OTF/TraditionalChinese/NotoSansCJKtc-Bold.otf
# font: "NotoSansCJKtc-Medium.otf", "NotoSansCJKtc-Bold.otf"
# + colab={"base_uri": "https://localhost:8080/"} id="pffcyjvpmRG0" executionInfo={"status": "ok", "timestamp": 1627106425360, "user_tz": -480, "elapsed": 367, "user": {"displayName": "\u9ec3\u51a0\u8c6a", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjjS6kuYns9eyll_83s_PqybWSQ75IviqV86JQU=s64", "userId": "16324273688313666997"}} outputId="36047d6f-bfcc-4832-fe82-f346b1ab7eb5"
#@title 取得背景圖片範例
# !git clone https://github.com/KuanHaoHuang/tbrain-esun-handwriting-recognition/
# + colab={"base_uri": "https://localhost:8080/"} id="WKE_CE7ombBJ" executionInfo={"status": "ok", "timestamp": 1627106434393, "user_tz": -480, "elapsed": 9035, "user": {"displayName": "\u9ec3\u51a0\u8c6a", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjjS6kuYns9eyll_83s_PqybWSQ75IviqV86JQU=s64", "userId": "16324273688313666997"}} outputId="ecc463c1-f826-4db1-93fd-a8d76137cf44"
#@title 安裝套件
# !pip install -U albumentations --quiet
# + colab={"base_uri": "https://localhost:8080/"} id="uUzbTWFGmgdC" executionInfo={"status": "ok", "timestamp": 1627106434394, "user_tz": -480, "elapsed": 28, "user": {"displayName": "\u9ec3\u51a0\u8c6a", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjjS6kuYns9eyll_83s_PqybWSQ75IviqV86JQU=s64", "userId": "16324273688313666997"}} outputId="c4fe39e4-92d1-4d0e-b59e-62bca92ba5f0"
# !ls
# + id="ii3-ZPN_mtrY" executionInfo={"status": "ok", "timestamp": 1627106614314, "user_tz": -480, "elapsed": 344, "user": {"displayName": "\u9ec3\u51a0\u8c6a", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjjS6kuYns9eyll_83s_PqybWSQ75IviqV86JQU=s64", "userId": "16324273688313666997"}}
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from PIL import Image, ImageFont, ImageDraw
import numpy as np
import random
import cv2
from skimage import io, transform
import albumentations as A
from pathlib import Path
RANDOM_SEED = 42

img_path = Path("cleaned_data(50_50)") # open-source handwriting images
bg_path = Path("tbrain-esun-handwriting-recognition/images") # sample background images
# Function: resize images to a uniform size
def resize(img, height=120, width=120):
    """Resize `img` to (height, width) with albumentations."""
    resizer = A.Resize(height=height, width=width)
    return resizer(image=img)['image']
# Function: data augmentation
def transform_image(img):
    """Apply random augmentations: shift/scale/rotate (always), occasional
    Gaussian blur, RGB channel shifts, and mild brightness/contrast jitter."""
    trafo = A.Compose(
        [
            A.ShiftScaleRotate(p=1.0, shift_limit=0.15, scale_limit=0.2, rotate_limit=10, border_mode=cv2.BORDER_REPLICATE),
            A.GaussianBlur(blur_limit=(3, 5), p=0.1),
            A.RGBShift(p=1, r_shift_limit=(6, 60), g_shift_limit=(6, 60), b_shift_limit=(6, 60)),
            A.RandomBrightnessContrast(brightness_limit=0.05, contrast_limit=0.05, p=0.3)
        ]
    )
    return trafo(image=img)['image']
# + colab={"base_uri": "https://localhost:8080/", "height": 235} id="ptwYN-ilpMkl" executionInfo={"status": "ok", "timestamp": 1627106617365, "user_tz": -480, "elapsed": 880, "user": {"displayName": "\u9ec3\u51a0\u8c6a", "photoUrl": "https://<KEY>", "userId": "16324273688313666997"}} outputId="e8b2419f-e47b-4363-fb62-8999ba14c234"
#@title 範例:背景圖片
# Sample one background image file (seeded for reproducibility) and load it
random.seed(RANDOM_SEED)
background_img = random.sample([f for f in bg_path.iterdir() if f.is_file()], 1)[0]
background_img = io.imread(background_img)
plt.imshow(background_img)
# + [markdown] id="BypjJHNVpFJ7"
# ## 教學 1: 合成印刷體圖片
# + colab={"base_uri": "https://localhost:8080/", "height": 570} id="H2Cnq0zeoJ0F" executionInfo={"status": "ok", "timestamp": 1627106620045, "user_tz": -480, "elapsed": 1072, "user": {"displayName": "\u9ec3\u51a0\u8c6a", "photoUrl": "https://<KEY>", "userId": "16324273688313666997"}} outputId="2a17738e-4c4a-4022-cee5-78fedd7e3e65"
#@title 範例:印刷體圖片
# Declare the font face and size
font_type = 'NotoSansCJKtc-Bold.otf'
font_size = 50
font = ImageFont.truetype(font_type, font_size, encoding='utf-8')

# Set up the image canvas
frame = np.zeros((120, 120, 3), np.uint8)
frame[...] = 255. # initialize to a pure-white background
frame = Image.fromarray(frame)
draw = ImageDraw.Draw(frame)

chr_position = (3, 12) # where the printed characters appear on the canvas
chr_color = (0, 0, 0) # glyph color: pure black
chr_content = "哈囉"
draw.text(chr_position, chr_content, chr_color, font=font)
img = np.array(frame)

print("印刷體")
plt.imshow(img)
plt.show()
print()
print("印刷體 + Data Augmentation")
plt.imshow(transform_image(img))
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 285} id="THKyzmL3rNcn" executionInfo={"status": "ok", "timestamp": 1627106622340, "user_tz": -480, "elapsed": 529, "user": {"displayName": "\u9ec3\u51a0\u8c6a", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjjS6kuYns9eyll_83s_PqybWSQ75IviqV86JQU=s64", "userId": "16324273688313666997"}} outputId="d1a63575-aa69-47f4-f66f-815ff6bbcdec"
#@title 印刷體與背景合成
# Composite by element-wise minimum: dark glyph pixels win over the
# lighter background pixels.
new_img = np.min(
    [transform_image(resize(img)), resize(background_img)],
    axis=0)
plt.imshow(new_img)
# + [markdown] id="wPufbZK-yCgo"
# ## 教學 2:合成手寫圖片
# + colab={"base_uri": "https://localhost:8080/", "height": 284} id="tvg0bKojx9aj" executionInfo={"status": "ok", "timestamp": 1627106633981, "user_tz": -480, "elapsed": 848, "user": {"displayName": "\u9ec3\u51a0\u8c6a", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjjS6kuYns9eyll_83s_PqybWSQ75IviqV86JQU=s64", "userId": "16324273688313666997"}} outputId="2079f52e-15ba-4837-eaea-3840dc7932bf"
#@title 範例:開源手寫圖片
# Sample one handwriting image file (seeded) and load it
random.seed(RANDOM_SEED)
handwrite_img = random.sample([f for f in img_path.iterdir() if f.is_file()], 1)[0]
handwrite_img = io.imread(handwrite_img)
plt.imshow(handwrite_img)
# + colab={"base_uri": "https://localhost:8080/", "height": 285} id="6B181OnYGoH1" executionInfo={"status": "ok", "timestamp": 1627106637313, "user_tz": -480, "elapsed": 389, "user": {"displayName": "\u9ec3\u51a0\u8c6a", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjjS6kuYns9eyll_83s_PqybWSQ75IviqV86JQU=s64", "userId": "16324273688313666997"}} outputId="3c29ff9e-b7d0-4c37-eac9-23153c5cc16b"
#@title 手寫圖片與背景合成
# Composite the handwriting onto the background via element-wise minimum
new_img = np.min(
    [transform_image(resize(handwrite_img)), resize(background_img)],
    axis=0)
plt.imshow(new_img)
| make_handwriting_image_tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# This notebook is to test the implementation of QuadHSIC.
# +
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import fsic.util as util
import fsic.data as data
import fsic.kernel as kernel
import fsic.indtest as it
import scipy.stats as stats
# +
# font options: global matplotlib styling for every plot in this notebook
font = {
    #'family' : 'normal',
    #'weight' : 'bold',
    'size'   : 16
}

plt.rc('font', **font)
plt.rc('lines', linewidth=2)
#matplotlib.rc('text', usetex=True)
#matplotlib.rcParams['text.latex.preamble']=[r"\usepackage{amsmath}"]
# -
def get_quad_psfunc():
    """
    Return a PairedSource to generate y = x^2 + Gaussian noise.
    """
    draw_x = lambda n: np.random.rand(n, 1)*8 - 4   # x ~ Uniform[-4, 4)
    quad = lambda x: 0.2*x**2 + np.random.randn(x.shape[0], 1)
    return data.PSFunc(quad, draw_x)
# +
# paired source
alpha = 0.05
n = 800
seed = 15
dx = 10
dy = 5
ps = data.PSIndSameGauss(dx, dy)
#ps = get_quad_psfunc()
#ps = data.PSIndUnif(xlb=[0, 3], xub=[1, 10], ylb=[-5, 5], yub=[8, 10])
pdata = ps.sample(n, seed=seed)
tr, te = pdata.split_tr_te(tr_proportion=0.5, seed=10)
# -
def kl_median(pdata):
    """
    Construct one Gaussian kernel for x and one for y, each with its
    bandwidth set by the median heuristic on 1000 sampled points.
    """
    x_samples, y_samples = pdata.xy()
    bw_x = util.sampled_median_distance(x_samples, 1000)**2
    bw_y = util.sampled_median_distance(y_samples, 1000)**2
    return kernel.KGauss(bw_x), kernel.KGauss(bw_y)
# +
# build the kernels via the median heuristic
# NOTE(review): the original "number of test locations" comment does not
# apply here -- QuadHSIC uses no test locations
k, l = kl_median(tr)

# perform the quadratic-time HSIC permutation test on the held-out half
n_permute = 20
qhsic = it.QuadHSIC(k, l, n_permute=n_permute, alpha=alpha)

qhsic.perform_test(te)
# -
# ## Check type-I errors, power
# +
alpha = 0.05       # significance level
n = 800
n_permute = 100    # permutations per test
repeats = 100      # number of independent trials

# data: independent Gaussians, so H0 is true and the rejection rate
# estimates the type-I error (swap in the quadratic source for power)
ps = data.PSIndSameGauss(dx=2, dy=3)
pdata = ps.sample(n, seed=398)
#ps = get_quad_psfunc()
#pdata = ps.sample(n, seed=938)
tr, te = pdata.split_tr_te(tr_proportion=0.5, seed=11)
k, l = kl_median(tr)

# the test
qhsic = it.QuadHSIC(k, l, n_permute=n_permute, alpha=alpha)
# -

nte = 400
all_results = []
for r in range(repeats):
    if (r+1)%10==0:
        print('starting trial: %d'%(r+1))
    # fresh test sample per trial, with a distinct seed
    te = ps.sample(nte, seed=r+2389)
    test_result = qhsic.perform_test(te)
    all_results.append(test_result)

pvalues = np.array([result['pvalue'] for result in all_results])
# NOTE(review): this rebinding shadows the `scipy.stats as stats` import above
stats = np.array([result['test_stat'] for result in all_results])
prob_reject = np.mean(pvalues < alpha)
print('prob(reject H0) = %.4g'%prob_reject)
| tutorial/quad_hsic_test.ipynb |
-- ---
-- jupyter:
-- jupytext:
-- text_representation:
-- extension: .hs
-- format_name: light
-- format_version: '1.5'
-- jupytext_version: 1.14.4
-- kernelspec:
-- display_name: Haskell
-- language: haskell
-- name: haskell
-- ---
-- # Day 12: Leonardo's Monorail
-- Read the puzzle input; each line is one asembunny instruction.
inputLines = lines <$> readFile "input/day12.txt"
import qualified Data.Map as Map
import Data.Array
import Data.Char (isLetter)
import Data.Maybe (fromJust)
-- ## Data Types
-- The computer memory stores the values of the registers. It is implemented as a `Map` that maps the register name to the corresponding value.
type ComputerMemory = Map.Map String Int  -- register name -> value
-- The state of the computer consists of the instruction pointer (the integer index of the next instruction) and the memory.
type ComputerState = (Int, ComputerMemory)  -- (instruction pointer, memory)
-- An instruction maps a computer state to the next computer state.
type Instruction = ComputerState -> ComputerState
-- A program is an array of instructions.
type Program = Array Int Instruction  -- indexed from 0
-- Asembunny instructions are made of an opcode and one or two operands. An operand is either a constant integer or a register value.
data Operand = Const Int | Register String deriving(Show)
-- ## Functions for handling operands
-- `getValue` determines the value of an operand. Access to the computer memory is needed if the operand is a register. If a register has not been written to, its value is zero.
-- Resolve an operand: constants evaluate to themselves; an unwritten
-- register reads as 0.
getValue :: Operand -> ComputerMemory -> Int
getValue operand memory =
    case operand of
        Const value   -> value
        Register name -> Map.findWithDefault 0 name memory
-- `parseOperand` takes a `String` and returns the corresponding `Operand`.
-- +
-- A token starting with a letter names a register; anything else is read
-- as an integer constant.
parseOperand :: String -> Operand
parseOperand text =
    if isLetter (head text)
        then Register text
        else Const (read text)

-- Test if it works
map parseOperand ["-5", "d"] -- [Const (-5), Register "d"]
-- -
-- ## Functions that build the instructions
-- The following functions take one or two `Operands` and return the corresponding `Instruction`, i.e., the function that takes a computer state and returns the new state after the instruction is executed. Note that the instructions for all opcodes except for `jnz` increase the instruction pointer by one.
--
-- `cpy` writes the value of its first `Operand` to the second `Operand`, which must be a register
-- Copy the source value into the target register and advance the
-- instruction pointer by one.
cpy :: Operand -> Operand -> Instruction
cpy source (Register target) (ip, memory) =
    let newValue = getValue source memory
    in (succ ip, Map.insert target newValue memory)
-- `modifyRegister` takes a function that modifies an `Int` and a register. The resulting instruction applies that modification to the register value.
-- Apply `f` to a register's current value (0 if unwritten) and advance
-- the instruction pointer.
modifyRegister :: (Int -> Int) -> Operand -> Instruction
modifyRegister f (Register name) (ip, memory) =
    let oldValue = Map.findWithDefault 0 name memory
    in (succ ip, Map.insert name (f oldValue) memory)
-- `inc` and `dec` use `modifyRegister` to increase and decrease a register value, respectively.
inc :: Operand -> Instruction
inc = modifyRegister succ  -- add one to the register

dec :: Operand -> Instruction
dec = modifyRegister pred  -- subtract one from the register
-- `jnz` adds its first `Operand` to the instruction pointer, unless its second operand is zero.
-- Jump by `offset` when `condition` is non-zero; otherwise fall through
-- to the next instruction.
jnz :: Operand -> Operand -> Instruction
jnz condition offset (ip, memory)
    | getValue condition memory /= 0 = (ip + getValue offset memory, memory)
    | otherwise = (succ ip, memory)
-- ## Parsing instructions
-- `parseInstruction` takes a line of asembunny code and returns the corresponding `Instruction`.
parseInstruction :: String -> Instruction
parseInstruction line
    -- one-operand opcodes (inc, dec)
    | opcode `Map.member` unaryFunctions =
        fromJust (opcode `Map.lookup` unaryFunctions)
        (getSingleOperand operands)
    -- two-operand opcodes (cpy, jnz)
    | opcode `Map.member` binaryFunctions =
        fromJust (opcode `Map.lookup` binaryFunctions)
        (getFirstOperand operands)
        (getSecondOperand operands)
    where
        tokens = words line
        opcode = head tokens
        operands = tail tokens
        -- dispatch tables from opcode text to instruction constructor
        unaryFunctions = Map.fromList [("inc", inc), ("dec", dec)]
        binaryFunctions = Map.fromList [("cpy", cpy), ("jnz", jnz)]
        getSingleOperand [p] = parseOperand p
        getFirstOperand [p1, p2] = parseOperand p1
        getSecondOperand [p1, p2] = parseOperand p2
-- ## Verify that a few parsed instructions behave as expected
-- +
-- Start from ip = 42 with register a = 1, and pair each instruction with
-- the state expected after executing it.
initialState = (42, Map.fromList [("a", 1)])
instructionsAndExpectedStates = [
    ("inc a", (43, Map.fromList [("a", 2)])),
    ("inc b", (43, Map.fromList [("a", 1), ("b", 1)])),
    ("dec c", (43, Map.fromList [("a", 1), ("c", -1)])),
    ("cpy a b", (43, Map.fromList [("a", 1), ("b", 1)])),
    ("cpy -5 d", (43, Map.fromList [("a", 1), ("d", -5)])),
    ("jnz a 10", (52, Map.fromList [("a", 1)])),
    ("jnz b 10", (43, Map.fromList [("a", 1)]))]

-- Parse and run each instruction on the initial state...
finalStates = map ((\f -> f initialState) . parseInstruction . fst) instructionsAndExpectedStates
expectedStates = map snd instructionsAndExpectedStates
-- ...and verify every result matches its expectation (should be True).
all (uncurry (==)) $ zip finalStates expectedStates
-- -
-- ## Compile an asembunny program
-- Parse every source line and pack the instructions into a 0-based array.
compileProgram :: [String] -> Program
compileProgram sourceLines =
    let instructions = map parseInstruction sourceLines
        lastIndex = length sourceLines - 1
    in listArray (0, lastIndex) instructions
-- ## Continue running a program in a specific computer state
-- This function takes a program, the final value of the instruction pointer that causes the program to terminate, and the current computer state. It returns the state that the computer memory will have after program termination.
-- Step the machine until the instruction pointer reaches `finalIp`,
-- then return the memory.
continueExecution :: Program -> Int -> ComputerState -> ComputerMemory
continueExecution program finalIp (ip, memory)
    | ip == finalIp = memory
    | otherwise =
        let nextState = (program ! ip) (ip, memory)
        in continueExecution program finalIp nextState
-- ## Run a program from the beginning until it terminates
-- The function takes the initial state of the computer memory and a program, and returns the computer memory after program termination.
-- Execute a whole program: start at the first instruction and stop once
-- the instruction pointer moves one past the last index.
runProgram :: ComputerMemory -> Program -> ComputerMemory
runProgram initialMemory program =
    continueExecution program stopIp (startIp, initialMemory)
  where
    (startIp, lastIp) = bounds program
    stopIp = succ lastIp
-- ## Compile the given program
program = compileProgram <$> inputLines  -- compile once; reused for both parts
-- ## Part 1: Run the program with uninitialized memory and read the value from register `"a"`
Map.lookup "a" . runProgram Map.empty <$> program
-- ## Part 2: Set the value of register `"c"` to 1
Map.lookup "a" . runProgram (Map.singleton "c" 1) <$> program
| 2016/day12-haskell.ipynb |