code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
# Load the Tropical Atmosphere Ocean (TAO) 2006 observations.
trop_atmos = pd.read_csv('TAO_2006.csv')
trop_atmos.head()
# # A1- Dataset Size
# (rows, columns) of the raw dataset.
A1= trop_atmos.shape
print('The total number of rows and columns are ' + str(A1))
# # A2- Max/Min values in each column
# Column extremes. NOTE(review): the sentinel values used for missing readings
# (-9.99 for PREC, -99.9 for the others -- see A4 below) will surface as the
# minima here, so the printed minimums are not real measurements.
A2a = trop_atmos['PREC'].max()
print('The maximum value of Precipitation is ' + str(A2a))
A2b = trop_atmos['PREC'].min()
print('The minimum value of Precipitation is ' + str(A2b))
A2c = trop_atmos['AIRT'].max()
print('The maximum value of Air Temperature is ' + str(A2c))
A2d = trop_atmos['AIRT'].min()
print('The minimum value of Air Temperature is ' + str(A2d))
A2e = trop_atmos['SST'].max()
print('The maximum value of Sea Surface Temperature is ' + str(A2e))
A2f = trop_atmos['SST'].min()
print('The minimum value of Sea Surface Temperature is ' + str(A2f))
A2g = trop_atmos['RH'].max()
print('The maximum value of Relative Humidity is ' + str(A2g))
A2h = trop_atmos['RH'].min()
print('The minimum value of Relative Humidity is ' + str(A2h))
# # A3- Number of records in each month
# Parse YYYYMMDD integers (e.g. 20060131) into datetimes, then derive a Month column.
trop_atmos['YYYYMMDD'] = pd.to_datetime(trop_atmos['YYYYMMDD'].astype(str), format = '%Y%m%d')
trop_atmos.head()
trop_atmos['Month']=trop_atmos['YYYYMMDD'].dt.month
trop_atmos.head()
trop_atmos['Month'].value_counts()
# # A4-Missing Values
# Sentinel values mark missing readings: -9.99 for PREC, -99.9 for the others.
column= ['PREC','SST', 'AIRT', 'RH']
cond1= trop_atmos['PREC']== -9.99
cond2= trop_atmos['SST']== -99.9
cond3= trop_atmos['AIRT']==-99.9
cond4= trop_atmos['RH']== -99.9
# A row is treated as missing if ANY of the four variables carries its sentinel.
cond= cond1|cond2|cond3|cond4
trop_atmos[cond].count()
# Month distribution of complete rows ...
Set = trop_atmos[~cond]
Set['Month'].value_counts()
# ... and of rows with at least one missing value (note: Set is rebound here).
Set = trop_atmos[cond]
Set['Month'].value_counts()
# Drop the rows with missing values. NOTE(review): since Set was sliced from
# trop_atmos, D is simply Set.index -- the intersection is redundant but harmless.
D= set(trop_atmos.index).intersection(Set.index)
trop_atmos= trop_atmos.drop(D, axis=0)
trop_atmos.shape
trop_atmos.head()
# # A5. Investigating Sea surface temperature (SST) in different months
import matplotlib.pyplot as plt
# %matplotlib inline
# A5: distribution of sea-surface temperature per month.
trop_atmos.boxplot(column = 'SST', by = 'Month')
# # A6. Exploring precipitation measurements (PREC)
#
# Fix: the dataframe has no 'Timestamp' column -- the parsed datetime lives in
# 'YYYYMMDD' (created in A3) -- so plotting against 'Timestamp' raised a KeyError.
plt.scatter(trop_atmos['PREC'], trop_atmos['YYYYMMDD'])
plt.xlabel('PREC')
plt.ylabel('Date')
plt.show()
# Zero out implausible precipitation readings (above 50 or below 0),
# treating them as sensor artefacts.
trop_atmos.loc[trop_atmos['PREC'] > 50, 'PREC'] = 0
trop_atmos.loc[trop_atmos['PREC'] < 0, 'PREC'] = 0
plt.scatter(trop_atmos['PREC'], trop_atmos['YYYYMMDD'])
plt.xlabel('PREC')
plt.ylabel('Date')
plt.show()
# # A7. Relationship between variables
import seaborn as sb
sb.pairplot(trop_atmos[['PREC', 'AIRT', 'SST']])
# +
import matplotlib.lines as mlines
# Fix: LinearRegression and numpy were used in this cell before being imported
# (their imports only appeared in later cells), so a fresh top-to-bottom run
# raised NameError here.
from sklearn.linear_model import LinearRegression
import numpy as np
# Ordinary least-squares fit of relative humidity against air temperature.
model = LinearRegression()
x=trop_atmos['AIRT']
y=trop_atmos['RH']
# sklearn expects 2-D inputs: reshape the Series into column vectors.
x = x[:, np.newaxis]
y = y[:, np.newaxis]
model.fit(x, y)
plt.scatter(x, y)
y_pred = model.predict(x)
plt.plot(x, y_pred, color='r')
plt.xlabel('AIRT')
plt.ylabel('RH')
plt.show()
# -
# Encode the quality flag as a binary target: HighQ -> 1, LowQ -> 0.
trop_atmos.loc[trop_atmos['Q'] == 'HighQ', 'Q'] = 1
trop_atmos.loc[trop_atmos['Q'] == 'LowQ', 'Q'] = 0
#split dataset in features and target variable
features = ['PREC', 'AIRT', 'SST', 'RH']
X = trop_atmos[features] # Features
y = trop_atmos.Q # Target variable
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state=1) # 75% training and 25% test
from sklearn.linear_model import LinearRegression
# NOTE(review): this linear-regression fit is never used -- clf is rebound to
# the decision tree below before any prediction is made.
clf = LinearRegression()
clf.fit(X_train, y_train)
# +
from sklearn import tree
clf = tree.DecisionTreeClassifier()
clf.fit(X_train, y_train)
# +
from sklearn import metrics
# Accuracy of the decision tree on the held-out 25%.
prediction = clf.predict(X_test)
print("Accuracy:",metrics.accuracy_score(y_test, prediction))
# +
from sklearn.metrics import confusion_matrix
print(confusion_matrix(y_test,prediction))
# NOTE(review): the counts below are hard-coded from one particular run of the
# confusion matrix; they will not match if the data or random_state changes.
print("Accuracy:",(1+8663)/(1+8+12+8663))
# -
# # A9. Investigating daily relative humidity (RH)
#
# Day of month (1-31) extracted from the parsed date column.
trop_atmos['Day']=trop_atmos['YYYYMMDD'].dt.day
trop_atmos
# +
from sklearn.linear_model import LinearRegression
import numpy as np
x=trop_atmos['Day']
y=trop_atmos['RH']
# Reshape to the 2-D column vectors sklearn expects.
x = x[:, np.newaxis]
y = y[:, np.newaxis]
# Fit RH against day-of-month and overlay the least-squares line on the scatter.
model = LinearRegression()
model.fit(x, y)
y_pred = model.predict(x)
plt.scatter(x, y)
plt.plot(x, y_pred, color='r')
plt.show()
# -
| Assignment 2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # HPO with dask-ml and cuml
#
# ## Introduction
#
#       [Hyperparameter optimization](https://cloud.google.com/ai-platform/training/docs/hyperparameter-tuning-overview) is the task of picking the values for the hyperparameters of the model that provide the optimal results for the problem, as measured on a specific test dataset. This is often a crucial step and can help boost the model accuracy when done correctly. Cross-validation is often used to more accurately estimate the performance of the models in the search process. Cross-validation is the method of splitting the training set into complementary subsets and performing training on one of the subsets, then predicting the models performance on the other. This is a potential indication of how the model will generalise to data it has not seen before.
#
# Despite its theoretical importance, HPO has been difficult to implement in practical applications because of the resources needed to run so many distinct training jobs.
#
# The two approaches that we will be exploring in this notebook are :
#
#
# #### 1. GridSearch
#
#       As the name suggests, the "search" is done over each possible combination in a grid of parameters that the user provides. The user must manually define this grid. For each parameter that needs to be tuned, a set of values are given and the final grid search is performed with tuples having one element from each set, thus resulting in a Cartesian Product of the elements.
#
#      For example, assume we want to perform HPO on XGBoost. For simplicity lets tune only `n_estimators` and `max_depth`
#
#      `n_estimators: [50, 100, 150]`
#
#      `max_depth: [6, 7, 8]`
#
#       The grid search will take place over |n_estimators| x |max_depth| which is 3 x 3 = 9. As you have probably guessed, the grid size grows rapidly as the number of parameters and their search space increases.
#
# #### 2. RandomSearch
#
#
#       [Random Search](http://www.jmlr.org/papers/volume13/bergstra12a/bergstra12a.pdf) replaces the exhaustive nature of the search from before with a random selection of parameters over the specified space. This method can outperform GridSearch in cases where the number of parameters affecting the model's performance is small (low-dimension optimization problems). Since this does not pick every tuple from the cartesian product, it tends to yield results faster, and the performance can be comparable to that of the Grid Search approach. It's worth keeping in mind that the random nature of this search means, the results with each run might differ.
#
# Some of the other methods used for HPO include:
#
# 1. Bayesian Optimization
#
# 2. Gradient-based Optimization
#
# 3. Evolutionary Optimization
#
# To learn more about HPO, some papers are linked to at the end of the notebook for further reading.
#
# Now that we have a basic understanding of what HPO is, let's discuss what we wish to achieve with this demo. The aim of this notebook is to show the importance of hyper parameter optimisation and the performance of dask-ml GPU for xgboost and cuML-RF.
#
# For this demo, we will be using the [Airline dataset](http://kt.ijs.si/elena_ikonomovska/data.html). The aim of the problem is to predict the arrival delay. It has about 116 million entries with 13 attributes that are used to determine the delay for a given airline. We have modified this problem to serve as a binary classification problem to determine if the airline will be delayed (True) or not.
#
# Let's get started!
import warnings
warnings.filterwarnings('ignore') # Reduce number of messages/warnings displayed
# +
import time
import numpy as np
import cupy as cp
import pandas as pd
import cudf
import cuml
import rmm
import xgboost as xgb
import sklearn.model_selection as sk
import dask_ml.model_selection as dcv
from dask.distributed import Client, wait
from dask_cuda import LocalCUDACluster
from sklearn import datasets
from sklearn.metrics import make_scorer
from sklearn.metrics import accuracy_score as sk_acc
from cuml.neighbors import KNeighborsClassifier
from cuml.ensemble import RandomForestClassifier
from cuml.preprocessing.model_selection import train_test_split
from cuml.metrics.accuracy import accuracy_score
import os
from urllib.request import urlretrieve
import gzip
# -
# ### Spinning up a CUDA Cluster
#
# We start a local cluster and keep it ready for running distributed tasks with dask.
#
#
# [LocalCUDACluster](https://github.com/rapidsai/dask-cuda) launches one Dask worker for each GPU in the current systems. It's developed as a part of the RAPIDS project.
# Learn More:
# - [Setting up Dask](https://docs.dask.org/en/latest/setup.html)
# - [Dask Client](https://distributed.dask.org/en/latest/client.html)
# +
cluster = LocalCUDACluster(dashboard_address="127.0.0.1:8005")
client = Client(cluster)
client
# -
# ## Data Preparation
#
# We download the Airline dataset and save it to the local directory specified by `data_dir` and `file_name`. In this step, we also want to convert the input data into appropriate dtypes. For this, we will use the `prepare_dataset` function
data_dir = '~/rapids_hpo/data/'
file_name = 'airlines.orc'
orc_name = os.path.join(data_dir, file_name)
def prepare_dataset():
    """Download (if needed) and load the airline ORC dataset as a cudf DataFrame.

    Returns a cudf DataFrame with the label column 'ArrDelayBinary' first,
    followed by the feature columns, with all columns cast to float32.
    """
    # NOTE(review): file_path is never assigned anywhere visible; only data_dir
    # is actually read from module scope. Also, '~' in data_dir is never
    # expanded (no os.path.expanduser), so the path check/creation operates on
    # a literal '~' directory -- TODO confirm intended behaviour.
    global file_path, data_dir
    url = 'https://rapids-csp.s3-us-west-2.amazonaws.com/data/airline_20000000.orc'
    if os.path.isfile(orc_name):
        print(f" > File already exists. Ready to load at {orc_name}")
    else:
        # Ensure folder exists
        os.makedirs(data_dir, exist_ok=True)
        # Progress callback for urlretrieve; prints every 1000 blocks.
        def data_progress_hook(block_number, read_size, total_filesize):
            if (block_number % 1000) == 0:
                print(
                    f" > percent complete: { 100 * ( block_number * read_size ) / total_filesize:.2f}\r",
                    end="",
                )
            return
        urlretrieve(
            url= url,
            filename=orc_name,
            reporthook=data_progress_hook,
        )
        print(f" > Download complete {file_name}")
    input_cols = ["Year", "Month", "DayofMonth", "DayofWeek", "CRSDepTime", "CRSArrTime",
                  "UniqueCarrier", "FlightNum", "ActualElapsedTime", "Origin", "Dest",
                  "Distance", "Diverted"]
    dataset = cudf.read_orc(orc_name)
    # encode categoricals as numeric
    for col in dataset.select_dtypes(["object"]).columns:
        dataset[col] = dataset[col].astype("category").cat.codes.astype(np.int32)
    # cast all columns to float32 (cuML random forest requires float input)
    for col in dataset.columns:
        dataset[col] = dataset[col].astype(np.float32) # needed for random forest
    # put target/label column first [ classic XGBoost standard ]
    output_cols = ["ArrDelayBinary"] + input_cols
    dataset = dataset.reindex(columns=output_cols)
    return dataset
df = prepare_dataset()
import time
from contextlib import contextmanager
# Helping time blocks of code
@contextmanager
def timed(txt):
    """Context manager that prints the wall-clock time spent inside its body,
    labelled with *txt* (right-aligned to 32 characters)."""
    start = time.time()
    yield
    elapsed = time.time() - start
    print("%32s time: %8.5f" % (txt, elapsed))
# Define some default values to make use of across the notebook for a fair comparison
N_FOLDS = 5  # cross-validation folds used by every search
N_ITER = 25  # parameter settings sampled by RandomizedSearchCV
label = 'ArrDelayBinary'  # name of the binary target column
# ## Splitting Data
#
# We split the data randomly into train and test sets using the [cuml train_test_split](https://rapidsai.github.io/projects/cuml/en/0.12.0/api.html#cuml.preprocessing.model_selection.train_test_split) and create CPU versions of the data.
# 80/20 split of the cudf dataframe; 'label' names the target column.
X_train, X_test, y_train, y_test = train_test_split(df, label,
                                                    test_size=0.2)
# +
# CPU-side copies of the splits for the scikit-learn / xgboost code paths.
X_cpu = X_train.to_pandas()
y_cpu = y_train.to_array()
X_test_cpu = X_test.to_pandas()
y_test_cpu = y_test.to_array()
# -
# ## Setup Custom cuML scorers
#
# The search functions (such as GridSearchCV) for scikit-learn and dask-ml expect the metric functions (such as accuracy_score) to match the “scorer” API. This can be achieved using the scikit-learn's [make_scorer](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html) function.
#
# We will generate a `cuml_scorer` with the cuML `accuracy_score` function. You'll also notice an `accuracy_score_wrapper` which primarily converts the y label into a `float32` type. This is because some cuML models only accept this type for now and in order to make it compatible, we perform this conversion.
#
# We also create helper functions for performing HPO in 2 different modes:
# 1. `gpu-grid`: Perform GPU based GridSearchCV
# 2. `gpu-random`: Perform GPU based RandomizedSearchCV
# +
def accuracy_score_wrapper(y, y_hat):
    """
    A wrapper function to convert labels to float32,
    and pass it to accuracy_score.
    Params:
    - y: The y labels that need to be converted
    - y_hat: The predictions made by the model
    """
    y = y.astype("float32") # cuML RandomForest needs the y labels to be float32
    return accuracy_score(y, y_hat, convert_dtype=True)
# Scorer objects consumable by dask-ml's GridSearchCV / RandomizedSearchCV.
accuracy_wrapper_scorer = make_scorer(accuracy_score_wrapper)
cuml_accuracy_scorer = make_scorer(accuracy_score, convert_dtype=True)
# -
def do_HPO(model, gridsearch_params, scorer, X, y, mode='gpu-grid', n_iter=10):
    """
    Perform HPO based on the mode specified.

    mode: default 'gpu-grid'. The possible options are:
    1. gpu-grid: Perform GPU based GridSearchCV
    2. gpu-random: Perform GPU based RandomizedSearchCV
    n_iter: specified with the random option for the number of parameter
        settings sampled.
    Returns the best estimator and the results of the search; (None, None)
    when an unknown mode is given.
    """
    # Fix: the default used to be 'gpu-Grid', which matches neither of the
    # case-sensitive branches below, so calling do_HPO without an explicit
    # mode silently fell through to the error branch.
    if mode == 'gpu-grid':
        print("gpu-grid selected")
        clf = dcv.GridSearchCV(model,
                               gridsearch_params,
                               cv=N_FOLDS,
                               scoring=scorer)
    elif mode == 'gpu-random':
        print("gpu-random selected")
        clf = dcv.RandomizedSearchCV(model,
                                     gridsearch_params,
                                     cv=N_FOLDS,
                                     scoring=scorer,
                                     n_iter=n_iter)
    else:
        print("Unknown Option, please choose one of [gpu-grid, gpu-random]")
        return None, None
    res = clf.fit(X, y)
    print("Best clf and score {} {}\n---\n".format(res.best_estimator_, res.best_score_))
    return res.best_estimator_, res
def print_acc(model, X_train, y_train, X_test, y_test, mode_str="Default"):
    """
    Trains a model on the train data provided, and prints the accuracy of the trained model.
    mode_str: User specifies what model it is to print the value
    """
    # fit + predict in one chained call; the model is mutated in place.
    y_pred = model.fit(X_train, y_train).predict(X_test)
    # cuML accuracy_score; labels cast to float32 (cuML models require it).
    score = accuracy_score(y_pred, y_test.astype('float32'), convert_dtype=True)
    print("{} model accuracy: {}".format(mode_str, score))
X_train.shape
# ## Launch HPO
#
# We will first see the model's performances without the gridsearch and then compare it with the performance after searching.
#
# ### XGBoost
#
# To perform the Hyperparameter Optimization, we make use of the sklearn version of the [XGBClassifier](https://xgboost.readthedocs.io/en/latest/python/python_api.html#module-xgboost.sklearn).We're making use of this version to make it compatible and easily comparable to the scikit-learn version. The model takes a set of parameters that can be found in the documentation. We're primarily interested in the `max_depth`, `learning_rate`, `min_child_weight`, `reg_alpha` and `num_round` as these affect the performance of XGBoost the most.
#
# Read more about what these parameters are useful for [here](https://xgboost.readthedocs.io/en/latest/parameter.html)
#
# #### Default Performance
#
# We first use the model with it's default parameters and see the accuracy of the model. In this case, it is 84%
# +
model_gpu_xgb_ = xgb.XGBClassifier(tree_method='gpu_hist')
print_acc(model_gpu_xgb_, X_train, y_cpu, X_test, y_test_cpu)
# -
# #### Parameter Distributions
#
# The way we define the grid to perform the search is by including ranges of parameters that need to be used for the search. In this example we make use of [np.arange](https://docs.scipy.org/doc/numpy/reference/generated/numpy.arange.html) which returns an ndarray of evenly spaced values, [np.logspace](https://docs.scipy.org/doc/numpy/reference/generated/numpy.logspace.html#numpy.logspace) returns a specified number of samples that are equally spaced on the log scale. We can also specify values as lists or NumPy arrays, or make use of any random variate sample that gives a sample when called. SciPy provides various functions for this too.
# +
# For xgb_model
model_gpu_xgb = xgb.XGBClassifier(tree_method='gpu_hist')
# More range
params_xgb = {
"max_depth": np.arange(start=3, stop = 15, step = 3), # Default = 6
"alpha" : np.logspace(-3, -1, 5), # default = 0
"learning_rate": [0.05, 0.1, 0.15], #default = 0.3
"min_child_weight" : np.arange(start=2, stop=10, step=3), # default = 1
"n_estimators": [100, 200, 1000]
}
# -
# #### RandomizedSearchCV
#
# We'll now try [RandomizedSearchCV](https://dask-ml.readthedocs.io/en/latest/modules/generated/dask_ml.model_selection.RandomizedSearchCV.html).
# `n_iter` specifies the number of parameter points that the search needs to perform. Here we will search `N_ITER` (defined earlier) points for the best performance.
# +
mode = "gpu-random"
with timed("XGB-"+mode):
res, results = do_HPO(model_gpu_xgb,
params_xgb,
cuml_accuracy_scorer,
X_train,
y_cpu,
mode=mode,
n_iter=N_ITER)
print("Searched over {} parameters".format(len(results.cv_results_['mean_test_score'])))
# -
print_acc(res, X_train, y_cpu, X_test, y_test_cpu, mode_str=mode)
# +
mode = "gpu-grid"
with timed("XGB-"+mode):
res, results = do_HPO(model_gpu_xgb,
params_xgb,
cuml_accuracy_scorer,
X_train,
y_cpu,
mode=mode)
print("Searched over {} parameters".format(len(results.cv_results_['mean_test_score'])))
# -
print_acc(res, X_train, y_cpu, X_test, y_test_cpu, mode_str=mode)
# ### Improved performance
#
# There's a 5% improvement in the performance.
#
# We notice that performing grid search and random search yields similar performance improvements even though random search used just 25 combination of parameters. We will stick to performing Random Search for the rest of the notebook with RF with the assumption that there will not be a major difference in performance if the ranges are large enough.
# ### Visualizing the Search
#
# Let's plot some graphs to get an understanding how the parameters affect the accuracy. The code for these plots are included in `cuml/experimental/hyperopt_utils/plotting_utils.py`
# #### Mean/Std of test scores
#
# We fix all parameters except one for each of these graphs and plot the effect the parameter has on the mean test score with the error bar indicating the standard deviation
from cuml.experimental.hyperopt_utils import plotting_utils
plotting_utils.plot_search_results(results)
# #### Heatmaps
# - Between parameter pairs (we can do a combination of all possible pairs, but only one are shown in this notebook)
# - This gives a visual representation of how the pair affect the test score
df_gridsearch = pd.DataFrame(results.cv_results_)
plotting_utils.plot_heatmap(df_gridsearch, "param_max_depth", "param_n_estimators")
# ## RandomForest
#
# Let's use RandomForest Classifier to perform a hyper-parameter search. We'll make use of the cuml RandomForestClassifier and visualize the results using heatmap.
# +
## Random Forest
model_rf_ = RandomForestClassifier()
params_rf = {
"max_depth": np.arange(start=3, stop = 15, step = 2), # Default = 6
"max_features": [0.1, 0.50, 0.75, 'auto'], #default = 0.3
"n_estimators": [100, 200, 500, 1000]
}
for col in X_train.columns:
X_train[col] = X_train[col].astype('float32')
y_train = y_train.astype("int32")
# -
print("Default acc: ",accuracy_score(model_rf_.fit(X_train, y_train).predict(X_test), y_test))
# +
mode = "gpu-random"
model_rf = RandomForestClassifier()
with timed("RF-"+mode):
res, results = do_HPO(model_rf,
params_rf,
cuml_accuracy_scorer,
X_train,
y_train,
mode=mode,
n_iter = N_ITER)
print("Searched over {} parameters".format(len(results.cv_results_['mean_test_score'])))
# -
print("Improved acc: ",accuracy_score(res.predict(X_test), y_test))
# +
df_gridsearch = pd.DataFrame(results.cv_results_)
plotting_utils.plot_heatmap(df_gridsearch, "param_max_depth", "param_n_estimators")
# -
# ## Conclusion and Next Steps
#
# We notice improvements in the performance for a really basic version of the GridSearch and RandomizedSearch. Generally, the more data we use, the better the model performs, so you are encouraged to try for larger data and broader range of parameters.
#
# This experiment can also be repeated with different classifiers and different ranges of parameters to notice how HPO can help improve the performance metric. In this example, we have chosen a basic metric - accuracy, but you can use more interesting metrics that help in determining the usefulness of a model. You can even send a list of parameters to the scoring function. This makes HPO really powerful, and it can add a significant boost to the model that we generate.
#
#
# #### Further Reading
#
# - [The 5 Classification Evaluation Metrics You Must Know](https://towardsdatascience.com/the-5-classification-evaluation-metrics-you-must-know-aa97784ff226)
# - [11 Important Model Evaluation Metrics for Machine Learning Everyone should know](https://www.analyticsvidhya.com/blog/2019/08/11-important-model-evaluation-error-metrics/)
# - [Algorithms for Hyper-Parameter Optimisation](http://papers.nips.cc/paper/4443-algorithms-for-hyper-parameter-optimization.pdf)
# - [Forward and Reverse Gradient-Based Hyperparameter Optimization](http://proceedings.mlr.press/v70/franceschi17a/franceschi17a-supp.pdf)
# - [Practical Bayesian Optimization of Machine
# Learning Algorithms](http://papers.nips.cc/paper/4522-practical-bayesian-optimization-of-machine-learning-algorithms.pdf)
# - [Random Search for Hyper-Parameter Optimization](http://jmlr.csail.mit.edu/papers/volume13/bergstra12a/bergstra12a.pdf)
| dask/notebooks/HPO_demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Aula 1 - Introdução Machine Learning
#
# ## Algoritmo K-NN
#
# ### Import Section
import scipy as sp
import pandas as pd
import numpy as np
import sklearn as sk
# ### Importar a base
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_digits
# ### Carregando a base de dados e mostrando formato
# Load the digits dataset: X holds the pixel features, y the digit labels.
X,y = load_digits(return_X_y=True)
print(X.shape)
# ### Training and evaluation via *Holdout*
# carve off the held-out test set (30%), stratified so each class keeps its share
X_train, X_test, y_train, y_test=train_test_split(X, y, test_size=.3, random_state=42, stratify=y)
# Declare the classifier model
# clf == classifier
clf=KNeighborsClassifier(n_neighbors=3)
# train the model
clf=clf.fit(X_train, y_train)
# compute accuracy on the test set
score=clf.score(X_test, y_test)
# model predictions
predicted=clf.predict(X_test)
# compute the confusion matrix
matrix=confusion_matrix(y_test, predicted)
print('-'*100)
print(f'Predicted:\n{predicted}')
print('-'*100)
print(f'Matriz de confusão:\n{matrix}')
print('-'*100)
print(f'Score: {score}')
| aula1/aula1.ipynb |
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .java
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: iSetlX
// language: SetlX
// name: isetlx
// ---
// This program computes pi using a monte carlo algorithm which uses random()
//
// Essentially it creates random points in a coordinate system,
// with 0 <= x <=1 and 0 <= y <=1 and computes if they are within a unit circle
// which is centered at 0,0.
//
// number of points within this circle divided by all points is around a
// quarter of pi
//
// +
// how many random points to create?
n := 50000;
// number of points found to be in the circle
inCircle := 0;
for (x in [1 .. n]) {
// is this random point in circle? (distance from the origin <= 1)
if ( sqrt(random() ** 2 + random() ** 2) <= 1) {
inCircle += 1;
}
}
// -
// the in-circle fraction approximates pi/4, so scale by 4
pseudoPi := 4 * inCircle / n;
print("pi := $ pseudoPi $ (or $ nDecimalPlaces(pseudoPi, 5) $), which is almost $mathConst(\"pi\")$");
| example_notebooks/montecarlo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/chiamaka249/IgboNER/blob/main/Copy_of_igbobert4.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/"} id="4amslz4Oo9vT" outputId="3b118b5b-51e6-4982-8c71-b00374991657"
# !wget -c https://github.com/IgnatiusEzeani/IGBONLP/raw/master/ig_monoling/text.zip
# !wget -c https://raw.githubusercontent.com/chiamaka249/lacuna_pos_ner/main/language_corpus/ibo/ibo.txt
# + id="qsSkvlYoplYP"
import zipfile
import os
def unzip(zipfilename):
    """Extract *zipfilename* into a sibling directory named after the archive.

    Returns a confirmation string on success; prints a message and returns
    None when the archive does not exist.
    """
    # Fix: strip the extension robustly with splitext instead of blindly
    # chopping the last four characters (which corrupted the target name for
    # any file not ending in a 4-character suffix like '.zip').
    target_dir = os.path.splitext(zipfilename)[0]
    try:
        with zipfile.ZipFile(zipfilename, 'r') as zip_ref:
            zip_ref.extractall(target_dir)
        return f"'{zipfilename}' unzipped!"
    except FileNotFoundError:
        print(f"Cannot find '{zipfilename}' file")
unzip("text.zip")
# !rm text.zip
# + id="ua0R2p31p8zA"
#copies the file "ibo.txt" into the folder "text"
import shutil
newPath = shutil.copy('/content/ibo.txt', '/content/text')
# + id="fj4i2-9bp4U3"
# import os
#import shutil
# Concatenate every file under /content/text into a single data.txt corpus,
# then delete the staging folder.
# NOTE(review): rmtree uses the relative path "text" while files are read from
# the absolute "/content/text"; this only matches when the working directory
# is /content (the Colab default) -- confirm before running elsewhere.
dir_name = "/content/text"
text=""
for fname in os.listdir(dir_name):
    fname = os.path.join(dir_name, fname)
    with open(fname, "r", encoding="utf8") as datafile:
        text = text+"\n"+datafile.read()
with open("data.txt", "w", encoding="utf8") as datafile:
    datafile.write(text)
shutil.rmtree("text")
# + colab={"base_uri": "https://localhost:8080/"} id="QLhNvJubEowT" outputId="5c86f6d5-bde6-4d74-ff69-88a209205aef"
# We won't need TensorFlow here
# !pip uninstall -y tensorflow
# Install `transformers` from master
# !pip install git+https://github.com/huggingface/transformers
# !pip list | grep -E 'transformers|tokenizers'
# transformers version at notebook update --- 2.11.0
# tokenizers version at notebook update --- 0.8.0rc1
# + colab={"base_uri": "https://localhost:8080/"} id="TyraD86RE3QK" outputId="de1d258d-4a7d-452e-cb99-7218105c29e5"
# %%time
from pathlib import Path
from tokenizers import ByteLevelBPETokenizer
paths = [str(x) for x in Path(".").glob("**/*.txt")]
# Initialize a tokenizer
tokenizer = ByteLevelBPETokenizer()
# Customize training
tokenizer.train(files=paths, vocab_size=52_000, min_frequency=2, special_tokens=[
"<s>",
"<pad>",
"</s>",
"<unk>",
"<mask>",
])
# + colab={"base_uri": "https://localhost:8080/"} id="ro52g8BqFFfr" outputId="8e8fd11d-d8db-45e2-d550-3883c8ce310d"
# !mkdir igbo_bert4
tokenizer.save_model("igbo_bert4")
# + id="FekvedLrFR_t"
from tokenizers.implementations import ByteLevelBPETokenizer
from tokenizers.processors import BertProcessing
tokenizer = ByteLevelBPETokenizer(
"./igbo_bert4/vocab.json",
"./igbo_bert4/merges.txt",
)
# + id="E52LgvLWFbuq"
tokenizer._tokenizer.post_processor = BertProcessing(
("</s>", tokenizer.token_to_id("</s>")),
("<s>", tokenizer.token_to_id("<s>")),
)
tokenizer.enable_truncation(max_length=512)
# + colab={"base_uri": "https://localhost:8080/"} id="1Nm7h9ndFis2" outputId="193ffc74-b42f-44f7-d200-ccfa2cf52738"
tokenizer.encode("Simone gara ụka ụnyahụ guọ egwu ma ga-kwa taa.").tokens
# + colab={"base_uri": "https://localhost:8080/"} id="IxcYhcxRKROn" outputId="d0b8b9a3-1e8b-4a15-91bb-1ef8881fea4b"
# Check that we have a GPU
# !nvidia-smi
# + colab={"base_uri": "https://localhost:8080/"} id="XMe5vRLvG5Zb" outputId="4ba91ed3-3a8d-4f89-aff2-2196f09d0076"
# Check that PyTorch sees it
import torch
torch.cuda.is_available()
# + id="ZUtbKfFgG-Y3"
from transformers import RobertaConfig
config = RobertaConfig(
vocab_size=52_000,
max_position_embeddings=514,
num_attention_heads=12,
num_hidden_layers=6,
type_vocab_size=1,
)
# + id="OhCRKUJR65iJ"
#from google.colab import files
#files.upload()
# + id="ZIqujlqjHQnX"
from transformers import RobertaTokenizerFast
tokenizer = RobertaTokenizerFast.from_pretrained("./igbo_bert4", max_len=512)
# + id="Jvup7wl8Hhp9"
from transformers import RobertaForMaskedLM
model = RobertaForMaskedLM(config=config)
# + colab={"base_uri": "https://localhost:8080/"} id="yyWUosTlHnRE" outputId="5e63c5f0-6bc2-41ba-9eb7-1bdae9e34364"
model.num_parameters()
# => 83 million parameters
# + colab={"base_uri": "https://localhost:8080/"} id="hujpbn1oHp-x" outputId="40c89d1d-e273-423c-ec36-494490ba9b4b"
# %%time
from transformers import LineByLineTextDataset
dataset = LineByLineTextDataset(
tokenizer = tokenizer,
file_path = "/content/data.txt",
block_size = 128
)
# + id="EICtSzqwH618"
from transformers import DataCollatorForLanguageModeling
data_collator = DataCollatorForLanguageModeling(
tokenizer=tokenizer, mlm=True, mlm_probability=0.15
)
# + id="kalucrRPH9wb"
from transformers import Trainer, TrainingArguments
# Training configuration for masked-LM pre-training.
# Fix: per_gpu_train_batch_size is deprecated and removed in recent
# transformers releases; per_device_train_batch_size is the supported
# equivalent (same semantics: batch size per GPU/device).
training_args = TrainingArguments(
    output_dir="./igbo_bert4",
    overwrite_output_dir=True,
    num_train_epochs=5,
    per_device_train_batch_size=64,
    save_steps=10_000,
    save_total_limit=2,
    prediction_loss_only=True,
)
trainer = Trainer(
    model=model,
    args=training_args,
    data_collator=data_collator,
    train_dataset=dataset,
)
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="vCWG7CRZIOkb" outputId="82e3f24f-26f3-4338-d87b-a991910e94d4"
# %%time
trainer.train()
# + id="dkwVjpAyIVu9" colab={"base_uri": "https://localhost:8080/"} outputId="1ad165c7-1501-49dd-892f-628dddbe2b72"
trainer.save_model("./igbo_bert4")
# + id="ZYx9FK7ZIYZs" colab={"base_uri": "https://localhost:8080/"} outputId="83c58459-1e02-4069-864e-b4136d535792"
from transformers import pipeline
fill_mask = pipeline(
"fill-mask",
model="./igbo_bert4",
tokenizer="./igbo_bert4"
)
# + id="lQK6jyf9IkRY" colab={"base_uri": "https://localhost:8080/"} outputId="8ee07d65-4f62-4d0b-f886-3bf27c643ab8"
# The sun <mask>.
# =>
# Mask-filling sanity checks for the trained Igbo BERT model.  Each call feeds
# the `fill_mask` pipeline an Igbo sentence with one `<mask>` token; the
# trailing `#=` comment records the answer the author expects.
fill_mask("Abụ m Maazị <mask>.") #= okafor/Ọkafọ
# fill_mask("Nwaanyị na <mask> ji na akara.") #=eri
# + id="D0mje0nMIoWX" colab={"base_uri": "https://localhost:8080/"} outputId="773f5d2f-5d16-4c7f-825f-308271b7e9ac"
# The sun <mask>.
# =>
fill_mask("Nwaanyị na <mask> ji na akara.") #= eri
# fill_mask("Nwaanyị na <mask> ji na akara.") #=eri
# + id="Sr9wjE8PItpE" colab={"base_uri": "https://localhost:8080/"} outputId="fc319676-dbcb-41a1-8bf4-4d0c551a66b0"
# The sun <mask>.
# =>
fill_mask("Chineke ga- ebibikwa ndị niile na- eme ihe <mask>.") #=ọjọọ
# fill_mask("Nwaanyị na <mask> ji na akara.") #=eri
# + id="ubR7pCxJIyNR" colab={"base_uri": "https://localhost:8080/"} outputId="a14301b9-55f0-4744-b66e-b16f582e086f"
fill_mask("ọba akwụkwọ Ọkammụta Kenneth Dike dị <mask>.") #n'Awka
# This is the beginning of a beautiful <mask>.
# =>
# + colab={"base_uri": "https://localhost:8080/"} id="cgveMBQYNZV7" outputId="7048656e-bc72-4c7f-dfc3-a5ab727a82fb"
# The sun <mask>.
# =>
fill_mask("Nwaanyị na eri <mask> na akara.") #= ji
# fill_mask("Nwaanyị na <mask> ji na akara.") #=eri
# + colab={"base_uri": "https://localhost:8080/"} id="VRbvsFIyNpID" outputId="17053230-b060-42bb-aac8-8fb116cef776"
# The sun <mask>.
# =>
fill_mask("Ọ bụ <mask>a ka a na- arịa .") #= mmadụ
# fill_mask("Nwaanyị na <mask> ji na akara.") #=eri
# + id="W6O-Q8D7g5eY"
import shutil
# + id="hKOLSz2YI4Rv" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="7e771839-14ef-4956-b021-3d815ef2ad60"
# Zip the saved model directory so it can be copied out of the Colab VM.
shutil.make_archive("/content/igbo_bert4", 'zip', "igbo_bert4")
# + colab={"base_uri": "https://localhost:8080/"} id="Hddc-BufgXzf" outputId="a4ead454-8ab4-48ca-f58c-ecbf54a1c363"
from google.colab import drive
drive.mount('/content/gdrive')
# + id="vfeHNovnVuNP"
# NOTE(review): `model_save_name` begins with '/', so the f-string below yields
# "My Drive//igbo_bert4.zip" (a harmless double slash, but confirm the intended
# destination).  Also note that this writes the raw `state_dict` under a `.zip`
# name -- it is NOT the archive created by `shutil.make_archive` above.
model_save_name = '/igbo_bert4.zip'
path = F"/content/gdrive/My Drive/{model_save_name}"
torch.save(model.state_dict(), path)
# + id="SX7kbLAcJJ6I" colab={"base_uri": "https://localhost:8080/", "height": 310} outputId="de0575e6-7467-48f4-b5c4-6319e410f243"
# Download the zipped model archive to the local machine via the Colab helper.
from google.colab import files
files.download("/content/igbo_bert4.zip")
| Copy_of_igbobert4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] papermill={"exception": false, "start_time": "2020-10-29T03:01:40.288812", "end_time": "2020-10-29T03:01:40.303848", "duration": 0.015036, "status": "completed"} tags=[]
# # Time Series
# + papermill={"exception": false, "start_time": "2020-10-29T03:01:40.316815", "end_time": "2020-10-29T03:01:42.747896", "duration": 2.431081, "status": "completed"} tags=[]
import pandas as pd
import matplotlib.pyplot as plt
import data_describe as dd
from data_describe.core.time import plot_autocorrelation, stationarity_test
# + papermill={"exception": false, "start_time": "2020-10-29T03:01:42.759857", "end_time": "2020-10-29T03:01:43.151022", "duration": 0.391165, "status": "completed"} tags=[]
# Load the daily female births dataset (one row per calendar day).
df = pd.read_csv("https://raw.githubusercontent.com/jbrownlee/Datasets/master/daily-total-female-births.csv")
# The `Date` column holds ISO date strings, so no `unit=` is passed:
# `unit` is only meaningful for numeric epoch values and was misleading here.
df['Date'] = pd.to_datetime(df.Date)
# Derived column used to demonstrate multi-series plotting below.
df['Births_Multiplier'] = df['Births'] * 1.16
df.set_index("Date", inplace=True)
df.head(2)
# + [markdown] papermill={"exception": false, "start_time": "2020-10-29T03:01:43.164001", "end_time": "2020-10-29T03:01:43.177034", "duration": 0.013033, "status": "completed"} tags=[]
# ## Plot time series
# + papermill={"exception": false, "start_time": "2020-10-29T03:01:43.189023", "end_time": "2020-10-29T03:01:43.956994", "duration": 0.767971, "status": "completed"} tags=[]
# Static (seaborn/matplotlib) line plot of a single column.
dd.plot_time_series(df, col="Births")
# + [markdown] papermill={"exception": false, "start_time": "2020-10-29T03:01:43.972994", "end_time": "2020-10-29T03:01:43.989994", "duration": 0.017, "status": "completed"} tags=[]
# ## Plot interactive time series
# + papermill={"exception": false, "start_time": "2020-10-29T03:01:44.006043", "end_time": "2020-10-29T03:01:48.354060", "duration": 4.348017, "status": "completed"} tags=[]
# Passing a list of columns plots multiple series; viz_backend switches to plotly.
dd.plot_time_series(df, col=["Births","Births_Multiplier"], viz_backend="plotly" )
# + [markdown] papermill={"exception": false, "start_time": "2020-10-29T03:01:48.384058", "end_time": "2020-10-29T03:01:48.412088", "duration": 0.02803, "status": "completed"} tags=[]
# ## Plot decomposition
# + papermill={"exception": false, "start_time": "2020-10-29T03:01:48.440111", "end_time": "2020-10-29T03:01:50.828061", "duration": 2.38795, "status": "completed"} tags=[]
# decompose=True splits the series into trend / seasonal / residual components.
dd.plot_time_series(df, col="Births", decompose=True)
# + papermill={"exception": false, "start_time": "2020-10-29T03:01:50.860061", "end_time": "2020-10-29T03:01:51.144089", "duration": 0.284028, "status": "completed"} tags=[]
dd.plot_time_series(df, col="Births", decompose=True, viz_backend="plotly")
# + [markdown] papermill={"exception": false, "start_time": "2020-10-29T03:01:51.195066", "end_time": "2020-10-29T03:01:51.245058", "duration": 0.049992, "status": "completed"} tags=[]
# ## Perform Stationarity Tests
# + papermill={"exception": false, "start_time": "2020-10-29T03:01:51.296063", "end_time": "2020-10-29T03:01:51.619060", "duration": 0.322997, "status": "completed"} tags=[]
# Augmented Dickey-Fuller test for stationarity of the Births series.
stationarity_test(df, col='Births', test="dickey-fuller")
# + [markdown] papermill={"exception": false, "start_time": "2020-10-29T03:01:51.673060", "end_time": "2020-10-29T03:01:51.760063", "duration": 0.087003, "status": "completed"} tags=[]
# ## Plot ACF
# + papermill={"exception": false, "start_time": "2020-10-29T03:01:51.818060", "end_time": "2020-10-29T03:01:52.753062", "duration": 0.935002, "status": "completed"} tags=[]
# Use seaborn by default
plot_autocorrelation(df, col='Births', plot_type="acf")
# + [markdown] papermill={"exception": false, "start_time": "2020-10-29T03:01:52.804062", "end_time": "2020-10-29T03:01:52.855090", "duration": 0.051028, "status": "completed"} tags=[]
# ## Plot PACF
# + papermill={"exception": false, "start_time": "2020-10-29T03:01:52.913066", "end_time": "2020-10-29T03:01:53.016101", "duration": 0.103035, "status": "completed"} tags=[]
# Partial autocorrelation, truncated to the first 10 lags, rendered with plotly.
plot_autocorrelation(df, col="Births", plot_type="pacf", n_lags=10, viz_backend="plotly")
| examples/Time_Series.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Data structures
# ## Nested Lists and Dictionaries
# In research programming, one of our most common tasks is building an appropriate *structure* to model our complicated
# data. Later in the course, we'll see how we can define our own types, with their own attributes, properties, and methods. But probably the most common approach is to use nested structures of lists, dictionaries, and sets to model our data. For example, an address might be modelled as a dictionary with appropriately named fields:
# Two addresses modelled as dictionaries with identical keys.
UCL = {"City": "London", "Street": "Gower Street", "Postcode": "WC1E 6BT"}
James = {"City": "London", "Street": "Waterson Street", "Postcode": "E2 8HH"}
# A collection of people's addresses is then a list of dictionaries:
addresses = [UCL, James]
addresses
# A more complicated data structure, for example for a census database, might have a list of residents or employees at each address:
# Note: the list holds *references* to the same dicts, so mutating UCL/James
# below is visible through `addresses` too.
UCL["people"] = ["Clare", "James", "Owain"]
James["people"] = ["Sue", "James"]
addresses
# Which is then a list of dictionaries, with keys which are strings or lists.
# We can go further, e.g.:
UCL["Residential"] = False
# And we can write code against our structures:
# Pick the first person listed at each address.
leaders = [place["people"][0] for place in addresses]
leaders
# This was an example of a 'list comprehension', which we have used to get data out of this structure, and which we'll see more of in a moment...
# ## Exercise: a Maze Model.
# Work with a partner to design a data structure to represent a maze using dictionaries and lists.
# * Each place in the maze has a name, which is a string.
# * Each place in the maze has one or more people currently standing at it, by name.
# * Each place in the maze has a maximum capacity of people that can fit in it.
# * From each place in the maze, you can go from that place to a few other places, using a direction like 'up', 'north',
# or 'sideways'
# Create an example instance, in a notebook, of a simple structure for your maze:
# * The front room can hold 2 people. James is currently there. You can go outside to the garden, or upstairs to the bedroom, or north to the kitchen.
# * From the kitchen, you can go south to the front room. It fits 1 person.
# * From the garden you can go inside to front room. It fits 3 people. Sue is currently there.
# * From the bedroom, you can go downstairs to the front room. You can also jump out of the window to the garden. It fits 2 people.
# Make sure that your model:
#
# * Allows empty rooms
# * Allows you to jump out of the upstairs window, but not to fly back up.
# * Allows rooms which people can't fit in.
# ```python
# house = [ "Your answer here" ]
# ```
| module01_introduction_to_python/01_07_data_structures.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="V8-yl-s-WKMG"
# # Object Detection Demo
# Welcome to the object detection inference walkthrough! This notebook will walk you step by step through the process of using a pre-trained model to detect objects in an image. Make sure to follow the [installation instructions](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/installation.md) before you start.
# + [markdown] colab_type="text" id="kFSqkTCdWKMI"
# # Imports
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="hV4P5gyTWKMI"
import numpy as np
import os
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile
import cv2
import time
from distutils.version import StrictVersion
from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image
# This is needed since the notebook is stored in the object_detection folder.
sys.path.append("..")
from utils import ops as utils_ops
# The code below uses the TF1 graph-mode API (tf.Graph / tf.Session /
# tf.GraphDef), hence the explicit minimum-version guard.
if StrictVersion(tf.__version__) < StrictVersion('1.9.0'):
    raise ImportError('Please upgrade your TensorFlow installation to v1.9.* or later!')
# + [markdown] colab_type="text" id="Wy72mWwAWKMK"
# ## Env setup
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="v7m_NY_aWKMK"
# This is needed to display the images.
# %matplotlib inline
# + [markdown] colab_type="text" id="r5FNuiRPWKMN"
# ## Object detection imports
# Here are the imports from the object detection module.
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="bm0_uNRnWKMN"
from utils import label_map_util
from utils import visualization_utils as vis_util
# + [markdown] colab_type="text" id="cfn_tRFOWKMO"
# # Model preparation
# + [markdown] colab_type="text" id="X_sEBLpVWKMQ"
# ## Variables
#
# Any model exported using the `export_inference_graph.py` tool can be loaded here simply by changing `PATH_TO_FROZEN_GRAPH` to point to a new .pb file.
#
# By default we use an "SSD with Mobilenet" model here. See the [detection model zoo](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md) for a list of other models that can be run out-of-the-box with varying speeds and accuracies.
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="VyPz_t8WWKMQ"
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_FROZEN_GRAPH = './inference_graph/frozen_inference_graph.pb'
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = './object-detection.pbtxt'
# + [markdown] colab_type="text" id="YBcB9QHLWKMU"
# ## Load a (frozen) Tensorflow model into memory.
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="KezjCRVvWKMV"
# Deserialize the frozen GraphDef from disk and import it into a fresh Graph.
detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')
# + [markdown] colab_type="text" id="_1MVVTcLWKMW"
# ## Loading label map
# Label maps map indices to category names, so that when our convolution network predicts `5`, we know that this corresponds to `airplane`. Here we use internal utility functions, but anything that returns a dictionary mapping integers to appropriate string labels would be fine
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="hDbpHkiWWKMX"
# Maps integer class ids -> {'id': ..., 'name': ...} dicts for visualization.
category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=True)
print(category_index)
# + [markdown] colab_type="text" id="EFsoUHvbWKMZ"
# ## Helper code
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="aSlYc3JkWKMa"
def load_image_into_numpy_array(image):
    """Convert a PIL image into an (height, width, 3) uint8 numpy array.

    Assumes the image has three channels (RGB) -- the reshape fixes the
    trailing dimension to 3.
    """
    width, height = image.size
    pixels = np.array(image.getdata())
    return pixels.reshape((height, width, 3)).astype(np.uint8)
# + [markdown] colab_type="text" id="H0_1AGhrWKMc"
# # Detection
# +
# Load the Tensorflow model into memory.
detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')
    # Persistent session reused for every frame in annotate_video below.
    sess = tf.Session(graph=detection_graph)
# Define input and output tensors (i.e. data) for the object detection classifier
# Input tensor is the image
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Output tensors are the detection boxes, scores, and classes
# Each box represents a part of the image where a particular object was detected
detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represents level of confidence for each of the objects.
# The score is shown on the result image, together with the class label.
detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
# Number of objects detected
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
def annotate_video(video_path, write=False):
    """Run the loaded detection model on every frame of a video.

    Each frame is converted to RGB, passed through the TF session, annotated
    with boxes/labels, and displayed.  Press 'q' in the display window to stop.

    Args:
        video_path: path of the video file to read.
        write: when True, also write annotated frames to 'output.avi'.

    Uses the module-level ``sess``, ``image_tensor``, ``detection_*`` and
    ``category_index`` objects prepared above.
    """
    video = cv2.VideoCapture(video_path)
    # The writer is created lazily once the first frame's size is known:
    # the original hard-coded 640x480, and OpenCV silently drops frames whose
    # size does not match the writer.
    out = None
    while video.isOpened():
        # Check the read() status explicitly instead of relying on cvtColor
        # raising on a None frame at end-of-stream.
        ret, frame = video.read()
        if not ret:
            break
        # Model expects RGB input of shape [1, H, W, 3].
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        frame_expanded = np.expand_dims(frame, axis=0)
        try:
            # Perform the actual detection by running the model with the image as input
            (boxes, scores, classes, num) = sess.run(
                [detection_boxes, detection_scores, detection_classes, num_detections],
                feed_dict={image_tensor: frame_expanded})
            # Draw the results of the detection onto the frame in place.
            vis_util.visualize_boxes_and_labels_on_image_array(
                frame,
                np.squeeze(boxes),
                np.squeeze(classes).astype(np.int32),
                np.squeeze(scores),
                category_index,
                use_normalized_coordinates=True,
                line_thickness=4,
                min_score_thresh=0.6)
        except Exception as e:
            # Best effort: show the raw frame even if inference failed.
            print('Error during inference', e)
        # Convert back to BGR for OpenCV display/writing.
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
        cv2.imshow('Object detector', frame)
        # Press 'q' to quit
        if cv2.waitKey(1) == ord('q'):
            break
        if write:
            if out is None:
                fourcc = cv2.VideoWriter_fourcc(*'XVID')
                height, width = frame.shape[:2]
                out = cv2.VideoWriter('output.avi', fourcc, 20.0, (width, height))
            out.write(frame)
    # Clean up
    if out is not None:
        out.release()
    video.release()
    cv2.destroyAllWindows()
# -
# Run the detector on a sample clip and also write 'output.avi' (write=True).
annotate_video('./video10.mp4', True)
| object_detection_tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# 
# <hr style="margin-bottom: 40px;">
#
# # The Sakila Database
#
# One of the best example databases out there is the <a href="https://dev.mysql.com/doc/sakila/en/">Sakila Database</a>, which was originally created by MySQL and has been open sourced under the terms of the BSD License.
#
# The Sakila database is a nicely normalised schema modelling a DVD rental store, featuring things like films, actors, film-actor relationships, and a central inventory table that connects films, stores, and rentals.
#
# <img width="1200px" src="https://user-images.githubusercontent.com/7065401/58504872-fa243b00-8161-11e9-85ed-4b7d8d7ce9f7.png" />
# 
#
# ## Hands on!
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import sqlite3
# %matplotlib inline
# -
# 
#
# ## Loading our data:
# +
conn = sqlite3.connect('data/sakila.db')
df = pd.read_sql('''
SELECT
rental.rental_id, rental.rental_date, rental.return_date,
customer.last_name AS customer_lastname,
store.store_id,
city.city AS rental_store_city,
film.title AS film_title, film.rental_duration AS film_rental_duration,
film.rental_rate AS film_rental_rate, film.replacement_cost AS film_replacement_cost,
film.rating AS film_rating
FROM rental
INNER JOIN customer ON rental.customer_id == customer.customer_id
INNER JOIN inventory ON rental.inventory_id == inventory.inventory_id
INNER JOIN store ON inventory.store_id == store.store_id
INNER JOIN address ON store.address_id == address.address_id
INNER JOIN city ON address.city_id == city.city_id
INNER JOIN film ON inventory.film_id == film.film_id
;
''', conn, index_col='rental_id', parse_dates=['rental_date', 'return_date'])
# -
# 
#
# ## The data at a glance:
df.head()
df.shape
df.info()
df.describe()
# 
#
# ## Numerical analysis and visualization
#
# We'll analyze the `film_rental_rate` column:
# Summary statistics for the per-film rental price.
df['film_rental_rate'].describe()
df['film_rental_rate'].mean()
df['film_rental_rate'].median()
df['film_rental_rate'].plot(kind='box', vert=False, figsize=(14,6))
df['film_rental_rate'].plot(kind='density', figsize=(14,6)) # kde
# Rental rates take only a few discrete values, so a bar chart of counts works well.
ax = df['film_rental_rate'].value_counts().plot(kind='bar', figsize=(14,6))
ax.set_ylabel('Number of Rentals')
# ![separator1](https://user-images.githubusercontent.com/7065401/92672404-cbab8880-f2ee-11ea-99a0-c40a53b25eb6.png)
#
# ## Categorical analysis and visualization
#
# We'll analyze the `rental_store_city` column:
df['rental_store_city'].value_counts()
df['rental_store_city'].value_counts().plot(kind='pie', figsize=(6,6))
ax = df['rental_store_city'].value_counts().plot(kind='bar', figsize=(14,6))
ax.set_ylabel('Number of Rentals')
# ![separator1](https://user-images.githubusercontent.com/7065401/92672404-cbab8880-f2ee-11ea-99a0-c40a53b25eb6.png)
#
# ## Column wrangling
#
# We can also create new columns or modify existing ones.
#
# ### Add and calculate a new `rental_rate_return` column
#
# We want to know the rental rate of return of each film. To do that we'll use this formula:
#
# $$ rental\_gain\_return = \frac{film\_rental\_rate}{film\_replacement\_cost} * 100 $$
# +
# Percentage of a film's replacement cost recovered by a single rental.
df['rental_gain_return'] = df['film_rental_rate'] / df['film_replacement_cost'] * 100
df['rental_gain_return'].head()
# -
df['rental_gain_return'].plot(kind='density', figsize=(14,6))
df['rental_gain_return'].mean().round(2)
df['rental_gain_return'].median().round(2)
# Density plot with mean (red) and median (green) reference lines.
ax = df['rental_gain_return'].plot(kind='density', figsize=(14,6)) # kde
ax.axvline(df['rental_gain_return'].mean(), color='red')
ax.axvline(df['rental_gain_return'].median(), color='green')
# > Each rental represents <b>13.6%</b> of film cost.
# So <b>7.35</b> rentals are needed to recover film market price (`film_replacement_cost`)
100 / 13.6
# While on average each film is rented <b>16.74</b> times.
df['film_title'].value_counts().mean()
# ![separator1](https://user-images.githubusercontent.com/7065401/92672404-cbab8880-f2ee-11ea-99a0-c40a53b25eb6.png)
#
# ## Selection & Indexing:
# ### Get the rental records of the customer with lastname `HANSEN`
df.loc[df['customer_lastname'] == 'HANSEN']
# ### Create a list of all the films with the highest replacement cost
df['film_replacement_cost'].max()
df.loc[df['film_replacement_cost'] == df['film_replacement_cost'].max(), 'film_title'].unique()
# ### How many `PG` or `PG-13` rating films were rented?
df.loc[(df['film_rating'] == 'PG') | (df['film_rating'] == 'PG-13')].shape[0]
# 
| Lecture_2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Fashion MNIST Pixel by Pixel
# Re-purposes the [MNIST Digit Pixel by Pixel]() notebook for the purposes of plotting an example Fashion-MNIST image.
# [](https://colab.research.google.com/github/the-deep-learners/deep-learning-illustrated/blob/master/notebooks/fashion_mnist_pixel_by_pixel.ipynb)
# #### Load dependencies
from matplotlib import pyplot as plt
from keras.datasets import fashion_mnist
import numpy as np
# #### Load data
(X_train, y_train), (X_valid, y_valid) = fashion_mnist.load_data()
# #### Sample an image
# sample = np.random.randint(0, X_train.shape[0])
# Fixed sample index so the notebook output is reproducible.
sample = 39235
# #### Plot digit
# +
plt.figure(figsize = (10,10))
mnist_img = X_train[sample]
plt.imshow(mnist_img,cmap="Greys")
ax = plt.gca()
# First turn off the major labels, but not the major ticks
plt.tick_params(
    axis='both',       # changes apply to the both x and y axes
    which='major',     # Change the major ticks only
    bottom=True,       # ticks along the bottom edge are on
    left=True,         # ticks along the top edge are on
    labelbottom=False, # labels along the bottom edge are off
    labelleft=False)   # labels along the left edge are off
# Next turn off the minor ticks, but not the minor labels
plt.tick_params(
    axis='both',       # changes apply to both x and y axes
    which='minor',     # Change the minor ticks only
    bottom=False,      # ticks along the bottom edge are off
    left=False,        # ticks along the left edge are off
    labelbottom=True,  # labels along the bottom edge are on
    labelleft=True)    # labels along the left edge are on
# Set the major ticks, starting at 1 (the -0.5 tick gets hidden off the canvas)
# Major ticks sit on pixel *boundaries* (offset -0.5) so the grid outlines pixels.
ax.set_xticks(np.arange(-.5, 28, 1))
ax.set_yticks(np.arange(-.5, 28, 1))
# Set the minor ticks and labels
# Minor ticks sit on pixel *centers* and carry the 0..27 index labels.
ax.set_xticks(np.arange(0, 28, 1), minor=True);
ax.set_xticklabels([str(i) for i in np.arange(0, 28, 1)], minor=True);
ax.set_yticks(np.arange(0, 28, 1), minor=True);
ax.set_yticklabels([str(i) for i in np.arange(0, 28, 1)], minor=True);
ax.grid(color='black', linestyle='-', linewidth=1.5)
_ = plt.colorbar(fraction=0.046, pad=0.04, ticks=[0,32,64,96,128,160,192,224,255])
# -
# #### Confirm image label
y_train[sample]
| notebooks/fashion_mnist_pixel_by_pixel.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Let's dive deeper (without killing the planet) !
# (Widely inspired from https://keras.io/guides/transfer_learning/)
#
# In this tutorial, we experiment an end-to-end transfer learning application for a classification task. We have seen in the lecture that transfer learning is especially interesting when our dataset is small. We can this way benefit from the features learned on a similar problem that has a bigger dataset.
#
# Our goal is to learn a "cats vs dogs" classifier, given a very small dataset composed of 500 images of each for training and 200 for validation. (If you have not already downloaded the data, you can find them here: https://filesender.renater.fr/?s=download&token=<PASSWORD>
# Then unzip the file in ./data/)
#
# We will first train a baseline network from scratch. Then, we will use transfer learning by loading a very deep architecture, a ResNet18, that has been pretrained on the ImageNet dataset that contains millions of images. More specifically, we will use the convolutional part of the ResNet as a feature extractor, and train a single fully connected layer as classifier. The transfer learning workflow is then:
# 1. Load the pretrained version of the model
# 2. Freeze the weights of the model to keep the information they have learned on ImageNet
# 3. Replace the original last FC layer with a one (or more) adapted to our problem
# 4. Train this(ese) new layer(s) on our very small dataset
# ## Prepare the environment
# +
import os
import torch
from torch.nn import functional as F
from torch.utils.data import DataLoader, random_split, Dataset
from torchvision.datasets.folder import pil_loader
from torchvision.models import resnet18
import pytorch_lightning as pl
import torchvision
from torchvision import transforms
from torchmetrics import Accuracy
import numpy as np
import matplotlib.pyplot as plt
# +
# Define computing device
use_cuda = True
# NOTE: torch.cuda.is_available must be *called*.  The bare function object is
# always truthy, so the original check reported a GPU unconditionally.
if torch.cuda.is_available() and use_cuda:
    print('We have GPU !')
    gpu = 1
else:
    print('We will use CPU')
    gpu = 0
# -
# Fix random seed for reproducibility
pl.trainer.seed_everything(0)
# Exercise placeholder: replace `?` with your name (as a string) -- the bare
# `?` is intentionally not valid Python until filled in.
student_name = ?
# ## Collect and prepare the data
#
# As "Cats vs Dogs" is not a standard torchvision dataset, we have to create our own custom dataset class.
#
# The train and test images are in separated folders. Besides, the class of each image is included in its name.
# Walk the dataset root to inspect the layout: separate train/ and test/
# folders, with the class encoded in each file name (e.g. 'cat.0.jpg').
for directory, folders, files in os.walk('./data/cats_dogs_light/'):
    print(directory)
    print(folders)
    print(files[:6])
# We can therefore use the same class to load the train dataset object and the test dataset object.
#
# The custom dataset class inherits from torch.utils.data.Dataset, and has 3 mandatory methods: \_\_init\_\_, \_\_len\_\_ and \_\_getitem\_\_
#
# * In \_\_init\_\_ we need to fetch the data folder (train or test) to list the images.
#
# * \_\_len\_\_ returns the length of the dataset
#
# * \_\_getitem\_\_ returns one sample (one image and its label)
def get_image_list(path):
    """Return the sorted list of file names directly inside *path*.

    Sub-directories are skipped.  The result is sorted so that dataset
    indexing is deterministic across runs and platforms: ``os.listdir``
    order is arbitrary, and the intermediate ``set`` used by the original
    implementation scrambled it further.
    """
    return sorted(
        file for file in os.listdir(path)
        if os.path.isfile(os.path.join(path, file))
    )
class CatsDogsDataset(Dataset):
    """Binary cats-vs-dogs image dataset.

    Image files are expected directly under *data_path* and named
    ``<class>.<anything>`` where ``<class>`` is 'cat' or 'dog'; the label
    (0 for cat, 1 for dog) is parsed from the file name.
    """

    def __init__(self, data_path='cats_dogs_light/train', transform=None):
        self.data_path = data_path
        self.transform = transform
        self.classes = {'cat': 0, 'dog': 1}
        self.data_list = get_image_list(data_path)

    def __len__(self):
        return len(self.data_list)

    def __getitem__(self, idx):
        file_name = self.data_list[idx]
        sample = pil_loader(os.path.join(self.data_path, file_name))
        # Class name is the token before the first '.' in the file name.
        target = self.classes[file_name.split('.')[0]]
        if self.transform is not None:
            sample = self.transform(sample)
        return sample, target
class CatsDogsDataModule(pl.LightningDataModule):
    """Lightning data module bundling the cats/dogs train and test loaders."""

    def __init__(
        self,
        batch_size: int = 8,
    ):
        super().__init__()
        self.batch_size = batch_size
        self.classes = ('cat', 'dog')
        # ImageNet channel statistics: required because the images are fed to
        # a model pretrained on ImageNet.
        imagenet_norm = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                             std=[0.229, 0.224, 0.225])
        # Training pipeline: light augmentation, then resize/crop to the
        # 224x224 input size expected by the pretrained network.
        self.train_set = CatsDogsDataset(
            data_path='./data/cats_dogs_light/train',
            transform=transforms.Compose([
                transforms.RandomHorizontalFlip(),
                transforms.RandomRotation(0.1),
                transforms.Resize(224),
                transforms.CenterCrop(224),
                transforms.ToTensor(),
                imagenet_norm,
            ]))
        # Validation pipeline: deterministic resize/crop only, same normalization.
        self.test_set = CatsDogsDataset(
            data_path='./data/cats_dogs_light/test',
            transform=transforms.Compose([
                transforms.Resize(224),
                transforms.CenterCrop(224),
                transforms.ToTensor(),
                imagenet_norm,
            ]))

    def train_dataloader(self):
        return DataLoader(self.train_set, batch_size=self.batch_size,
                          num_workers=4, pin_memory=True, shuffle=True)

    def val_dataloader(self):
        return DataLoader(self.test_set, batch_size=self.batch_size,
                          num_workers=4, pin_memory=True)
# ### Let's visualize some images
def imshow(inp, title=None):
    """Display a normalized (C, H, W) image tensor with matplotlib."""
    img = inp.numpy().transpose((1, 2, 0))  # CHW -> HWC for matplotlib
    # Undo the ImageNet normalization applied by the dataloader transforms.
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    img = np.clip(std * img + mean, 0, 1)
    plt.figure(figsize=(12, 4))
    plt.imshow(img)
    if title is not None:
        plt.title(title)
    plt.pause(0.001)  # brief pause so the plot window refreshes
# +
data_module = CatsDogsDataModule()
# Grab one batch to visualize.  NOTE(review): despite the original "training
# data" wording, this pulls from the *validation* dataloader.
inputs, classes = next(iter(data_module.val_dataloader()))
# Make a grid from batch
out = torchvision.utils.make_grid(inputs)
imshow(out, title=[data_module.classes[x] for x in classes])
# -
# # Exercises
# 1. Implement and train our VGG like architecture (from tutorial 4) as a baseline. Play a bit with the learning rate. Can you improve the performance of the model ?
# 2. Load a pretrained ResNet18 model. Freeze its weights, replace the last fully connected layer to match the classification case "Cats vs Dogs".
# 3. Train the modified ResNet18. How does the performance improve compared to the baseline ? What is the impact of transfer learning on the number of trainable parameters and on the training time ?
# ## 1. Implement and train a neural network baseline from scratch
# As a baseline to evaluate the improvement brought by transfer learning, we implement and train the same VGG like architecture as in T4.
class BaselineClassifier(pl.LightningModule):
    """VGG-like baseline classifier for cats-vs-dogs (exercise 1).

    The ``?`` in ``__init__`` is an exercise placeholder: replace it with the
    VGG-like ``nn.Sequential`` model from tutorial 4.
    """
    def __init__(
        self,
        learning_rate: float = 0.0001,
    ):
        super().__init__()
        self.save_hyperparameters()
        self.learning_rate = learning_rate
        self.model = ?
        self.accuracy = Accuracy()
    def forward(self, x):
        # use forward for inference/predictions
        x = self.model(x)
        x = F.softmax(x, dim=1) # Transform network outputs into probabilities
        return x
    def training_step(self, batch, batch_idx):
        # Function called by Lightning for each iteration
        x, y = batch
        y_hat = self.model(x)
        loss = F.cross_entropy(y_hat, y) # Includes a LogSoftmax and the NegativeLogLikelihood loss
        acc = self.accuracy(F.softmax(y_hat, dim=1), y)
        self.log('Loss', {'train_loss': loss}, on_step=False, on_epoch=True)
        self.log('Accuracy', {'train_accuracy': acc}, on_step=False, on_epoch=True)
        return loss
    def validation_step(self, batch, batch_idx):
        x, y = batch
        y_hat = self.model(x)
        loss = F.cross_entropy(y_hat, y) # Includes a LogSoftmax and the NegativeLogLikelihood loss
        acc = self.accuracy(F.softmax(y_hat, dim=1), y)
        # NOTE(review): the third positional argument of LightningModule.log
        # is `prog_bar`; passing the loss/acc tensor there looks unintended --
        # confirm against the Lightning version in use.
        self.log('Loss', {'val_loss': loss}, loss)
        self.log('Accuracy', {'val_accuracy': acc}, acc)
    def test_step(self, batch, batch_idx):
        x, y = batch
        y_hat = self.model(x)
        loss = F.cross_entropy(y_hat, y) # Includes a LogSoftmax and the NegativeLogLikelihood loss
        acc = self.accuracy(F.softmax(y_hat, dim=1), y)
        self.log('test_loss', loss)
        self.log('test_accuracy', acc)
    def configure_optimizers(self):
        # self.hparams available because we called self.save_hyperparameters()
        optimizer = torch.optim.Adam(self.parameters(), lr=self.hparams.learning_rate)
        return [optimizer]
# Hyperparameters for the from-scratch baseline run.
learning_rate = 0.001
batch_size = 32
max_epochs = 55
data_module = CatsDogsDataModule(batch_size=batch_size)
net_baseline = BaselineClassifier(learning_rate=learning_rate)
# `gpu` was set in the device-detection cell above.
trainer = pl.Trainer(default_root_dir='./lightning_logs/T5_baseline_' + student_name, gpus=gpu, max_epochs=max_epochs)
# Let Lightning train the network
trainer.fit(net_baseline, data_module)
# ### Let us check the prediction on some test images
# +
# Visual sanity check: predict a few validation images with the baseline model.
n_img = 8
dataiter = iter(data_module.val_dataloader())
# Use the builtin next(): the `.next()` method was removed from PyTorch
# dataloader iterators (and is not part of the Python 3 iterator protocol).
images, labels = next(dataiter)
with torch.no_grad():
    predictions = net_baseline(images)
predicted_labels = torch.argmax(predictions, dim=1)
# show images
imshow(torchvision.utils.make_grid(images[:n_img]))
# print labels
print('Predictions')
print(' '.join('%5s' % data_module.classes[pred_lab] for pred_lab in predicted_labels[:n_img]))
print('True classes')
print(' '.join('%5s' % data_module.classes[true_lab] for true_lab in labels[:n_img]))
# -
# ## 2. Load a pretrained ResNet18
# For transfer learning, we use a ResNet like model of 18 layers called ResNet18. See https://arxiv.org/pdf/1512.03385.pdf for details.
#
# 
#
# ResNets are very deep architectures whose specificity lies in their residual blocks. A skip connection, which adds the input of a block to its output, allows the gradient to flow through the model despite the network depth.
#
# 
#
# ### Some hints on model exploration
# #### How to load a pretrained model
# Download (first run) and instantiate a ResNet18 with ImageNet weights.
model = resnet18(pretrained=True)
model
# #### How to access layers
# Direct children (top-level blocks) only:
for name, child in model.named_children():
    print(name)
# All modules, recursively:
for name, modules in model.named_modules():
    print(name)
# All learnable tensors, with their gradient-tracking flags:
for name, param in model.named_parameters():
    print(name, param.requires_grad)
# #### How to modify a specific layer
model.relu = torch.nn.LeakyReLU()
model
# #### How to modify gradient retaining information
model.layer1[0].conv1.weight.requires_grad
# Setting requires_grad = False freezes the tensor (no gradient is computed).
model.layer1[0].conv1.weight.requires_grad = False
model.layer1[0].conv1.weight.requires_grad
# ### ResNet18 module
# +
class ResNet18PreTrainedClassifier(pl.LightningModule):
    """Transfer-learning classifier (exercise 2).

    The ``?`` placeholders are to be filled in by the student: load a
    pretrained ResNet18, freeze its convolutional weights, and replace the
    final fully connected layer with one matching the 2-class problem.
    """
    def __init__(
        self,
        learning_rate: float = 0.0001,
    ):
        super().__init__()
        self.save_hyperparameters()
        self.learning_rate = learning_rate
        self.model = ?
        # We first freeze the convolutional part of the network
        # ?
        # We replace the final fully connected layer to match our case
        # Only this layer will be trained
        # ?
        self.accuracy = Accuracy()
    def forward(self, x):
        # use forward for inference/predictions
        x = self.model(x)
        x = F.softmax(x, dim=1) # Transform network outputs into probabilities
        return x
    def training_step(self, batch, batch_idx):
        # Function called by Lightning for each iteration
        x, y = batch
        y_hat = self.model(x)
        loss = F.cross_entropy(y_hat, y) # Includes a LogSoftmax and the NegativeLogLikelihood loss
        acc = self.accuracy(F.softmax(y_hat, dim=1), y)
        self.log('Loss', {'train_loss': loss}, on_step=False, on_epoch=True)
        self.log('Accuracy', {'train_accuracy': acc}, on_step=False, on_epoch=True)
        return loss
    def validation_step(self, batch, batch_idx):
        x, y = batch
        y_hat = self.model(x)
        loss = F.cross_entropy(y_hat, y) # Includes a LogSoftmax and the NegativeLogLikelihood loss
        acc = self.accuracy(F.softmax(y_hat, dim=1), y)
        # NOTE(review): the third positional argument of LightningModule.log
        # is `prog_bar`; passing the tensor there looks unintended -- confirm.
        self.log('Loss', {'val_loss': loss}, loss)
        self.log('Accuracy', {'val_accuracy': acc}, acc)
    def test_step(self, batch, batch_idx):
        x, y = batch
        y_hat = self.model(x)
        loss = F.cross_entropy(y_hat, y) # Includes a LogSoftmax and the NegativeLogLikelihood loss
        acc = self.accuracy(F.softmax(y_hat, dim=1), y)
        self.log('test_loss', loss)
        self.log('test_accuracy', acc)
    def configure_optimizers(self):
        # self.hparams available because we called self.save_hyperparameters()
        # Unlike the baseline (Adam), this uses SGD with momentum.
        optimizer = torch.optim.SGD(self.parameters(), lr=self.hparams.learning_rate, momentum=0.9)
        return [optimizer]
# -
# Hyperparameters for the transfer-learning run.
learning_rate = 0.001
batch_size = 32
max_epochs = 30
data_module = CatsDogsDataModule(batch_size=batch_size)
net_module = ResNet18PreTrainedClassifier(learning_rate=learning_rate)
# Let's check that only the last fully connected layer will retain gradient information
for name, param in net_module.model.named_parameters():
    print(name, param.requires_grad)
# +
trainer = pl.Trainer(default_root_dir='./lightning_logs/T5_resnet18_' + student_name, gpus=gpu, max_epochs=max_epochs)
# Log network graph to Tensorboard
# NOTE(review): the dummy input is 32x32 while ResNet18 expects larger images
# (typically 224x224); graph tracing may still work but confirm the data size.
trainer.logger.experiment.add_graph(net_module, torch.rand(batch_size, 3, 32, 32))
# -
# Let Lightning train the network
trainer.fit(net_module, data_module)
# ### Let us check the prediction on some test images
# +
# Visual sanity check: predict a few validation images and compare the
# predicted class names against the ground-truth labels.
# get some random training images
n_img = 8
dataiter = iter(data_module.val_dataloader())
# Fix: `dataiter.next()` was removed from DataLoader iterators in modern
# PyTorch; the builtin next() works on all versions.
images, labels = next(dataiter)
with torch.no_grad():  # inference only — no gradients needed
    predictions = net_module(images)
predicted_labels = torch.argmax(predictions, dim=1)
# show images
imshow(torchvision.utils.make_grid(images[:n_img]))
# print labels
print('Predictions')
print(' '.join('%5s' % data_module.classes[pred_lab] for pred_lab in predicted_labels[:n_img]))
print('True classes')
print(' '.join('%5s' % data_module.classes[true_lab] for true_lab in labels[:n_img]))
# -
# ## Do you need a "solution" ?
# 
# ## Baseline
# ### Hints
# * Beware of the number of output_features of the last FC layer.
# ### Solution
# VGG like network
# Solution snippet: paste into the baseline classifier's __init__ —
# `self` is undefined at notebook top level, this cell is not runnable as-is.
# VGG like network
self.model = torch.nn.Sequential(
    torch.nn.Conv2d(in_channels=3, out_channels=16, kernel_size=3, padding=1),
    torch.nn.LeakyReLU(),
    torch.nn.Conv2d(in_channels=16, out_channels=16, kernel_size=3, padding=1),
    torch.nn.LeakyReLU(),
    torch.nn.MaxPool2d(kernel_size=2, stride=2),  # halve spatial resolution
    torch.nn.Conv2d(in_channels=16, out_channels=32, kernel_size=3, padding=1),
    torch.nn.LeakyReLU(),
    torch.nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3, padding=1),
    torch.nn.LeakyReLU(),
    torch.nn.MaxPool2d(kernel_size=2, stride=2),
    torch.nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, padding=1),
    torch.nn.LeakyReLU(),
    torch.nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, padding=1),
    torch.nn.LeakyReLU(),
    # Global average pooling makes the head independent of input size.
    torch.nn.AdaptiveAvgPool2d(output_size=(1, 1)),
    torch.nn.Flatten(),
    torch.nn.Linear(in_features=64, out_features=2)  # 2 classes: cat / dog
)
# ## Pretrained ResNet18
# ### Hints
# * You can use model.parameters() to access the requires_grad information in a for loop
# * Beware of the number of input_features and output_features of the last FC layer
# ### Solution
# Solution snippet: paste into ResNet18PreTrainedClassifier.__init__ —
# `self` is undefined at notebook top level, this cell is not runnable as-is.
self.model = resnet18(pretrained=True)
# We first freeze the convolutional part of the network
for param in self.model.parameters():
    param.requires_grad = False
# We replace the final fully connected layer to match our case
# Only this layer will be trained
num_ftrs = self.model.fc.in_features
self.model.fc = torch.nn.Linear(in_features=num_ftrs, out_features=2)
| deep-learning/T5_Cats_Dogs_classification_with_ResNet.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (stenv)
# language: python
# name: stenv
# ---
import matplotlib.pyplot as plt
from IPython.display import set_matplotlib_formats
# %matplotlib inline
set_matplotlib_formats('svg')
from io import StringIO
import urllib.parse
import IPython.display
def show(fig, **kwargs):
    """Render a Matplotlib figure as an inline SVG image.

    The figure is serialised to SVG in memory, percent-encoded, and wrapped
    in an IPython Image pointing at a ``data:`` URL, so the notebook embeds
    a crisp vector rendering. Extra keyword arguments are forwarded to
    ``IPython.display.Image``.
    """
    buffer = StringIO()
    fig.savefig(buffer, format='svg')
    buffer.seek(0)
    svg_markup = buffer.getvalue()
    data_url = 'data:image/svg+xml,' + urllib.parse.quote(svg_markup)
    return IPython.display.Image(url=data_url, **kwargs)
# +
# mAP on Pascal VOC detection vs. size of the contrastive anchor pool,
# for several sampling strategies (hand-collected final scores).
anchors = [1, 5, 20]
values_svm = [0.5857498, 0.60177903, 0.60682894]
anchors_gaussian = [1, 5, 20]
values_svm_gaussian = [0.60091833, 0.62493347, 0.6375232]
plt.scatter(anchors, values_svm, label='Angular')
plt.plot(anchors, values_svm)
plt.scatter(anchors_gaussian, values_svm_gaussian, label='Isotropic')
plt.plot(anchors_gaussian, values_svm_gaussian)
plt.scatter([20], [0.65795456], label='Isotropic@500')
plt.plot([20], [0.65795456])
plt.scatter([1, 20], [0.58219526, 0.60711063], label='Independent')
plt.plot([1, 20], [0.58219526, 0.60711063])
plt.scatter([5], [0.60615681], label='Steer')
# Pool size 0 = baselines without a contrastive pool.
plt.scatter([0], [0.53033983], label='Crossentropy')
plt.scatter([0], [0.55979714], label='SupCon')
plt.axhline([0.65641794], label='SupConReal')
plt.legend(fontsize=12, ncol=2)
plt.grid(linestyle='--')
# plt.axhline([0.8595699])
plt.ylabel('mAP', fontsize=16)
plt.xlabel('Size Pool Contrastives', fontsize=16)
plt.title('Pascal VOC Detection', fontsize=18)
plt.xticks(ticks=range(0,22,2), fontsize=12)
plt.yticks(fontsize=12)
# +
# mAP vs. number of gradient updates (log x-axis) for each training variant.
# Each list holds per-checkpoint mAP values; list order matches `nums` below.
supcon_real = [
    0.6063655333,
    0.6446154718,
    0.6453137675,
    0.654741794,
    0.6564177909,
]
supcon_indep = [
    0.547670409270084,
    0.5664245743511127,
    0.5760440951312261,
    0.606638588524784,
    0.6071106984969435,
    0.576891216649673,
    0.5802599499567932,
    0.5979333976436632,
    0.599746573470129,
    0.6161295263564223,
    0.6241358282110723,
    0.6240934939679285
]
supcon_indep_1 = [
    0.5393882328258038,
    0.5555600045474895,
    0.5764531491713961,
    0.5808689189217227,
    0.5821952530798381
]
supcon_gauss_1 = [
    0.5622163706,
    0.5752283247,
    0.5861299932,
    0.5954185358,
    0.6009183763
]
supcon_gauss = [
    0.5650079295,
    0.5969083769,
    0.619808438,
    0.6311508645,
    0.6375233255,
    0.6211229452,
    0.6285350868,
    0.643727578,
    0.6418866488,
    0.6455860237806477,
    0.6575221603,
    0.6580566487,
]
supcon_gauss2 = [
    0.5519023466,
    0.6023912597,
    0.6082569519,
    0.6147282175,
    0.6231331837,
    0.6186075153,
    0.6331750744,
    0.6357582287,
    0.6413984066,
    0.6537223016,
    0.6520260373,
    0.6618719564,
]
supcon_real2 = [
    0.6190221168,
    0.6306371258,
    0.6342437437,
    0.647960946,
    0.650389455,
    0.64823239,
    0.6549618563,
    0.6615362758,
    0.6559774529,
    0.6654496766
]
# Checkpoints every 40 epochs; 507 iterations per epoch — TODO confirm,
# the axvline below uses 509 iterations/epoch instead.
nums = [507*x for x in list(range(40, 500, 40))]
p = plt.plot(nums[:len(supcon_gauss)], supcon_gauss, label='Isotropic-20sample')
plt.plot(nums[:len(supcon_gauss_1)], supcon_gauss_1, label='Isotropic-1sample', color=p[-1].get_color(), linestyle='-.')
plt.plot(nums[:len(supcon_gauss2)], supcon_gauss2, label='Isotropic-20sample@500')
p = plt.plot(nums[:len(supcon_indep)], supcon_indep, label='Independent-20sample')
plt.plot(nums[:len(supcon_indep_1)], supcon_indep_1, label='Independent-1sample', color=p[-1].get_color(), linestyle='-.')
plt.plot(nums[:len(supcon_real)], supcon_real, label='Real')
plt.plot(nums[:len(supcon_real2)], supcon_real2, label='Real@500')
gauss_online = [
    0.5272706046,
    0.5653562989,
    0.5827447695,
    0.60981094,
    0.61517051
]
plt.plot([507*x for x in range(20,101,20)], gauss_online, label='Online@100')
plt.grid(linestyle='--')
plt.axvline([200*509], color='black', linestyle="--")
plt.ylabel('mAP', fontsize=16)
plt.xlabel('Grad updates', fontsize=16)
plt.title("Classification PascalVOC2007", fontsize=16)
plt.legend(ncol=3)
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
plt.xscale('log')
plt.savefig('result.pdf')
# +
# Same comparison as above but on an epoch axis, truncated to the first
# 200 epochs (the [:len(nums)] slices cut every series to 5 checkpoints).
nums = list(range(40, 201, 40))
supcon_real = [
    0.6063655333,
    0.6446154718,
    0.6453137675,
    0.654741794,
    0.6564177909,
][:len(nums)]
supcon_indep = [
    0.547670409270084,
    0.5664245743511127,
    0.5760440951312261,
    0.606638588524784,
    0.6071106984969435,
    0.576891216649673,
    0.5802599499567932,
    0.5979333976436632,
    0.599746573470129,
    0.6161295263564223,
    0.6241358282110723,
    0.6240934939679285
][:len(nums)]
supcon_indep_1 = [
    0.5393882328258038,
    0.5555600045474895,
    0.5764531491713961,
    0.5808689189217227,
    0.5821952530798381
][:len(nums)]
supcon_gauss_1 = [
    0.5622163706,
    0.5752283247,
    0.5861299932,
    0.5954185358,
    0.6009183763
][:len(nums)]
supcon_gauss = [
    0.5650079295,
    0.5969083769,
    0.619808438,
    0.6311508645,
    0.6375233255,
    0.6211229452,
    0.6285350868,
    0.643727578,
    0.6418866488,
    0.6455860237806477,
    0.6575221603,
    0.6580566487,
][:len(nums)]
p = plt.plot(nums[:len(supcon_gauss)], supcon_gauss, label='Isotropic-20sample')
plt.plot(nums[:len(supcon_gauss_1)], supcon_gauss_1, label='Isotropic-1sample', color=p[-1].get_color(), linestyle='-.')
p = plt.plot(nums[:len(supcon_indep)], supcon_indep, label='Independent-20sample')
plt.plot(nums[:len(supcon_indep_1)], supcon_indep_1, label='Independent-1sample', color=p[-1].get_color(), linestyle='-.')
plt.plot(nums[:len(supcon_real)], supcon_real, label='Real')
plt.grid(linestyle='--')
# plt.axvline([200], color='black', linestyle="--")
plt.ylabel('mAP', fontsize=16)
plt.xlabel('Epoch', fontsize=16)
plt.title("Classification PascalVOC2007", fontsize=16)
plt.legend()
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
# plt.savefig('result.pdf')
# -
# +
# Epoch-axis comparison for the @500 runs (full series, up to ~480 epochs).
supcon_real = [
    0.6190221168,
    0.6306371258,
    0.6342437437,
    0.647960946,
    0.650389455,
    0.64823239,
    0.6549618563,
    0.6615362758,
    0.6559774529,
    0.6654496766
]
supcon_indep = [
    0.5498698695,
    0.5546304763,
    0.5624828976,
    0.5804974275,
    0.5823518456,
    0.588479107,
    0.5952730528,
    0.5946586853,
    0.6005861639,
    0.6142297902,
    0.618940419,
    0.6218340221
]
supcon_indep_1 = [
    0.5393882328258038,
    0.5555600045474895,
    0.5764531491713961,
    0.5808689189217227,
    0.5821952530798381
]
supcon_gauss_1 = [
    0.5622163706,
    0.5752283247,
    0.5861299932,
    0.5954185358,
    0.6009183763
]
supcon_gauss = [
    0.5519023466,
    0.6023912597,
    0.6082569519,
    0.6147282175,
    0.6231331837,
    0.6186075153,
    0.6331750744,
    0.6357582287,
    0.6413984066,
    0.6537223016,
    0.6520260373,
    0.6618719564
]
nums = list(range(40, 500, 40))  # checkpoint epochs
p = plt.plot(nums[:len(supcon_gauss)], supcon_gauss, label='Isotropic-20sample')
plt.plot(nums[:len(supcon_gauss_1)], supcon_gauss_1, label='Isotropic-1sample', color=p[-1].get_color(), linestyle='-.')
p = plt.plot(nums[:len(supcon_indep)], supcon_indep, label='Independent-20sample')
plt.plot(nums[:len(supcon_indep_1)], supcon_indep_1, label='Independent-1sample', color=p[-1].get_color(), linestyle='-.')
plt.plot(nums[:len(supcon_real)], supcon_real, label='Real')
plt.grid(linestyle='--')
# plt.axvline([200], color='black', linestyle="--")
plt.ylabel('mAP', fontsize=16)
plt.xlabel('Epoch', fontsize=16)
plt.title("Classification PascalVOC2007", fontsize=16)
plt.legend()
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
# plt.savefig('result.pdf')
# +
# ImageNet100 linear-probe accuracy vs. contrastive pool size (values in %).
anchors = [1, 5, 20]
values_svm = [67.94, 70.3, 70.94]
anchors_gaussian = [20]
values_svm_gaussian = [74.46]
plt.scatter(anchors, values_svm, label='Angular')
plt.plot(anchors, values_svm, linestyle='-')
plt.scatter(anchors_gaussian, values_svm_gaussian, label='Isotropic')
plt.plot([1, 20], [68.08, 70.34])
plt.scatter([20], [76.52], label='Isotropic@500')
plt.plot([20], [76.52])
plt.scatter([1, 20], [68.08, 70.34], label='Independent')
plt.plot([1, 20], [68.08, 70.34])
# Pool size 0 = baselines without a contrastive pool.
plt.scatter([0], [61.92], label='CrossEntropy')
plt.scatter([0], [65.48], label='SupCon')
plt.legend(fontsize=12, ncol=2)
plt.grid(linestyle='--')
plt.title('Imagenet100 Linear', fontsize=18)
plt.ylabel('Accuracy Linear', fontsize=16)
plt.xlabel('Size Pool Contrastives', fontsize=16)
plt.xticks(ticks=range(0,22,2), fontsize=12)
plt.yticks(fontsize=12)
# -
# +
# Online-sampling variant: mAP vs. number of samples seen (log x-axis),
# compared against a single "Real"-data reference point.
gauss_online = [
    0.5272706046,
    0.5653562989,
    0.5827447695,
    0.60981094,
    0.61517051
]
supcon_real = [0.6559803189]
# p = plt.plot(nums[:len(supcon_gauss)], supcon_gauss, label='Isotropic-20sample')
# plt.plot(nums[:len(supcon_gauss_1)], supcon_gauss_1, label='Isotropic-1sample', color=p[-1].get_color(), linestyle='-.')
p = plt.plot([x*130000 for x in range(20,101,20)],gauss_online, label='Isotropic-Online')
plt.scatter([x*130000 for x in range(20,101,20)],gauss_online)
# plt.plot([x*130000 for x in range(20,101,20)], gauss_online, label='Isotropic-Online', color=p[-1].get_color(), linestyle='-.')
# plt.plot(nums[:len(supcon_indep_1)], supcon_indep_1, label='Independent-1sample', color=p[-1].get_color(), linestyle='-.')
plt.plot([130000], supcon_real, label='Real')
plt.scatter([130000], supcon_real)
plt.grid(linestyle='--')
# plt.axvline([200], color='black', linestyle="--")
plt.ylabel('mAP', fontsize=16)
plt.xlabel('#Samples', fontsize=16)
plt.title("Classification PascalVOC2007", fontsize=16)
plt.legend()
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
plt.xscale('log')
# plt.savefig('result.pdf')
# +
# Accuracy vs. isotropic-gaussian std sweep on ImageNet100.
# NOTE(review): the supcon_* lists below are defined but unused in this cell
# (only the iso_std/iso_acc curve is plotted) — kept for reference.
supcon_real = [
    0.6190221168,
    0.6306371258,
    0.6342437437,
    0.647960946,
    0.650389455,
    0.64823239,
    0.6549618563,
    0.6615362758,
    0.6559774529,
    0.6654496766
]
supcon_indep = [
    0.5498698695,
    0.5546304763,
    0.5624828976,
    0.5804974275,
    0.5823518456,
    0.588479107,
    0.5952730528,
    0.5946586853,
    0.6005861639,
    0.6142297902,
    0.618940419,
    0.6218340221
]
supcon_indep_1 = [
    0.5393882328258038,
    0.5555600045474895,
    0.5764531491713961,
    0.5808689189217227,
    0.5821952530798381
]
supcon_gauss_1 = [
    0.5622163706,
    0.5752283247,
    0.5861299932,
    0.5954185358,
    0.6009183763
]
supcon_gauss = [
    0.5519023466,
    0.6023912597,
    0.6082569519,
    0.6147282175,
    0.6231331837,
    0.6186075153,
    0.6331750744,
    0.6357582287,
    0.6413984066,
    0.6537223016,
    0.6520260373,
    0.6618719564
]
iso_std = [0.05, 0.1, 0.2, 0.3, 0.4]
iso_acc = [60.22, 63.18, 65.46, 65.42, 64.42]
p = plt.plot(iso_std, iso_acc)
plt.scatter(iso_std, iso_acc)
# p = plt.plot(nums[:len(supcon_gauss)], supcon_gauss, label='Isotropic-20sample')
# plt.plot(nums[:len(supcon_gauss_1)], supcon_gauss_1, label='Isotropic-1sample', color=p[-1].get_color(), linestyle='-.')
# p = plt.plot(nums[:len(supcon_indep)], supcon_indep, label='Independent-20sample')
# plt.plot(nums[:len(supcon_indep_1)], supcon_indep_1, label='Independent-1sample', color=p[-1].get_color(), linestyle='-.')
# plt.plot(nums[:len(supcon_real)], supcon_real, label='Real')
# plt.grid(linestyle='--')
# plt.axvline([200], color='black', linestyle="--")
plt.grid(linestyle='--')
plt.ylabel('Top-1 Accuracy', fontsize=16)
plt.xlabel('Isotropic gaussian standard deviation', fontsize=16)
plt.title("Classification ImageNet100", fontsize=16)
# plt.legend()
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
# plt.savefig('result.pdf')
# +
# Accuracy vs. percentage of fake training data on ImageNet100.
# Fix: FormatStrFormatter was imported *after* its use at the bottom of the
# notebook, raising NameError on a top-to-bottom run — import it first.
from matplotlib.ticker import FormatStrFormatter

iso_std = [0, 5, 25, 50, 100]       # % of fake data
iso_acc = [67.36, 67.52, 66.12, 64.68, 58.6]  # top-1 accuracy (%)
p = plt.plot(iso_std, iso_acc)
plt.scatter(iso_std, iso_acc)
plt.grid(linestyle='--')
plt.ylabel('Top-1 Accuracy', fontsize=16)
plt.xlabel('Percentage of fake data', fontsize=16)
plt.title("Classification ImageNet100", fontsize=16)
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
# plt.savefig('result.pdf')
plt.gca().yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
# -
| GenRep/utils/plot_std_vs_acc.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: fenicsproject
# language: python
# name: fenicsproject
# ---
# +
from dolfin import *
import numpy as np
parameters["form_compiler"]["representation"] = 'quadrature'
import warnings
from ffc.quadrature.deprecation import QuadratureRepresentationDeprecationWarning
warnings.simplefilter("once", QuadratureRepresentationDeprecationWarning)
import matplotlib.pyplot as plt
# Form compiler options
parameters["form_compiler"]["cpp_optimize"] = True
parameters["form_compiler"]["optimize"] = True
# Convergence study: solve a 1-D nonlinear elastodynamics problem on a
# sequence of refined meshes (with proportionally refined time steps) and
# record the L2 misfit against a manufactured analytical solution.
N_refines = np.array([10,20,40,80,160,320])
dts = np.zeros((N_refines.size,1))            # time-step size per refinement
errors_result = np.zeros((N_refines.size,2))  # columns: [u error, sigma error]
i_ref = 0
for N_refine in N_refines:
    # Define mesh
    N_mesh = N_refine
    mesh = UnitIntervalMesh(N_mesh)
    # =========================================================
    # DEFINE BOUNDARY ELEMENTS
    tol = 1E-12
    # Sub domain for clamp at left end
    def left(x, on_boundary):
        return near(x[0], 0.,tol) and on_boundary
    # Sub domain for rotation at right end
    def right(x, on_boundary):
        return near(x[0], 1.,tol) and on_boundary
    def left_and_right(x, on_boundary):
        return on_boundary
    # Create mesh function over the cell facets
    boundary_subdomains = MeshFunction("size_t", mesh, mesh.topology().dim() - 1)
    boundary_subdomains.set_all(0)
    force_boundary = AutoSubDomain(right)
    force_boundary.mark(boundary_subdomains, 3)
    # Define measure for boundary condition integral
    dss = ds(subdomain_data=boundary_subdomains)
    # =========================================================
    # =========================================================
    # DEFINE MAT. PROPERTIES
    E = Constant(70e3)
    nu = Constant(0.3)
    lmbda = E*nu/(1+nu)/(1-2*nu)
    mu = E/2./(1+nu)
    sig0 = Constant((1e0))  # yield strength
    Et = E/100.  # tangent modulus
    H = E*Et/(E-Et)  # hardening modulus
    rho = Constant(10e4)
    # NOTE: this rebinds `tol` (previously the geometric tolerance) as the
    # Newton convergence tolerance.
    Nitermax, tol = 200, 1e-8  # parameters of the Newton-Raphson procedure
    Nincr = N_refine*2  # refine dt together with the mesh
    # Nincr = 100
    load_steps = np.linspace(0, 1, Nincr+1)[1:]
    dt = Constant(load_steps[1]-load_steps[0])
    dt_ = float(dt)
    # Newmark-beta method
    betaN = Constant(0.25)
    gammaN = Constant(0.5)
    # =========================================================
    # =========================================================
    # DEFINE ELEMENT FUNCTION SPACE AND VARIABLES
    deg_u = 1
    deg_stress = 0
    V = FunctionSpace(mesh, "CG", deg_u)
    We = VectorElement("Quadrature", mesh.ufl_cell(), degree=deg_stress, dim=1, quad_scheme='default')
    W = FunctionSpace(mesh, We)
    W0e = FiniteElement("Quadrature", mesh.ufl_cell(), degree=deg_stress, quad_scheme='default')
    W0 = FunctionSpace(mesh, W0e)
    # Define function space for stresses
    Vsig = VectorFunctionSpace(mesh, "DG", 0)
    sig = Function(W)
    sig_old = Function(W)
    beta = Function(W0)
    beta1 = Function(V)
    gamma0 = Function(W0)
    gamma0.interpolate(Constant(1e-6))
    u = Function(V, name="Total displacement")
    u_pre = Function(V)
    u_old = Function(V)
    u_d_old = Function(V)   # previous velocity
    u_dd_old = Function(V)  # previous acceleration
    du = Function(V, name="Iteration correction")
    Du = Function(V, name="Current increment")
    v = TrialFunction(V)
    u_ = TestFunction(V)
    u_theo = Function(V)
    sig_out = Function(Vsig, name="sigma")
    eps_out = Function(Vsig, name="epsilon")
    metadata = {"quadrature_degree": deg_stress, "quadrature_scheme": "default"}
    dxm = dx(metadata=metadata)
    # =========================================================
    # =========================================================
    # DEFINE NEUMANN BCs
    bc_expression = Expression(("t*t*x[0]*(1-x[0]*x[0]*t/L)/L"), L=1, t=0, degree=2)
    bc = DirichletBC(V, bc_expression, left_and_right)
    zero = Constant(0.0)
    bc_newton = DirichletBC(V, zero, left_and_right)
    n = FacetNormal(mesh)
    q_lim = float(2/sqrt(3)*sig0*mu*2)
    loading = Expression(("t <= tc ? q*t*(1-t)*4 : 0"), q=0.2*q_lim, t=0, tc=0.7, degree=2)
    # Manufactured body-force source term matching the analytical solution.
    source = Expression(\
        ("(2*(pow(L,5)*eps0*eps0*rho+3*t*eps0*eps0*(G0*t*t-rho*x[0]*x[0])*pow(L,4)+9*G0*L*L*pow(t,7)-54*G0*L*pow(t,8)*x[0]*x[0]+81*G0*pow(t,9)*pow(x[0],4)))*x[0]/(pow(eps0,2)*pow(L,6))")\
        ,G0=float(mu), eps0=float(sig0), rho=float(rho), t=0, L=1, degree=2)
    def F_ext(v):
        return dot(loading,v)*dss(3)
    # =========================================================
    # =========================================================
    # DEFINE FUNCTIONS FOR FUNCTIONAL CONSTRUCTION
    def eps(v):
        e = 0.5*grad(v)
        return e
    def sigma(eps_el):
        return 2*mu*eps_el
    def sigma_out(r):
        return 2.0*mu*sym(grad(r))
    def sigma_tang(e):
        # Tangent stress using the current consistent stiffness factor `beta`.
        return 2*mu*beta*e
    # =========================================================
    # =========================================================
    # DEFINE LHS MATRIX AND RHS RESIDUAL
    a_Newton = rho*inner(u_,v)/(betaN*dt*dt)*dxm + inner(2*eps(u_), sigma_tang(eps(v)) )*dxm
    res = (-rho)*inner(u_,u)/(betaN*dt*dt)*dxm + \
        -inner(2*eps(u_), sig)*dxm + \
        F_ext(u_) + \
        inner(u_,source)*dxm + \
        rho*inner(u_old+dt*u_d_old,u_)*1./(betaN*dt*dt)*dxm + \
        (1.-2*betaN)/(2*betaN)*rho*inner(u_dd_old,u_)*dxm
    # =========================================================
    # =========================================================
    # DEFINE FUNCTIONS FOR FUNCTION PROJECTION
    def proj_sig(old_u,old_sig,old_Du):
        # Nonlinear constitutive update: stress increment from the strain
        # increment, with strain-dependent stiffness factor beta.
        eps_old = eps(old_u)
        beta = 1+3*(2*eps_old[0]/sig0)*(2*eps_old[0]/sig0)
        return 2*mu*beta*eps(old_Du)+old_sig, beta
    def local_project(v, V, u=None):
        # Element-wise L2 projection (efficient for quadrature-space fields).
        dv = TrialFunction(V)
        v_ = TestFunction(V)
        a_proj = inner(dv, v_)*dxm
        b_proj = inner(v, v_)*dxm
        solver = LocalSolver(a_proj, b_proj)
        solver.factorize()
        if u is None:
            u = Function(V)
            solver.solve_local_rhs(u)
            return u
        else:
            solver.solve_local_rhs(u)
            return
    # =========================================================
    # =========================================================
    # DEFINE FUNCTIONS FOR U, V, A UPDATE
    # Update formula for acceleration
    def update_a(u, u_old, v_old, a_old, ufl=True):
        if ufl:
            dt_ = dt
            betaN_ = betaN
        else:
            dt_ = float(dt)
            betaN_ = float(betaN)
        return (u-u_old-dt_*v_old)/betaN_/dt_**2 - (1-2*betaN_)/2/betaN_*a_old
    # Update formula for velocity
    def update_v(a, u_old, v_old, a_old, ufl=True):
        if ufl:
            dt_ = dt
            gamma_ = gammaN
        else:
            dt_ = float(dt)
            gamma_ = float(gammaN)
        return v_old + dt_*((1-gamma_)*a_old + gamma_*a)
    def update_fields(u, u_old, v_old, a_old):
        """Update fields at the end of each time step."""
        # Get vectors (references)
        u_vec, u0_vec = u.vector(), u_old.vector()
        v0_vec, a0_vec = v_old.vector(), a_old.vector()
        # use update functions using vector arguments
        a_vec = update_a(u_vec, u0_vec, v0_vec, a0_vec, ufl=False)
        v_vec = update_v(a_vec, u0_vec, v0_vec, a0_vec, ufl=False)
        # Update (u_old <- u)
        v_old.vector()[:], a_old.vector()[:] = v_vec, a_vec
        u_old.vector()[:] = u.vector()
    # =========================================================
    # =========================================================
    # FILE OUTPUT
    file_results = XDMFFile("nonlinearElas1DVeri__results.xdmf")
    file_results.parameters["flush_output"] = True
    file_results.parameters["functions_share_mesh"] = True
    # =========================================================
    # =========================================================
    # ITERATIVE SOLVER
    results = np.zeros((Nincr+1, 5))
    results_1d = np.zeros((Nincr+1, N_mesh))
    # Initial conditions
    u_old.interpolate( Expression(("0"), degree=2) )
    u_d_old.interpolate( Expression(("0"), degree=2) )
    # u.assign(u_old)
    for (i, t) in enumerate(load_steps[0:]):
        loading.t = t
        bc_expression.t = t
        source.t = t
        u_pre.assign(u)
        # Prepare for U_n+1^0 from U_n
        bc.apply(u.vector())
        # -- Compute components in F(U_n+1^0)
        Du.assign(u-u_pre)
        sig_, beta_ = proj_sig(u_pre,sig_old,Du)
        local_project(sig_, W, sig)
        local_project(beta_, W0, beta)
        A, Res = assemble_system(a_Newton, res, bc_newton)
        nRes0 = Res.norm("l2")
        nRes = nRes0
        # print("Increment:", str(i+1)," Initial Residual:", nRes0)
        niter = 0
        # Newton-Raphson loop for the current time step.
        while (nRes/nRes0 > tol and nRes0>1e-10) and niter < Nitermax:
            solve(A, du.vector(), Res, "mumps")
            Du.assign(Du+du)
            u.assign(u+du)  # u^{k+1}_{n+1}
            sig_, beta_ = proj_sig(u_pre,sig_old,Du)
            local_project(sig_, W, sig)
            local_project(beta_, W0, beta)
            A, Res = assemble_system(a_Newton, res, bc_newton)
            nRes = Res.norm("l2")
            # print(" Residual:", nRes," ",niter)
            niter += 1
        u.assign(u_pre+Du)
        update_fields(u, u_old, u_d_old, u_dd_old)
        sig_old.assign(sig)
        # ----------------
        # Post-processing
        # ----------------
        local_project(sig, Vsig, sig_out)
        local_project(eps(u), Vsig, eps_out)
        file_results.write(u, t)
        # Sample the displacement field on a uniform grid for animation.
        for i_x in range(N_mesh):
            results_1d[i+1,i_x] = u(i_x/(N_mesh-1))
        tc = 0.7;  # must stay consistent with `tc` inside `loading` above
        if t<tc:
            # results[i+1, :] = (u(1),t,4*t*(1-t))
            results[i+1, :] = (u(1), sig_out(1),t,4*t*(1-t),eps_out(1))
        else:
            results[i+1, :] = (u(1), sig_out(1),t,0,eps_out(1))
        print("---------------------------------")
    # Error vs. the manufactured analytical solution at the final time `t`.
    sig_expression = Expression(("G0*t*t*(-3*t*x[0]*x[0]+L)*(9*pow(t,6)*pow(x[0],4)-6*L*pow(t,5)*x[0]*x[0]+pow(L,4)*eps0*eps0+L*L*pow(t,4))/(eps0*eps0*pow(L,6))",)\
        ,G0=float(mu), eps0=float(sig0), rho=float(rho), L=1, t=t, degree=2)
    sig_theo = Function(Vsig)
    sig_theo.interpolate(sig_expression)
    u_theo.interpolate(bc_expression)
    error = (u_theo - u)**2*dxm
    Errors_theo = sqrt(assemble(error))
    errors_result[i_ref,0] = Errors_theo
    error = (sig_theo - sig)**2*dxm
    Errors_theo1 = sqrt(assemble(error))
    errors_result[i_ref,1] = Errors_theo1
    dts[i_ref] = 1/Nincr
    i_ref += 1
    print("Solution misfit:", Errors_theo,Errors_theo1," dx:",1/N_mesh,t)
# -
# Post-processing: compare numerical stress-strain history at the right end
# with the nonlinear and linearised constitutive curves.
N_pt = 500;
stress_theo = 2*float(mu)*results[:N_pt, 4]*(1+ (2*results[:N_pt, 4]/float(sig0))**2 )
stress_lin = 2*float(mu)*results[:N_pt, 4]
# +
import matplotlib.pyplot as plt
plt.subplot(1,4,1)
plt.plot(results[:, 0], results[:, 1], "-*")
plt.plot(results[:, 0], results[:, 3]*0.2*q_lim, ".")
plt.xlabel("Displacement")
plt.ylabel("$\sigma_{xx,yy}$ at the right end")
plt.legend(["$\sigma_{yy}$","$\sigma_{xx}$","Loads"])
plt.subplot(1,4,2)
plt.plot(results[:, 2], results[:, 1], "-.")
plt.plot(results[:, 2], results[:, 3]*0.2*q_lim, ".")
plt.xlabel("Loading steps")
plt.ylabel("$\sigma_{xx}$ at the right end")
plt.legend(["$\sigma_{yy}$","Loads"])
plt.subplot(1,4,3)
plt.plot(results[:, 2], results[:, 0], "-.")
plt.xlabel("Time")
plt.ylabel("Displacement at the right end")
plt.subplot(1,4,4)
plt.plot(results[:N_pt, 4], results[:N_pt, 1], ".-")
plt.plot(results[:N_pt, 4], stress_theo, "-")
plt.plot(results[:N_pt, 4], stress_lin, "-")
plt.xlabel("Strain and the right end")
plt.ylabel("Stress at the right end")
plt.show()
# -
# Animate the computed displacement profile against the analytical one.
# Fix: FuncAnimation was used without ever being imported in this notebook.
from matplotlib.animation import FuncAnimation

x_grid = np.linspace(0, 1, N_mesh)
u_theo = x_grid / 1 * (1 - x_grid**2 / 1)  # analytical profile at t=1 (L=1)
# +
fig, ax = plt.subplots()
line1, = ax.plot([])  # numerical solution
line2, = ax.plot([])  # analytical reference
ax.set_xlim(0, 1)
ax.set_ylim(-1e0, 1e0)
ax.set_xlabel('x')
ax.set_ylabel('Displacement')

def animate(frame_num):
    """Update both curves for time-step `frame_num`."""
    line1.set_data((np.linspace(0, 1, N_mesh), results_1d[frame_num, :]))
    line2.set_data(x_grid, u_theo)
    return line1, line2

ani = FuncAnimation(fig, animate, frames=Nincr, interval=10)
plt.show()
| WaveProp1D-Veri3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .sh
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Bash
# language: bash
# name: bash
# ---
# # Pod security policy tutorial
#
# Before you start this tutorial you have to make sure that you start minikube with the pod security policy admission controller and the pod-security-policy addon enabled (compare https://minikube.sigs.k8s.io/docs/tutorials/using_psp/). For this tutorial you can do so using the start script bin/start_psp.
#
# You can find more information about the pod security policies at https://kubernetes.io/docs/concepts/policy/pod-security-policy/.
#
# * First create a separate namespace that we can use for this part of the tutorial
kubectl create namespace psp
# * As done a couple of times before, let's try to deploy an nginx server to our cluster. Therefore we first define a deployment manifest and apply it to the cluster.
cat <<EOT> 03_privileged_nginx_deployment.yml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.14.2
EOT
kubectl apply -f 03_privileged_nginx_deployment.yml -n=psp
# * Let's check if the nginx server is running!
kubectl get all -n=psp
# * Apparently there is something wrong with our deployment. The Pod shows a CreateContainerConfigError. Let's check what is going on with the pod.
kubectl describe pod nginx-deployment-877f48f6d-FILL+IN+YOUR+ID -n=psp
# * The error message states "Error: container has runAsNonRoot and image will run as root" - apparently this pod does not comply with the pod security policies that are in place on this cluster. Let's check which security policies have been configured automatically:
kubectl get psp -A
# * Check the detailed definition of all pod security policies
kubectl describe psp -A
# * There are two policy definitions in place, one very restrictive policy definition "restricted" and one pretty permissive policy "privileged". The permissive policy should actually comply with our pod as it allows "RunAsAny" as User Strategy. Let's check if we are allowed to use these policies:
kubectl auth can-i --as system:serviceaccount:default:default use podsecuritypolicy/restricted
# Fixed typo: the policy resource is named "privileged" (the previous
# "priviledged" spelling checked a non-existent policy).
kubectl auth can-i --as system:serviceaccount:default:default use podsecuritypolicy/privileged
# * Apparently we are allowed to make use of the restricted policy (observe the "yes" after the warning) but we are not allowed to make use of the privileged policy (observe the "no" after the warning). Access to these policies is granted via cluster roles and cluster role bindings. Check the existing configuration for the restricted policy to find out what kind of adjustments are necessary to use the privileged policy.
kubectl get clusterrole
kubectl get clusterrolebinding
kubectl describe clusterrolebinding default:restricted
# * Use the following stub to deploy the missing cluster role binding
# NOTE: the ??? placeholders below are intentional exercise blanks to be
# filled in by the student (role binding granting "use" on the privileged PSP).
cat <<EOT> 03_privileged_policy_cluster_role_binding.yml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: ???
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ???
  name: ???
subjects:
- kind: Group
  name: ???
  apiGroup: rbac.authorization.k8s.io
EOT
kubectl apply -f 03_privileged_policy_cluster_role_binding.yml
# * For the changes to take effect you will have to restart the deployment
kubectl delete -f 03_privileged_nginx_deployment.yml -n=psp
kubectl apply -f 03_privileged_nginx_deployment.yml -n=psp
kubectl get all -n=psp
# * Now you can expose your deployment as done in previous tutorials
kubectl expose deployment nginx-deployment -n=psp --type=NodePort --port=80
NODEPORT=$(kubectl get service nginx-deployment -n=psp -o jsonpath='{.spec.ports[0].nodePort}')
echo http://$(hostname -i):$NODEPORT
# * An alterative solution would be to simply use an nginx configuration that does not need root privileges. If you still have time you can delete the cluster role binding you just created to use the privileged policy and try with the manifest below. To expose this deployment you will also need an adjustment of your service. Check https://hub.docker.com/r/nginxinc/nginx-unprivileged to find out what to do.
cat <<EOT> 03_unprivileged_nginx_deployment.yml
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx-deployment
spec:
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: nginxinc/nginx-unprivileged
EOT
| share/notebooks/03_PodSecurityPolicy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import sys
import tensorflow as tf
from tensorflow import keras
import matplotlib.image as mpimg
from matplotlib.pyplot import imshow
from pprint import pprint
checkpoint_path = "training_checkpoints/checkpoint.ckpt"
def convolute(images, kernel):
    """Apply one 2-D convolution kernel to a batch of single-channel images.

    The kernel (a nested Python list) is lifted to shape
    (kh, kw, in_channels=1, out_channels=1) as required by
    tf.nn.convolution, and the output is clamped to the valid
    8-bit pixel range [0, 255].
    """
    kern = tf.constant(kernel, dtype=tf.float32)
    # Append the in-channel and out-channel axes.
    kern = tf.expand_dims(tf.expand_dims(kern, 2), 3)
    filtered = tf.nn.convolution(images, kern, padding="VALID")
    return tf.clip_by_value(filtered, 0, 255)
# 3x3 convolution kernels used by apply_filters.
# NOTE(review): despite its name, IDENTITY_KERNEL is a 3x3 box
# (averaging) kernel, not an identity kernel — confirm intent.
IDENTITY_KERNEL = [[1, 1, 1], [1, 1, 1], [1, 1, 1]]
EDGE_KERNEL = [[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]]       # Laplacian edge detector
BLUR_KERNEL = [[1, 2, 1], [2, 4, 2], [1, 2, 1]]               # Gaussian-style blur
HV_EDGE_KERNEL = [[0, 1, 0], [1, -4, 1], [0, 1, 0]]           # horizontal/vertical edges
DIAGONAL_EDGE_KERNEL = [[1, 0, -1], [0, 0, 0], [-1, 0, 1]]    # diagonal edges
def apply_filters(images):
    """Return a 2-channel feature map: edge-detected and blurred copies
    of *images*, stacked along the channel axis (axis 3)."""
    return tf.concat(
        [convolute(images, EDGE_KERNEL), convolute(images, BLUR_KERNEL)],
        3)
#image = load_test_image()

# Dead code kept for reference: an earlier functional-API variant.
"""
def create_model():
    images = keras.Input(shape=(28, 28, 1), batch_size=1000)
    #filters = keras.layers.Lambda(apply_filters)(images)
    #flatten = keras.layers.Flatten(input_shape=(28, 28))(images)
    relu = keras.layers.Dense(128,
                              #input_shape=(26, 26, 5),
                              activation=tf.nn.relu,
                              name='relu')(images)
    softmax = keras.layers.Dense(10,
                                 #input_shape=(3380,),
                                 activation=tf.nn.softmax,
                                 name='softmax')(relu)
    return keras.Model(inputs=images, outputs=softmax)
"""

# Classifier: fixed hand-crafted convolution filters -> dense softmax head.
model = keras.Sequential([
    keras.layers.Lambda(apply_filters),
    keras.layers.Flatten(input_shape=(28, 28)),
    keras.layers.Dense(128, activation=tf.nn.relu),
    keras.layers.Dense(10, activation=tf.nn.softmax),
])

# Train
# ... Load training set
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
# Scale pixel values from [0, 255] to [0, 1].
train_images = train_images / 255.0
test_images = test_images / 255.0
# Add a trailing channel axis; eval() inside a Session converts the
# graph tensors back to NumPy arrays (TF1-style graph execution).
with tf.Session():
    train_images = tf.expand_dims(train_images, 3).eval()
    test_images = tf.expand_dims(test_images, 3).eval()
# ... Make save weights
# Persist the weights every epoch so training can be resumed later.
checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_path,
                                                         save_weights_only=True,
                                                         verbose=1,
                                                         period=1)
# ... Compile model
model.compile(optimizer=tf.train.AdamOptimizer(),
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
# ... Fit model
model.fit(train_images, train_labels, epochs=15, steps_per_epoch=10, callbacks=[checkpoint_callback])
# ... Test model
test_loss, test_acc = model.evaluate(test_images, test_labels)
print("loss: {}, acc: {}".format(test_loss, test_acc))
# +
import sys
import tensorflow as tf
from tensorflow import keras
import matplotlib.image as mpimg
from matplotlib.pyplot import imshow
from pprint import pprint
checkpoint_path = "training_checkpoints/checkpoint.ckpt"
def load_test_image():
    """Load the sample JPEG, shrink it to 28x28 grayscale and invert the
    intensities so the digit/item is bright on a dark background,
    matching the (fashion-)MNIST convention."""
    raw = tf.read_file("../predict-data/seetha-white.jpeg")
    decoded = tf.image.decode_jpeg(raw)
    resized = tf.image.resize_images(decoded, [28, 28])
    gray = tf.image.rgb_to_grayscale(resized)
    return 1 - gray
def convolute(images, kernel):
    """Convolve *images* (single-channel batch) with a 3x3 kernel given
    as a nested list, clamping the result to [0, 255]."""
    weights = tf.expand_dims(
        tf.expand_dims(tf.constant(kernel, dtype=tf.float32), 2), 3)
    out = tf.nn.convolution(images, weights, padding="VALID")
    return tf.clip_by_value(out, 0, 255)
# 3x3 convolution kernels (see notes in the training notebook: the
# "identity" kernel is actually a box/averaging kernel).
IDENTITY_KERNEL = [[1, 1, 1], [1, 1, 1], [1, 1, 1]]
EDGE_KERNEL = [[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]]
BLUR_KERNEL = [[1, 2, 1], [2, 4, 2], [1, 2, 1]]
HV_EDGE_KERNEL = [[0, 1, 0], [1, -4, 1], [0, 1, 0]]
DIAGONAL_EDGE_KERNEL = [[1, 0, -1], [0, 0, 0], [-1, 0, 1]]
# Debug visualisation of the edge filter on the sample image.
# NOTE(review): sys.exit(1) aborts the script here, so everything after
# this cell is unreachable — presumably leftover debugging; confirm
# before relying on the prediction code below.
image = load_test_image()
edged = convolute(image, EDGE_KERNEL)
with tf.Session():
    imshow(edged.eval())
    sys.exit(1)
def apply_filters(images):
    """Build a 2-channel feature map by stacking edge-detected and
    blurred copies of *images* along the channel axis.

    NOTE(review): the tf.Session/imshow below is a debugging side effect
    inside what is used as a model layer; it will run on every forward
    pass — confirm it should be removed for real use.
    """
    edged = convolute(images, EDGE_KERNEL)
    with tf.Session():
        imshow(edged.eval())
    blured = convolute(images, BLUR_KERNEL)
    #hv_edged = convolute(images, HV_EDGE_KERNEL)
    #diagonal_edged = convolute(images, DIAGONAL_EDGE_KERNEL)
    #cropped_images = convolute(images, IDENTITY_KERNEL)
    # Dead code kept for reference: 5-channel variant.
    """
    return tf.concat([cropped_images, edged, blured, hv_edged, diagonal_edged],
                     3,
                     name='concat')
    """
    return tf.concat([edged, blured], 3)
#image = load_test_image()

# Rebuild the same architecture used for training so the checkpointed
# weights line up layer-for-layer.
model = keras.Sequential([
    keras.layers.Lambda(apply_filters),
    keras.layers.Flatten(input_shape=(28, 28)),
    keras.layers.Dense(128, activation=tf.nn.relu),
    keras.layers.Dense(10, activation=tf.nn.softmax),
])

# Load model
model.load_weights(checkpoint_path)

# Test with real image
image = load_test_image()
with tf.Session():
    image = image.eval()
# NOTE(review): a single (28, 28, 1) image is passed to predict, which
# expects a batch — this likely needs a leading batch axis; confirm.
predictions = model.predict(image, steps=1)
print(predictions)
# -
| notebooks/import-image.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import matplotlib as plt
import pandas as pd
import numpy as np
import sklearn
import sklearn.feature_extraction.text as sklearnText
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
# -
train_set = pd.read_csv("Data/similarity.csv")

# Drop rows where the target similarity score is missing.
train_set = train_set[train_set['similarity'].notnull()]
train_set

# Hold out 20% of the labeled rows for evaluation.
train, test = train_test_split(train_set, test_size=0.2)
print('N training data:', len(train))
print('N test data:', len(test))

clf = RandomForestClassifier(n_jobs=1, random_state=0)

# The single 'similarity' score is the only feature: reshape it into an
# (n_samples, 1) design matrix of float32 values.
simArray = np.array(train['similarity'], dtype=np.float32).reshape((-1, 1))
#simArray
len(simArray), len(train['is_duplicate_y'])

clf.fit(simArray, train['is_duplicate_y'])

# BUG FIX: evaluation previously re-used the *training* rows (and train
# labels), so the reported number was training accuracy and the test
# split was never used.  Predict on the held-out test split instead.
simY = np.array(test['similarity'], dtype=np.float32).reshape((-1, 1))
#simY

prediction = clf.predict(simY)
prediction

clf.predict_proba(simY)

np.array(test['is_duplicate_y'])

# +
acc = sklearn.metrics.accuracy_score(prediction,
                                     np.array(test['is_duplicate_y']))
print (acc)
| Random_Forest_Classifier.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + colab_type="code" id="lCdA5A_4AawH" outputId="956e7f9d-40ca-4916-aea4-d997f6b52294" executionInfo={"status": "ok", "timestamp": 1586627763437, "user_tz": 240, "elapsed": 19033, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg8PmDhUFm8THlQ0eP76DD2UF2SIKi7FuSRfOrD=s64", "userId": "11853385700800863483"}} colab={"base_uri": "https://localhost:8080/", "height": 122}
from google.colab import drive
drive.mount('/content/drive')
# + id="ho3k_anDWL09" colab_type="code" colab={}
GOOGLE_COLAB = True
# + colab_type="code" id="tBkHQA5CAYP2" colab={}
# %reload_ext autoreload
# %autoreload 2
# + colab_type="code" id="GmZLGhnTAYP7" colab={"base_uri": "https://localhost:8080/", "height": 71} outputId="39ae2eb3-0a9d-4575-a6ae-92d2f6cbc3bb" executionInfo={"status": "ok", "timestamp": 1586627763440, "user_tz": 240, "elapsed": 2398, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg8PmDhUFm8THlQ0eP76DD2UF2SIKi7FuSRfOrD=s64", "userId": "11853385700800863483"}}
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import random
import pickle
# + colab_type="code" id="vfTX4hByAYQE" colab={}
import sys
if GOOGLE_COLAB:
sys.path.append('drive/My Drive/yelp_sentiment_analysis')
else:
sys.path.append('../')
from yelpsent import data
from yelpsent import features
from yelpsent import metrics
from yelpsent import visualization
from yelpsent import models
# + id="rMagodlQ0CPW" colab_type="code" colab={}
import importlib
def reload():
    """Re-import every yelpsent submodule (useful on Colab after editing
    the package source on Drive)."""
    for module in (data, features, metrics, visualization, models):
        importlib.reload(module)
# + [markdown] colab_type="text" id="5Eeg1CLlAYQH"
# # Load Dataset
# + colab_type="code" id="hGnUCDXLAYQI" colab={}
# Data paths differ between Colab (Drive mount) and a local checkout.
# NOTE(review): the Colab branch loads the *balanced* training file
# while the local branch loads the unbalanced one — confirm intended.
if GOOGLE_COLAB:
    data_train, data_test = data.load_dataset("drive/My Drive/yelp_sentiment_analysis/data/yelp_train_balanced.json",
                                              "drive/My Drive/yelp_sentiment_analysis/data/yelp_test.json")
else:
    data_train, data_test = data.load_dataset("../data/yelp_train.json",
                                              "../data/yelp_test.json")
# + colab_type="code" id="qE4liC-yAYQX" colab={}
X_train = data_train['review'].tolist()
y_train = data_train['sentiment'].tolist()
# + colab_type="code" id="K-pw_ebWAYQZ" colab={}
X_test = data_test['review'].tolist()
y_test = data_test['sentiment'].tolist()
# + [markdown] id="RhQEWsaHbrZR" colab_type="text"
# # Load DTMs
# + id="hV7HErdf_D_9" colab_type="code" colab={}
# Restore the fitted vectorizer and the precomputed document-term
# matrices so the (expensive) vectorization step is not repeated.
with open('drive/My Drive/yelp_sentiment_analysis/pickles/vectorizer.pickle', 'rb') as f:
    vectorizer = pickle.load(f)
with open('drive/My Drive/yelp_sentiment_analysis/pickles/X_train_dtm.pickle', 'rb') as f:
    X_train_dtm = pickle.load(f)
with open('drive/My Drive/yelp_sentiment_analysis/pickles/X_test_dtm.pickle', 'rb') as f:
    X_test_dtm = pickle.load(f)
# + [markdown] id="LBhtNConM71T" colab_type="text"
# # Logistic Regression
# + id="1_ZcvBu0FrEI" colab_type="code" colab={}
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
# + id="4M2noKryF2_l" colab_type="code" colab={}
# # Grid-search for c
# params = dict(C = np.logspace(-3, 0, 4),
# penalty = ['l1', 'l2'])
# gscv = GridSearchCV(LogisticRegression(max_iter=1000,
# random_state=647),
# params,
# scoring='f1_macro',
# cv=3,
# verbose=1,
# n_jobs=-1)
# + id="-TuQMxYXGCdj" colab_type="code" colab={}
# gscv.fit(X_train_dtm, y_train)
# + id="kxcpR-ne94cH" colab_type="code" colab={}
# print(gscv.best_params_)
# + id="0dPZE_x4PTwu" colab_type="code" colab={}
# # Final model
# model = LogisticRegression(C=0.01,
# penalty='l2',
# max_iter=1000,
# random_state=647,
# n_jobs=-1)
# + id="QAeZWxJ1PV7v" colab_type="code" colab={}
# # %time model.fit(X_train_dtm, y_train)
# + id="ZAllB62qKCSI" colab_type="code" colab={}
# with open('drive/My Drive/yelp_sentiment_analysis/models/logistic_regression.pickle', 'wb') as f:
# pickle.dump(model, f)
with open('drive/My Drive/yelp_sentiment_analysis/models/logistic_regression.pickle', 'rb') as f:
model = pickle.load(f)
# + id="wYJJPNlSJire" colab_type="code" outputId="221d2e98-c1fd-4868-f608-7959684bc3c8" executionInfo={"status": "ok", "timestamp": 1586627792504, "user_tz": 240, "elapsed": 3083, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg8PmDhUFm8THlQ0eP76DD2UF2SIKi7FuSRfOrD=s64", "userId": "11853385700800863483"}} colab={"base_uri": "https://localhost:8080/", "height": 314}
y_train_pred, y_test_pred, f1_train, f1_test =\
models.evaluate_pipeline(X_train = X_train_dtm,
y_train = y_train,
X_test = X_test_dtm,
y_test = y_test,
pipeline = model)
print("Macro F1 Scores: \n Training: {0:.3f} \n Testing: {1:.3f}\n\n".format(f1_train, f1_test))
# + colab_type="code" id="WgOFOM9leFtf" colab={}
# Map each vocabulary term to its vector of per-class coefficients
# (model.coef_ transposed: one row per feature, one column per class).
feature_to_coefs = {
    word: coef for word, coef in zip(
        vectorizer.get_feature_names(),
        model.coef_.transpose()
    )
}
# + id="1Ur6dmDqkKoC" colab_type="code" colab={}
# Rank vocabulary terms by how strongly (std-dev across class
# coefficients) they discriminate between classes, and by per-class
# weight.  Column indices 0/1/2 are treated as negative/neutral/positive
# below — assumed to match the classifier's class order; TODO confirm
# against model.classes_.
most_discriminatives = sorted(feature_to_coefs.items(),
                              key=lambda x: np.std(x[1]),
                              reverse=True)
least_discriminatives = sorted(feature_to_coefs.items(),
                               key=lambda x: np.std(x[1]),
                               reverse=False)
most_negative = sorted(feature_to_coefs.items(),
                       key=lambda x: x[1][0],
                       reverse=True)
most_neutral = sorted(feature_to_coefs.items(),
                      key=lambda x: x[1][1],
                      reverse=True)
most_positive = sorted(feature_to_coefs.items(),
                       key=lambda x: x[1][2],
                       reverse=True)
# + id="pREkAujCkrUm" colab_type="code" colab={}
mosts = {"Most Discriminative":most_discriminatives,
         "Least Discriminative":least_discriminatives,
         "Most Negative":most_negative,
         "Most Neutral":most_neutral,
         "Most Positive":most_positive}
# + id="dJZMXi22ipNM" colab_type="code" outputId="f4ab49e9-e793-41b9-ad61-64915f9df15a" executionInfo={"status": "ok", "timestamp": 1586627803415, "user_tz": 240, "elapsed": 13975, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg8PmDhUFm8THlQ0eP76DD2UF2SIKi7FuSRfOrD=s64", "userId": "11853385700800863483"}} colab={"base_uri": "https://localhost:8080/", "height": 1000}
# Print the top-10 terms of each ranking with their rounded per-class
# coefficients, pipe-separated.
for name, lst in mosts.items():
    print("\n{name}:".format(name=name))
    for i in lst[:10]:
        feature = i[0]
        coefs = i[1]
        coefs = np.round(coefs, decimals=3)
        print('|'.join([feature, str(coefs[0]), str(coefs[1]), str(coefs[2])]))
# + id="fcChjnvNY6YX" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="debf54b6-5264-475a-80b9-81e7f229c858" executionInfo={"status": "ok", "timestamp": 1586627857401, "user_tz": 240, "elapsed": 892, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg8PmDhUFm8THlQ0eP76DD2UF2SIKi7FuSRfOrD=s64", "userId": "11853385700800863483"}}
feature_to_coefs['not disappoint']
# + id="7pGY2-NqY9TF" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="4c983e89-9e89-4a67-aa4e-87d6b10f571f" executionInfo={"status": "ok", "timestamp": 1586627865301, "user_tz": 240, "elapsed": 937, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg8PmDhUFm8THlQ0eP76DD2UF2SIKi7FuSRfOrD=s64", "userId": "11853385700800863483"}}
feature_to_coefs['not disappointed']
# + id="YAkpIFrnZveP" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="4820b46e-a561-491f-dca4-ae79b975ff6d" executionInfo={"status": "ok", "timestamp": 1586628090411, "user_tz": 240, "elapsed": 1606, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg8PmDhUFm8THlQ0eP76DD2UF2SIKi7FuSRfOrD=s64", "userId": "11853385700800863483"}}
for f, c in feature_to_coefs.items():
if 'service' in f:
print (f, c)
# + id="kUud9175iiYc" colab_type="code" colab={}
# Example
# Collect test indices whose true label is `example_actual` but were
# predicted as `example_pred`, for qualitative error inspection.
example_actual = 1
example_pred = 2
example_incorrects = []
for i in range(len(y_test_pred)):
    if y_test_pred[i] == example_pred and y_test[i] == example_actual:
        example_incorrects.append(i)
# + id="FYSJwkGhkIUo" colab_type="code" outputId="3e1e0457-d1ad-4b40-c444-08ac38eaa58b" executionInfo={"status": "ok", "timestamp": 1586547708510, "user_tz": 240, "elapsed": 325, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg8PmDhUFm8THlQ0eP76DD2UF2SIKi7FuSRfOrD=s64", "userId": "11853385700800863483"}} colab={"base_uri": "https://localhost:8080/", "height": 54}
r = random.choice(example_incorrects)
print("{0} : {1}".format(r, data_test.loc[r,'review']))
| notebooks/model_logistic_regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Simple tutorial for understanding uncertainty part
# * This notebook file is not intended to execute. Please run the python files (active_DATE.py or further version) instead of running the blocks in this notebook.
# ## 1. active_DATE.py
# In active_DATE.py, there is a parameter for the uncertainty principle : --uncertainty naive or --uncertainty self-supervised.
parser.add_argument('--uncertainty', type=str, default = 'naive', choices = ['naive', 'self-supervised'], help = 'Uncertainty principle : ambiguity of illicitness or self-supervised manner prediction')
# ### 1.1 Naive uncertainty principle : ambiguity of illicitness (--uncertainty naive)
# In each sampling strategies with needs of uncertainty score, the function get_uncertainty() will do this :
def get_uncertainty(self):
    """Return per-sample uncertainty scores (naive mode).

    Maps the model's illicitness probability p to 1 - 1.8*|p - 0.5|:
    maximal uncertainty (1.0) at p = 0.5, minimal (0.1) at p in {0, 1}.
    """
    if self.uncertainty_module is None :
        # return np.asarray(self.get_output().apply(lambda x : -1.8*abs(x-0.5) + 1)) : This line is deprecated.
        return np.asarray(-1.8*abs(self.get_output()-0.5) + 1) # This line will be worked
    # uncertainty = self.uncertainty_module.measure(self.uncertainty_module.test_data ,'feature_importance')
    # return np.asarray(uncertainty)[self.available_indices] :These two lines are for self-supervised uncertainty.
# From self.get_output(), we obtain the illicitness probability from the DATE model. Then, applying the function below, we map it to an uncertainty score between 0.1 and 1.
# +
import matplotlib.pyplot as plt
import numpy as np
r = np.arange(0, 1, 0.01)
x = [a for a in r]
y = [-1.8*abs(b-0.5) + 1 for b in r]
plt.plot(x, y)
plt.xlabel('illicitness probability')
plt.ylabel('uncertainty')
plt.show()
# -
# This is reasonable because the illicitness probability that is close to 0.5 is ambiguous to determine illicit or not from our model's point of view. Otherwise, if the illicitness probability is in both ends, then the illicitness is confidently illicit, or not.
# ### 1.2 Self-supervised uncertainty principle : prediction of features with risky profiling (--uncertainty self-supervised)
# #### 1.2.1. Initialize the uncertainty module
# In active_DATE.py, the uncertainty module will be generated if the sampling strategy needs it. With the training data, the uncertainty module trains the masked-feature predictors.
if unc_mode == 'self-supervised' :
if samp in ['badge_DATE', 'diversity', 'hybrid']:
if uncertainty_module is None :
uncertainty_module = uncertainty.Uncertainty(train_labeled_data, './uncertainty_models/')
uncertainty_module.train()
uncertainty_module.test_data = test_data
class Uncertainty :
    """Self-supervised uncertainty module.

    Trains one masked-feature predictor per column (classifier for
    categorical, regressor for numerical), plus an importance
    classifier that predicts 'illicit' from all columns to supply
    feature-importance weights.
    """
    # Features to use : If we use the real data, please fit the feature names properly.
    numerical_features = ['fob.value', 'cif.value', 'total.taxes', 'gross.weight', 'quantity', 'Unitprice', 'WUnitprice', 'TaxRatio', 'FOBCIFRatio', 'TaxUnitquantity']
    categorical_features = ['RiskH.importer.id', 'RiskH.declarant.id',
                            'RiskH.HS6.Origin', 'RiskH.tariff.code', 'RiskH.HS6',
                            'RiskH.HS4', 'RiskH.HS2', 'RiskH.office.id']
    column_to_use_unc_measure = numerical_features + categorical_features

    def __init__(self, labeled_data, path = './uncertainty_models/') :
        self.classifiers = dict()          # per categorical feature
        self.regressors = dict()           # per numerical feature
        self.data = pd.DataFrame(labeled_data)   # labeled pool used for training
        self.importance_classifier = None  # fitted lazily in train()
        self.test_data = None              # set externally before measure()
        self.path = path                   # directory for saved model files
# +
# Initial training with training data
def train(self) :
    """Fit one masked-feature predictor per column.

    For every feature c an XGBoost model learns to predict c from all
    remaining columns; an extra classifier learns 'illicit' from all
    columns for feature-importance weighting.  Every fitted model is
    also saved under self.path.
    """
    for cc in self.categorical_features :
        print('Train for '+cc)
        # Mask out the target column and predict it from the rest.
        columns = [col for col in self.column_to_use_unc_measure if col != cc]
        train_set = pd.DataFrame(self.data, columns = columns)
        xgb_clf = XGBClassifier(n_jobs=-1)
        xgb_clf.fit(train_set ,self.data[cc].values)
        self.classifiers[cc] = xgb_clf
        xgb_clf.save_model(self.path + cc + '.model')
    for nc in self.numerical_features :
        print('Train for '+nc)
        columns = [col for col in self.column_to_use_unc_measure if col != nc]
        train_set = pd.DataFrame(self.data, columns = columns)
        xgb_reg = XGBRegressor(n_jobs=-1)
        xgb_reg.fit(train_set, self.data[nc].values)
        self.regressors[nc] = xgb_reg
        xgb_reg.save_model(self.path + nc + '.model')
    self.importance_classifier = XGBClassifier(n_jobs=-1)
    self.importance_classifier.fit(pd.DataFrame(self.data, columns=self.column_to_use_unc_measure), pd.DataFrame(self.data, columns=['illicit']).values.ravel())
    self.importance_classifier.save_model(self.path + 'imp' + '.model')
# -
# #### 1.2.2. Uncertainty measurement
# For sampling strategies that need an uncertainty measurement, the uncertainty score is provided by the get_uncertainty function. The uncertainty score computed here is then used in the query function of those sampling strategies.
def get_uncertainty(self):
    """Self-supervised variant: delegate scoring to the uncertainty
    module and keep only the still-available pool indices.

    NOTE(review): with the naive line commented out the `if` body is
    empty — as written this snippet is illustrative only (the tutorial
    says it is not meant to be executed).
    """
    if self.uncertainty_module is None :
        # return np.asarray(self.get_output().apply(lambda x : -1.8*abs(x-0.5) + 1)) : This line is deprecated.
        # return np.asarray(-1.8*abs(self.get_output()-0.5) + 1) : This line is for naive uncertainty.
    uncertainty = self.uncertainty_module.measure(self.uncertainty_module.test_data ,'feature_importance')
    return np.asarray(uncertainty)[self.available_indices]
# Measure the uncertainty of given test data from uncertainty module
def measure(self, test_data, option) :
    """Score each row of *test_data* per feature in [0.1, 1], then aggregate.

    Categorical features: XOR of actual value and classifier prediction,
    rescaled by 0.9*x + 0.1 (assumes the RiskH columns are 0/1 flags —
    TODO confirm); categories unseen in training get the maximal score 1.
    Numerical features: relative absolute regression error, clipped to
    [0, 1], rescaled to [0.1, 1].

    option: 'naive' -> unweighted mean over features;
            'feature_importance' -> weighted by the illicitness
            classifier's normalized feature importances.
    """
    print('Uncertainty measure')
    unc = pd.DataFrame()
    for cc in self.categorical_features :
        print('Uncertainty measure : '+cc)
        columns = [col for col in self.column_to_use_unc_measure if col != cc]
        test_set = pd.DataFrame(test_data, columns = columns)
        xgb_clf_pred = self.classifiers[cc].predict(test_set)
        # Nonzero exactly where the prediction differs from the actual value.
        unc['unc.'+cc] = np.bitwise_xor(test_data[cc], xgb_clf_pred.tolist())
        unc['unc.'+cc] = unc['unc.'+cc].apply(lambda x : 0.9*x + 0.1)
        # cc[6:] strips the 'RiskH.' prefix to reach the raw column name;
        # unseen categories are maximally uncertain.
        for idx, cat in enumerate(test_data[cc[6:]]) :
            if cat not in set(self.data[cc[6:]]) :
                unc['unc.'+cc][idx] = 1
    for nc in self.numerical_features :
        print('Uncertainty measure : '+nc)
        columns = [col for col in self.column_to_use_unc_measure if col != nc]
        test_set = pd.DataFrame(test_data, columns = columns)
        xgb_reg_pred = self.regressors[nc].predict(test_set)
        # Relative absolute prediction error, capped at 1.
        unc['unc.'+nc] = abs(test_data[nc] - xgb_reg_pred.tolist()) / test_data[nc]
        unc['unc.'+nc] = np.clip(np.asarray(unc['unc.'+nc]), 0, 1)
        unc['unc.'+nc] = unc['unc.'+nc].apply(lambda x : 0.9*x + 0.1)
    if option == 'naive' :
        # Model 1 : Naive equally-contributing uncertainty (mean)
        return unc.mean(axis=1)
    elif option == 'feature_importance' :
        # Model 2 : Feature importance from illicitness
        return unc.dot(self.importance_classifier.feature_importances_ / sum(self.importance_classifier.feature_importances_))
# #### 1.2.3. Retrain after query the samples
# After our sampling strategy select the samples to query, then we can retrain the model to reflect selected samples.
# tune the uncertainty
if unc_mode == 'self-supervised' :
if samp in ['badge_DATE', 'diversity', 'hybrid']:
uncertainty_module.retrain(test_data.iloc[indices - offset])
# Retrain the individual predictors by using queried samples
def retrain(self, queried_samples) :
    """Incrementally refit every masked-feature predictor on the newly
    queried samples (warm-started from the boosters saved on disk) and
    fold the samples into the module's labeled pool.

    BUG FIX: the previous version called ``self.data.append(...)`` and
    discarded the result — DataFrame.append is not in-place, so the
    labeled pool never grew.  The result is now assigned back.
    """
    for cc in self.categorical_features :
        columns = [col for col in self.column_to_use_unc_measure if col != cc]
        train_set = pd.DataFrame(queried_samples, columns = columns)
        # xgb_model warm-starts training from the previously saved booster.
        self.classifiers[cc].fit(train_set, queried_samples[cc].values, xgb_model = self.path + cc +'.model')
        self.classifiers[cc].save_model(self.path + cc + '.model')
    for nc in self.numerical_features :
        columns = [col for col in self.column_to_use_unc_measure if col != nc]
        train_set = pd.DataFrame(queried_samples, columns = columns)
        self.regressors[nc].fit(train_set, queried_samples[nc].values, xgb_model = self.path + nc+'.model')
        self.regressors[nc].save_model(self.path + nc + '.model')
    self.importance_classifier.save_model(self.path + 'imp' + '.model')
    self.data = self.data.append(pd.DataFrame(queried_samples, columns = self.column_to_use_unc_measure))
# ## 2. Notes
# * If the dataset changes from synthetic to real, then you have to change the feature names in uncertainty.py as follow:
# Features to use : If we use the real data, please fit the feature names properly.
numerical_features = ['fob.value', 'cif.value', 'total.taxes', 'gross.weight', 'quantity', 'Unitprice', 'WUnitprice', 'TaxRatio', 'FOBCIFRatio', 'TaxUnitquantity']
categorical_features = ['RiskH.importer.id', 'RiskH.declarant.id',
'RiskH.HS6.Origin', 'RiskH.tariff.code', 'RiskH.HS6',
'RiskH.HS4', 'RiskH.HS2', 'RiskH.office.id']
column_to_use_unc_measure = numerical_features + categorical_features
| query_strategies/Tutorial for understanding uncertainty.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="gVCBcTq_amTZ"
# # Solve Burgers' PDE example using Physics-Informed Neural Nets (PINNs)
# + id="NClHRw98ggSY"
import numpy as np
import matplotlib.pyplot as plt
import torch
from torch import nn, optim, autograd
# + id="WBtRF6sPgtLA"
class BurgersPINN(nn.Module):
    """Physics-informed neural nets (PINNS) to solve Burgers' equation."""

    def __init__(self,
                 nn_arch=[64, 32, 16, 8],
                 activation_fcn=torch.nn.Tanh,
                 misfit_fcn=torch.nn.MSELoss(),
                 domain=(-1, 1, 0, 1),
                 n_initcond_pts=128,
                 n_boundary_pts=128,
                 n_collocation_pts=1024,
                 device=None):
        """Args:
          nn_arch - number of nodes for each hidden layer of the dense NN
          activation_fcn - nn.Module nonlinearity class applied to the output
              of hidden layer nodes (not applied to output layer)
          misfit_fcn - training loss/objective function
          domain - tuple of dimensions of the simulation domain with
              format (x_lb, x_ub, t_start, t_end)
          n_initcond_pts - number of constraint boundary points
          n_boundary_pts - number of boundary points
          n_collocation_pts - number of points where to test and ensure
              that the trained PINN solves the PDE
          device - compute device; defaults to "cuda:0" when available or
              "cpu" otherwise
        """
        super().__init__()
        self._domain = domain
        self._nn_arch = nn_arch
        self._activation_fcn = activation_fcn
        self._misfit_fcn = misfit_fcn
        self._n_initcond_pts = n_initcond_pts
        self._n_boundary_pts = n_boundary_pts
        self._n_collocation_pts = n_collocation_pts
        if device:
            self._device = device
        else:
            self._device = "cuda:0" if torch.cuda.is_available() else "cpu"
        # initialize neural net representation of domain
        self._layers = []
        n_inputs = 2  # network input is the (x, t) coordinate pair
        # .. add hidden layers
        for i, n_nodes in enumerate(nn_arch):
            self._layers.append(nn.Linear(n_inputs, n_nodes))
            self._layers.append(activation_fcn())
            n_inputs = n_nodes
        # .. add output layer (scalar u)
        self._layers.append(nn.Linear(n_inputs, 1))
        # .. combine layers (nn.Sequential registers them as submodules,
        #    so their parameters are picked up by self.parameters())
        self._model = nn.Sequential(*self._layers)
        self.to(self._device)

    def forward(self, x, t):
        """Evaluate u(x, t); x and t are (n, 1) column tensors."""
        return self._model(torch.cat([x, t], axis=1))

    def pde(self, x, t):
        """Burgers' PDE:
        f(x,t|u) = du/dt + u*(du/dx) - (0.01/pi)(d^2u/dx^2) = 0.
        """
        u = self(x, t)
        # Autodiff derivatives w.r.t. the input coordinates; create_graph
        # keeps the graph so the residual itself can be backpropagated.
        u_t = autograd.grad(u.sum(), t, create_graph=True)[0]
        u_x = autograd.grad(u.sum(), x, create_graph=True)[0]
        u_xx = autograd.grad(u_x.sum(), x, create_graph=True)[0]
        #pdb.set_trace()
        return u_t + u*u_x - (0.01/np.pi)*u_xx

    def boundary_points(self):
        """Returns a batch of boundary points for training the PINN.
        Points are returned as a tuple (x, t, u).
        """
        # boundary condition points, i.e., u(x,t), x={-1, 1}
        # (each point lands on the lower or upper x-boundary with prob. 0.5)
        x = np.where(np.random.rand(self._n_boundary_pts, 1) < 0.5,
                     self._domain[0], self._domain[1])
        t = np.random.uniform(low=self._domain[2], high=self._domain[3],
                              size=(self._n_boundary_pts, 1))
        u = np.zeros_like(x)  # homogeneous Dirichlet BC: u = 0 on the boundary
        return (self.numpy_to_tensor(x),
                self.numpy_to_tensor(t),
                self.numpy_to_tensor(u))

    def initial_condition_points(self, ic_func=None):
        """Returns a batch of boundary points for training the PINN.
        Points are returned as a tuple (x, t, u).
        """
        if not ic_func:
            # default initial condition: u(x, 0) = -sin(pi x)
            ic_func = lambda x: -np.sin(np.pi * x)
        # initial condition points, i.e., u(x,t=0)
        x = np.random.uniform(low=self._domain[0], high=self._domain[1],
                              size=(self._n_initcond_pts, 1))
        t = np.full_like(x, self._domain[2])
        u = ic_func(x)
        return (self.numpy_to_tensor(x),
                self.numpy_to_tensor(t),
                self.numpy_to_tensor(u))

    def collocation_points(self):
        """Returns a batch of random collocation points."""
        # randomly sample locations in the (space x time) domain;
        # gradients w.r.t. x and t are needed for the PDE residual.
        x = np.random.uniform(low=self._domain[0], high=self._domain[1],
                              size=(self._n_collocation_pts, 1))
        t = np.random.rand(self._n_collocation_pts, 1)
        x = self.numpy_to_tensor(x, grad=True)
        t = self.numpy_to_tensor(t, grad=True)
        return x, t

    def get_loss(self, mse_ic, mse_bc, mse_pde):
        """Returns combined loss and dict of individual component loss values."""
        losses = {'mse_ic': mse_ic.item(),
                  'mse_bc': mse_bc.item(),
                  'mse_pde': mse_pde.item()}
        return mse_ic + mse_bc + mse_pde, losses

    def train(self, learning_rate=1e-3, n_iter=1000, disp_iter=100,
              resample_initcond_pts=False,
              resample_boundary_pts=False,
              resample_collocation_points=True):
        """Adam-optimize the PINN on the sum of IC, BC and PDE-residual
        MSE losses.

        NOTE(review): this overrides nn.Module.train(mode), so calling
        .train()/.eval() to toggle training mode no longer works on this
        module — confirm this naming is intentional.
        """
        optimizer = optim.Adam(self.parameters(), lr=learning_rate, amsgrad=True)
        # PDE residual target is identically zero at collocation points.
        f_vals = self.numpy_to_tensor(np.zeros((self._n_collocation_pts, 1)))
        for it in range(n_iter):
            # optionally redraw each point set every iteration
            if it==0 or resample_initcond_pts:
                xic, tic, uic = self.initial_condition_points()
            if it==0 or resample_boundary_pts:
                xbc, tbc, ubc = self.boundary_points()
            if it==0 or resample_collocation_points:
                x_col, t_col = self.collocation_points()
            # loss on initial condition points
            pred_uic = self(xic, tic)
            mse_ic = self._misfit_fcn(uic, pred_uic)
            # loss on boundary points
            pred_ubc = self(xbc, tbc)
            mse_bc = self._misfit_fcn(ubc, pred_ubc)
            # loss on collocation points
            pred_f = self.pde(x_col, t_col)
            mse_pde = self._misfit_fcn(f_vals, pred_f)
            # combined loss
            loss, losses = self.get_loss(mse_ic, mse_bc, mse_pde)
            # backprop + gradient step
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if it % disp_iter == 0:
                print(f'[{it:5d}]',
                      ' '.join([f'{ln}={lv:.5f},' for ln, lv in losses.items()]),
                      f'loss={loss:.5f}')

    def numpy_to_tensor(self, x, grad=False):
        """Convert a NumPy array to a float32 tensor on the module's
        device, optionally tracking gradients w.r.t. it."""
        return torch.from_numpy(x).float().requires_grad_(grad).to(self._device)
# + colab={"base_uri": "https://localhost:8080/"} id="BV52e2x2jQvn" outputId="af8924dd-6835-4940-cfff-601ce99048c5"
# Seed both RNGs so point sampling and weight init are reproducible.
np.random.seed(1)
torch.manual_seed(1)
pinn = BurgersPINN()
print(pinn)
pinn.train(learning_rate=0.001, n_iter=14001, disp_iter=200)
# + colab={"base_uri": "https://localhost:8080/", "height": 364} id="LCZMotBd3qup" outputId="2119d351-de62-480e-91c2-ebcdf04950d7"
xg, tg = np.meshgrid(np.linspace(-1, 1, 201), np.linspace(0, 1, 101))
with torch.no_grad():
u = pinn(pinn.numpy_to_tensor(xg.reshape((-1, 1))),
pinn.numpy_to_tensor(tg.reshape((-1, 1))))
u = u.cpu().numpy().reshape(xg.shape)
fix, axs = plt.subplots(ncols=3, figsize=(12, 3), dpi=120)
# verify representation of initial condition
axs[0].plot(xg[0], -np.sin(np.pi*xg[0]), label='input training IC')
axs[0].plot(xg[0], u[0], '--', label='PINN IC representation')
axs[0].set_xlabel('x')
axs[0].set_title('u(x, t=0)')
axs[0].legend()
# verify accuracy of boundary conditions
axs[1].axhline(y=0, label='boundary condition')
axs[1].plot(tg[:, 0], u[:, 0], '--', label='PINN boundary representation')
axs[1].set_xlabel('t')
axs[1].set_title(r'u(x=-1, t)')
axs[1].legend()
axs[2].axhline(y=0, label='boundary condition')
axs[2].plot(tg[:, -1], u[:, -1], '--', label='PINN boundary representation')
axs[2].set_xlabel('t')
axs[2].set_title(r'u(x=+1, t)')
axs[2].legend()
plt.tight_layout()
# + colab={"base_uri": "https://localhost:8080/", "height": 364} id="oGfBrNXD2zYE" outputId="c0a72c47-0b1a-4cea-f276-0974b23ecbba"
plt.figure(figsize=(9, 3), dpi=120)
plt.imshow(u.T,
cmap='rainbow',
interpolation='nearest',
extent=[0, 1, -1, 1],
origin='lower',
aspect='auto')
plt.xlabel('t')
plt.ylabel('x')
plt.title('u(x,t)')
plt.colorbar()
plt.tight_layout();
# + colab={"base_uri": "https://localhost:8080/", "height": 278} id="qNU3WkeUpb4Q" outputId="07bc0f38-cb27-4738-d604-458358d32e60"
t_vals = [0.1, 0.2, 0.4, 0.6]
fix, axs = plt.subplots(ncols=len(t_vals), figsize=(3*len(t_vals), 3), dpi=90)
for t, ax in zip(t_vals, axs):
xg = np.linspace(-1, 1, 201)
with torch.no_grad():
u = pinn(pinn.numpy_to_tensor(xg.reshape((-1, 1))),
pinn.numpy_to_tensor(np.full((len(xg), 1), t)))
ax.plot(xg, u.numpy())
ax.set_xlabel('x')
ax.set_title(f'u(x, t={t})')
plt.tight_layout()
| PINNs_BurgersPDE.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="0jq2-HZ0zpQr"
# # Data Structures
# + [markdown] id="kMGshyL5EoVo"
# ## Stack
#
# + [markdown] id="RRlRNa2x2TX-"
# ###### Stack From Dynamic Array
# + id="-v2dDHV_Elsk"
class d_Stack:
    """LIFO stack backed by a Python list (dynamic array).

    The end of the list is the top of the stack, so push/pop are
    amortized O(1).
    """

    def __init__(self):
        self.stack = []

    def push(self, item):
        """Place *item* on top of the stack."""
        self.stack.append(item)

    def pop(self):
        """Remove and return the top item, or None when empty."""
        if self.stack:
            return self.stack.pop()

    def peek(self):
        """Return the top item without removing it, or None when empty."""
        if self.stack:
            return self.stack[-1]
# + colab={"base_uri": "https://localhost:8080/"} id="Icic6PtTQDKP" outputId="66b0f3e5-55ca-4874-93a4-f632d924fb37"
# Quick sanity check: list.pop() removes and returns the LAST element,
# which is why the dynamic-array stack above uses it for O(1) pops.
f = [1,2,3]
g = f.pop()
g
# + [markdown] id="93USwnm_2X4x"
# ###### Stack From Linked List
# + id="y3QP02HC2bDQ"
class Stack:
    """Stack backed by a singly linked list.

    Bug fixes versus the original:
      * push() created the new node but never updated self.top, so the
        stack stayed empty forever; it now links the node in as the top.
      * peek() read self.top.data, but the node classes in this file
        store the payload in .value.
    The previously required (and unused) ``value``/``next`` constructor
    parameters are kept for backward compatibility but default to None.
    """
    def __init__(self, value=None, next=None):
        # Top node of the stack; None means the stack is empty.
        self.top = None
    def push(self, item):
        """Push item onto the stack in O(1)."""
        new_node = LinkedListNode(item)  # node type defined elsewhere in this file
        new_node.next = self.top
        self.top = new_node  # bug fix: actually make the new node the top
    def pop(self):
        """Remove and return the top value, or None if the stack is empty."""
        if self.top is not None:
            popped_node = self.top
            self.top = popped_node.next
            return popped_node.value
    def peek(self):
        """Return the top value without removing it, or None if empty."""
        if self.top is not None:
            return self.top.value  # bug fix: nodes store .value, not .data
# + [markdown] id="yjU4RdQ3ErJ2"
# ## Queue
# + [markdown] id="DhDsIDvS2d7A"
# ###### Queue From Dynamic Array
# + id="_8iuGqemEs9W"
class d_Queue:
    """FIFO queue backed by a Python list (dynamic array).

    Bug fixes versus the original:
      * enqueue() only appended when the queue was already non-empty
        (printing 'no queue' otherwise), so nothing could ever be added
        to an empty queue; it now always appends.
      * dequeue() popped the front element but discarded it; it now
        returns the value (None when the queue is empty).

    Note: list.pop(0) is O(n); collections.deque would make dequeue O(1),
    but the list is kept to preserve the public ``queue`` attribute.
    """
    def __init__(self):
        # Front of the queue is index 0; back is the end of the list.
        self.queue = []
    def enqueue(self, item):
        """Append item to the back of the queue."""
        self.queue.append(item)
    def dequeue(self):
        """Remove and return the front item, or None if the queue is empty."""
        if self.queue:
            return self.queue.pop(0)
# + colab={"base_uri": "https://localhost:8080/"} id="TDox5irrA7VU" outputId="71773da6-309b-40d4-887f-91560a8c0881"
# Sanity check: list.pop(0) removes and returns the FIRST element
# (the dequeue operation of the dynamic-array queue above).
t = [1,2,3]
f = t.pop(0)
f
t
# + [markdown] id="Q20690lz2iz2"
# ###### Queue From Linked List
# + id="FtZtP6fW904M"
class Queue:
    """FIFO queue backed by a singly linked list with front/rear pointers.

    Bug fixes versus the original:
      * enqueue() referenced an undefined ``new_node``; it now builds the
        node from the item using s_ListNode (defined elsewhere in this file).
      * dequeue() returned the raw node object; it now returns the stored
        value (None when the queue is empty), matching the stack API.
    """
    def __init__(self):
        self.front = None  # node to dequeue next
        self.rear = None   # node most recently enqueued
    def enqueue(self, item):
        """Append item to the back of the queue in O(1)."""
        new_node = s_ListNode(item)  # bug fix: node was never created
        if self.rear is None:
            # Empty queue: the single node is both front and rear.
            self.front = new_node
            self.rear = new_node
        else:
            self.rear.next = new_node
            self.rear = new_node
    def dequeue(self):
        """Remove and return the front value, or None if the queue is empty."""
        if self.front is not None:
            old_front = self.front
            self.front = old_front.next
            if self.front is None:
                self.rear = None  # queue became empty
            return old_front.value
# + [markdown] id="_Ft_6HnXEtV-"
# ## Linked List
# + [markdown] id="yRR3lLX2FKHn"
# #### Singly Linked
#
# + id="AqW5yo-jEv_U"
class s_ListNode:
    """Node of a singly linked list: a payload plus a link to its successor."""
    def __init__(self, value):
        self.value = value  # payload stored in this node
        self.next = None    # successor node; None marks the end of the list
# + [markdown] id="DCQFyO_EFWnd"
# #### Doubly Linked
# + id="FvMdET6uFYT9"
class d_ListNode:
    """Node of a doubly linked list: payload plus links in both directions."""
    def __init__(self, value):
        self.value = value  # payload stored in this node
        self.next = None    # successor node (None = tail)
        self.last = None    # predecessor node (None = head)
# + [markdown] id="Y1vOMI_IFiID"
# #### Circularly Linked
# + id="hjZfQa5lFkLj"
class c_ListNode:
    """Node for a circularly linked list.

    Structurally identical to a singly linked node; the "circular" property
    comes from the caller wiring the tail's next back to the head.  A
    dedicated linking helper could enforce that invariant later.
    """
    def __init__(self, value):
        self.value = value  # payload stored in this node
        self.next = None    # successor; caller closes the circle
# + [markdown] id="zWnJu6CFEwU1"
# ## Binary Trees
# + id="awROaFHAIshw"
class BinaryTree:
    """Plain binary tree node: a value and two child links."""
    def __init__(self, value):
        self.value = value  # payload at this node
        self.left = None    # left child (None = absent)
        self.right = None   # right child (None = absent)
# + [markdown] id="ZN87P_hEEyjm"
# #### Binary Search Tree
#
# Because of unique structure where all values lesser than the root node are placed on the left side and all values greater than or equal to the root node are placed on the right side, searching the tree for a certain value becomes somewhat easy.
#
# If you always insert starting from the root, the tree should stay a valid BST.
# + colab={"base_uri": "https://localhost:8080/"} id="7KhAaf0-EyTn" outputId="c3f0ef6a-aa6a-4371-ddfa-4cf1a90f200c"
class BinarySearchTree:
    """Binary search tree node.

    Invariant: values smaller than this node's value live in the left
    subtree; values greater than or equal to it live in the right subtree.
    """
    def __init__(self, value):
        self.value = value  # this node's payload
        self.left = None    # subtree of strictly smaller values
        self.right = None   # subtree of greater-or-equal values
    def insert(self, value):
        """Insert value into the subtree rooted at this node."""
        if value < self.value:
            # Smaller values go left: attach here or recurse down.
            if self.left is None:
                self.left = BinarySearchTree(value)
            else:
                self.left.insert(value)
        else:
            # value >= self.value goes right (duplicates land on the right).
            if self.right is None:
                self.right = BinarySearchTree(value)
            else:
                self.right.insert(value)
    def search(self, target):
        """Return the node holding target, or False if it is absent."""
        if target == self.value:
            return self
        # Pick the only subtree that can contain target, per the invariant.
        subtree = self.left if target < self.value else self.right
        if subtree is None:
            return False
        return subtree.search(target)
# + colab={"base_uri": "https://localhost:8080/"} id="MKkco59hSiGF" outputId="c171a54f-d4ef-4d22-c0db-d4673def075c"
arr = [1, 3, 5, 2, 10, 8] ## So it only works if the first value is the middle value to make true BST
# Build the BST by inserting in array order; arr[0] (here 1, the minimum)
# becomes the root, so the whole tree degenerates to the right side.
for i in range(len(arr)):
    if i == 0:
        btree = BinarySearchTree(arr[i])
    else:
        btree.insert(arr[i])
# Walk down the right spine to inspect where the larger values landed.
print(btree.right.value)
print(btree.right.left.value)
print(btree.right.right.value)
print(btree.right.right.right.value)
print(btree.right.right.right.left.value)
# + [markdown] id="4uaYUWl-0LR-"
# ###### Binary Search
# + id="Puchctlc0Oo1"
## Binary search on an array
def binary_search(lst, target):  # Assumes `lst` is sorted ascending
    """Return the index of target in sorted list lst, or None if absent.

    Fixes versus the original:
      * the recursive call's result was discarded, so any match found
        after the first split returned None;
      * recursing on a slice made the found index relative to the slice,
        not to the caller's list.
    This version searches iteratively over [lo, hi) bounds: O(log n),
    no copying, and the returned index always refers to ``lst``.
    """
    lo, hi = 0, len(lst)
    while lo < hi:
        mid = (lo + hi) // 2
        if lst[mid] == target:
            return mid
        if lst[mid] > target:
            hi = mid       # target, if present, is left of mid
        else:
            lo = mid + 1   # target, if present, is right of mid
    return None
# Demo call; the notebook cell displays the result.
a = binary_search([1,2,4,5,6], 5)
a
# + [markdown] id="nizxPC2zH6qs"
# ### Tree Traversals
# + [markdown] id="y13v5pobKCSq"
# ###### Depth First Search
# + id="ujay8QpzH-Gm"
# + [markdown] id="Q4DfUnqbKGv3"
# ###### Breadth First Search
# + id="V2rEHYibKJow"
# + [markdown] id="6pwQgNmQKKSO"
# ###### InOrder Traversal
# + id="fJ1q8lsfKQUG"
def inorder_traversal(root):
    """Iterative in-order (left, node, right) traversal.

    Returns the list of each visited node's ``val`` field; an empty tree
    yields [].
    """
    stack, result = [], []
    current = root
    while stack or current is not None:
        # Walk as far left as possible, stacking the ancestors.
        while current is not None:
            stack.append(current)
            current = current.left
        # Visit the deepest unvisited node, then turn to its right subtree.
        node = stack.pop()
        result.append(node.val)
        current = node.right
    return result
# + [markdown] id="ley95iuTKQ2y"
# ###### PreOrder Traversal
# + id="KV-mlkgzKS78"
def preorder_traversal(root):
    """Iterative pre-order (node, left, right) traversal.

    Returns the list of each visited node's ``val`` field; an empty tree
    yields [].  The original was left unfinished: its inner ``while root``
    loop never advanced ``root``, so it appended the same node forever.
    This is the standard explicit-stack pre-order algorithm.
    """
    if root is None:
        return []
    s = [root]
    result = []
    while s:
        node = s.pop()
        result.append(node.val)
        # Push right first so the left child is processed first (LIFO).
        if node.right is not None:
            s.append(node.right)
        if node.left is not None:
            s.append(node.left)
    return result
# + [markdown] id="IvgbGG3mKUqY"
# ###### PostOrder Traversal
# + id="2salmqdKKWus"
# + [markdown] id="JcPhvMno6g5G"
# ###### MaxDepth
# + id="AvzDYdTS6jXY"
def maxDepth(root):
    """Iteratively compute the maximum depth of a binary tree (root depth = 1).

    Returns 0 for an empty tree.  Uses a DFS work stack of (depth, node)
    pairs; None children are pushed and skipped on pop, mirroring the
    original control flow.
    """
    depth = 0
    work = [] if root is None else [(1, root)]
    while work:
        level, node = work.pop()
        if node is None:
            continue
        depth = max(depth, level)
        work.append((level + 1, node.left))
        work.append((level + 1, node.right))
    return depth
# + [markdown] id="bDprd79bE0b2"
# ## Heap
# + id="MHp6TRPkE1qv"
# + [markdown] id="4jyyj8PWE17v"
# ## Hash Table
# + id="toAwn9NSE4yn"
class HashTableEntry:
    """Entry in a chained hash table: a (key, value) pair plus a chain link.

    Bug fix versus the original: ``next`` was initialized to the
    NotImplementedError *class* — a truthy object that would break any
    ``while entry.next`` chain walk.  It is now None, the conventional
    chain terminator.  (The stray ``self. value`` spacing is also cleaned up.)
    """
    def __init__(self, key, value):
        self.key = key
        self.value = value
        # Next entry in this bucket's collision chain; None ends the chain.
        self.next = None
# + [markdown] id="4CM5WDUOE5EH"
# ## Graph
# + id="CJSUr-ShE61V"
# Makes enough sense, need to play around with this and do some traversals
class Node:
    """Graph node: a payload plus a mapping of connected nodes to edge weights."""
    def __init__(self, value):
        self.value = value
        self.connections = {}  # neighbor -> edge weight
class Vertex:
    """Graph vertex whose weighted edges live in a neighbor -> weight dict."""
    def __init__(self, value):
        self.value = value
        self.connections = {}  # neighbor vertex -> edge weight
    def add_connections(self, vert, weight=0):
        """Create (or overwrite) a directed edge to ``vert`` with ``weight``."""
        self.connections[vert] = weight
    def get_connections(self):
        """Return a view of the vertices this vertex points at."""
        return self.connections.keys()
    def get_value(self):
        """Return this vertex's payload."""
        return self.value
    def get_weights(self, vert):
        """Return the weight of the edge to ``vert`` (KeyError if absent)."""
        return self.connections[vert]
class Graph:
    """Adjacency-map graph of Vertex objects keyed by their value.

    Bug fixes versus the original:
      * __init__ tried to *return* a membership test (``__init__`` must
        return None, so every ``Graph(...)`` call raised TypeError) and
        never created ``self.vertices`` / ``self.count``; it now
        initializes both, and the membership test is exposed as contains().
      * iter() referenced ``self.vertices.values`` without calling it;
        it now iterates the Vertex objects.
    The old ``vert`` constructor parameter is kept (defaulted) for
    backward compatibility.
    """
    def __init__(self, vert=None):
        self.vertices = {}  # value -> Vertex
        self.count = 0      # number of vertices added so far
    def contains(self, vert):
        """Return True if a vertex with this value is in the graph."""
        return vert in self.vertices
    def iter(self):
        """Iterate over the Vertex objects in the graph."""
        return iter(self.vertices.values())
    def add_vertex(self, value):
        """Create, register, and return a new Vertex for ``value``."""
        self.count += 1
        new_vert = Vertex(value)
        self.vertices[value] = new_vert
        return new_vert
    def get_vertices(self):
        """Return the values (keys) of all vertices."""
        return self.vertices.keys()
# + [markdown] id="TRP-rd62zv97"
# # Sorting Algorithms
# + id="IKHUS0eYzyI_"
| .ipynb_checkpoints/Data_Structures_&_Sorting_Algorithms-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# import necessary modules
# please run this everytime you open it
import pandas as pd
import numpy as np
# NOTE(review): "import matplotlib as plt" aliases the package, not pyplot —
# confirm this is intended (pyplot calls would need matplotlib.pyplot).
import matplotlib as plt
import re
import os
import warnings
warnings.filterwarnings('ignore')
# import krippendorff.py
# The %run magics below load alpha(), to_reliability*, select, to_int, etc.
# %run krippendorff.py
# %run Validation.ipynb
# +
# tested!
# to_reliability("Specialists data", "Covid_Evidencev1-2020-10-04T1836-DataHunt.csv", "cl", "T1.Q11")
# +
# tested!
# compute_alpha("Specialists data",
# "Covid_Evidencev1-2020-10-04T1836-DataHunt.csv", "cl",
# "T1.Q11",
# "ordinal", 6)
# +
# tested!
# decompose_question("T1.Q11")
# +
# tested!
# get_question_type("Argument relevance", "T1.Q3")
# -
# **get_measurement**
# Helpers used below (get_question_types, calculate_alpha, to_reliability_mc,
# alpha) come from krippendorff.py / Validation.ipynb loaded via %run above.
get_question_types("Specialists DataHuntByCase")
# **the ULTIMATE alpha calculator for Specialists tasks**
calculate_alpha("Specialists DataHuntByCase", "Specialists output")
# **runtime warning**
# Inspect which question labels appear in the Probability task export.
df = pd.read_csv("Specialists DataHuntByCase/Covid_Probabilityv1-2020-12-02T0750-DataHuntByCase.csv")
df["question_label"].unique()
# Build a rater-by-task reliability matrix for question T1.Q6 and score
# inter-rater agreement with Krippendorff's alpha (nominal level).
df = pd.read_csv("Specialists DataHuntByCase/Covid_Probabilityv1-2020-12-02T0750-DataHuntByCase.csv")
df = df[df["question_label"] == "T1.Q6"]
cols = ["quiz_task_uuid", "contributor_uuid", "question_label", "answer_label"]
df = df.reindex(columns=cols)
array = to_reliability_mc(df)
alpha(reliability_data=array, level_of_measurement="nominal")
# Might need some future analysis.
# **cl question demo**
# "cl" = checklist question: a rater may select several answers, so each
# answer label gets its own one-hot reliability matrix.
df = pd.read_csv("Specialists DataHuntByCase/Covid_SourceRelevancev1-2020-12-02T0751-DataHuntByCase.csv")
df = df[df["question_label"] == "T1.Q1"]
cols = ["quiz_task_uuid", "contributor_uuid", "question_label", "answer_label"]
df = df.reindex(columns=cols)
df.iloc[8:10]
# This guy selected both A1 and A2 as the answer choices, so confirmed that this is a cl question.
df.head()
# one-hot-encode the dataframe
one_hot_encode = pd.get_dummies(df, columns = ["answer_label"])
one_hot_encode[8:10]
# as an example, first select the first answer only
one_hot_encode = one_hot_encode.iloc[:, 0:4]
one_hot_encode.head()
# construct a pivot_table
# Rows = raters, columns = tasks; aggfunc keeps the first recorded response.
pivot = pd.pivot_table(one_hot_encode, values="answer_label_T1.Q1.A1",
                       index="contributor_uuid",
                       columns="quiz_task_uuid",
                       aggfunc=lambda x: list(x)[0])
# let's look at where that contributor is
pivot.loc["38b9cf3a-588d-4137-a975-af770121a925", "9d4ac736-52b7-40df-b2a9-6164fa081960"]
# This is correct because that user input a 1 for A1.
# convert to array
array = pivot.to_numpy()
array
alpha(reliability_data=array, level_of_measurement="nominal")
# **Why nan?**
# The four near-identical cells below probe pathological alpha values
# (nan, multi-valued domains, exactly 0, negative) on specific questions.
source = pd.read_csv("Specialists DataHuntByCase/Covid_Reasoningv1-2020-12-02T0750-DataHuntByCase.csv")
source = source[source["question_label"] == "T1.Q4"]
cols = ["quiz_task_uuid", "contributor_uuid", "question_label", "answer_label"]
source = source.reindex(columns=cols)
source = pd.pivot_table(data=source, values="answer_label", index="contributor_uuid",
                        columns="quiz_task_uuid", aggfunc=select)
array = source.to_numpy()
array = to_int(array)
display(array)
try:
    print(np.round(alpha(reliability_data=array, level_of_measurement="nominal"), 5))
except Exception as e:
    # alpha() raises when the reliability matrix is degenerate; show why.
    print(e)
# **More than one value in the domain?**
source = pd.read_csv("Specialists DataHuntByCase/Covid_Reasoningv1-2020-12-02T0750-DataHuntByCase.csv")
source = source[source["question_label"] == "T1.Q5"]
cols = ["quiz_task_uuid", "contributor_uuid", "question_label", "answer_label"]
source = source.reindex(columns=cols)
source = pd.pivot_table(data=source, values="answer_label", index="contributor_uuid",
                        columns="quiz_task_uuid", aggfunc=select)
array = source.to_numpy()
array = to_int(array)
display(array)
try:
    print(np.round(alpha(reliability_data=array, level_of_measurement="nominal"), 5))
except Exception as e:
    print(e)
# **Why 0?**
source = pd.read_csv("Specialists DataHuntByCase/Covid_Evidencev1-2020-12-02T0749-DataHuntByCase.csv")
source = source[source["question_label"] == "T1.Q6"]
cols = ["quiz_task_uuid", "contributor_uuid", "question_label", "answer_label"]
source = source.reindex(columns=cols)
source = pd.pivot_table(data=source, values="answer_label", index="contributor_uuid",
                        columns="quiz_task_uuid", aggfunc=select)
array = source.to_numpy()
array = to_int(array)
display(array)
try:
    print(np.round(alpha(reliability_data=array, level_of_measurement="nominal"), 5))
except Exception as e:
    print(e)
# **Why negative?**
source = pd.read_csv("Specialists DataHuntByCase/Covid_Evidencev1-2020-12-02T0749-DataHuntByCase.csv")
source = source[source["question_label"] == "T1.Q14"]
cols = ["quiz_task_uuid", "contributor_uuid", "question_label", "answer_label"]
source = source.reindex(columns=cols)
source = pd.pivot_table(data=source, values="answer_label", index="contributor_uuid",
                        columns="quiz_task_uuid", aggfunc=select)
array = source.to_numpy()
array = to_int(array)
display(array)
try:
    print(np.round(alpha(reliability_data=array, level_of_measurement="nominal"), 5))
except Exception as e:
    print(e)
# df = pd.DataFrame(array)
# df.to_excel('example2.xlsx')
# https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4974794/#:~:text=Krippendorff's%20alpha%20in%20contrast%20is,negative%20values%20indicate%20inverse%20agreement.<br>
# Krippendorff’s alpha in contrast is based on the observed disagreement corrected for disagreement expected by chance. This leads to a range of −1 to 1 for both measures, where 1 indicates perfect agreement, 0 indicates no agreement beyond chance and negative values indicate inverse agreement.
| Test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import numpy as np
import matplotlib.pyplot as plt
# +
#from taking n purest pixels
# .item() unwraps the 0-d object array np.load returns for a pickled dict.
# (This is a Python 2 notebook — see the print statements below.)
purest_pixels_dict = np.load('./purest_pixels_dict.npy').item()
#from subtracting out signal from background
pure_signal_dict = np.load('./pure_signal_dict.npy').item()
# +
#http://evols.library.manoa.hawaii.edu/bitstream/handle/10524/35872/vol3-Poop-Que(LO).pdf#page=11
#Olivenza: Took average for LL chondrite
#'H' has about 25-30% total Iron (with over half in metallic form =>strongly magnetic)
#L contains 20-25% (with 5-10% in uncombined metal state)
#'LL' contain 19-22% iron (with only 0.3-3% metallic iron)
# Per-specimen label tables used to color/classify the PCA plots below.
# NOTE(review): 'Millbilille' is likely a misspelling of 'Millbillillie' —
# harmless as long as it is spelled identically in every table.
CLASSIFICATION_PER_SPECIMEN = {'Abee':'EH4', 'Acapulco':'Acapulcoite', 'Allende':'CV3','Brownsfield':'H3.7',
                               'Estacado':'H6', 'Estacado2':'H6', 'Gibeon':'IronIva', 'Hessle':'H5',
                               'Holbrook':"L/LL6", 'Homestead':'L5','Homestead2':'L5','Millbilille':'Eucrite-mmict',
                               'Olivenza':"LL5", 'Peekshill':'H6',
                               'PutnamCounty':'IronIva', 'Soku':'LL4', 'Steinbach1':'IronIva', 'Steinbach2':'IronIva',
                               'Sutton':'H5','Toluca1':'IronIAB-sLL', 'Toluca2':'IronIAB-sLL', 'Toluca3':'IronIAB-sLL',
                               'TolucaBig':'IronIAB-sLL'}
# Mixed-type values: an iron percentage where known, else a letter category.
IRON_CATEGORY_PER_SPECIMEN = {'Abee':32.52, 'Acapulco':27.5, 'Allende':23.85,'Brownsfield':'H','Estacado':27.88,
                              'Estacado2':27.88, 'Gibeon':91.8, 'Hessle':'H', 'Holbrook':"L/LL", 'Homestead':'L',
                              'Homestead2':'L','Millbilille':'L','Olivenza':"LL", 'Peekshill':'H',
                              'PutnamCounty':91.57, 'Soku':'LL', 'Steinbach1':'HH', 'Steinbach2':'HH', 'Sutton':'H',
                              'Toluca1':91, 'Toluca2':91, 'Toluca3':91, 'TolucaBig':91}
# Five-way letter category (HH/H/L/L/LL/LL) for every specimen.
IRON_ALL_CATEGORIZED = {'Abee':'H', 'Acapulco':'H', 'Allende':'L','Brownsfield':'H','Estacado':'H',
                        'Estacado2':'H', 'Gibeon':'HH', 'Hessle':'H', 'Holbrook':"L/LL", 'Homestead':'L',
                        'Homestead2':'L','Millbilille':'L','Olivenza':"LL", 'Peekshill':'H',
                        'PutnamCounty':'H', 'Soku':'LL', 'Steinbach1':'HH', 'Steinbach2':'HH', 'Sutton':'H',
                        'Toluca1':'HH', 'Toluca2':'HH', 'Toluca3':'HH', 'TolucaBig':'HH'}
# Collapsed two-way category.
# NOTE(review): 'Sutton' maps to the empty string here — confirm intended.
IRON_SIMPLE_CATEGORIZED = {'Abee':'L', 'Acapulco':'L', 'Allende':'L','Brownsfield':'L','Estacado':'L',
                           'Estacado2':'L', 'Gibeon':'HH', 'Hessle':'L', 'Holbrook':"L", 'Homestead':'L',
                           'Homestead2':'L','Millbilille':'L','Olivenza':"L", 'Peekshill':'L',
                           'PutnamCounty':'L', 'Soku':'L', 'Steinbach1':'HH', 'Steinbach2':'HH', 'Sutton':'',
                           'Toluca1':'HH', 'Toluca2':'HH', 'Toluca3':'HH', 'TolucaBig':'HH'}
IRON_PERCENTAGE_IF_AVAILABLE = {'Abee':32.52, 'Acapulco':27.5, 'Allende':23.85,'Estacado':27.88,
                                'Estacado2':27.88, 'Gibeon':91.8, 'PutnamCounty':91.57,'Toluca1':91,
                                'Toluca2':91, 'Toluca3':91, 'TolucaBig':91}
# Per-specimen matplotlib colors for the per-meteorite scatter plot.
COLORS = {'Abee':'darkslateblue', 'Acapulco':'green', 'Allende':'blue','Brownsfield':'yellow',
          'Estacado':'purple', 'Estacado2':'brown', 'Gibeon':'black', 'Hessle':'lime',
          'Holbrook':"orange", 'Homestead':'grey','Homestead2':'lightgreen',
          'Millbilille':'lightcoral',
          'Olivenza':"c", 'Peekshill':'cyan',
          'PutnamCounty':'pink', 'Soku':'silver', 'Steinbach1':'maroon', 'Steinbach2':'fuchsia',
          'Sutton':'lawngreen','Toluca1':'cyan', 'Toluca2':'ivory', 'Toluca3':'olive',
          'TolucaBig':'red'}
# +
#create matrix of all data samples (no classes)
# Flatten the per-specimen spectra into one matrix plus parallel label lists.
all_data = []
all_data_simplified_classes = []
all_data_standard_classes = []
simplified_classes = ['HH','L']
standard_classes = ['HH','H','L','L/LL','LL']
all_data_meteorite_classes = []
for sample in pure_signal_dict:
    # if sample == 'TolucaBig':
    #     continue
    for row in pure_signal_dict[sample]:
        all_data.append(row)
        all_data_simplified_classes.append(IRON_SIMPLE_CATEGORIZED[sample])
        all_data_standard_classes.append(IRON_ALL_CATEGORIZED[sample])
        all_data_meteorite_classes.append(sample)
print np.shape(all_data)
print np.shape(all_data_simplified_classes)
# +
# One mean spectrum per specimen (row-wise average of its pixels).
all_data_mean = []
all_data_mean_classes = []
for sample in pure_signal_dict:
    all_data_mean.append(np.mean(pure_signal_dict[sample], axis=0))
    all_data_mean_classes.append(sample)
print np.shape(all_data_mean)
# +
import itertools
# Band-center wavelengths (nm) of the six spectral channels.
band = [610,680,730,760,810,860]
def add_many_variables(spectrum):
    """Feature-engineering experiment: currently returns the raw spectrum.

    The pairwise differences/ratios below are computed but deliberately
    unused — alternatives that were tried are kept as the commented-out
    returns (see the NOTE at the bottom of the cell).
    """
    pairs = list(itertools.combinations(spectrum, 2))
    differences = [abs(b-a) for (a,b) in pairs]
    differences_squared = [abs(b-a)**2 for (a,b) in pairs]
    ratios = [float(b)/float(a) for (a,b) in pairs]
    ratios_squared = [(float(b)/float(a))**2 for (a,b) in pairs]
    return spectrum
    #return np.concatenate((spectrum,differences))
    #return np.concatenate((spectrum,differences))
    #return ratios
    #based on expected iron changes
    #return [spectrum[0],spectrum[1],spectrum[5]]
    #return np.concatenate((spectrum,differences,ratios))
    #sums
    #slopes = [(b-a)/(band[i] for i, (a,b) in enumerate(pairs)]
#NOTE: tried a bunch of stuff. Seemed like just using the spectrum itself worked best, though it would be
#nice in partiuclar to pay attention to the ratio results since this helps get rid of any noise across all channels
# +
mega_feature_array = []
def build_mega_feature_array(original_dataset):
    # Appends to the module-level mega_feature_array, so repeated calls
    # accumulate rows rather than rebuilding the matrix.
    for sample in original_dataset:
        mega_feature_array.append(add_many_variables(sample))
build_mega_feature_array(all_data)
print np.shape(all_data_mean)
print np.shape(mega_feature_array)
# +
from sklearn.decomposition import PCA as sklearnPCA
# Project the feature matrix onto its first two principal components.
sklearn_pca = sklearnPCA(n_components=2)
print np.shape(mega_feature_array)
data_r = sklearn_pca.fit_transform(mega_feature_array)
print np.shape(data_r)
print sklearn_pca.explained_variance_ratio_
print sklearn_pca.components_
# +
# Scatter the 2-D PCA projection, colored by the 5-way iron category.
# NOTE(review): the ``colors`` dicts below are unused.
colors = {}
for i, row in enumerate(data_r):
    if all_data_standard_classes[i] == 'HH':
        plt.scatter(row[0],row[1], color='red')
    elif all_data_standard_classes[i] == 'H':
        plt.scatter(row[0],row[1], color='green')
    elif all_data_standard_classes[i] == 'L':
        plt.scatter(row[0],row[1], color='blue')
    elif all_data_standard_classes[i] == 'L/LL':
        plt.scatter(row[0],row[1], color='yellow')
    elif all_data_standard_classes[i] == 'LL':
        plt.scatter(row[0],row[1], color='purple')
#plt.xlim(-2,0)
plt.show()
#plt.savefig('../results/PCA_5_cat')
# Same plot with the 'H' class dropped (4 categories).
colors = {}
for i, row in enumerate(data_r):
    if all_data_standard_classes[i] == 'HH':
        plt.scatter(row[0],row[1], color='red')
    elif all_data_standard_classes[i] == 'H':
        continue
    elif all_data_standard_classes[i] == 'L':
        plt.scatter(row[0],row[1], color='blue')
    elif all_data_standard_classes[i] == 'L/LL':
        plt.scatter(row[0],row[1], color='yellow')
    elif all_data_standard_classes[i] == 'LL':
        plt.scatter(row[0],row[1], color='purple')
#plt.xlim(-2,1)
plt.show()
#plt.savefig('../results/PCA_4_cat')
# Binary view: HH (red) versus the three L-type classes merged (blue); 'H' excluded.
colors = {}
for i, row in enumerate(data_r):
    if all_data_standard_classes[i] == 'H':
        continue
    elif all_data_standard_classes[i] == 'L':
        plt.scatter(row[0],row[1], color='blue')
    elif all_data_standard_classes[i] == 'L/LL':
        plt.scatter(row[0],row[1], color='blue')
    elif all_data_standard_classes[i] == 'LL':
        plt.scatter(row[0],row[1], color='blue')
    elif all_data_standard_classes[i] == 'HH':
        plt.scatter(row[0],row[1], color='red')
#plt.xlim(-3,0)
plt.show()
#plt.savefig('../results/PCA_HHv3Ls')
# One color per specimen, using the COLORS table defined above.
for i, row in enumerate(data_r):
    plt.scatter(row[0],row[1], color=COLORS[all_data_meteorite_classes[i]])
plt.show()
#plt.savefig('../results/PCA_meteorite')
#Next: MNF
#Then: MDA
| DataAnalysis/PCA,MNFOld..ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## POS Tagging - Lexicon and Rule Based Taggers
#
# Let's look at the two most basic tagging techniques - lexicon based (or unigram) and rule-based.
#
# In this guided exercise, you will explore the WSJ (Wall Street Journal) POS-tagged corpus that comes with NLTK and build a lexicon and rule-based tagger using this corpus as the training data.
#
# This exercise is divided into the following sections:
# 1. Reading and understanding the tagged dataset
# 2. Exploratory analysis
# ### 1. Reading and understanding the tagged dataset
# Importing libraries
import nltk
import numpy as np
import pandas as pd
import pprint, time
import random
from sklearn.model_selection import train_test_split
from nltk.tokenize import word_tokenize
import math
# reading the Treebank tagged sentences
wsj = list(nltk.corpus.treebank.tagged_sents())
# samples: Each sentence is a list of (word, pos) tuples
wsj[:3]
# In the list mentioned above, each element of the list is a sentence. Also, note that each sentence ends with a full stop '.' whose POS tag is also a '.'. Thus, the POS tag '.' demarcates the end of a sentence.
#
# Also, we do not need the corpus to be segmented into sentences, but can rather use a list of (word, tag) tuples. Let's convert the list into a (word, tag) tuple.
# converting the list of sents to a list of (word, pos tag) tuples
# Flatten sentence structure: one (word, tag) tuple per token in the corpus.
tagged_words = [tup for sent in wsj for tup in sent]
print(len(tagged_words))
tagged_words[:10]
# We now have a list of about 100676 (word, tag) tuples. Let's now do some exploratory analyses.
# ### 2. Exploratory Analysis
#
# Let's now conduct some basic exploratory analysis to understand the tagged corpus. To start with, let's ask some simple questions:
# 1. How many unique tags are there in the corpus?
# 2. Which is the most frequent tag in the corpus?
# 3. Which tag is most commonly assigned to the following words:
# - "bank"
# - "executive"
#
# question 1: Find the number of unique POS tags in the corpus
# you can use the set() function on the list of tags to get a unique set of tags,
# and compute its length
# Keep just the tag (second element) of every (word, tag) tuple.
tags = [t[1] for t in tagged_words]
unique_tags = set(tags)
len(unique_tags)
# +
# question 2: Which is the most frequent tag in the corpus
# to count the frequency of elements in a list, the Counter() class from collections
# module is very useful, as shown below
from collections import Counter
tag_counts = Counter(tags)
tag_counts.most_common()
# -
# the most common tags can be seen using the most_common() method of Counter
tag_counts.most_common(5)
# Thus, NN is the most common tag followed by IN, NNP, DT, -NONE- etc. You can read the exhaustive list of tags using the NLTK documentation as shown below.
# list of POS tags in NLTK
nltk.help.upenn_tagset()
# question 3: Which tag is most commonly assigned to the word w. Get the tags list that appear for word w and then use the Counter()
# Try w ='bank'
# Note: these keep whole (word, tag) tuples, so the Counter counts tuples.
tags_bank = [t for t in tagged_words if t[0].lower()=='bank']
bank = Counter(tags_bank).most_common()
bank
# question 3: Which tag is most commonly assigned to the word w. Try 'executive'
executive = Counter([t for t in tagged_words if t[0].lower()=='executive']).most_common()
executive
# ### 2. Exploratory Analysis Contd.
#
# Until now, we were looking at the frequency of tags assigned to particular words, which is the basic idea used by lexicon or unigram taggers. Let's now try observing some rules which can potentially be used for POS tagging.
#
# To start with, let's see if the following questions reveal something useful:
#
# 4. What fraction of words with the tag 'VBD' (verb, past tense) end with the letters 'ed'
# 5. What fraction of words with the tag 'VBG' (verb, present participle/gerund) end with the letters 'ing'
# +
# 4. how many words with the tag 'VBD' (verb, past tense) end with 'ed'
# first get the all the words tagged as VBD
past_tense_verbs = [t[0] for t in tagged_words if t[1]=='VBD']
# subset the past tense verbs with words ending with 'ed'. (Try w.endswith('ed'))
ed_verbs = [ved for ved in past_tense_verbs if ved[-2:]=='ed']
# Fraction of VBD tokens that carry the regular -ed suffix.
print(len(ed_verbs) / len(past_tense_verbs))
ed_verbs[:20]
# -
# 5. how many words with the tag 'VBG' end with 'ing'
participle_verbs = [t[0] for t in tagged_words if t[1]=='VBG']
ing_verbs = [ved for ved in participle_verbs if ved[-3:]=='ing']
print(len(ing_verbs) / len(participle_verbs))
ing_verbs[:20]
# ## 2. Exploratory Analysis Continued
#
# Let's now try observing some tag patterns using the fact the some tags are more likely to apper after certain other tags. For e.g. most nouns NN are usually followed by determiners DT ("The/DT constitution/NN"), adjectives JJ usually precede a noun NN (" A large/JJ building/NN"), etc.
#
# Try answering the following questions:
# 1. What fraction of adjectives JJ are followed by a noun NN?
# 2. What fraction of determiners DT are followed by a noun NN?
# 3. What fraction of modals MD are followed by a verb VB?
# +
# question: what fraction of adjectives JJ are followed by a noun NN
# create a list of all tags (without the words)
tags = [t[1] for t in tagged_words]
# create a list of JJ tags
jj_tags = [t for t in tags if t=='JJ']
# Collect the NN tags that immediately follow a JJ.  Bug fix: the original
# indexed tags[index-1], which at index 0 wraps around to the *last* tag and
# could spuriously pair the list's two ends; iterating adjacent pairs with
# zip avoids negative indexing entirely.
jj_nn_tags = [second for first, second in zip(tags, tags[1:])
              if first=='JJ' and second=='NN']
print(len(jj_tags))
print(len(jj_nn_tags))
print(len(jj_nn_tags) / len(jj_tags))
# +
# question: what fraction of determiners DT are followed by a noun NN
dt_tags = [t for t in tags if t=='DT']
# Bug fix: tags[index-1] wrapped to the last element at index 0; pairing
# adjacent tags with zip avoids the negative-index wraparound.
dt_nn_tags = [second for first, second in zip(tags, tags[1:])
              if first=='DT' and second=='NN']
print(len(dt_tags))
print(len(dt_nn_tags))
print(len(dt_nn_tags) / len(dt_tags))
# +
# question: what fraction of modals MD are followed by a verb VB?
md_tags = [t for t in tags if t=='MD']
# Bug fix: tags[index-1] wrapped to the last element at index 0; pairing
# adjacent tags with zip avoids the negative-index wraparound.
md_vb_tags = [second for first, second in zip(tags, tags[1:])
              if first=='MD' and second=='VB']
print(len(md_tags))
print(len(md_vb_tags))
print(len(md_vb_tags) / len(md_tags))
# -
| Treebank+EDA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import sys
import pandas as pd
from collections import Counter
# Project root; appended to sys.path so src.utils can be imported below.
PATH = '/Users/danil/Documents/github/sweet_RS/'
sys.path.append(str(PATH))
from src.utils import read_pickle, save_to_pickle
# -
# Per-user predicted items and ground-truth items from the evaluation run.
preds = read_pickle('../data/processed/errors_check_preds.pickle')
correct_1 = read_pickle('../data/processed/errors_check_corrs.pickle')
# +
# Bucket users by how well the recommender's predictions overlap their
# ground-truth items, and collect missed vs. correctly recommended films.
# NOTE(review): despite its name, ``error_rate`` is a *hit* rate
# (|correct ∩ predicted| / |correct|) — higher is better.
missed_users = []
norm_users = []
best_users = []
missed_films = []
correct_films = []
for ind, vals in enumerate(zip(preds.items(), correct_1.values())):
    #print(vals)
    user = vals[0][0]
    pred, corr = set(vals[0][1]), set(vals[1])
    error_rate = len(corr.intersection(pred)) / len(corr)
    #print(error_rate)
    #if error_rate > 0:
    #    print(pred, corr)
    if error_rate == 0:
        # No overlap at all: record the films we failed to recommend.
        missed_users.append(user)
        missed_films+=list(corr.difference(pred))
    elif error_rate >= 0.2:
        best_users.append(user)
        correct_films+=list(corr.intersection(pred))
    else:
        norm_users.append(user)
        correct_films+=list(corr.intersection(pred))
# +
# Rank films by how often they were missed across users.
missed_films_counter = Counter(missed_films)
missed_films_counter_sorted = missed_films_counter.most_common()
# -
movies_database = pd.read_csv('../data/raw/movies.csv')
# Print each missed film with its genres and tally the missed genres.
missed_genres = []
for movie in missed_films_counter_sorted:
    print('missed times:', movie[1])
    film = movies_database[movies_database['id'] == movie[0]]
    missed_genres+=film['genres'].values[0].split(',')
    print(film['title'], film['genres'])
    print('--------')
Counter(missed_genres)
# children's films are the ones missed most often
# Same breakdown for correctly recommended films.
correct_films_counter = Counter(correct_films)
correct_films_counter_sorted = correct_films_counter.most_common()
# +
# Some recommended ids are absent from movies.csv, hence the IndexError guard.
# NOTE(review): 'rocommended' is a typo in the printed label.
correct_genres = []
for movie in correct_films_counter_sorted:
    print('rocommended times:', movie[1])
    film = movies_database[movies_database['id'] == movie[0]]
    try:
        correct_genres+=film['genres'].values[0].split(',')
    except IndexError:
        pass
    print(film['title'], film['genres'])
    print('--------')
# -
Counter(correct_genres)
# overall the hit rate is poor, but in particular children's cartoons need to be recommended better,
# so the next step is to add genre to the model plus a series/not-series flag
# # precise check
# +
# For users with zero hits and at least 3 ground-truth films, print the
# predicted vs. actual titles side by side for manual inspection.
# NOTE(review): N is assigned but unused in this cell — confirm intent.
N = 5
for ind, vals in enumerate(zip(preds.items(), correct_1.values())):
    #print(vals)
    user = vals[0][0]
    pred, corr = set(vals[0][1]), set(vals[1])
    error_rate = len(corr.intersection(pred)) / len(corr)
    print(error_rate)
    #if error_rate > 0:
    #    print(pred, corr)
    if (error_rate == 0) and (len(corr) >= 3):
        print('predict:')
        for movie in pred:
            film = movies_database[movies_database['id'] == movie]
            print(film['title'].values[0], film['genres'].values[0])
        print('correct:')
        for movie in corr:
            film = movies_database[movies_database['id'] == movie]
            print(film['title'].values[0], film['genres'].values[0])
        print('--------------')
# -
# -
# check appearance of missed films
# +
user_hist_df = pd.read_csv('../data/raw/movies_dataset_10 months.csv')
user_hist_df['ts'] = pd.to_datetime(user_hist_df['ts'])
user_hist_df['month'] = user_hist_df['ts'].dt.month
# Months 6 and 7 are held out as two test splits; everything else trains.
test_1 = user_hist_df[user_hist_df['month'] == 6]
test_2 = user_hist_df[user_hist_df['month'] == 7]
train = user_hist_df[~user_hist_df['month'].isin([6, 7])]
# -
# Compare how often each missed film was viewed during the training period.
movie_views = train['movie_id'].value_counts()
for movie in missed_films_counter_sorted:
    film = movies_database[movies_database['id'] == movie[0]]
    print(movie[0], 'missed', movie[1], 'viewed', movie_views[movie[0]],
          film['title'].values[0], film['year'].values[0])
# Count how many users had film id 19860 in their ground-truth set.
i = 0
for ind, vals in enumerate(zip(preds.items(), correct_1.values())):
    #print(vals)
    user = vals[0][0]
    pred, corr = set(vals[0][1]), set(vals[1])
    error_rate = len(corr.intersection(pred)) / len(corr)
    if 19860 in corr:
        i+=1
i
# it may be that a film was watched many times only in the test month and far
# less before — it looks like the recommender only started surfacing such films then
| notebooks/error_analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Distances and their cell IDs
# ## This notebook shows Mesh.distance(other_mesh), which returns a tuple with an array of distances and the corresponding point or cell ids nearest the closest points.
import shapeworks as sw
import numpy as np
import pyvista as pv
import math
# Static backend renders plots as images (works headless / in saved notebooks).
pv.set_jupyter_backend('static')
# Relative path to the sample mesh files used below.
DATA = "../Data/"
# ## Example of Mesh.distance results
# Receives array of distances and their associated cell/point ids in target
filename1 = DATA + "ellipsoid_05.vtk"
filename2 = DATA + "ellipsoid_07.vtk"
mesh1 = sw.Mesh(filename1)
mesh2 = sw.Mesh(filename2)
# ### point to cell distance
# Default method: per-point distance to the closest *cell* of the other
# mesh; the second array of the returned tuple holds the matching ids.
distances_and_cell_ids_1to2 = mesh1.distance(mesh2)
distances_and_cell_ids_2to1 = mesh2.distance(mesh1)
distances_and_cell_ids_1to2
distances_and_cell_ids_2to1
distances_and_cell_ids_1to2[0].size
distances_and_cell_ids_1to2[1].size
# #### validate data ownership, ensuring no copying from C++ to Python
# .flags shows whether the numpy array owns its data or views C++ memory.
distances_and_cell_ids_1to2[0].flags
distances_and_cell_ids_2to1[1].flags
# ### point to point distance
distances_and_point_ids_1to2 = mesh1.distance(mesh2, sw.Mesh.DistanceMethod.PointToPoint)
distances_and_point_ids_2to1 = mesh2.distance(mesh1, sw.Mesh.DistanceMethod.PointToPoint)
distances_and_point_ids_1to2
distances_and_point_ids_2to1
distances_and_point_ids_1to2[0].size
distances_and_point_ids_1to2[1].size
# #### validate distance of first point in mesh1 to specified closest point in mesh2
# distance 1to2[0]: 43.43859498
# Recompute the reported distance by hand: ||p1 - p0|| should match the
# first entry of the point-to-point distance array.
p0 = mesh1.getPoint(0)
p0
p1 = mesh2.getPoint(458)
p1
v = p1 - p0
v
math.sqrt(v.dot(v))
# ### plot distances as scalar field on meshes
# Convert shapeworks meshes to pyvista/VTK meshes for rendering.
pv_mesh1 = sw.sw2vtkMesh(mesh1)
pv_mesh2 = sw.sw2vtkMesh(mesh2)
# +
# used to maintain bounds even when meshes' positions change
a = pv.UniformGrid()
a.dimensions = np.array([9,8,8])
a.origin = (-15,-5,-15)
a.spacing = (10, 10, 10)
outline = a.outline()
p = pv.Plotter(shape=(1,1), border=False)
p.subplot(0,0)
p.add_text("distances")
p.add_mesh(outline)
# Color each mesh by its per-point distance to the other mesh.
p.add_mesh(pv_mesh1, scalars=distances_and_cell_ids_1to2[0], show_scalar_bar=True, opacity=1.0)
p.add_mesh(pv_mesh2, scalars=distances_and_cell_ids_2to1[0], show_scalar_bar=True, opacity=1.0)
p.camera_position = 'xy'
p.show()
# -
| Examples/Python/notebooks/Mesh distances and receiving tuple of arrays from C++.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Making all necessary imports
# +
import json
import re
from string import punctuation
from stopwords import french_stopwords, english_stopwords
import pandas as pd
from textblob import TextBlob
import gensim
from gensim import corpora
import nltk
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem.snowball import SnowballStemmer
from nltk.stem.wordnet import WordNetLemmatizer
# Fetch the NLTK resources the tokenizer/lemmatizer need (no-op if cached).
nltk.download('punkt')
nltk.download('wordnet')
import warnings
warnings.filterwarnings('ignore')
# -
# # Loading data
# Load the raw reviews; `data` is whatever JSON structure full2.json holds
# (consumed below by pd.DataFrame.from_dict).
with open("full2.json", "r") as read_file:
    data = json.load(read_file)
# # Functions for text processing
# +
def character_replacement(input_string):
    """Normalize a raw review string for tokenization.

    Steps, in order:
      1. Replace literal JSON-style escape sequences (e.g. the six characters
         ``\\u00e9``) with the accented characters they encode.
      2. Lowercase the text.
      3. Replace punctuation-like characters and digits with spaces so that
         adjacent words do not merge.

    The first four mapping entries still contain a backslash and must run
    before the bare ``"\\" -> ""`` entry strips backslashes from the
    remaining escape sequences; after that, the backslash-less keys match.

    Parameters
    ----------
    input_string : str
        Raw review text possibly containing literal ``\\uXXXX`` sequences.

    Returns
    -------
    str
        Lowercased text with escapes decoded and punctuation/digits
        replaced by spaces.
    """
    # NOTE(review): "u00ee" -> "i" and "u00ab"/"u00bb" -> "'" look like
    # approximations of î and French guillemets — confirm intentional.
    character_mapping = {"\\u00e9": "é",
                         "\\u2019": "'",
                         "\\": "",
                         "\\u00fb": "û",
                         "u00e8": "è",
                         "u00e0": "à",
                         "u00f4": "ô",
                         "u00ea": "ê",
                         "u00ee": "i",
                         "u00fb": "û",
                         "u2018": "'",
                         # Fixed defect: "u00e2" appeared twice in the original
                         # literal ("a" then "â"); only the last value ("â")
                         # ever took effect, so the dead entry is removed.
                         "u00e2": "â",
                         "u00ab": "'",
                         "u00bb": "'",
                         "u00e7": "ç",
                         "u00f9": "ù",
                         "u00a3": "£",
                         }
    for escape, replacement in character_mapping.items():
        input_string = input_string.replace(escape, replacement)
    input_string = input_string.lower()
    # Digits and punctuation become spaces (not deleted) on purpose.
    characters_to_remove = ["@", "/", "#", ".", ",", "!", "?", "(", ")", "-", "_", "’", "'", "\"", ":", "1", "2", "3", "4", "5", "6", "7", "8", "9", "0"]
    transformation_dict = {initial: " " for initial in characters_to_remove}
    no_punctuation_reviews = input_string.translate(str.maketrans(transformation_dict))
    return no_punctuation_reviews
def tokenize(input_string):
    """Split *input_string* into word tokens via NLTK's word_tokenize."""
    return word_tokenize(input_string)
def remove_stop_words_french(input_tokens):
    """Return *input_tokens* with French stopwords removed, order preserved."""
    kept = []
    for candidate in input_tokens:
        if candidate not in french_stopwords:
            kept.append(candidate)
    return kept
def remove_stop_words_english(input_tokens):
    """Return *input_tokens* with English stopwords removed, order preserved."""
    return list(filter(lambda candidate: candidate not in english_stopwords, input_tokens))
# -
# # Inflected languages
# +
# Lemmatization
lemmatizer = WordNetLemmatizer()
def lemmatize(tokens):
    """Lemmatize each token three times — as adjective, then verb, then noun.

    Chaining the three parts of speech collapses inflected forms that a
    single-POS pass would miss. Uses the module-level WordNet ``lemmatizer``.
    """
    result = []
    for token in tokens:
        as_adjective = lemmatizer.lemmatize(token, pos='a')
        as_verb = lemmatizer.lemmatize(as_adjective, pos='v')
        result.append(lemmatizer.lemmatize(as_verb, pos='n'))
    return result
# Stemming
frenchStemmer=SnowballStemmer("french")
def stem(tokens):
    """Apply the module-level French Snowball stemmer to every token."""
    stemmed = []
    for word in tokens:
        stemmed.append(frenchStemmer.stem(word))
    return stemmed
# -
# # Processing data
# +
# Creating a dataFrame with all reviews
reviews = pd.DataFrame.from_dict(data)
# Making basic cleaning
reviews.review = reviews.review.apply(lambda x: character_replacement(x))
reviews["tokens"] = reviews.review.apply(lambda x: tokenize(x))
# Keep only tokens longer than 3 characters (drops short function words).
reviews.tokens = reviews.tokens.apply(lambda token_list: [meaningful_word for meaningful_word in token_list if len(meaningful_word) > 3])
# Splitting reviews by language
# NOTE(review): these are views/slices of `reviews`; the assignments below
# will trigger pandas SettingWithCopyWarning — consider .copy() here.
french_reviews = reviews[reviews.review_language == "fr"]
english_reviews = reviews[reviews.review_language == "en"]
# removing stopwords
french_reviews.tokens = french_reviews.tokens.apply(lambda x: remove_stop_words_french(x))
english_reviews.tokens = english_reviews.tokens.apply(lambda x: remove_stop_words_english(x))
# Lemmatizing (English) / stemming (French)
english_reviews['inflected'] = english_reviews['tokens'].apply(lemmatize)
french_reviews['inflected'] = french_reviews['tokens'].apply(stem)
# Preparing LDA inputs: gensim dictionary + bag-of-words corpus
# (English reviews only; French reviews are not fed to the LDA below).
dictionary = corpora.Dictionary(english_reviews['inflected'])
doc_term_matrix = [dictionary.doc2bow(rev) for rev in english_reviews['inflected']]
# -
# -
# # LDA
# +
# Creating the object for LDA model using gensim library
LDA = gensim.models.ldamodel.LdaModel
# Build LDA model
# Very small symmetric alpha/eta priors push documents/topics toward
# sparse distributions (few topics per doc, few words per topic).
num_topics = 5
lda_model = LDA(corpus=doc_term_matrix, id2word=dictionary,
                num_topics=num_topics,
                alpha=[0.0001] * num_topics,
                eta=[0.0001] * len(dictionary),
                chunksize=2000,
                passes=6,
                random_state=100,  # fixed seed for reproducibility
               )
print(lda_model.print_topics(num_words=8))
| Day6/X_HEC_Session_6_Notebook_1_LDA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
# Clean the mental-health-in-tech survey: drop unused columns, collapse the
# free-text Gender field into Male/Female/Others, bucket non-US countries,
# and write the result to Data/survey_df.csv.
survey_df= pd.read_csv("Data/survey.csv")
survey_df
survey_df= survey_df.drop(['Timestamp','work_interfere', 'no_employees', 'mental_health_consequence', 'phys_health_consequence','leave', 'coworkers', 'supervisor', 'mental_health_interview','phys_health_interview','mental_vs_physical','obs_consequence','comments','benefits','care_options','wellness_program','seek_help','anonymity'], axis =1)
# Inspect rows with a missing `state` (handled further below).
nan_df = survey_df[survey_df["state"].isna()]
nan_df
# + tags=[]
# Collapse non-binary / free-text gender entries into "Others".
survey_df["Gender"] = survey_df["Gender"].replace(['Male-ish',"Trans-female", 'something kinda male?',
                                                   'non-binary',
                                                   'Nah', 'All', 'Enby', 'fluid', 'Genderqueer',
                                                   'Androgyne', 'Agender',
                                                   'Guy (-ish) ^_^', 'male leaning androgynous',
                                                   'Trans woman','Neuter', 'Female (trans)', 'queer', 'A little about you',
                                                   'p','non-binary', 'Nah',
                                                   'ostensibly male, unsure what that really means'], 'Others')
# -
# Normalize the many spellings/typos of "Male".
survey_df["Gender"] = survey_df["Gender"].replace(['M','MALE', "male","maile", "Mal",
                                                   'Make',"Man",'msle','Mail','Malr','m','Male ',
                                                   'Cis Male','Male (CIS)', 'cis male','Cis Man'
                                                   ], 'Male')
# Normalize the many spellings/typos of "Female".
survey_df["Gender"] = survey_df["Gender"].replace(['f','FEMALE', "female", "F", "Woman",
                                                   'queer/she/they','Femake', 'cis-female/femme',
                                                   'woman','femail','Female ',"Cis Female", "Female (cis)"], 'Female')
# Missing self_employed is treated as "No".
survey_df["self_employed"] = survey_df["self_employed"].fillna("No")
survey_df["Country"].unique()
# Bucket every country except the United States into "Others".
survey_df["Country"] = survey_df["Country"].replace(['Canada', 'United Kingdom', 'Bulgaria', 'France',
                                                     'Portugal', 'Netherlands', 'Switzerland', 'Poland', 'Australia',
                                                     'Germany', 'Russia', 'Mexico', 'Brazil', 'Slovenia', 'Costa Rica',
                                                     'Austria', 'Ireland', 'India', 'South Africa', 'Italy', 'Sweden',
                                                     'Colombia', 'Latvia', 'Romania', 'Belgium', 'New Zealand',
                                                     'Zimbabwe', 'Spain', 'Finland', 'Uruguay', 'Israel',
                                                     'Bosnia and Herzegovina', 'Hungary', 'Singapore', 'Japan',
                                                     'Nigeria', 'Croatia', 'Norway', 'Thailand', 'Denmark',
                                                     'Bahamas, The', 'Greece', 'Moldova', 'Georgia', 'China',
                                                     'Czech Republic', 'Philippines'], 'Others')
survey_df
# Non-US respondents get state="Non US"; US respondents keep their state
# (which may still be NaN — dropped on the next step).
survey_df["state"] = ["Non US" if y != "United States" else x for x, y in zip(survey_df["state"], survey_df["Country"])]
survey_df
survey_df.dropna(subset=["state"], inplace=True)
survey_df
survey_df.isna().sum()
# Discard implausible ages (<=0 or >=100).
survey_df = survey_df[(survey_df["Age"]>0) & (survey_df["Age"]<100)]
survey_df
survey_df = survey_df.reset_index()
# + tags=[]
# reset_index() surfaced the old index as a column named "index"; rename it.
survey_df = survey_df.rename(columns = {'index' : 'id'})
# -
survey_df.to_csv("Data/survey_df.csv", index = False)
| data_cleaning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Shell steps: download the 2017 news crawl, split it into 100k-line shards
# under data/news2017, and fetch the small spaCy English model.
# ! wget http://data.statmt.org/news-crawl/en/news.2017.en.shuffled.deduped.gz
# ! gzip -d news.2017.en.shuffled.deduped.gz
# ! mv news.2017.en.shuffled.deduped data/
# ! mkdir data/news2017
# ! split -l 100000 --additional-suffix=.txt /root/data/news.2017.en.shuffled.deduped /root/data/news2017/text
# ! python3 -m spacy download en_core_web_sm
# -
import hyperhyper as hy
import time
import numpy as np
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
# Build a tokenized corpus from the shards, then a word-embedding "bunch"
# and an SVD factorization with heavy subsampling of frequent pairs.
c = hy.Corpus.from_text_files('/root/data/news2017', preproc_func=hy.preprocessing.tokenize_texts, view_fraction=0.05)
bunch = hy.Bunch('/root/data/bunches/news2017', c)
bunch.svd(pair_args={'subsample_factor': 7e-5})
| notebooks/07_all_comments/news.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# # Install a conda package in the current Jupyter kernel
# import sys
# # !conda install --yes --prefix {sys.prefix} tensorflow
# -
# # All of the packages I need to do this
# +
import warnings
warnings.filterwarnings("ignore")
from tensorflow.keras.datasets import fashion_mnist
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
# -
# # Importing the minst fashion data and preprocessing it to be accepted into ANN
# +
# Flatten 28x28 images to 784-vectors and scale pixels to [0, 1].
(X_train, y_train), (X_test, y_test) = fashion_mnist.load_data()
input_dim = 784 # 28*28
output_dim = nb_classes = 10
# NOTE(review): nb_epoch is never used below (epochs=10 is set later).
nb_epoch = 20
X_train = X_train.reshape(60000, input_dim)
X_test = X_test.reshape(10000, input_dim)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
# NOTE(review): Y_train/Y_test (one-hot) are never used below — every model
# is compiled with sparse_categorical_crossentropy and fit on integer y_train.
Y_train = to_categorical(y_train, nb_classes)
Y_test = to_categorical(y_test, nb_classes)
# -
# # Now we are going to do something kind of like king of the court, but king of the neural network. The one I deem the best stays and the one that isn't goes. I will go down the list of hyperparameters to tune:
# - Number of layers
# - Activation functions
# - Number of Neurons
# - Batch sizes during training
#
#
# # We will see which model reigns supreme!
X_train.shape
epochs = 10
batch_size = 10
# +
# Model 1: two hidden ReLU layers (128, 64), softmax output, SGD +
# sparse categorical cross-entropy (integer labels y_train).
model = Sequential()
# our first dense layer
model.add(Dense(128, input_shape=(784,), activation="relu"))
# our second dense layer
model.add(Dense(64, activation="relu"))
# last layer is the output layer.
model.add(Dense(10, activation="softmax"))
model.compile(optimizer='sgd', loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
# setting verbose=1 prints out some results after each epoch
model.fit(X_train, y_train, batch_size=batch_size, validation_data=(X_test, y_test), epochs=epochs, verbose=1)
# -
score = model.evaluate(X_test, y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
# # I've seen worse
# +
# Model 2: deeper variant — five hidden ReLU layers (128, 64, 64, 32, 32),
# same optimizer/loss as model 1.
model = Sequential()
# our first dense layer
model.add(Dense(128, input_shape=(784,), activation="relu"))
# our second dense layer
model.add(Dense(64, activation="relu"))
# our third layer
model.add(Dense(64, activation="relu"))
# our fourth layer
model.add(Dense(32, activation="relu"))
# our fifth layer
model.add(Dense(32, activation="relu"))
# last layer is the output layer.
model.add(Dense(10, activation="softmax"))
model.compile(optimizer='sgd', loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
# setting verbose=1 prints out some results after each epoch
model.fit(X_train, y_train, batch_size=batch_size, validation_data=(X_test, y_test), epochs=epochs, verbose=1)
# -
score = model.evaluate(X_test, y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
# # The accuracy was a little better with this model.Model 2 moves on.
#
#
# # Next we compare activation functions
# +
# #Repeat model stats
# Test score: 0.34816762804985046
# Test accuracy: 0.8732
# loss: 0.3482
# +
#New model
# Model 3: same depth as model 2 but with SELU activations and the
# RMSprop optimizer (changes two things at once — note for comparison).
model = Sequential()
# our first dense layer
model.add(Dense(128, input_shape=(784,), activation="selu"))
# our second dense layer
model.add(Dense(64, activation="selu"))
# our third layer
model.add(Dense(64, activation="selu"))
# our fourth layer
model.add(Dense(32, activation="selu"))
# our fifth layer
model.add(Dense(32, activation="selu"))
# last layer is the output layer.
model.add(Dense(10, activation="softmax"))
model.compile(optimizer='rmsprop', loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
# setting verbose=1 prints out some results after each epoch
model.fit(X_train, y_train, batch_size=batch_size, validation_data=(X_test, y_test), epochs=epochs, verbose=1)
# -
score = model.evaluate(X_test, y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
# # 2nd model still coming out on top! Accuracy difference!
# +
#Number of neurons time
# Model 4: wider layers (128, 128, 64, 64, 32), back to ReLU + SGD.
model = Sequential()
# our first dense layer
model.add(Dense(128, input_shape=(784,), activation="relu"))
# our second dense layer
model.add(Dense(128, activation="relu"))
# our third layer
model.add(Dense(64, activation="relu"))
# our fourth layer
model.add(Dense(64, activation="relu"))
# our fifth layer
model.add(Dense(32, activation="relu"))
# last layer is the output layer.
model.add(Dense(10, activation="softmax"))
model.compile(optimizer='sgd', loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
# setting verbose=1 prints out some results after each epoch
model.fit(X_train, y_train, batch_size=batch_size, validation_data=(X_test, y_test), epochs=epochs, verbose=1)
# -
score = model.evaluate(X_test, y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
# # Well the accuracy is better but test score is worse. Keeping original model
# Model 5: identical architecture to model 2 but with batch_size 15.
batch_size = 15
# +
# #Repeat model stats
# Test score: 0.34816762804985046
# Test accuracy: 0.8732
# loss: 0.3482
# +
model = Sequential()
# our first dense layer
model.add(Dense(128, input_shape=(784,), activation="relu"))
# our second dense layer
model.add(Dense(64, activation="relu"))
# our third layer
model.add(Dense(64, activation="relu"))
# our fourth layer
model.add(Dense(32, activation="relu"))
# our fifth layer
model.add(Dense(32, activation="relu"))
# last layer is the output layer.
model.add(Dense(10, activation="softmax"))
model.compile(optimizer='sgd', loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
# setting verbose=1 prints out some results after each epoch
model.fit(X_train, y_train, batch_size=batch_size, validation_data=(X_test, y_test), epochs=epochs, verbose=1)
# -
score = model.evaluate(X_test, y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
# # That model again! Let's look at them all together!
# - Model 1: Test score: 0.3742851203918457, Test accuracy: 0.8603
# - Model 2: Test score: 0.35084240040779113, Test accuracy: 0.8734
# - Model 3: Test score: 0.35251856787204744, Test accuracy: 0.8755
# - Model 4: Test score: 0.3646376796722412, Test accuracy: 0.8725
#
# # Model 3 might have performed the best out of all the models. I think tuning of the epochs as wel as neurons could lead to increased accuracy and overall better test score.
| DL_Challenge.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Classic Spark word count over a local text directory.
from pyspark import SparkContext
sc = SparkContext("local", "Simple App")
print('spark context: ', sc)
text_file = sc.textFile('file:/E:/code/git-2018/ETL-Workflow/ETL-Examples/src/main/python/resources/word-count-input')
print('--> ', text_file)
# Split lines into words, map each to (word, 1), then sum counts per word.
counts = text_file.flatMap(lambda line:line.split(' ')).map(lambda word:(word,1)).reduceByKey(lambda a,b: a+b)
#counts.saveAsTextFile('output')
result = counts.collect()
print(result)
| ETL-Examples/src/main/python/pyspark-sql-examples/word-count.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.7.6 64-bit
# metadata:
# interpreter:
# hash: df955ce39d0f31d56d4bb2fe0a613e5326ba60723fd33d8303a3aede8f65715c
# name: Python 3.7.6 64-bit
# ---
# # ___Imports___
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from scipy import stats
# import plotly.express as px
# from geopy.geocoders import Nominatim
# import geopy as gp
# from datetime import datetime
# from glob import glob
# ## ___NASA data___
# +
# load data for nasa
nasa_data = pd.read_csv('../data/meteorite-landings.csv')
print(nasa_data.columns)
nasa_data.head()
np.shape(nasa_data)
# -
# ## ___IMO data___
# +
# load data for imo
imo_data = pd.read_csv('../data/IMO/data/IMO9920.csv')
print(imo_data.columns)
imo_data.head()
np.shape(imo_data)
# -
# ## ___Combining data___
# Combine the 2 datasets into 1
# keys=['NASA','IMO'] gives a MultiIndex marking each row's origin.
all_data = pd.concat([nasa_data, imo_data], sort=False, keys=['NASA', 'IMO'])
# Show columns, first 5 rows, and shape to verify combination
print(all_data.columns)
# NOTE(review): missing parentheses — this prints the bound .head method,
# not .head(); likely meant print(all_data.head()).
print(all_data.head)
np.shape(all_data)
# # ___NOTES___
#
# - Before Sampling
# - Nasa data ranges from index 0:45715
# - IMO data ranges from index 45716:end
# ## ___Cleaning___
# NOTE(review): this "confirmation" runs BEFORE the drop below — the
# comment is misleading. Also .head is missing parentheses (see above).
# Confirmation of dropped columns
print(all_data.head)
print(np.shape(all_data))
# +
# Drop unnecessary columns
all_data = all_data.drop(['name', 'nametype', 'GeoLocation', 'Observer ID', 'Submitter ID', 'Actual Observer Name', 'Submitted by'], axis=1)
# +
# Check for Null Values
all_data.isnull().sum()
# -
# Rows >= 45716 are IMO rows (per the NOTES cell above); fill the NASA-style
# columns for those rows from the corresponding IMO columns.
# merge IMO ids to NASA NaN ids
all_data['id'] = all_data['id'].replace(np.NaN, all_data['Session ID'][45716:])
# merge IMO latitudess to NASA NaN latitudes
all_data['reclat'] = all_data['reclat'].replace(np.NaN, all_data['Latitude'][45716:])
# merge IMO longitudes to NASA NaN longitudes
all_data['reclong'] = all_data['reclong'].replace(np.NaN, all_data['Longitude'][45716:])
# +
# Convert the IMO Start Date to NASA year
# Keeps only the leading 4 characters (the year) of each IMO Start Date.
# NOTE(review): chained indexing assignment — pandas may warn; .loc preferred.
for i in range(45716, np.shape(all_data)[0]):
    all_data['Start Date'][i] = all_data['Start Date'][i][:4]
all_data['Start Date'].tail
# -
# merge IMO start dates to NASA NaN year
all_data['year'] = all_data['year'].replace(np.NaN, all_data['Start Date'][45716:])
print(all_data.isnull().sum())
# Spot-check: the year prefix of the first IMO row.
test = all_data['Start Date'].iloc[45716]
test[:4]
all_data.columns
# Rename (and Re-drop) columns
all_data = all_data.drop(['Session ID', 'Start Date', 'City', 'Country', 'Latitude', 'Longitude'], axis=1)
all_data.rename(columns={'recclass':'class', 'reclat':'lat', 'reclong':'long'}, inplace=True)
print(all_data.columns)
print(np.shape(all_data))
print(all_data.isnull().sum())
# # ___Fix rest of NaN values___
# +
# Replace Numerical NaN Values with mean
# NOTE(review): sklearn.preprocessing.Imputer was removed in scikit-learn
# 0.22 — this cell requires an old sklearn; modern code uses
# sklearn.impute.SimpleImputer (no `axis` parameter).
from sklearn.preprocessing import Imputer
imputer = Imputer(missing_values=np.nan, strategy='mean', axis=0)
all_data[['mass']] = imputer.fit_transform(all_data[['mass']])
all_data[['year']] = imputer.fit_transform(all_data[['year']])
all_data[['lat']] = imputer.fit_transform(all_data[['lat']])
all_data[['long']] = imputer.fit_transform(all_data[['long']])
all_data[['Elevation']] = imputer.fit_transform(all_data[['Elevation']])
print(all_data.isnull().sum())
# -
# Replace Categorical NaN with 'N/A'
all_data['class'] = all_data['class'].replace(np.NaN, 'N/A')
all_data['fall'] = all_data['fall'].replace(np.NaN, 'N/A')
print(all_data.isnull().sum())
# heat map of correlation of features
correlation_matrix = all_data.corr()
fig = plt.figure(figsize=(9,6))
sns.heatmap(correlation_matrix,vmax=0.8,square = True, annot= True)
plt.show()
all_data.to_csv('../data/ALL_DATA.csv', index = False)
# ## Sampling
# ### (Test sample size is set to 10%)
# ### Only comment out once all testing / experiment is done, to use real data
# ---
# +
# *****************************
# HIGHLY IMPORTANT
# *****************************
# Sample data
# Compare descriptive statistics before/after downsampling to 10% so the
# sample can be sanity-checked against the full data.
print("Original Data Stats: \n")
print(all_data.describe())
print('\n--------\n')
print("New Sample Data Stats: \n")
# all_data['year'].fillna(0).astype(int)
# all_data['mass'].fillna(0).astype(int)
all_data = all_data.sample(frac=0.1) # 10% sample set
print(all_data.describe())
| machine-learning/pre-processing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <a href="https://colab.research.google.com/github/Tessellate-Imaging/Monk_Object_Detection/blob/master/example_notebooks/13_tf_obj_2/Data_Conversion%20-%20Monk%20Type%20to%20VOC%20Type.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# # Monk Format
#
# ## Dataset Directory Structure
#
# parent_dir (root_dir)
# |
# |-------ship (coco_dir)
# | |
# | |---Images (img_dir)
# | |----|
# | |-------------------img1.jpg
# | |-------------------img2.jpg
# | |-------------------.........(and so on)
# |
# |
# | |---labels.csv
#
#
# - Annotation format
#
# |image |label |
# |img1.jpg | x1 y1 x2 y2 label x1 y2 x2 y2 label .... (and so on) |
# # VOC Format
#
# ## Dataset Directory Structure
#
# parent_dir (root_dir)
# |
# |------kangaroo
# | |
# | |---Images (img_dir)
# | |----|
# | |-------------------img1.jpg
# | |-------------------img2.jpg
# | |-------------------.........(and so on)
# |
# |
# | |---voc_dir (anno_dir)
# | |----|
# | |--------------------img1.xml
# | |--------------------img2.xml
# | |-------------------.........(and so on)
#
#
# Convert Monk-format annotations (one CSV row per image: "x1 y1 x2 y2 label
# ..." repeated) into one Pascal-VOC XML file per image.
# ! cp -r Monk_Object_Detection/example_notebooks/sample_dataset/ship .
# ! pip install numpy pascal-voc-writer pandas opencv-python tqdm
img_dir = "ship/Images";
anno_file = "ship/train_labels.csv";
# +
output_dir = "ship/voc";
import os
if(not os.path.isdir(output_dir)):
    os.mkdir(output_dir)
# -
import pandas as pd
import cv2
import numpy as np
df = pd.read_csv(anno_file);
columns = df.columns
from pascal_voc_writer import Writer
from tqdm import tqdm
for i in tqdm(range(len(df))):
    img_name = df[columns[0]][i];
    # Each group of 5 whitespace-separated fields is x1 y1 x2 y2 label.
    labels = df[columns[1]][i].split(" ");
    # Image is read only to obtain width/height for the XML <size> element.
    img = cv2.imread("ship/Images/" + img_name)
    h, w, c = img.shape;
    writer = Writer(img_name, w, h)
    for j in range(len(labels)//5):
        # NOTE(review): coordinates stay strings after split() — they are
        # never cast to int before addObject; confirm pascal_voc_writer
        # renders them as intended.
        x1 = labels[j*5+0]
        y1 = labels[j*5+1]
        x2 = labels[j*5+2]
        y2 = labels[j*5+3]
        label = labels[j*5+4]
        writer.addObject(label, x1, y1, x2, y2);
    writer.save(output_dir + "/" + img_name.split(".")[0] + ".xml")
| example_notebooks/13_tf_obj_2/Data_Conversion - Monk Type to VOC Type.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Nwanna-Joseph/EfficientMatrixMultiplication/blob/main/My_implementation_of_Matrix_multiplication.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/"} id="VVTcKoLjJsFR" outputId="9fbef5a8-9e72-446c-a930-c644aba2dfb9"
def MatMul(A, B):
    """Multiply two matrices given as lists of row lists.

    Parameters
    ----------
    A : list[list]  -- shape (m, k)
    B : list[list]  -- shape (k, n)

    Returns
    -------
    list[list] of shape (m, n), or None if either input has no rows
    (a message is printed in that case, matching the original behavior).

    Raises
    ------
    ValueError
        If A's column count does not match B's row count.
        (Fixes the original `assert("...")`, which was a no-op: asserting
        a non-empty string always passes, so mismatched shapes slipped
        through to the multiplication.)
    """
    if len(A) == 0 or len(B) == 0:
        print("Invalid rows. Please validate")
        return
    num_A_rows, num_A_col = len(A), len(A[0])
    num_B_rows, num_B_col = len(B), len(B[0])
    if num_A_col != num_B_rows:
        raise ValueError("Invalid col * rows. Please validate")
    # result[i][j] = sum over k of A[i][k] * B[k][j]
    return [
        [sum(A[i][k] * B[k][j] for k in range(num_A_col)) for j in range(num_B_col)]
        for i in range(num_A_rows)
    ]
#### END CODE ####
# Smoke test: (2x3) @ (3x2) must equal [[6, 12], [21, 36]].
A = [ [1,2,3],[4,5,6] ]
B = [ [4,5], [1,2], [0, 1] ]
print(MatMul(A, B))
assert MatMul(A, B)==[[6, 12], [21, 36]], "Your implementation is not correct"
print("Congrats, It is working")
| My_implementation_of_Matrix_multiplication.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Synapse PySpark
# name: synapse_pyspark
# ---
# # Charting in Synapse Notebook
#
# Synapse has common used data visualization packages pre installed, such as **matplotlib**, **bokeh**, **seaborn**, **altair**, **plotly**. This notebook provides examples to do data visualization using charts in Synapse notebook.
#
# ## Matplotlib
#
# +
# Line charts
import matplotlib.pyplot as plt
x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
y1 = [1, 3, 5, 3, 1, 3, 5, 3, 1]
y2 = [2, 4, 6, 4, 2, 4, 6, 4, 2]
plt.plot(x, y1, label="line L")
plt.plot(x, y2, label="line H")
plt.plot()
plt.xlabel("x axis")
plt.ylabel("y axis")
plt.title("Line Graph Example")
plt.legend()
plt.show()
# +
# Bar chart
import matplotlib.pyplot as plt
# Look at index 4 and 6, which demonstrate overlapping cases.
x1 = [1, 3, 4, 5, 6, 7, 9]
y1 = [4, 7, 2, 4, 7, 8, 3]
x2 = [2, 4, 6, 8, 10]
y2 = [5, 6, 2, 6, 2]
# Colors: https://matplotlib.org/api/colors_api.html
plt.bar(x1, y1, label="Blue Bar", color='b')
plt.bar(x2, y2, label="Green Bar", color='g')
plt.plot()
plt.xlabel("bar number")
plt.ylabel("bar height")
plt.title("Bar Chart Example")
plt.legend()
plt.show()
# +
# Histogram
import matplotlib.pyplot as plt
import numpy as np
# Use numpy to generate a bunch of random data in a bell curve around 5.
n = 5 + np.random.randn(1000)
m = [m for m in range(len(n))]
plt.bar(m, n)
plt.title("Raw Data")
plt.show()
plt.hist(n, bins=20)
plt.title("Histogram")
plt.show()
# cumulative=True shows the running total across bins.
plt.hist(n, cumulative=True, bins=20)
plt.title("Cumulative Histogram")
plt.show()
# +
# Scatter chart
import matplotlib.pyplot as plt
x1 = [2, 3, 4]
y1 = [5, 5, 5]
x2 = [1, 2, 3, 4, 5]
y2 = [2, 3, 2, 3, 4]
y3 = [6, 8, 7, 8, 7]
# Markers: https://matplotlib.org/api/markers_api.html
plt.scatter(x1, y1)
plt.scatter(x2, y2, marker='v', color='r')
plt.scatter(x2, y3, marker='^', color='m')
plt.title('Scatter Plot Example')
plt.show()
# +
# Stack plots
import matplotlib.pyplot as plt
idxes = [ 1, 2, 3, 4, 5, 6, 7, 8, 9]
arr1 = [23, 40, 28, 43, 8, 44, 43, 18, 17]
arr2 = [17, 30, 22, 14, 17, 17, 29, 22, 30]
arr3 = [15, 31, 18, 22, 18, 19, 13, 32, 39]
# Adding legend for stack plots is tricky.
# (Empty plots exist only to create legend entries for the stacked areas.)
plt.plot([], [], color='r', label = 'D 1')
plt.plot([], [], color='g', label = 'D 2')
plt.plot([], [], color='b', label = 'D 3')
plt.stackplot(idxes, arr1, arr2, arr3, colors= ['r', 'g', 'b'])
plt.title('Stack Plot Example')
plt.legend()
plt.show()
# +
# Pie charts
import matplotlib.pyplot as plt
labels = 'S1', 'S2', 'S3'
sections = [56, 66, 24]
colors = ['c', 'g', 'y']
plt.pie(sections, labels=labels, colors=colors,
        startangle=90,
        explode = (0, 0.1, 0),
        autopct = '%1.2f%%')
plt.axis('equal') # Try commenting this out.
plt.title('Pie Chart Example')
plt.show()
# +
# fill_between and alpha
import matplotlib.pyplot as plt
import numpy as np
ys = 200 + np.random.randn(100)
x = [x for x in range(len(ys))]
plt.plot(x, ys, '-')
# Shade the region between the curve and y=195, only where ys > 195.
plt.fill_between(x, ys, 195, where=(ys > 195), facecolor='g', alpha=0.6)
plt.title("Fills and Alpha Example")
plt.show()
# +
# Subplotting using Subplot2grid
import matplotlib.pyplot as plt
import numpy as np
def random_plots():
    """Return 20 (x, y) points: x = 0..19, y = random integers in [0, 10)."""
    x_values = []
    y_values = []
    for index in range(20):
        x_values.append(index)
        y_values.append(np.random.randint(10))
    return x_values, y_values
# Lay out four axes on a 5x2 grid: a short top strip, a tall middle panel,
# and two small bottom panels side by side.
fig = plt.figure()
ax1 = plt.subplot2grid((5, 2), (0, 0), rowspan=1, colspan=2)
ax2 = plt.subplot2grid((5, 2), (1, 0), rowspan=3, colspan=2)
ax3 = plt.subplot2grid((5, 2), (4, 0), rowspan=1, colspan=1)
ax4 = plt.subplot2grid((5, 2), (4, 1), rowspan=1, colspan=1)
x, y = random_plots()
ax1.plot(x, y)
x, y = random_plots()
ax2.plot(x, y)
x, y = random_plots()
ax3.plot(x, y)
x, y = random_plots()
ax4.plot(x, y)
plt.tight_layout()
plt.show()
# +
# 3D Scatter Plots
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import axes3d
fig = plt.figure()
ax = fig.add_subplot(111, projection = '3d')
x1 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
y1 = np.random.randint(10, size=10)
z1 = np.random.randint(10, size=10)
x2 = [-1, -2, -3, -4, -5, -6, -7, -8, -9, -10]
y2 = np.random.randint(-10, 0, size=10)
z2 = np.random.randint(10, size=10)
ax.scatter(x1, y1, z1, c='b', marker='o', label='blue')
ax.scatter(x2, y2, z2, c='g', marker='D', label='green')
ax.set_xlabel('x axis')
ax.set_ylabel('y axis')
ax.set_zlabel('z axis')
plt.title("3D Scatter Plot Example")
plt.legend()
plt.tight_layout()
plt.show()
# +
# 3D Bar Plots
import matplotlib.pyplot as plt
import numpy as np
fig = plt.figure()
ax = fig.add_subplot(111, projection = '3d')
x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
y = np.random.randint(10, size=10)
# Bars start at z=0 with unit footprint (dx, dy) and heights dz.
z = np.zeros(10)
dx = np.ones(10)
dy = np.ones(10)
dz = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
ax.bar3d(x, y, z, dx, dy, dz, color='g')
ax.set_xlabel('x axis')
ax.set_ylabel('y axis')
ax.set_zlabel('z axis')
plt.title("3D Bar Chart Example")
plt.tight_layout()
plt.show()
# +
# Wireframe Plots
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111, projection = '3d')
# axes3d was imported in the 3D-scatter cell above.
x, y, z = axes3d.get_test_data()
ax.plot_wireframe(x, y, z, rstride = 2, cstride = 2)
plt.title("Wireframe Plot Example")
plt.tight_layout()
plt.show()
# -
# ## Seaborn
# Seaborn is a library layered on top of Matplotlib that you can use.
# +
# Scatterplot with a nice regression line fit to it, all with just one call to Seaborn's regplot.
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
# Generate some random data
num_points = 20
# x will be 5, 6, 7... but also twiddled randomly
x = 5 + np.arange(num_points) + np.random.randn(num_points)
# y will be 10, 11, 12... but twiddled even more randomly
y = 10 + np.arange(num_points) + 5 * np.random.randn(num_points)
sns.regplot(x=x, y=y)
plt.show()
# +
# Seaborn heatmap
import matplotlib.pyplot as plt
import numpy as np
# Make a 10 x 10 heatmap of some random data
side_length = 10
# Start with a 10 x 10 matrix with values randomized around 5
data = 5 + np.random.randn(side_length, side_length)
# The next two lines make the values larger as we get closer to (9, 9)
data += np.arange(side_length)
data += np.reshape(np.arange(side_length), (side_length, 1))
# Generate the heatmap (sns imported in the regplot cell above)
fig = plt.figure()
ax = fig.add_subplot(111)
sns.heatmap(data, ax=ax)
plt.show()
# -
# ## Bokeh
# You can render HTML or interactive libraries, like **bokeh**, using the **displayHTML()**.
#
# +
import numpy as np
from bokeh.plotting import figure, show
from bokeh.io import output_notebook
from bokeh.embed import file_html
from bokeh.resources import CDN
# Scatter 4000 random circles with position-dependent colors.
N = 4000
x = np.random.random(size=N) * 100
y = np.random.random(size=N) * 100
radii = np.random.random(size=N) * 1.5
colors = ["#%02x%02x%02x" % (r, g, 150) for r, g in zip(np.floor(50+2*x).astype(int), np.floor(30+2*y).astype(int))]
p = figure()
p.circle(x, y, radius=radii, fill_color=colors, fill_alpha=0.6, line_color=None)
show(p)
# create an html document that embeds the Bokeh plot
html = file_html(p, CDN, "my plot1")
# display this html
# displayHTML is a Synapse notebook builtin (see the markdown cell above).
displayHTML(html)
# +
# Plotting glyphs over a map using bokeh.
from bokeh.plotting import figure, output_file
from bokeh.tile_providers import get_provider, Vendors
from bokeh.embed import file_html
from bokeh.resources import CDN
from bokeh.models import ColumnDataSource
tile_provider = get_provider(Vendors.CARTODBPOSITRON)
# range bounds supplied in web mercator coordinates
p = figure(x_range=(-9000000,-8000000), y_range=(4000000,5000000),
           x_axis_type="mercator", y_axis_type="mercator")
p.add_tile(tile_provider)
# plot datapoints on the map
source = ColumnDataSource(
    data=dict(x=[ -8800000, -8500000 , -8800000],
              y=[4200000, 4500000, 4900000])
)
p.circle(x="x", y="y", size=15, fill_color="blue", fill_alpha=0.8, source=source)
# create an html document that embeds the Bokeh plot
html = file_html(p, CDN, "my plot1")
# display this html
displayHTML(html)
# + [markdown] nteract={"transient": {"deleting": false}}
# ## Plotly
# You can render HTML or interactive libraries like **Plotly**, using the **displayHTML()**.
# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
from urllib.request import urlopen
import json
# County-boundary GeoJSON keyed by FIPS code.
with urlopen('https://raw.githubusercontent.com/plotly/datasets/master/geojson-counties-fips.json') as response:
    counties = json.load(response)
import pandas as pd
# dtype=str preserves leading zeros in FIPS codes.
df = pd.read_csv("https://raw.githubusercontent.com/plotly/datasets/master/fips-unemp-16.csv",
                 dtype={"fips": str})
import plotly
import plotly.express as px
fig = px.choropleth(df, geojson=counties, locations='fips', color='unemp',
                    color_continuous_scale="Viridis",
                    range_color=(0, 12),
                    scope="usa",
                    labels={'unemp':'unemployment rate'}
                   )
fig.update_layout(margin={"r":0,"t":0,"l":0,"b":0})
# create an html document that embeds the Plotly plot
h = plotly.offline.plot(fig, output_type='div')
# display this html
# displayHTML is a Synapse notebook builtin.
displayHTML(h)
| Sample/OpenDatasets/Notebooks/06-ChartingSynapseNotebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Protocol 1.0
# VLS and SAG NWs with standard lock-in technique
# <br>
# The code can be used for 1, 2 or 3 devices silmutaneously
# <br>
# This version supports both 4-probe and 2-probe measurements
# <br>
# <br>
# This code tests the stability of the devices under N repetitions
# <br>
# Source-drain bias voltage, temperature and magnetic field can be added as extra parameters
#
#
# ## Imports
# +
# Copy this to all notebooks!
from qcodes.logger import start_all_logging
start_all_logging()
# Import qcodes and other necessary packages
import qcodes as qc
import numpy as np
import time
from time import sleep
import matplotlib
import matplotlib.pyplot as plt
import os
import os.path
# Import device drivers
from qcodes.instrument_drivers.QuantumDesign.DynaCoolPPMS import DynaCool
from qcodes.instrument_drivers.Keysight.Infiniium import Infiniium
# Import qcodes packages
from qcodes import Station
from qcodes import config
from qcodes.dataset.measurements import Measurement
from qcodes.dataset.plotting import plot_by_id
from qcodes.dataset.database import initialise_database,get_DB_location
from qcodes.dataset.experiment_container import (Experiment,
load_last_experiment,
new_experiment,
load_experiment_by_name)
from qcodes.instrument.base import Instrument
from qcodes.utils.dataset.doNd import do1d,do2d
# %matplotlib notebook
go = 7.7480917310e-5
# -
# ## Station
# (Need to load 3 Keithleys and 6 Lock-In Amps)
# +
# Create station, instantiate instruments
Instrument.close_all()
path_to_station_file = 'C:/Users/lyn-ppmsmsr-01usr/Desktop/station.yaml'
# 'file//station.yaml'
# Here we load the station file.
station = Station()
station.load_config_file(path_to_station_file)
# Connect to ppms
#Instrument.find_instrument('ppms_cryostat')
ppms = DynaCool.DynaCool(name = "ppms_cryostat", address="TCPIP0::10.10.117.37::5000::SOCKET")
station.add_component(ppms)
# SRS
lockin_1 = station.load_instrument('lockin_1')
lockin_2 = station.load_instrument('lockin_2')
lockin_3 = station.load_instrument('lockin_3')
lockin_4 = station.load_instrument('lockin_4')
lockin_5 = station.load_instrument('lockin_5')
lockin_6 = station.load_instrument('lockin_6')
# DMMs
dmm_a = station.load_instrument('Keithley_A')
dmm_b = station.load_instrument('Keithley_B')
dmm_c = station.load_instrument('Keithley_C')
dmm_a.smua.volt(0) # Set voltages to 0
dmm_a.smub.volt(0) # Set voltages to 0
dmm_b.smua.volt(0) # Set voltages to 0
dmm_b.smub.volt(0) # Set voltages to 0
dmm_c.smua.volt(0) # Set voltages to 0
dmm_c.smub.volt(0) # Set voltages to 0
for inst in station.components.values():
inst.print_readable_snapshot()
# -
# ## DB File, Location
### Initialize database, make new measurement
mainpath = 'C:/Users/MicrosoftQ/Desktop/Results/Operator_name' #remember to change << /Operator_name >> to save the db file in your own user folder
config.current_config.core.db_location = os.path.join(mainpath,'GROWTHXXXX_BATCHXX_YYYYMMDD.db')
config.current_config
newpath = os.path.join(mainpath,'GROWTHXXXX_BATCHXX_YYYYMMDD')
if not os.path.exists(newpath):
os.makedirs(newpath)
figurepath = newpath
initialise_database()
# ## Functions
# +
def wait_for_field():
    """Block until the PPMS magnet reports the 'holding' state.

    Waits 1 s for the state to update, then polls magnet_state() every 0.1 s.
    """
    time.sleep(1)
    # BUGFIX: the original compared with `is not 'holding'`, which tests
    # object identity rather than string value and is not guaranteed to
    # ever become False; use != for a value comparison.
    while ppms.magnet_state() != 'holding':
        #print('waiting for field')
        time.sleep(0.1)
    #print('field ready')
    return
def wait_for_field_ramp():
    """Block until the PPMS magnet reports the 'ramping' state (poll every 1 s)."""
    # BUGFIX: `is not 'ramping'` compared string identity, not value; use !=.
    while ppms.magnet_state() != 'ramping':
        time.sleep(1)
    return
def field_ready():
    """Return True when the PPMS magnet is in the 'holding' state."""
    state = ppms.magnet_state()
    return state == 'holding'
def wait_for_temp():
    """Block until the PPMS temperature controller reports 'stable' (poll every 1 s)."""
    # BUGFIX: `is not 'stable'` compared string identity, not value; use !=.
    while ppms.temperature_state() != 'stable':
        time.sleep(1)
    return
def wait_for_near_temp():
    """Block until the temperature is 'near' the setpoint, then settle for 10 s."""
    # BUGFIX: `is not 'near'` compared string identity, not value; use !=.
    while ppms.temperature_state() != 'near':
        time.sleep(2)
    # Extra settling time once the controller reports 'near'.
    time.sleep(10)
    return
# -
# ## Lock-in add-on functions
# Gains and conductance
# +
# AMPLIFICATIONS AND VOLTAGE DIVISIONS
ACdiv = 1e-4
DCdiv = 1e-2
GIamp1 = 1e7
GVamp2 = 100
GIamp3 = 1e6
GVamp4 = 100
GIamp5 = 1e6
GVamp6 = 100
# +
# DEFINICTIONS OF FUNCTIONS FOR DIFFERENTIAL CONDUCTANCE AND RERISTANCE FOR 2 AND 4 PROBE MEASUREMENTS
# Lock-ins 1(current), 2(voltage)
def desoverh_fpm12():
    """Four-probe differential conductance (units of go) from lock-ins 1 (I) and 2 (V)."""
    current = lockin_1.X() / GIamp1
    voltage = lockin_2.X() / GVamp2
    # Guard against division by zero when no voltage is measured.
    if voltage == 0:
        return 0
    return current / voltage / go
def desoverh_tpm1():
    """Two-probe conductance (units of go) from lock-in 1, using the drive amplitude as V."""
    current = lockin_1.X() / GIamp1
    drive_voltage = lockin_1.amplitude() * ACdiv
    return current / drive_voltage / go
def ohms_law12():
    """Four-probe resistance (Ohm) from lock-ins 1 (I) and 2 (V); 0 when no current flows."""
    current = lockin_1.X() / GIamp1
    voltage = lockin_2.X() / GVamp2
    return voltage / current if current != 0 else 0
# Lock-ins 3(current), 4(voltage)
def desoverh_fpm34():
    """Four-probe differential conductance (units of go) from lock-ins 3 (I) and 4 (V)."""
    current = lockin_3.X() / GIamp3
    voltage = lockin_4.X() / GVamp4
    # Guard against division by zero when no voltage is measured.
    if voltage == 0:
        return 0
    return current / voltage / go
def desoverh_tpm3():
    """Two-probe conductance (units of go) from lock-in 3, using the drive amplitude as V.

    BUGFIX: the original divided by GIamp1 (1e7) — a copy-paste from
    desoverh_tpm1 — instead of lock-in 3's own current-amplifier gain
    GIamp3 (1e6), skewing the reported conductance by a factor of 10.
    """
    volt_ampl = lockin_3.X
    sig_ampl = lockin_3.amplitude()
    I_tpm = volt_ampl()/GIamp3
    V_tpm = sig_ampl*ACdiv
    dcond_tpm = I_tpm/V_tpm/go
    return dcond_tpm
def ohms_law34():
    """Four-probe resistance (Ohm) from lock-ins 3 (I) and 4 (V); 0 when no current flows."""
    current = lockin_3.X() / GIamp3
    voltage = lockin_4.X() / GVamp4
    return voltage / current if current != 0 else 0
# Lock-ins 5(current), 6(voltage)
def desoverh_fpm56():
    """Four-probe differential conductance (units of go) from lock-ins 5 (I) and 6 (V)."""
    current = lockin_5.X() / GIamp5
    voltage = lockin_6.X() / GVamp6
    # Guard against division by zero when no voltage is measured.
    if voltage == 0:
        return 0
    return current / voltage / go
def desoverh_tpm5():
    """Two-probe conductance (units of go) from lock-in 5, using the drive amplitude as V.

    BUGFIX: the original divided by GIamp1 (1e7) — a copy-paste from
    desoverh_tpm1 — instead of lock-in 5's own current-amplifier gain
    GIamp5 (1e6), skewing the reported conductance by a factor of 10.
    """
    volt_ampl = lockin_5.X
    sig_ampl = lockin_5.amplitude()
    I_tpm = volt_ampl()/GIamp5
    V_tpm = sig_ampl*ACdiv
    dcond_tpm = I_tpm/V_tpm/go
    return dcond_tpm
def ohms_law56():
    """Four-probe resistance (Ohm) from lock-ins 5 (I) and 6 (V); 0 when no current flows."""
    current = lockin_5.X() / GIamp5
    voltage = lockin_6.X() / GVamp6
    return voltage / current if current != 0 else 0
# -
# Register the conductance/resistance helper functions above as qcodes
# Parameters on the lock-ins.  add_parameter raises KeyError when the
# parameter name already exists (e.g. when this cell is re-run); in that
# case the stale parameter is deleted so that running the cell one more
# time recreates it cleanly — hence the "Try again" message.
try:
    lockin_1.add_parameter("diff_conductance_fpm", label="dI/dV", unit="2e^2/h", get_cmd = desoverh_fpm12)
except KeyError:
    print("parameter already exists. Deleting. Try again")
    del lockin_1.parameters['diff_conductance_fpm']
try:
    lockin_1.add_parameter("conductance_tpm", label="I/V", unit="2e^2/h", get_cmd = desoverh_tpm1)
except KeyError:
    print("parameter already exists. Deleting. Try again")
    del lockin_1.parameters['conductance_tpm']
try:
    lockin_1.add_parameter("resistance_fpm", label="R", unit="Ohm", get_cmd = ohms_law12)
except KeyError:
    print("parameter already exists. Deleting. Try again")
    del lockin_1.parameters['resistance_fpm']
try:
    lockin_3.add_parameter("diff_conductance_fpm", label="dI/dV", unit="2e^2/h", get_cmd = desoverh_fpm34)
except KeyError:
    print("parameter already exists. Deleting. Try again")
    del lockin_3.parameters['diff_conductance_fpm']
try:
    lockin_3.add_parameter("conductance_tpm", label="I/V", unit="2e^2/h", get_cmd = desoverh_tpm3)
except KeyError:
    print("parameter already exists. Deleting. Try again")
    del lockin_3.parameters['conductance_tpm']
try:
    lockin_3.add_parameter("resistance_fpm", label="R", unit="Ohm", get_cmd = ohms_law34)
except KeyError:
    print("parameter already exists. Deleting. Try again")
    del lockin_3.parameters['resistance_fpm']
try:
    lockin_5.add_parameter("diff_conductance_fpm", label="dI/dV", unit="2e^2/h", get_cmd = desoverh_fpm56)
except KeyError:
    print("parameter already exists. Deleting. Try again")
    del lockin_5.parameters['diff_conductance_fpm']
try:
    lockin_5.add_parameter("conductance_tpm", label="I/V", unit="2e^2/h", get_cmd = desoverh_tpm5)
except KeyError:
    print("parameter already exists. Deleting. Try again")
    del lockin_5.parameters['conductance_tpm']
# # Measurement parameters
# +
Vgmin = -2 #V [consult the ppt protocol]
Vgmax = +5 #V [consult the ppt protocol]
Npoints = 801 # [consult the ppt protocol]
VSD = 0 #V DC [consult the ppt protocol]
timedelay = 0.1 # sec [consult the ppt protocol]
VAC = 1 #V AC [consult the ppt protocol]
f = 136.5 #Hz [consult the ppt protocol]
tcI = 0.03 #sec [consult the ppt protocol]
tcV = 0.03 #sec [consult the ppt protocol] Preferably the same with tcI
dB_slope = 12 # dB [consult the ppt protocol]
N = 3 #Repetitions [consult the ppt protocol]
temperature = 1.7 #K
temperature_rate = 0.1
magnetic_field = 0 #T
magnetic_field_rate = 0.22
# +
# Small calculation for measurement parameters
# Sanity checks on the lock-in settings.
# BUGFIX: the original re-assigned valid_meas in the second if-chain, so a
# failure (Error) of the frequency/time-constant check was silently
# overwritten by a passing time-delay check.  Track each check separately
# and combine them at the end.
if 1/f*5 <= tcI and 1/f*5 <= tcV:
    tc_ok = True
elif 1/f < tcI and 1/f < tcV:
    tc_ok = True
    print("Warning: Time constant must be much smaller than signal oscillation period", 1/f*1000, "msec")
else:
    tc_ok = False
    print("Error: Time constant must be smaller than signal oscillation period", 1/f*1000, "msec")
if tcI*2.5<=timedelay and tcV*2.5<=timedelay:
    delay_ok = True
elif tcI<=timedelay and tcV<=timedelay:
    delay_ok = True
    print("Warning: Time delay is comparable with time constant")
    print("Time constant:",tcI*1e3 ,"msec, (current); ", tcV*1e3, "msec, (voltage)")
    print("Time delay:", timedelay*1e3,"msec")
else:
    delay_ok = False
    print("Error: Time delay is smaller than the time constant")
# Measurement is valid only if both checks passed.
valid_meas = tc_ok and delay_ok
valid_meas
# -
# ## Frequency Test
# Small measurement for frequency choise
# <br>
# Use whichever lock-in you are interested to test (eg. lockin_X)
# +
new_experiment(name='lockin start-up', sample_name='DEVXX S21D18G38')
# Time constant choise:
# Example: f_min = 60 Hz => t_c = 1/60*2.5 sec = 42 msec => we should choose the closest value: 100 ms
lockin_1.time_constant(0.1)
tdelay = 0.3
dmm_a.smub.output('on') # Turn on the gate channel
dmm_a.smub.volt(-2) # Set the gate on a very high resistance area (below the pinch-off)
# 1-D sweep for amplitude dependence
#do1d(lockin_1.frequency,45,75,100,tdelay,lockin_1.X,lockin_1.Y,lockin_1.conductance_tpm)
# 2-D sweep repetition on a smaller frequency range for noise inspection
do2d(dmm_a.smua.volt,1,50,50,1,lockin_1.frequency,45,75,100,tdelay,lockin_1.X,lockin_1.Y,lockin_1.conductance_tpm)
dmm_a.smub.volt(0)
dmm_a.smub.output('off')
# +
# Set things up to the station
lockin_1.time_constant(tcI) # set time constant on the lock-in
lockin_1.frequency(f) # set frequency on the lock-in
lockin_1.amplitude(VAC) # set amplitude on the lock-in
lockin_1.filter_slope(dB_slope) # set filter slope on the lock-in
lockin_2.time_constant(tcV) # set time constant on the lock-in
lockin_2.filter_slope(dB_slope) # set filter slope on the lock-in
lockin_3.time_constant(tcI) # set time constant on the lock-in
lockin_3.frequency(f) # set frequency on the lock-in
lockin_3.amplitude(VAC) # set amplitude on the lock-in
lockin_3.filter_slope(dB_slope) # set filter slope on the lock-in
lockin_4.time_constant(tcV) # set time constant on the lock-in
lockin_4.filter_slope(dB_slope) # set filter slope on the lock-in
lockin_5.time_constant(tcI) # set time constant on the lock-in
lockin_5.frequency(f) # set frequency on the lock-in
lockin_5.amplitude(VAC) # set amplitude on the lock-in
lockin_5.filter_slope(dB_slope) # set filter slope on the lock-in
lockin_6.time_constant(tcV) # set time constant on the lock-in
lockin_6.filter_slope(dB_slope) # set filter slope on the lock-in
dcond1 = lockin_1.diff_conductance_fpm
cond1 = lockin_1.conductance_tpm
res1 = lockin_1.resistance_fpm
X1 = lockin_1.X
X2 = lockin_2.X
Y1 = lockin_1.Y
Y2 = lockin_2.Y
dcond3 = lockin_3.diff_conductance_fpm
cond3 = lockin_3.conductance_tpm
res3 = lockin_3.resistance_fpm
X3 = lockin_3.X
X4 = lockin_4.X
Y3 = lockin_3.Y
Y4 = lockin_4.Y
dcond5 = lockin_5.diff_conductance_fpm
cond5 = lockin_5.conductance_tpm
res5 = lockin_5.resistance_fpm
X5 = lockin_5.X
X6 = lockin_6.X
Y5 = lockin_5.Y
Y6 = lockin_6.Y
gate = dmm_a.smub.volt
bias1 = dmm_a.smua.volt
bias3 = dmm_b.smua.volt
bias5 = dmm_b.smub.volt
temp = ppms.temperature # read the temperature
temp_set = ppms.temperature_setpoint # set the temperature
temp_rate = ppms.temperature_rate # set the temperature rate
temp_rate(temperature_rate)
temp_set(temperature)
field = ppms.field_measured # read the magnetic field
field_set = ppms.field_target # set the field; a new qcodes function! field_rate is not in use anymore
field_rate = ppms.field_rate # set the the magnetic field rate
field_rate(magnetic_field_rate)
field_set(magnetic_field)
# -
# ## The measurement
# +
# If you want to add bias then uncheck
#dmm_a.smua.output('on') # bias for 1
#dmm_b.smua.output('on') # bias for 3
#dmm_b.smub.output('on') # bias for 5
#bias1(1e-3/DCdiv)
#bias3(1e-3/DCdiv)
#bias5(1e-3/DCdiv)
# If you want to add magnetic field then uncheck
#field_set(3)
#wait_for_field()
# If you want to add temperature then uncheck
#temp_set(3)
#wait_for_temp()
# The run measurement
gate(0)
dmm_a.smub.output('on') # Turn on the gate
#vv1 = "Vsd1="+"{:.3f}".format(bias1()*DCdiv*1e3)+"mV "
#vv2 = "Vsd2="+"{:.3f}".format(bias2()*DCdiv*1e3)+"mV "
#vv2 = "Vsd3="+"{:.3f}".format(bias3()*DCdiv*1e3)+"mV "
bb = "B="+"{:.3f}".format(ppms.field_measured())+"T "
tt = "T="+"{:.2f}".format(ppms.temperature())+"K "
ff = "f="+"{:.1f}".format(lockin_1.frequency())+"Hz "
aa = "Ampl="+"{:.4f}".format(lockin_1.amplitude()*ACdiv*1e3)+"mV"
Conditions = tt + bb + ff + aa
d1 = "/1/ DEV00 S99 VH99 VL99 D99"
d2 = "/3/ DEV00 S99 VH99 VL99 D99"
d3 = "/5/ DEV00 S99 VH99 VL99 D99"
Sample_name = d1# + d2 + d3
Experiment_name = "ramp to -Vgmin: "
new_experiment(name= "Protocol1.0 " + Experiment_name + Conditions, sample_name = Sample_name)
do1d(gate,0,Vgmin,int(Npoints/2),timedelay,
#cond1,X1,Y1,
dcond1,X1,Y1,X2,Y2,
#cond3,X3,Y3,
#dcond3,X3,Y3,X4,Y4,
#cond5,X5,Y5,
#dcond5,X5,Y5,X6,Y6,
do_plot=False)
for n in range(N):
Experiment_name = "iteration "+str(n)+": "
new_experiment(name= "Protocol1.0 " + Experiment_name+'UP; ' + Conditions, sample_name = Sample_name)
do1d(gate,Vgmin,Vgmax,Npoints,timedelay,
#cond1,X1,Y1,
dcond1,X1,Y1,X2,Y2,
#cond3,X3,Y3,
#dcond3,X3,Y3,X4,Y4,
#cond5,X5,Y5,
#dcond5,X5,Y5,X6,Y6,
do_plot=False)
new_experiment(name= "Protocol1.0 " + Experiment_name+'DOWN; ' + Conditions, sample_name = Sample_name)
do1d(gate,Vgmax,Vgmin,Npoints,timedelay,
#cond1,X1,Y1,
dcond1,X1,Y1,X2,Y2,
#cond3,X3,Y3,
#dcond3,X3,Y3,X4,Y4,
#cond5,X5,Y5,
#dcond5,X5,Y5,X6,Y6,
do_plot=False)
Experiment_name = "ramp to Zero: "
new_experiment(name= "Protocol1.0 " + Experiment_name + Conditions, sample_name = Sample_name)
do1d(gate,Vgmin,0,int(Npoints/2),timedelay,
#cond1,X1,Y1,
dcond1,X1,Y1,X2,Y2,
#cond3,X3,Y3,
#dcond3,X3,Y3,X4,Y4,
#cond5,X5,Y5,
#dcond5,X5,Y5,X6,Y6,
do_plot=False)
dmm_a.smub.output('off') # the gate
#dmm_a.smua.output('off') # the bias for 1
#dmm_b.smua.output('off') # the bias for 3
#dmm_b.smub.output('off') # the bias for 5
| old ideas/Protocol 1/Scripts/Protocol_1.0/protocol_1.0.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Map violations data
# #### Set up environment
# +
# Install paretochart
# !pip install --upgrade paretochart
# Import necessary packages
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
# %matplotlib inline
# -
# Troubleshooting issue with notebook not being able to import pareto module
import sys
# sys.path.append('/Users/davidplewis/anaconda3/lib/python3.6/site-packages/paretochart')
# sys.path.pop(6)
sys.path
sys.path
from paretochart import paretochart
# #### Import and clean data
# Import violations data
import os.path
# The DATA/ directory sits one level above the notebook's working directory.
root_path = os.path.dirname(os.getcwd())
violation_values = pd.read_csv(os.path.join(root_path,"DATA/violation_values.csv"))
violation_counts = pd.read_csv(os.path.join(root_path,"DATA/violation_counts.csv"))
violation_values.head()
# Sum each violation column across all inspections (drop the id column first).
violation_columns = (list(violation_values.columns))
violation_columns.remove("inspection_id")
violation_sums = violation_values[violation_columns].sum()
# Reshape the summed Series into a two-column frame: violation name -> total count.
violation_sums = pd.DataFrame({'violation':violation_sums.index, 'count':violation_sums.values})
violation_sums
# #### Graph Violations
# +
# Make pareto chart from violation sums
data = violation_sums["count"].values.tolist()
labels = violation_sums["violation"].values.tolist()
test_data = [21, 2, 10, 4, 16]
# fig, axes = plt.subplots(2, 2)
chart = paretochart.pareto(data, labels, limit=.8, line_args=('g',))
# plt.title('Basic chart without labels', fontsize=10)
plt.rcParams["figure.figsize"] = (20,3)
# +
# Make bar graph of violation sums
violation_graph =violation_sums.plot(kind="bar", figsize=(10,7))
# -
| maps_and_plots/21_plot_violations.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.7.12 ('ranx')
# language: python
# name: python3
# ---
# In this notebook we explore the `evaluate` function offered by `ranx`.
# First of all we need to install [ranx](https://github.com/AmenRa/ranx)
#
# Mind that the first time you run any ranx' functions they may take a while as they must be compiled first
# !pip install -U ranx
# Download the data we need
# +
import os
import requests
for file in ["qrels", "results"]:
os.makedirs("notebooks/data", exist_ok=True)
with open(f"notebooks/data/{file}.test", "w") as f:
master = f"https://raw.githubusercontent.com/AmenRa/ranx/master/notebooks/data/{file}.test"
f.write(requests.get(master).text)
# -
from ranx import Qrels, Run, evaluate
qrels = Qrels.from_file("notebooks/data/qrels.test", kind="trec")
run = Run.from_file("notebooks/data/results.test", kind="trec")
# Evaluate
#
# For a full list of the available metrics see [here](https://amenra.github.io/ranx/metrics/).
# Single metric
print(evaluate(qrels, run, "hits"))
print(evaluate(qrels, run, "hit_rate"))
print(evaluate(qrels, run, "precision"))
print(evaluate(qrels, run, "recall"))
print(evaluate(qrels, run, "f1"))
print(evaluate(qrels, run, "r-precision"))
print(evaluate(qrels, run, "mrr"))
print(evaluate(qrels, run, "map"))
print(evaluate(qrels, run, "ndcg"))
# Single metric with cutoff
evaluate(qrels, run, "ndcg@10")
# Multiple metrics
evaluate(qrels, run, ["map", "mrr", "ndcg"])
# Multiple metrics with cutoffs (you can use different cutoffs for each metric)
evaluate(qrels, run, ["map@100", "mrr@10", "ndcg@10"])
# By default, scores are saved in the evaluated Run
# You can disable this behaviour by passing `save_results_in_run=False`
# when calling `evaluate`
run.mean_scores
# +
import json # Just for pretty printing
print(json.dumps(run.scores, indent=4))
# 301, 302, and 303 are the query ids
# -
# Alternatively, per query scores can be extracted as Numpy Arrays by passing
# `return_mean = False` to `evaluate`
evaluate(qrels, run, ["map@100", "mrr@10", "ndcg@10"], return_mean=False)
# Finally, you can set the number of threads used for computing the metric
# scores, by passing `threads = n` to `evaluate`
# `threads = 0` by default, which means all the available threads will be used
# Note that if the number of queries is small, `ranx` will automatically set
# `threads = 1` to prevent performance degradations
evaluate(qrels, run, ["map@100", "mrr@10", "ndcg@10"], threads=1)
| notebooks/3_evaluation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# #### Constructors (__init__)
#
# __init__() => Yapıcı method. Tüm sınıflarda obje oluşturmak içn kullanılır. "C" tabanlı dillerde "Constructor" olarak isimlendirilir. Bu yapı sınıf türetilirken yani instance alınırken nesneye bağlı değil, sınıf kendisine atrama yapmak için kullanılır. Sınıfın örneklemi alındığında yani sınıf başlatıldığında "__init__" kendisi otomatik olarak çalışır. Bir başka değişle bu yöntem bir sınıftan nesne oluşturduğumuzda çağrılır ve sınıfın niteliklerini başlatılmasına (initilazition) izin verir.
#
# "__init__()" nesnenin durumunu başlatmak için kullanılır. Yapıcı methot olarak adlandırılan bu yapının görevi bir sınıfın nesnesi oluşturulduğunda sınıfın öznitelikleirni (attributte) yani sınıfın veri üyelerini başlatmak onlara dışarıdan gelen değerleri atamktıdr. Bir sınıf nesnesi somutlaştırıldığında yani sınıf instance'si alındığında __init__ otomatik olarak çalışır.
# +
class student:
    """Plain data holder for a student record: id, first name, last name."""

    def __init__(self, studetId, firstName, lastName):
        # The misspelled parameter name `studetId` is preserved so keyword
        # callers keep working; the attribute itself is spelled correctly.
        self.lastName = lastName
        self.firstName = firstName
        self.studentId = studetId
ogrenci_1 = student("B1245.12345","Burak","Yılmaz")
print(
"Öğrenci Bilgileri\nNo: {}\nAdı: {}\nSoyadı: {}"
.format(ogrenci_1.studentId, ogrenci_1.firstName, ogrenci_1.lastName)
)
# +
class Name:
    """Value object holding a person's first and last name."""

    def __init__(self, firstName, lastName):
        self.LastName = lastName
        self.FirstName = firstName
class Student:
    """Student record: id, a Name instance, and the enrolled course."""

    def __init__(self, studentId, name, course):
        self.Course = course
        self.StudentName = name
        self.StudentId = studentId
ogrenci_1 = Student("B123.4565",Name("Burak","Yılmaz"), "Python")
ogrenci_2 = Student("B213.4856",Name("İpek","Yılmaz"), "Python")
ogrenci_3 = Student("B321.4785",Name("Hakan","Yılmaz"), "Python")
students = [ogrenci_1, ogrenci_2, ogrenci_3]
for student in students:
print(
"Student Id: {}\nName: {} {}\nCourse: {}"
.format(
student.StudentId, student.StudentName.FirstName, student.StudentName.LastName, student.Course
)
)
# +
class Employee:
    """Employee record that also counts, on the class, how many were created."""

    departmant = "Yazilim"  # default department (original attribute spelling kept)
    eleman_sayisi = 0  # class-wide count of Employee instances created

    def __init__(self, name, age):
        self.Age = age
        self.Name = name
        # Every instantiation bumps the shared counter on the class itself.
        Employee.eleman_sayisi += 1

    def eleman_sayisini_goster(self):
        # Print the total number of employees created so far.
        print("Toplam Eleman Sayısı: {}".format(Employee.eleman_sayisi))

    def elemanlari_goster(self):
        # Print this employee's name, age and department.
        print("Ad: {}\nYaş: {}\nDepartman: {}"
              .format(self.Name, self.Age, self.departmant,))
calican = Employee("Burak","31")
calican.departmant = "Muhassebe"
calican.eleman_sayisini_goster()
calican.elemanlari_goster()
# -
calisan2= Employee(
input("Ad: "),
input("Yaş: "),
)
calisan2.departmant = "Yazılım"
calisan2.eleman_sayisini_goster()
calisan2.elemanlari_goster()
# +
class SoftwareDeveloper():
    """Developer profile: name plus known programming language(s)."""

    def __init__(self, firstname, lastname, language):
        self.Language = language
        self.LastName = lastname
        self.FirstName = firstname

    def ShowInformation(self):
        # Multi-line report; the literal's layout is part of the output.
        print("""
        Information of Software Developer
        First Name: {}
        Last Name: {}
        Language: {}
        """.format(self.FirstName,self.LastName,self.Language))
developer = SoftwareDeveloper(
input("First Name: "),
input("Last Name: "),
input("Language: ")
)
developer.ShowInformation()
# +
class SoftwareDeveloper():
    """Developer profile whose language list can be extended after creation."""

    def __init__(self, firstname, lastname, language):
        self.Language = language
        self.LastName = lastname
        self.FirstName = firstname

    def ShowInformation(self):
        # Multi-line report; the literal's layout is part of the output.
        print("""
        Information of Software Developer
        First Name: {}
        Last Name: {}
        Language: {}
        """.format(self.FirstName, self.LastName, self.Language))

    def AddNewLanguage(self, NewLanguage):
        # Announce, then append the new skill to the existing language list.
        print("New language added..!")
        self.Language.append(NewLanguage)
developer = SoftwareDeveloper("Burak","Yılmaz",["C#","Python","RPA","C++"])
developer.AddNewLanguage(input("Please type into new language skill: "))
developer.ShowInformation()
| OOP Lab-2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from collections import defaultdict
import heapq
import numpy as np
SMALL_MATRIX = 'small_matrix.txt'
LARGE_MATRIX = 'large_matrix.txt'
UP, DOWN, LEFT, RIGHT = (-1,0), (1,0), (0,-1), (0,1)
def read_file(filename):
    """Parse a file of comma-separated integer rows into a NumPy array."""
    with open(filename) as fh:
        rows = [[int(tok) for tok in line.split(',')] for line in fh]
    return np.asarray(rows)
# # Solution for 081-082-083 with Dijkstra
def get_neighbours(i, j, directions, matrix):
    """Return [(cost, (ni, nj)), ...] for the in-bounds neighbours of (i, j).

    BUGFIX: the original relied on IndexError to reject out-of-bounds
    neighbours, but Python negative indexing silently wraps, so stepping UP
    from row 0 (or LEFT from column 0) returned a bogus neighbour on the
    opposite edge of the matrix — corrupting the p082/p083 searches.
    Bounds are now checked explicitly.
    """
    neighbours = []
    for di, dj in directions:
        ni, nj = i + di, j + dj
        if 0 <= ni < len(matrix) and 0 <= nj < len(matrix[ni]):
            neighbours.append((matrix[ni][nj], (ni, nj)))
    return neighbours
# Matrix implementation of dijkstra
def dijkstra(matrix, source, target, directions):
    """Dijkstra shortest-path search over a 2-D cost matrix.

    Parameters
    ----------
    matrix : 2-D numpy array; matrix[i][j] is the cost of entering cell (i, j).
    source : (i, j) start cell; its entry cost is included in the result.
    target : a single (i, j) cell or a set of cells; the search stops at the
        first one reached.
    directions : iterable of (di, dj) steps allowed from each cell.

    Returns
    -------
    (cost, path) where path is a tuple of cells from target back to source,
    or (-1, ()) when no target is reachable.
    """
    # Create data structures needed
    # Heap entries are (cost, cell, path); ties on cost fall back to
    # comparing the cell/path tuples, which keeps heap ordering total.
    queue = [(matrix[source],source,())] # nodes need to process
    seen = set() # set (O(1) lookup) of seen nodes
    mins = {source: 0} # dict with min path dist from source
    # Normalise a single target cell into a set so membership tests work.
    if not isinstance(target, set):
        target = {target}
    while queue:
        # Pop node in queue with smallest cost
        cost, v1, path = heapq.heappop(queue) # num, (i, j), pathlist
        # Process it if not seen yet
        if v1 not in seen:
            seen.add(v1) # add as seen
            path = (v1, *path) # and update path
            # Finished if v1 is target
            if v1 in target:
                return (cost, path)
            # If not, we need to process v1's neighbours
            for c, v2 in get_neighbours(*v1, directions, matrix):
                # Only process if not seen before
                if v2 not in seen:
                    prev = mins.get(v2, None)
                    nextt = cost + c
                    # Relax the edge: record/queue v2 only if this route improves it.
                    if prev is None or nextt < prev:
                        mins[v2] = nextt
                        heapq.heappush(queue, (nextt, v2, path))
    # Queue exhausted without reaching any target.
    return (-1, ())
# +
def solve_p81():
    """Project Euler 81: minimal path sum moving only right/down through the matrix."""
    matrix = read_file(LARGE_MATRIX)
    l = len(matrix) - 1
    return dijkstra(matrix, (0,0), (l,l), (RIGHT, DOWN))
# BUGFIX: the original printed the function object itself (print(solve_p81));
# call it and print the cost, matching the other solvers below.
print(solve_p81()[0])
# %timeit solve_p81()
# +
def solve_p82():
    """Project Euler 82: minimal path sum from any left-column cell to any right-column cell."""
    matrix = read_file(LARGE_MATRIX)
    last_col = len(matrix) - 1
    # Any cell of the rightmost column is an acceptable endpoint.
    targets = {(row, last_col) for row in range(len(matrix))}
    # Run one search per possible starting row and keep the cheapest result.
    runs = [dijkstra(matrix, (row, 0), targets, (RIGHT, UP, DOWN))
            for row in range(len(matrix))]
    return min(runs)
print(solve_p82()[0])
# %timeit solve_p82()
# +
def solve_p83():
    """Project Euler 83: minimal path sum with moves allowed in all four directions."""
    matrix = read_file(LARGE_MATRIX)
    corner = len(matrix) - 1
    return dijkstra(matrix, (0, 0), (corner, corner), (RIGHT, LEFT, UP, DOWN))
print(solve_p83()[0])
# %timeit solve_p83()
# -
# # Solution for p081, calculating min path through matrix
# +
def find_min_path(matrix):
    """Dynamic-programming cost table for right/down-only paths (problem 81).

    Returns a matrix where entry (i, j) is the cheapest path sum from
    (0, 0) to (i, j).
    """
    n = len(matrix)
    cost_matrix = np.zeros(shape=(n, n))
    cost_matrix[0][0] = matrix[0][0]
    # First row and first column: only one way to reach each cell.
    for k in range(1, n):
        cost_matrix[0][k] = cost_matrix[0][k-1] + matrix[0][k]
        cost_matrix[k][0] = cost_matrix[k-1][0] + matrix[k][0]
    # Interior cells: cheapest of arriving from above or from the left.
    for i in range(1, n):
        for j in range(1, n):
            cost_matrix[i][j] = matrix[i][j] + min(cost_matrix[i-1][j], cost_matrix[i][j-1])
    return cost_matrix
def solve():
    """Problem 81 via the DP cost table; the answer is the bottom-right entry."""
    cost_table = find_min_path(read_file(LARGE_MATRIX))
    return int(cost_table[-1][-1])
# %timeit solve()
# -
# # Solution for p082, using edges and graph in dictionary instead of using matrix + get_neigbours()
def create_edges(matrix):
    """Build a weighted edge list for problem 82 from the cost matrix.

    Cells are renumbered 1..n*n.  A virtual source node 0 connects to every
    cell of the leftmost column (at that cell's entry cost) and every cell of
    the rightmost column connects to a virtual sink node -1 at zero cost.
    Each edge weight is the cost of the cell being entered.
    """
    num_to_idx = {}
    idx_to_num = {}
    edges = []
    # Create a mapping from unique number to idx, and vice versa
    counter = 1
    for idx, x in enumerate(matrix):
        for idy, y in enumerate(x):
            num_to_idx[counter] = (idx, idy)
            idx_to_num[(idx,idy)] = counter
            counter += 1
    # Create all edges pointing to the right
    for i in range(len(matrix)):
        for j in range(len(matrix)-1):
            f = idx_to_num[i,j]
            t = idx_to_num[i,j+1]
            c = matrix[i,j+1]
            edges.append((f,t,c))
    # Create all edges pointing down
    # NOTE(review): vertical edges skip the first and last columns
    # (range(1, len-1)); presumably vertical moves there never shorten a
    # path given the virtual source/sink fan-out — confirm before reuse.
    for i in range(len(matrix) - 1):
        for j in range(1, len(matrix)-1):
            f = idx_to_num[i,j]
            t = idx_to_num[i+1,j]
            c = matrix[i+1,j]
            edges.append((f,t,c))
    # Create all edges pointing up
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix)-1):
            f = idx_to_num[i,j]
            t = idx_to_num[i-1,j]
            c = matrix[i-1,j]
            edges.append((f,t,c))
    # And lastly create two extra edges for start and end:
    for i in range(len(matrix)):
        edges.append((0, idx_to_num[i, 0], matrix[i][0]))
        edges.append((idx_to_num[i,len(matrix)-1], -1, 0))
    return edges
# +
# Graph implementation of dijkstra
def dijkstra_graph(edges, source, target):
    """Dijkstra shortest path over an explicit edge list.

    Parameters
    ----------
    edges : iterable of (from_node, to_node, cost) triples.
    source, target : node identifiers.

    Returns
    -------
    (cost, path) where path is a tuple of nodes from target back to source,
    or (-1, ()) when target is unreachable.
    """
    # Adjacency map: node -> list of (cost, neighbour).
    g = defaultdict(list)
    for l,r,c in edges:
        g[l].append((c,r))
    # Create data structures needed
    queue = [(0,source,())] # nodes need to process
    seen = set() # set (O(1) lookup) of seen nodes
    mins = {source: 0} # dict with min path dist from source
    while queue:
        # Pop node in queue with smallest cost
        cost, v1, path = heapq.heappop(queue) # num, (i, j), pathlist
        # Process it if not seen yet
        if v1 not in seen:
            seen.add(v1) # add as seen
            path = (v1, *path) # and update path
            # Finished if v1 is target
            if v1 == target:
                return (cost, path)
            # If not, we need to process v1's neighbours.
            # BUGFIX: the original used g.get(v1, None), which returns None
            # for a node with no outgoing edges (a dead end or the sink) and
            # then raises TypeError on iteration; default to () instead.
            for c, v2 in g.get(v1, ()):
                # Only process if not seen before
                if v2 not in seen:
                    prev = mins.get(v2, None)
                    nextt = cost + c
                    if prev is None or nextt < prev:
                        mins[v2] = nextt
                        heapq.heappush(queue, (nextt, v2, path))
    return (-1, ())
def solve_p82_graph():
    """Problem 82 via the edge-list graph with virtual source (0) and sink (-1) nodes."""
    edge_list = create_edges(read_file(LARGE_MATRIX))
    return dijkstra_graph(edge_list, 0, -1)[0]
# -
print(solve_p82_graph())
# %timeit solve_p82_graph()
| 081-082-083/p081-082-083.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + slideshow={"slide_type": "slide"}
import pandas as pd
import numpy as np
import finterstellar as fs
# + slideshow={"slide_type": "-"}
vlu = fs.ValueAtExpiry()
view = fs.Visualize()
# + slideshow={"slide_type": "slide"}
x = np.arange(200,400)
# -
# 선물 : futures(x, 행사가)
fl300 = vlu.futures(x, 300)
# view.square_one_to_one_view(x, y1=값, y2=값, ...)
view.square_one_to_one_view(x, FL300=fl300)
# + slideshow={"slide_type": "slide"}
# Long call option payoff: call_option(x, strike, premium)
cl300 = vlu.call_option(x, 300, 10)
# -
view.square_one_to_one_view(x, CL300=cl300)
# + slideshow={"slide_type": "slide"}
# Short call option payoff: call_option(x, strike, premium) * -1
cs300 = vlu.call_option(x, 300, 10) * -1
# -
view.square_one_to_one_view(x, CS300=cs300)
# + slideshow={"slide_type": "slide"}
# Long put option payoff: put_option(x, strike, premium)
pl300 = vlu.put_option(x, 300, 10)
# -
view.square_one_to_one_view(x, PL300=pl300)
# + slideshow={"slide_type": "slide"}
# Principal-protected structure: long futures + short 350 call + long 300 put
cs350 = vlu.call_option(x, 350, 10) * -1
view.square_one_to_one_view(x, FL300=fl300, CS350=cs350, PL300=pl300)
# + slideshow={"slide_type": "slide"}
# Sell both sides with OTM strikes (short strangle): short 350 call + short 250 put
cs350 = vlu.call_option(x, 350, 10) * -1
ps250 = vlu.put_option(x, 250, 10) * -1
view.square_one_to_one_view(x, CS350=cs350, PS250=ps250)
# -
# # Hands on training
# + [markdown] slideshow={"slide_type": "slide"}
# 레버리지 상품을 구조화해보세요
# <img src="image/w5-leverage.png" style="height:400px;"/>
# -
# <img src="image/w5-structuring_covered_call_q.png" style="height:500px;"/>
# + slideshow={"slide_type": "subslide"}
# 레버리지형
view.square_one_to_one_view(x, FL300_1=fl300, FL300_2=fl300)
# + [markdown] slideshow={"slide_type": "slide"}
# 커버드콜 상품을 구조화해보세요
# <img src="image/w5-structuring_covered_call.png" style="height:400px;"/>
# -
# <img src="image/w5-structuring_covered_call_q.png" style="height:500px;"/>
# + slideshow={"slide_type": "subslide"}
# 커버드콜
#lf300 = vlu.futures(x, 300)
#sc350 = vlu.call_option(x, 350, 10) * -1
view.square_one_to_one_view(x, FL300=fl300, CS350=cs350)
# + [markdown] slideshow={"slide_type": "slide"}
# 원금보장형 상품을 구조화해보세요
# <img src="image/w5-structuring_bull_spread.png" style="height:400px;"/>
# -
# <img src="image/w5-structuring_bull_spread_q.png" style="height:500px;"/>
# + slideshow={"slide_type": "subslide"}
# 원금보장형
view.square_one_to_one_view(x, CL300=cl300, CS350=cs350)
# + [markdown] slideshow={"slide_type": "slide"}
# 양매도 ATM 상품을 구조화해보세요
# <img src="image/w5-structuring_short_straddle.png" style="height:400px;"/>
# -
# <img src="image/w5-structuring_short_straddle_q.png" style="height:500px;"/>
# + slideshow={"slide_type": "subslide"}
# 양매도 ATM
#cs300 = vlu.call_option(x, 300, 10) * -1
ps300 = vlu.put_option(x, 300, 10) * -1
view.square_one_to_one_view(x, CS300=cs300, PS300=ps300)
# + [markdown] slideshow={"slide_type": "slide"}
# 롱 버터플라이 (프로텍티브 양매도) 상품을 구조화해보세요
# <img src="image/w5-structuring_long_butterfly.png" style="height:500px;"/>
# -
# <img src="image/w5-structuring_long_butterfly_q.png" style="height:400px;"/>
# + slideshow={"slide_type": "subslide"}
# 롱 버터플라이
cl250 = vlu.call_option(x, 250, 10)
cl350 = vlu.call_option(x, 350, 10)
view.square_one_to_one_view(x, CL250=cl250, CS300_1=cs300, CS300_2=cs300, CL350=cl350)
# + slideshow={"slide_type": "slide"}
# knock in call : ki_call(x, 행사가, 배리어, 프리미엄)
kic = vlu.ki_call(x, 300, 350, 10)
view.square_one_to_one_view(x, KIC=kic)
# + slideshow={"slide_type": "slide"}
# knock out put : ko_put(x, 행사가, 배리어, 프리미엄)
kop = vlu.ko_put(x, 350, 250, 10)
view.square_one_to_one_view(x, KOP=kop)
# + [markdown] slideshow={"slide_type": "slide"}
# 최고난이도!<br>
# KIKO 상품을 구조화해보세요
# <img src="image/w5-kiko.png" style="height:300px;"/>
# + slideshow={"slide_type": "subslide"}
# KIKO
view.square_one_to_one_view(x, LKOP=kop, SKIC=kic*-2, FL=fl300)
| w5/w5-05 fs structuring A.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <b>Determine a integral dada</b>
# $\int xe^{\frac{x}{2}}dx$
# $u = \frac{x}{2}$
# $2u = x$
# $du = \frac{1}{2}dx$
# $2du = dx$
# <b>$ \int xe^{\frac{x}{2}}dx \rightarrow 4 \cdot \int e^u udu$</b>
# $u' = u$
# $du' = du$
# $dv = e^u du$
# $v = e^u$
# <b>Aplicando as substituições $4 \cdot \int e^u udu \rightarrow 4\cdot \int u'dv$</b>
# $4\cdot \int u'dv = 4( u'v - \int vdu')$
# $4 \cdot \int u'dv = 4(ue^u - \int e^udu)$
# $4 \cdot \int u'dv = 4(ue^u - e^u)$
# $4 \cdot \int u'dv = 4e^u(u - 1)$
# $4 \cdot \int u'dv = 4e^{\frac{x}{2}}(\frac{x}{2} - 1) + C$
| Problemas 6.1/02.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Getting started with MPI using C
# The MPI command `mpiexec` provides each started application instance an MPI runtime environment. C programs need to be compiled and linked using `mpicc` to activate the MPI runtime environment.
# -----
#
# We start this example with a simple C program that provides the same capabilities as the [shell script of the previous example](../mpi-with-hostname.sh). Instead of getting the size and the rank from environment variables we now use MPI C API calls.
# !cat /hpclab/users/ulf/hpc-examples/mpi-simple/mpi-hello-world.c
# We compile and link this MPI C program with `mpicc` to assure that the MPI libraries will get loaded during runtime.
# !mpicc /hpclab/users/ulf/hpc-examples/mpi-simple/mpi-hello-world.c -o /hpclab/users/ulf/hpc-examples/mpi-simple/mpi-hello-world
# The executable program is directly available on all cluster nodes, because we are working in an NFS file system.
# !df -h .
# Alright. Let's see the output if we start multiple application instances.
# !mpiexec -hosts c1:2,c2:3 /hpclab/users/ulf/hpc-examples/mpi-simple/mpi-hello-world
# So, the compiled C program provides a similar output to the shell script used in the previous example.
# -----
# This was a brief introduction into compiling and linking MPI C programs. See my [HPClab](https://www.beyond-storage.com/hpc-lab) for more [MPI examples](https://www.beyond-storage.com/examples-mpi).
# -----
| mpi-simple/doc/mpi-with-c.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # LU Factorization
#
# Copyright (C) 2021 <NAME>
#
# In part based on material by <NAME>
#
# <details>
# <summary>MIT License</summary>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# </details>
# +
import numpy as np
import numpy.linalg as la
# Wide lines, no scientific notation, 3 decimals: keeps 4x4 matrices readable.
np.set_printoptions(linewidth=150, suppress=True, precision=3)
# -
# Random 4x4 test matrix to factor as A = L @ U.
A = np.random.randn(4, 4)
A
# Initialize `L` and `U`:
# L starts as the identity (unit lower triangular); U is filled in row by row.
L = np.eye(len(A))
U = np.zeros_like(A)
# Recall the "recipe" for LU factorization:
#
# $$\let\B=\boldsymbol \begin{array}{cc}
# & \left[\begin{array}{cc}
# u_{00} & \B{u}_{01}^T\\
# & U_{11}
# \end{array}\right]\\
# \left[\begin{array}{cc}
# 1 & \\
# \B{l}_{10} & L_{11}
# \end{array}\right] & \left[\begin{array}{cc}
# a_{00} & \B{a}_{01}\\
# \B{a}_{10} & A_{11}
# \end{array}\right]
# \end{array}$$
#
# Find $u_{00}$ and $u_{01}$. Check `A - L@U`.
# +
#clear
# The first row of U equals the first row of A (first row of L is e_0).
U[0] = A[0]
A - L@U
# -
# Find $l_{10}$. Check `A - L@U`.
# +
#clear
# First column of L below the diagonal: a_{10} / u_{00}.
L[1:,0] = A[1:,0]/U[0,0]
A - L@U
# -
# Recall $A_{22} =\B{l}_{21} \B{u}_{12}^T + L_{22} U_{22}$. Write the next step generic in terms of `i`.
#
# After the step, print `A-L@U` and `remaining`.
i = 1
# `remaining` is the yet-unfactored Schur-complement part of A.
remaining = A - L@U
# +
#clear
# Generic step i: peel off row i of U and column i of L, then subtract
# the rank-1 outer-product update from the trailing submatrix.
U[i, i:] = remaining[i, i:]
L[i+1:,i] = remaining[i+1:,i]/U[i,i]
remaining[i+1:, i+1:] -= np.outer(L[i+1:,i], U[i, i+1:])
i = i + 1
print(remaining)
print(A-L@U)
# -
# -
| demos/linear_systems/LU Factorization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.1 64-bit
# name: python3
# ---
# # Tracking College of Marine Science K-Index Trends
# ## Faculty Active on Twitter
#
# This notebook builds a time series figure to track the evolution of CMS faculty Twitter users' K-index. A K-index above 5 indicates a scientist has "gone Kardashian" - having a social media impact that far outweighs the publication and citation impact of the scientist. Despite the negative connotation of "going Kardashian," several prominent, accomplished, and respected scientists are stratospheric with respect to the K-index! Thus, I think it is a good indication of a scientist's overall impact.
# +
#Load data and packages
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import plotly
import os
# Index by Name so each faculty member's rows can be selected with .loc.
K_df = pd.read_csv('K-index_python_anonymized.csv', index_col='Name')
print(K_df.head())
# -
# ## Data organization
# Because we specified the Name column as the index in reading the data from the csv file, we can loop through the names and work on a mini-dataframe for each person inthe data set. This allows us to compute a linear regression through F (followers) vs. C (citations) to map each person's trend in time.
# +
#Make a container of coefficients from linear models:
keys_from_indices=K_df.index.unique(level='Name')
print(keys_from_indices)
mods={k:None for k in keys_from_indices} #Initialize dict keys from indices of K_df, all with values of None that will be replaced as we loop through
#Loop through the names and calculate a linear regression between F and C (Follower and Citations)
# NOTE(review): iterating K_df.index visits each ROW's label, so a name with
# several rows gets re-fitted once per row -- harmless but redundant.
for individual in K_df.index:
    if (K_df.loc[individual]['C']).size>=2:
        #print(K_df.loc[individual][:]) #Use this to see what the data frames look like while iterating
        x=K_df.loc[individual]['C']
        y=K_df.loc[individual]['F']
        # Degree-1 (linear) fit of followers vs. citations.
        z=np.polyfit(x, y, 1)
        mods[individual]=np.poly1d(z)
    else:
        # Fewer than 2 observations: cannot fit a line, store a placeholder.
        mods[individual]=' '
print(mods)
# -
# ## Plotting the Faculty Data in Followers/Citations Space
# Here, we plot the data from the K_df DataFrame as well as the trend lines. The data will not make much graphical sense until the framework of the K-index is added, which occurs in the second part of the code cell!
#
# ## Kardashian Index Lines
# The final flourish of this plot is the lines for K_indices 1-5. Above 5, and one is considered a scientific Kardashian.
# +
# One fixed color/marker per faculty member (8 entries to match the roster).
colors_dict=dict(zip(keys_from_indices,['red','yellow','lightblue','pink','lightgreen','brown','orange','purple']))
marker_shapes_dict=dict(zip(keys_from_indices,['o','^','s','X','d','s','o','>']))
K_fig=plt.figure()
plt.subplot(111)
# Scatter each person's citation/follower points plus their fitted trend line.
for individual in K_df.index:
    if (K_df.loc[individual]['C']).size>=2:
        x=K_df.loc[individual]['C']
        y=K_df.loc[individual]['F']
        plt.scatter(x,y,c=colors_dict[individual], marker=marker_shapes_dict [individual], s=50, edgecolor='k', figure=K_fig)
        #print(mods[individual]) #Test polynomial function
        plt.plot(x,mods[individual](x),'--',c=colors_dict[individual])
plt.xlabel('Citations (Google Scholar)')
plt.ylabel('Followers (Twitter)')
#Add K-index lines 1-5
x_limits=plt.xlim()
y_limits=plt.ylim()
# Guard against a non-positive axis start before building the sample grid.
if min(x_limits) <= 0:
    min_xes = 0.01
else:
    min_xes = min(x_limits)
xes=np.linspace(min_xes, max(x_limits)*0.9, 50)
end_k=5 #Highest K factor line ot plot - K=5 is traditionally considered Kardashian.
k=pd.DataFrame([])
# K-index isopleths: F = K * 43.3 * C^0.32; K=5 (the "Kardashian" line) is solid black.
for j in range(end_k):
    k[j]=np.multiply(np.multiply(np.power(xes, 0.32), 43.3), j+1)
    if j==end_k-1:
        clr='k'
        solid_dash='-'
    else:
        clr='gray'
        solid_dash='--'
    plt.plot(xes,k[j],ls=solid_dash,c=clr)
    plt.text(0.9*max(x_limits), max(k[j][~k[j].isna()]),('k = '+str(j+1)))
#K_fig.savefig('CMS_K-index.png')
# -
# ## Plotly Options
#
# In the following cell, I will make a similar figure in plotly for web interactivity.
# +
import plotly.express as px
import plotly.graph_objects as go
# Start from an empty scatter and add one interactive trace per person.
k_fig=go.Figure(data=go.Scatter(x=[], y=[]))
for individual in K_df.index:
    if (K_df.loc[individual]['C']).size>=2:
        k_fig.add_trace(go.Scatter(x=K_df.loc[individual]['C'], y=K_df.loc[individual]['F'],text = individual, hoverinfo = 'x+y+text',))
#K_fig=(px.scatter(K_df, x='C', y='F', color=K_df.index, trendline='ols', ))
k_fig.update_layout({'showlegend':False})
k_fig.show()
# Export the interactive figure for the web.
k_fig.write_html("USF_CMS_Kindices_go_2021.html")
#Note this is a work in progress! Still figuring out the best way to incorporate the K-value isopleths and a legend with Plotly.
# -
| K-index_anonymized.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Compare two dice
from itertools import product
# This takes the cartesian product and counts all the cases where you win. So like 5 would beat 2.
def count_wins(dice1, dice2):
dice1_wins, dice2_wins = 0, 0
for (x,y) in product(dice1, dice2):
if x == y:
pass
elif x < y:
dice2_wins += 1
else:
dice1_wins += 1
return (dice1_wins, dice2_wins)
# +
# Candidate dice pairs; only the LAST assignment of dice1/dice2 takes
# effect -- comment/uncomment lines to try a different pair.
# dice1 = [1, 2, 3, 4, 5, 6]
# dice2 = [1, 2, 3, 4, 5, 6]
dice1 = [1, 1, 6, 6, 8, 8]
dice2 = [2, 2, 4, 4, 9, 9]
dice1 = [1, 1, 2, 4, 5, 7]
dice2 = [1, 2, 2, 3, 4, 7]
dice1 = [1, 2, 2, 3, 4, 7]
dice2 = [1, 2, 3, 4, 5, 6]
# -
# Pairwise win counts for the final pair above.
count_wins(dice1, dice2)
from itertools import combinations
# This just finds the die that you should pick if given a set of choices
def find_the_best_dice(dices):
    """Return the index of the die that beats every other die, or -1.

    Each pair of dice is compared with count_wins; a die scores a point
    against an opponent when it wins more pairwise rolls (a tie scores a
    point for BOTH sides, preserving the original rules). A die is "best"
    only if it scores against all len(dices)-1 opponents.
    """
    highest = -1
    # counts[i] = number of opponents die i beat (or tied with).
    counts = [0]*len(dices)
    for x, y in combinations(range(len(dices)), 2):
        c1, c2 = count_wins(dices[x], dices[y])
        if c1 == c2:
            counts[x] += 1
            counts[y] += 1
        elif c1 > c2:
            counts[x] += 1
        elif c2 > c1:
            counts[y] += 1
    print(counts)
    try:
        highest = counts.index(len(dices)-1)
    # BUG FIX: was a bare `except:`, which would also swallow KeyboardInterrupt
    # and mask real bugs; list.index only raises ValueError when no die dominates
    # (e.g. a non-transitive dice set).
    except ValueError:
        pass
    return highest
# Example: pick the best die (if any) out of three candidates.
find_the_best_dice([[1, 1, 2, 4, 5, 7], [1, 2, 2, 3, 4, 7], [1, 2, 3, 4, 5, 6]])
# This just gives you a dict back with a strategy that you should take in it. I am going to use this with the bot.
def compute_strategy(dices):
    """Build a playing strategy for a set of dice.

    Returns a dict with:
      - "choose_first": whether we should pick our die first,
      - "first_dice":   which die to pick when choosing first (only present
                        when some die dominates all others),
      - i -> j:         for every die i the opponent might pick, the die j
                        that wins most often against it.
    """
    strategy = {"choose_first": True, "first_dice": 0}
    dominant = find_the_best_dice(dices)
    if dominant == -1:
        # No dominant die exists: let the opponent commit first and counter.
        strategy["choose_first"] = False
        strategy.pop("first_dice", None)
    else:
        strategy["first_dice"] = dominant
    # For each die i, find the opponent die with the most wins against it.
    for i in range(len(dices)):
        best_counter = (0, 0)  # (die index, wins against die i)
        for j in range(len(dices)):
            if i == j:
                continue
            _, wins_against_i = count_wins(dices[i], dices[j])
            if wins_against_i > best_counter[1]:
                best_counter = (j, wins_against_i)
        strategy[i] = best_counter[0]
    return strategy
# Example strategies for two sets of non-standard dice.
compute_strategy([[1, 1, 4, 6, 7, 8], [2, 2, 2, 6, 7, 7], [3, 3, 3, 5, 5, 8]])
compute_strategy([[4, 4, 4, 4, 0, 0], [7, 7, 3, 3, 3, 3], [6, 6, 2, 2, 2, 2], [5, 5, 5, 1, 1, 1]])
| Dice Game/diceStrategy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] deletable=false editable=false nbgrader={"checksum": "309a3ba5ce59242c851ba05ae8de207f", "grade": false, "grade_id": "cell-2dce2716070d680f", "locked": true, "schema_version": 1, "solution": false}
# ## Assignment 1.2 Python Classes
# + [markdown] deletable=false editable=false nbgrader={"checksum": "2b1e1b2ffc210ed77f6ae32374302fc1", "grade": false, "grade_id": "cell-706c679697b07648", "locked": true, "schema_version": 1, "solution": false}
# https://docs.python.org/3/tutorial/classes.html
# + [markdown] deletable=false editable=false nbgrader={"checksum": "773a462e3acf0309a12ecd30fcf62487", "grade": false, "grade_id": "cell-1052f25f3783f2ab", "locked": true, "schema_version": 1, "solution": false}
# - The constructor is always written as a function called __init__()
# - It must always take as its first argument a **reference to the instance** being constructed
# - Single underscore before a member variable denotes it protected and double underscore denotes it private but it is just a convention
# + [markdown] deletable=false editable=false nbgrader={"checksum": "3a2b7f0b89b6e466ee8d2c1b6b8af4e4", "grade": false, "grade_id": "cell-a4d8f6ba25b7cae7", "locked": true, "schema_version": 1, "solution": false}
# Create a Class Rectangle such that its objects are initialized with its length and breadth.
# Make two class methods get_area and get_perimeter which calculates and return the area and circumference respectively
# + deletable=false nbgrader={"checksum": "a95e628edc126d2280b0ba71733706b1", "grade": false, "grade_id": "cell-7b9cb67eee934ad9", "locked": false, "schema_version": 1, "solution": true}
class Rectangle():
    """A rectangle described by its length and breadth.

    Instances support the full set of rich comparisons, all of which
    compare the rectangles by AREA (not by shape).
    """

    def __init__(self, length, breadth):
        self.length = length
        self.breadth = breadth

    def get_area(self):
        """Return the area: length * breadth."""
        return self.length * self.breadth

    def get_perimeter(self):
        """Return the perimeter: 2 * (length + breadth)."""
        return 2 * (self.length + self.breadth)

    def __str__(self):
        return f"Rectangle(length={self.length}, breadth={self.breadth})"

    # Rich-comparison dunders -- all delegate to the rectangles' areas.
    def __eq__(self, other):
        return self.get_area() == other.get_area()

    def __ne__(self, other):
        return self.get_area() != other.get_area()

    def __lt__(self, other):
        return self.get_area() < other.get_area()

    def __le__(self, other):
        return self.get_area() <= other.get_area()

    def __gt__(self, other):
        return self.get_area() > other.get_area()

    def __ge__(self, other):
        return self.get_area() >= other.get_area()
# + deletable=false editable=false nbgrader={"checksum": "b217767700e37dc55ac33a5cf80cc648", "grade": true, "grade_id": "cell-513c46a34a22e61e", "locked": true, "points": 1, "schema_version": 1, "solution": false}
# Sanity check: a 4x10 rectangle has area 40.
rec1 = Rectangle(4,10)
assert(rec1.get_area() == 40)
# + [markdown] deletable=false editable=false nbgrader={"checksum": "b6bef5008ce0faaf861cd32767575bd1", "grade": false, "grade_id": "cell-371a06bffe4d1a2d", "locked": true, "schema_version": 1, "solution": false}
# ### Magic Methods or Dunder methods
# [Python Special Methods](https://docs.python.org/3/reference/datamodel.html#special-method-names)
# + deletable=false nbgrader={"checksum": "1f3fd629f011bc95038d63788e7f5524", "grade": false, "grade_id": "cell-bcdd2ef93e91a97a", "locked": false, "schema_version": 1, "solution": true}
# Modify the Rectangle class to print object as "Rectangle(length=a, breadth=b)"
# where a and b are length and breadth
# YOUR CODE HERE
print(rec1)  # expected: Rectangle(length=4, breadth=10)
# + deletable=false editable=false nbgrader={"checksum": "94057ed3472072479b463f6fe9505312", "grade": true, "grade_id": "cell-962a216ebf3b2ffd", "locked": true, "points": 1, "schema_version": 1, "solution": false}
# + deletable=false nbgrader={"checksum": "c428f8fdf4661af7301ff7d9f2f0a54d", "grade": false, "grade_id": "cell-8d1bcf7bdbbedf55", "locked": false, "schema_version": 1, "solution": true}
# Modify rectangle class to compare rectangle objects (comparing with areas)
# Operator overloading of rich comparisons = ,> ,< ,<= ,>= ,!= ,
# YOUR CODE HERE
# Compare two rectangles by area: rect1 has area 3, rect2 has area 2.
rect1 = Rectangle(1, 3)
rect2 = Rectangle(1, 2)
print(rect1 == rect2)  # False
print(rect1 > rect2)   # True
print(rect1 < rect2)   # False
print(rect1 >= rect2)  # True
print(rect1 <= rect2)  # False
print(rect1 != rect2)  # True
# + deletable=false editable=false nbgrader={"checksum": "26b9383febd195ae1d1c86b62b85b1de", "grade": true, "grade_id": "cell-5f2f866622093343", "locked": true, "points": 1, "schema_version": 1, "solution": false}
# Area-based comparisons must hold for these fixtures.
assert(Rectangle(3,10)==Rectangle(3,10))
assert(Rectangle(2,10)<Rectangle(3,10))
assert(Rectangle(3,10)!=Rectangle(2,10))
assert(Rectangle(3,10)>=Rectangle(3,10))
# + [markdown] deletable=false editable=false nbgrader={"checksum": "9542ed87a4b7d3715f58ffd0529ecefb", "grade": false, "grade_id": "cell-34a1737b2a51850b", "locked": true, "schema_version": 1, "solution": false}
# ### Inheritance
# + [markdown] deletable=false editable=false nbgrader={"checksum": "f91399da16138fa2599ee1d7d2046606", "grade": false, "grade_id": "cell-2ab070ec875982c5", "locked": true, "schema_version": 1, "solution": false}
# Derive a class **Square** from **Rectangle** with one properties "length" (initialized through constructor) and able to calculate area, perimeter and comparison
# + deletable=false nbgrader={"checksum": "7afa7613628113e11738ab427c04bbc7", "grade": false, "grade_id": "cell-92fe1c73a1a02948", "locked": false, "schema_version": 1, "solution": true}
class Square(Rectangle):
    """A square: a Rectangle whose two sides are equal.

    Inherits area, perimeter, and the area-based comparisons unchanged.
    """

    def __init__(self, length):
        # Reuse Rectangle's initializer instead of re-assigning the
        # attributes by hand -- keeps Square in sync if the parent's
        # construction logic ever changes.
        super().__init__(length, length)
# + deletable=false editable=false nbgrader={"checksum": "79a977234acc06775cce58ee4290d639", "grade": true, "grade_id": "cell-28da33eb75674da3", "locked": true, "points": 1, "schema_version": 1, "solution": false}
# A side-9 square has area 81; squares compare by area like rectangles.
assert(Square(9).get_area()==81)
assert(Square(10) != Square(5))
| Assignments/1.2 OOP-in-Python.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### OCI Data Science - Useful Tips
# <details>
# <summary><font size="2">Check for Public Internet Access</font></summary>
#
# ```python
# import requests
# response = requests.get("https://oracle.com")
# assert response.status_code==200, "Internet connection failed"
# ```
# </details>
# <details>
# <summary><font size="2">Helpful Documentation </font></summary>
# <ul><li><a href="https://docs.cloud.oracle.com/en-us/iaas/data-science/using/data-science.htm">Data Science Service Documentation</a></li>
# <li><a href="https://docs.cloud.oracle.com/iaas/tools/ads-sdk/latest/index.html">ADS documentation</a></li>
# </ul>
# </details>
# <details>
# <summary><font size="2">Typical Cell Imports and Settings for ADS</font></summary>
#
# ```python
# # %load_ext autoreload
# # %autoreload 2
# # %matplotlib inline
#
# import warnings
# warnings.filterwarnings('ignore')
#
# import logging
# logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.ERROR)
#
# import ads
# from ads.dataset.factory import DatasetFactory
# from ads.automl.provider import OracleAutoMLProvider
# from ads.automl.driver import AutoML
# from ads.evaluations.evaluator import ADSEvaluator
# from ads.common.data import ADSData
# from ads.explanations.explainer import ADSExplainer
# from ads.explanations.mlx_global_explainer import MLXGlobalExplainer
# from ads.explanations.mlx_local_explainer import MLXLocalExplainer
# from ads.catalog.model import ModelCatalog
# from ads.common.model_artifact import ModelArtifact
# ```
# </details>
# <details>
# <summary><font size="2">Useful Environment Variables</font></summary>
#
# ```python
# import os
# print(os.environ["NB_SESSION_COMPARTMENT_OCID"])
# print(os.environ["PROJECT_OCID"])
# print(os.environ["USER_OCID"])
# print(os.environ["TENANCY_OCID"])
# print(os.environ["NB_REGION"])
# ```
# </details>
# +
###### test connection ######
# Verify the notebook session has public internet access before anything
# that needs to download data or packages.
import requests
response = requests.get("https://oracle.com")
assert response.status_code==200, "Internet connection failed"
# +
###### check environments ######
# Print the OCI Data Science identifiers injected into this session.
import os
print(os.environ["NB_SESSION_COMPARTMENT_OCID"])
print(os.environ["PROJECT_OCID"])
print(os.environ["USER_OCID"])
print(os.environ["TENANCY_OCID"])
print(os.environ["NB_REGION"])
# + language="bash"
# ###### install libs ######
# #pip install matplotlib
# #pip install pandas
# #pip install seaborn
# #pip list
# #pip install scikit-learn
# +
###### import libs ######
# %matplotlib inline
# %reload_ext autoreload
# %autoreload 2
import gzip
import pickle
import logging
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import ads
from ads.dataset.factory import DatasetFactory
from ads.automl.provider import OracleAutoMLProvider
from ads.automl.driver import AutoML
from ads.evaluations.evaluator import ADSEvaluator
# Global plotting defaults for every figure in this notebook.
plt.rcParams['figure.figsize'] = [10, 7]
plt.rcParams['font.size'] = 15
sns.set(color_codes=True)
sns.set(font_scale=1.5)
sns.set_palette("bright")
sns.set_style("whitegrid")
# +
###### check ads env ######
ads.environment.ads_inspect.check_ads_env()
# +
####### load data #######
# Pre-split train/test CSVs under ./d_aea/; target column is ACTION.
# (Dataset "d_aea" is presumably the Amazon Employee Access Kaggle data
# -- TODO confirm.)
name = 'd_aea.csv'
id_col = "Id"
target_col = "ACTION"
train_name = './d_aea/train_{}'.format(name)
test_name = './d_aea/test_{}'.format(name)
df = pd.read_csv(train_name)
test_df = pd.read_csv(test_name)
print(df.head())
df.shape
#df = df.drop(['id'], axis=1)
test_df.head()
#test_df = test_df.drop(['id'], axis=1)
#df.to_csv(train_name,index=False)
#test_df.to_csv(test_name, index=False)
#print(test_df.columns)
#print(df.columns)
# -
### dataset statistics ######
# Class balance of the target in the training set.
sns.countplot(x=target_col, data=df)
plt.title("train dataset")
print(df[target_col].unique())
# +
###### train model ######
# Wrap the DataFrame in an ADS dataset and declare the target column.
train = DatasetFactory.open(df).set_target(target_col)
print([train])
# create an automl job
ml_engine = OracleAutoMLProvider(n_jobs=-1, loglevel=logging.ERROR)
oracle_automl = AutoML(training_data = train, provider=ml_engine)
# time budget is in seconds
best_model, baseline = oracle_automl.train(score_metric='roc_auc', time_budget=600)
# +
###### predict test dataset ######
print(test_df.shape)
y_pred = best_model.predict(test_df)
y_true = test_df[target_col]
print(y_true.shape)
test_acc = np.mean(y_pred==y_true)
print("test accuracy = {}".format(test_acc))
import sklearn
from sklearn import metrics
# NOTE(review): the ROC curve is built from hard 0/1 predictions, not
# probabilities -- consider predict_proba for a meaningful AUC.
fpr, tpr, thresholds = metrics.roc_curve(y_true, y_pred, pos_label=1)
test_auc = metrics.auc(fpr, tpr)
print("test auc = {}".format(test_auc))
# -
oracle_automl.print_trials(max_rows=129, sort_column='Mean Validation Score')
oracle_automl.visualize_algorithm_selection_trials()
# +
###### load kaggle test dataset ######
kaggle_test_name = './d_aea/kaggle-test_{}'.format(name)
kaggle_test_df = pd.read_csv(kaggle_test_name)
# Keep the Id column aside for the submission file, then drop it from features.
kaggle_test_id = np.array(kaggle_test_df[id_col])
kaggle_test_df = kaggle_test_df.drop([id_col], axis=1)
print(kaggle_test_df.head())
kaggle_test_df.shape
# +
###### predict kaggle test dataset ######
kaggle_y_pred = best_model.predict(kaggle_test_df)
assert kaggle_y_pred.shape == kaggle_test_id.shape
print(kaggle_y_pred.shape)
print(kaggle_y_pred[:100])
# +
###### save kaggle prediction ######
# Write a two-column (Id, ACTION) submission CSV.
kaggle_pred_name = './d_aea/kaggle-test_{}_predictions.csv'.format(name)
kaggle_pred_df = pd.DataFrame({id_col: kaggle_test_id, target_col: kaggle_y_pred})
print(kaggle_pred_df.head())
kaggle_pred_df.to_csv(kaggle_pred_name, index=False)
# -
| code/oracle_datascience/train_d_aea.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
# import all required modules
import pandas as pd
import seaborn as sns
import numpy as np
import os
import matplotlib.pyplot as plt
from pandas_profiling import ProfileReport
# import loading-of-data method
from scripts import project_functions
# Load and process data
df = project_functions.load_and_process("https://health-infobase.canada.ca/src/doc/SubstanceHarmsData.csv")
display(df.head())
# explore for numerical columns:
# NOTE(review): in a script this expression's result is discarded; wrap it in
# display()/print() if the summary table is actually wanted.
df.describe(include='all').T
display(df.head(10))
# convert "Value" column into integers and assign it to a new column
df['Value_int'] = df['Value'].astype(int)
# print new blank lines
print("\n\n")
# ****************** Graph 1 ******************
#Here we want to see how the total deaths differ from province to province.. using death count as a measure
# Keep only the (Region, Year_Quarter) groups whose quarter is '2020'.
df1 = df.groupby(["Region","Year_Quarter"]).filter(lambda x: (x["Year_Quarter"] == '2020').any())
df1 = df1.reset_index()
print(df1.head())
sns.set_theme(style="whitegrid", font_scale=1.5 )
sns.barplot(data = df1,x = 'Region', y = 'Value_int', color = 'red')
plt.xticks(fontsize = 15, rotation=45, ha='right')
plt.yticks(fontsize = 15)
plt.xlabel('Regions', size=12)
plt.ylabel('Total apparent Opioid toxicity deaths', size=12)
plt.title('Total Opioid related deaths for 2020 in Canada', size=20, pad=40)
plt.show()
# print new blank lines
print("\n\n")
# ****************** Graph 2 ******************
#Here we want to explore how the opioid crisis has progressed in canada. Again use death count as a measure.
df2 = df.groupby('Year_Quarter').Value_int.sum()
df2 = df2.reset_index()
print(df2.head())
sns.set_theme(style="darkgrid", font_scale=1.5 )
sns.lineplot(data = df2,x = 'Year_Quarter', y = 'Value_int', color = 'blue')
plt.xticks(fontsize = 15, rotation=45, ha='right')
plt.yticks(fontsize = 15)
plt.xlabel('Year', size=12)
plt.ylabel('Opioid related death count', size=12)
plt.title('Total Opioid related deaths in Canada : 2016 - 2020', size=20, pad=40)
plt.show()
# print new blank lines
print("\n\n")
# ****************** Graph 3 ******************
#Curious to see if the same trend seen above..for all of canada.. will be the same province to province. yes.. we can see its the same trend followed
df3 = df.groupby(["Region","Year_Quarter"]).Value_int.sum()
df3 = df3.reset_index()
print(df3)
sns.lineplot(data=df3, x="Year_Quarter", y="Value_int", hue="Region")
#sns.set(rc={"figure.figsize":(30, 15)})
plt.xticks(fontsize = 15, rotation=45, ha='right')
plt.yticks(fontsize = 15)
plt.xlabel('Year', size=12)
plt.ylabel('Opioid related death count', size=12)
plt.title('Total Opioid related deaths by Province : 2016 - 2020', size=20, pad=40)
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()
# -
# Initial observations from visualizations above:
#
# <li>There are definitely some regions vastly more affected by the opioid crisis compared to others .. looks like Ontario, BC, Alberta are affected disproportionately
# <li>Total opioid crisis has definitely been getting worse... possible factors that could be contributing???
# <li>Similar to the nationwide picture, all provinces are also seeing an upward trend in death count confiriming it
| analysis/Shalini/EDA_Shalini.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Classification
# *from Python Machine Learning by <NAME> under the MIT License (MIT)*
#
# This code might be directly from the book, mine, or a mix.
# ## Perceptron learning algorithm
import numpy as np
class Perceptron(object):
    """Perceptron classifier.

    Parameters
    ----------
    eta : float
        Learning rate (between 0.0 and 1.0).
    n_iter : int
        Passes (epochs) over the training dataset.
    shuffle : bool
        If True, shuffle the training data every epoch to avoid cycles.
    random_state : int or None
        Seed for the shuffling RNG. NOTE: a seed of 0 is falsy and is
        silently ignored (original behavior, preserved).

    Attributes
    ----------
    w_ : 1d-array
        Weights after fitting; w_[0] is the bias unit.
    errors_ : list
        Number of misclassifications in every epoch.
    """

    def __init__(self, eta=0.01, n_iter=10, shuffle=True, random_state=None):
        self.eta = eta
        self.n_iter = n_iter
        self.shuffle = shuffle
        if random_state:
            np.random.seed(random_state)

    def fit(self, X, y):
        """ Fit training data
        X: [n_samples, n_features] -- training vectors
        y: [n_samples] -- target values (+1/-1)
        """
        self.w_ = np.zeros(1 + X.shape[1])
        self.errors_ = []
        for _ in range(self.n_iter):
            if self.shuffle:
                X, y = self._shuffle(X, y)
            errors = 0
            for xi, target in zip(X, y):
                # Perceptron rule: update only on misclassification.
                update = self.eta * (target - self.predict(xi))
                self.w_[1:] += update * xi
                self.w_[0] += update
                errors += int(update != 0.0)
            self.errors_.append(errors)
        return self

    def net_input(self, X):
        """ Calculate net input (weighted sum of features plus bias) """
        # BUG FIX: the bias w_[0] must be ADDED to the dot product; the
        # original `np.dot(X, self.w_[1:] + self.w_[0])` folded the bias
        # into every weight before the product (compare AdalineGD.net_input).
        return np.dot(X, self.w_[1:]) + self.w_[0]

    def predict(self, X):
        """ Return class label (+1 or -1) after unit step """
        return np.where(self.net_input(X) >= 0.0, 1, -1)

    def _shuffle(self, X, y):
        """ Shuffle training data (X and y in unison) """
        r = np.random.permutation(len(y))
        return X[r], y[r]
# ### Import Iris dataset and print the last records
import pandas as pd
# Fetch the classic Iris dataset straight from the UCI repository
# (requires network access; the file has no header row).
df = pd.read_csv('https://archive.ics.uci.edu/ml/'
                 'machine-learning-databases/iris/iris.data', header=None)
df.tail()
# ### Drawing setosa and versicolor sepal and petal lenght
# +
import matplotlib.pyplot as plt

# First 100 rows are setosa then versicolor; encode setosa as -1, else +1.
y = df.iloc[0:100, 4].values
y = np.where(y == 'Iris-setosa', -1, 1)
# Columns 0 and 2 are sepal length and petal length.
X = df.iloc[0:100, [0,2]].values
plt.scatter(X[:50, 0], X[:50, 1], color='red', marker='o', label='setosa')
plt.scatter(X[50:100, 0], X[50:100, 1], color='blue', marker='x', label='versicolor')
# FIX: corrected the 'lenght' typos in the axis labels (matches the
# correctly-spelled labels used in the later decision-region plot).
plt.xlabel('sepal length')
plt.ylabel('petal length')
plt.legend(loc='upper left')
plt.show()
# -
# ### Training the model
# Train on the 100 prepared Iris rows; shuffle=False keeps the run
# reproducible. Then plot misclassifications per epoch.
ppn = Perceptron(eta=0.1, n_iter=10, shuffle=False)
ppn.fit(X, y)
plt.plot(range(1, len(ppn.errors_) + 1), ppn.errors_, marker='o')
plt.xlabel('Epochs')
plt.ylabel('Number of updates')
plt.show()
# ### Plot decision regions
# +
from matplotlib.colors import ListedColormap
def plot_decision_regions(X, y, classifier, resolution=0.02):
    """Plot the 2-D decision surface of a fitted classifier.

    X: [n_samples, 2] -- feature matrix (exactly two features are plotted)
    y: [n_samples] -- class labels
    classifier: object exposing predict(X)
    resolution: grid step used to sample the decision surface
    """
    # setup marker generator and color map
    markers = ('s', 'x', 'o', '^', 'v')
    colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
    cmap = ListedColormap(colors[:len(np.unique(y))])
    # plot the decision surface
    x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
                           np.arange(x2_min, x2_max, resolution))
    # Classify every grid point, then shade regions by the predicted class.
    Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
    Z = Z.reshape(xx1.shape)
    plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap)
    plt.xlim(xx1.min(), xx1.max())
    plt.ylim(xx2.min(), xx2.max())
    # plot class samples
    for idx, cl in enumerate(np.unique(y)):
        plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1],
                    alpha=0.8, c=cmap(idx),
                    marker=markers[idx], label=cl)
# -
# Visualize the trained perceptron's decision boundary on the training data.
plot_decision_regions(X, y, classifier=ppn)
plt.xlabel('sepal length')
plt.ylabel('petal length')
plt.legend(loc='upper left')
plt.show()
# ## Adaline (adaptative linear neuron)
class AdalineGD(object):
    """ADAptive LInear NEuron classifier (batch gradient descent).

    eta: learning rate (0.0-1)
    n_iter: passes over the training dataset
    """

    def __init__(self, eta=0.01, n_iter=50):
        self.eta = eta
        self.n_iter = n_iter

    def fit(self, X, y):
        """Fit training data.

        X: [n_samples, n_features] -- training vectors
        y: [n_samples] -- target values
        """
        # One weight per feature plus a bias term at index 0.
        self.w_ = np.zeros(1 + X.shape[1])
        self.cost_ = []
        for _ in range(self.n_iter):
            # Whole-batch forward pass; the activation is the identity,
            # so this is simply the net input for every sample.
            errors = (y - self.activation(X))
            # Gradient-descent step on weights and bias.
            self.w_[1:] += self.eta * X.T.dot(errors)
            self.w_[0] += self.eta * errors.sum()
            # Track the sum-of-squared-errors cost per epoch.
            cost = (errors**2).sum() / 2.0
            self.cost_.append(cost)
        return self

    def net_input(self, X):
        """Calculate the net input (weighted sum plus bias)."""
        return np.dot(X, self.w_[1:]) + self.w_[0]

    def activation(self, X):
        """Compute the (identity) linear activation."""
        return self.net_input(X)

    def predict(self, X):
        """Return the class label (+1/-1) after the unit step."""
        return np.where(self.activation(X) >= 0.0, 1, -1)
# +
# Compare two learning rates side by side: 0.01 diverges on raw features
# (cost plotted on a log scale), 0.0001 converges but very slowly.
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(8, 4))
ada1 = AdalineGD(n_iter=10, eta=0.01).fit(X, y)
ax[0].plot(range(1, len(ada1.cost_) + 1), np.log10(ada1.cost_), marker='o')
ax[0].set_xlabel('Epochs')
ax[0].set_ylabel('log(Sum-squared-error)')
ax[0].set_title('Adaline - Learning rate 0.01')
ada2 = AdalineGD(n_iter=10, eta=0.0001).fit(X, y)
ax[1].plot(range(1, len(ada2.cost_) + 1), ada2.cost_, marker='o')
ax[1].set_xlabel('Epochs')
ax[1].set_ylabel('Sum-squared-error')
ax[1].set_title('Adaline - Learning rate 0.0001')
plt.show()
# -
# #### Standardize features
# Z-score each column (zero mean, unit variance) so gradient descent can use
# the larger learning rate.
X_std = np.copy(X)
X_std[:, 0] = (X[:, 0] - X[:, 0].mean()) / X[:, 0].std()
X_std[:, 1] = (X[:, 1] - X[:, 1].mean()) / X[:, 1].std()
# #### Draw adaline decision regions and convergence
# +
ada = AdalineGD(n_iter=15, eta=0.01)
ada.fit(X_std, y)
plot_decision_regions(X_std, y, classifier=ada)
plt.title('Adaline - Gradient Descent')
plt.xlabel('sepal length [standardized]')
plt.ylabel('petal length [standardized]')
plt.legend(loc='upper left')
plt.show()
plt.plot(range(1, len(ada.cost_) + 1), ada.cost_, marker='o')
plt.xlabel('Epochs')
plt.ylabel('Sum-squared-error')
plt.show()
# -
# ## Large scale machine learning and stochastic gradient descent
class AdalineSGD(object):
    """ADAptive LInear NEuron classifier (stochastic gradient descent).
    eta: learning rate (0.0-1)
    n_iter: passes over the training dataset
    shuffle: reshuffle the training data every epoch to avoid cycles
    random_state: seed for the RNG (NOTE: np.random.seed is process-wide)
    """
    def __init__(self, eta=0.01, n_iter=50, shuffle=True, random_state=None):
        self.eta = eta
        self.n_iter = n_iter
        self.shuffle = shuffle
        # FIX: the flag must exist before fit()/partial_fit() runs; the
        # original only set it inside _initialize_weights(), so calling
        # partial_fit() on a fresh instance raised AttributeError.
        self.w_initialized = False
        if random_state:
            np.random.seed(random_state)
    def fit(self, X, y):
        """ Fit training data
        X: [n_samples, n_features] -- training vectors
        y: [n_samples] -- target values
        """
        self._initialize_weights(X.shape[1])
        self.cost_ = []
        for i in range(self.n_iter):
            if self.shuffle:
                X, y = self._shuffle(X, y)
            cost = []
            # One weight update per training sample; track the average cost.
            for xi, target in zip(X, y):
                cost.append(self._update_weights(xi, target))
            avg_cost = sum(cost) / len(y)
            self.cost_.append(avg_cost)
        return self
    def partial_fit(self, X, y):
        """ Fit training data without reinitializing the weights """
        if not self.w_initialized:
            self._initialize_weights(X.shape[1])
        # More than one sample: update per row; otherwise a single update.
        if y.ravel().shape[0] > 1:
            for xi, target in zip(X, y):
                self._update_weights(xi, target)
        else:
            self._update_weights(X, y)
        return self
    def net_input(self, X):
        """ Calculate net input (weighted sum plus bias) """
        return np.dot(X, self.w_[1:]) + self.w_[0]
    def activation(self, X):
        """ Compute linear activation (identity of the net input) """
        return self.net_input(X)
    def predict(self, X):
        """ Return class label (+1/-1) after unit step """
        return np.where(self.activation(X) >= 0.0, 1, -1)
    def _shuffle(self, X, y):
        """ Shuffle training data """
        r = np.random.permutation(len(y))
        return X[r], y[r]
    def _initialize_weights(self, m):
        """ Initialize weights to zeros """
        self.w_ = np.zeros(1 + m)
        self.w_initialized = True
    def _update_weights(self, xi, target):
        """ Apply Adaline learning rule to update the weights """
        output = self.net_input(xi)
        error = (target - output)
        self.w_[1:] += self.eta * xi.dot(error)
        self.w_[0] += self.eta * error
        cost = 0.5 * error**2
        return cost
# +
# Train the stochastic-gradient Adaline on the standardized features and show
# the learned decision boundary plus the average cost per epoch.
ada = AdalineSGD(n_iter=15, eta=0.01, random_state=1)
ada.fit(X_std, y)
plot_decision_regions(X_std, y, classifier=ada)
plt.title('Adaline - Stochastic Gradient Descent')
plt.xlabel('sepal length [standardized]')
plt.ylabel('petal length [standardized]')
plt.legend(loc='upper left')
plt.show()
plt.plot(range(1, len(ada.cost_) + 1), ada.cost_, marker='o')
plt.xlabel('Epochs')
plt.ylabel('Average Cost')
plt.show()
# -
# # Experiments with other databases
# ### Balance Scale Data Set
#
# The attributes are the left weight, the left distance, the right weight, and the right distance. The correct way to find the class is the greater of (left-distance \* left-weight) and (right-distance \* right-weight).
# NOTE(review): despite the name, `dboston` holds the UCI balance-scale
# dataset, not the Boston housing data.
dboston = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/balance-scale/balance-scale.data', header=None)
dboston.tail()
# +
# Column 0 is the class (L/B/R); encode 'R' as -1 and everything else as +1.
y = dboston.iloc[:, 0].values
y = np.where(y == 'R', -1, 1)
# Engineer the two discriminative features: left torque (weight*distance)
# and right torque.
L = dboston.iloc[:, [1,2]].values.prod(axis=1)
R = dboston.iloc[:, [3,4]].values.prod(axis=1)
X = np.column_stack((L, R))
left = X[X[:,0] <= X[:,1]]
right = X[X[:,0] > X[:,1]]
plt.scatter(left[:, 0], left[:, 1], color='red', marker='o', label='left')
plt.scatter(right[:, 0], right[:, 1], color='blue', marker='x', label='right')
plt.legend(loc='upper left')
plt.show()
# +
ppn = Perceptron(eta=0.1, n_iter=5, shuffle=False)
ppn.fit(X, y)
plt.plot(range(1, len(ppn.errors_) + 1), ppn.errors_, marker='o')
plt.xlabel('Epochs')
plt.ylabel('Number of updates')
plt.show()
# -
plot_decision_regions(X, y, classifier=ppn)
# NOTE(review): the x-axis is the LEFT torque and the y-axis the RIGHT
# torque, so these labels look swapped -- confirm intent.
plt.xlabel('right')
plt.ylabel('left')
plt.legend(loc='upper left')
plt.show()
| Classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import random as random
from collections import Counter
from math import factorial
import matplotlib.pyplot as plt
# %matplotlib inline
def coin():
    """Toss 5 fair coins and return the number of heads (0-5).

    Each toss is random.randint(0, 1), with 0 meaning head and 1 tail.
    """
    toss = []
    for i in range(0, 5):  # 5 coins are tossed
        s = random.randint(0, 1)  # 0 for head and 1 for tail
        toss.append(s)
    # FIX: the original built a Counter/DataFrame and read Frequency[0],
    # but the sort_values() result was discarded, so row 0 was the count of
    # whichever face happened to appear FIRST (sometimes tails), not the
    # head count. Count heads directly instead.
    return toss.count(0)
# Simulate N experiments of 5 coin tosses and tabulate how often each head
# count occurred.
N=100
data=[]
for i in range(N):
    dd=coin()
    data.append(dd)
cc= Counter(data)
H= dict(cc)
DF=pd.DataFrame(H.items(),columns=['Head','Frequency'])
DF
# Sort by head count in place so the plots are in x-axis order.
DF.sort_values('Head',axis=0,ascending=True,inplace=True)
DF
plt.bar(DF['Head'],DF['Frequency'],color='magenta',edgecolor='black')
plt.plot(DF['Head'],DF['Frequency'],color='blue',linewidth=2)
plt.xlabel('No.of Heads',fontsize=12)
plt.ylabel('Frequency',fontsize=12)
plt.title('Tossing of coins',fontsize=16)
plt.show()
# +
# Cumulative frequency for the ogive (less-than / greater-than) curves.
DF['Cum.Freq.'] = DF['Frequency'].cumsum()
DF
# +
plt.plot(DF['Head'], DF['Cum.Freq.'], c='red',label='Less than')
plt.plot(DF['Head'], N-DF['Cum.Freq.'], c='green',label='Greater than')
plt.xlabel('No. of Heads', fontsize=12)
plt.ylabel('Cum.freq.',fontsize=12)
plt.legend()
plt.title('Ogive Curve', fontsize = 16 ,color='magenta')
plt.tight_layout()
plt.show()
# -
# Summarize the empirical distribution of head counts: moments, shape
# diagnostics and quartiles.
rdata=sorted(data)
mean=np.mean(rdata)
sd=np.sqrt(np.var(rdata))
print("Mean :",mean)
print("standard deviation :",sd)
sk=pd.DataFrame(rdata).skew()[0]
print("skewness :",sk)
ku=pd.DataFrame(rdata).kurtosis()[0]
print("kurtosis :",ku)
if(sk>0):
    print('Distribution is positively tail')
elif(sk<0):
    print('Distribution is negatively tail')
elif(sk==0):  # FIX: was `sk1`, an undefined name (NameError when sk == 0)
    print('Distribution is symmetric ')
if(ku>0):
    print('Distribution is Leptokurtic')
elif(ku<0):
    print('Distribution is Platykurtic')
elif(ku==0):
    print('Distribution is Mesokurtic')
Q=np.percentile(rdata, [25, 50, 75]) # percentile
print('first quartiles is ',Q[0])
print('Median is ',Q[1])
print('third quartiles is ',Q[2])
# Binomial distribution
# $prob(x) = \frac{n!}{x!\,(n-x)!}\, p^x q^{(n-x)}$
def binomial(x, n, p):
    """Probability of exactly x successes in n Bernoulli(p) trials."""
    n_choose_x = factorial(n) / (factorial(x) * factorial(n - x))
    return n_choose_x * (p ** x) * ((1 - p) ** (n - x))
# Fit a Binomial(n, 0.5) to the observed head counts and compare the
# expected and observed frequencies.
# NOTE(review): n is set to max(head count)+1, which is the number of
# distinct outcomes, not necessarily the number of coins (5) -- confirm this
# is the intended number of trials for the fit.
n=max(DF['Head'])+1
ef=[] #expected frequency
for x in range(min(DF['Head']), n):
    y =binomial(x, n, 0.5)
    ef.append(round(N*y))
DF['Exp.Freq.']=ef
DF
plt.plot(DF['Head'], ef,'r-',label='Exp.freq.')
plt.plot(DF['Head'], DF['Frequency'],'g-.', label='Obs.freq.')
plt.xlabel("No. of head", fontsize=12)
plt.ylabel("Frequency", fontsize=12)  # FIX: label typo ("Freqyency")
plt.legend()
plt.title('Binomial Distribution', fontsize = 16 ,color='blue')
plt.tight_layout()
plt.show()
# Pearson chi-square statistic of observed vs expected frequencies.
chi2=sum((ef-DF['Frequency'])**2/ef) #DF['Frequency']=obs.frequency
print('chi square is ',chi2)
| coin toss.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# 2D harmonic oscillator `vs` two 1D harmonic oscillators
import phlab
from matplotlib import pyplot as plt
# Create a phlab RIXS workspace.
# NOTE(review): phlab's API is not visible here; parameter semantics below
# (coupling, omega_ph, gamma_ph, nm, gamma) are presumed from their names --
# confirm against the phlab documentation.
ws=phlab.rixs()
# Here we will create few models and compare results
model1 = ws.model_single_osc(name = 'first mode')
model2 = ws.model_single_osc(name = 'second mode')
model3 = ws.model_double_osc( name= '2d')
# - Key input parameters:
# Model 1 (first mode):
model1.input['coupling'] = 0.09
model1.input['omega_ph'] = 0.03
model1.input['gamma_ph'] = 0.001
# Model 2 (second mode):
model2.input['coupling'] = 0.1
model2.input['omega_ph'] = 0.08
model2.input['gamma_ph'] = 0.001
# Model 3 (2D model): carries both modes' couplings/frequencies at once.
model3.input['coupling0'] = 0.09
model3.input['omega_ph0'] = 0.03
model3.input['coupling1'] = 0.1
model3.input['omega_ph1'] = 0.08
model3.input['nm'] = 15
model3.input['gamma'] = 0.105
model3.input['gamma_ph'] = 0.001
model3.color = 'r'
# Run all three models :
for model in [model1,model2,model3]:
    model.run()
# +
# Overlay the two 1D spectra (and their sum) normalized by the sum's peak,
# then let phlab draw the 2D model on the same figure for comparison.
plt.figure(figsize = (10,5))
vitem=ws.visual(model_list=[model3],exp=[])
plt.plot(model1.x, (model1.y)/max(model1.y+model2.y),
         color = 'skyblue',
         linewidth = 2,
         label = 'model1',alpha = 1)
plt.plot(model2.x, (model2.y)/max(model1.y+model2.y),
         color = 'lightpink',
         linewidth = 2,
         label = 'model2',alpha = 1)
plt.plot(model1.x, (model1.y+model2.y)/max(model1.y+model2.y),
         color = 'b',
         linewidth = 2,
         label = 'model1 + model2')
plt.xlim([-0.1,0.6])
vitem.show(scale = 0)
# -
| examples/03_example/.ipynb_checkpoints/03_example_notebook-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# @author <NAME>
# This example demonstrate how to use tinn library to solve a traditional xor problem
# +
#Importing the required modules
from tinn.neural import NeuralNet
from tinn.layer import Layer
import numpy as np
# +
#Creating a 2 layer neural network with 1 hidden layer with 3 neurons and 1 output layer
# Build a 2-layer feed-forward network with the tinn library.
# NOTE(review): the input dimension is presumably inferred from the training
# data at train time -- confirm against tinn's API.
model=NeuralNet()
# Hidden layer with 3 neurons and sigmoid activation function
model.add(Layer(3,'sigmoid'))
# Output layer with 1 neuron and sigmoid activation function
model.add(Layer(1,'sigmoid'))
# +
# Defining the training data
# # +----------------+-----+
# | || | |
# | A || B | X |
# # +----------------------+
# | || | |
# | 0 || 0 | 0 |
# | || | |
# | 0 || 1 | 1 |
# | || | |
# | 1 || 0 | 1 |
# | || | |
# | 1 || 1 | 0 |
# # +----------------+-----+
#
# XOR truth table: 4 samples, 2 inputs, 1 output.
inputs=np.asarray([[0,0],[0,1],[1,0],[1,1]])
outputs=np.asarray([[0],[1],[1],[0]])
# -
inputs.shape
outputs.shape
# Reshape the inputs and outputs to work with tinn (one column vector per
# sample).
inputs=inputs.reshape(4,2,1)
outputs=outputs.reshape(4,1,1)
# NOTE(review): `epocs` and `suffle` look misspelled but presumably match
# tinn's actual keyword names -- verify against the tinn API before "fixing".
model.train(inputs,outputs,learning_rate=0.01,epocs=10,suffle=True)
# Output for (0,0)-> 0.0581
model.predict(inputs[0])
#Output for (0,1)-> 0.9303
model.predict(inputs[1])
#Output for (1,0)->0.9439
model.predict(inputs[2])
#Output for (1,1)->0.0558336
model.predict(inputs[3])
| examples/Xor Problem.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Problem Statement:
#
# Prepare a classification model using SVM for salary data
#
# Data Description:
#
# age -- age of a person
# workclass -- A work class is a grouping of work
# education -- Education of an individuals
# maritalstatus -- Marital status of an individulas
# occupation -- occupation of an individuals
# relationship --
# race -- Race of an Individual
# sex -- Gender of an Individual
# capitalgain -- profit received from the sale of an investment
# capitalloss -- A decrease in the value of a capital asset
# hoursperweek -- number of hours work per week
# native -- Native of an individual
# Salary -- salary of an individual
#
import pandas as pd
import numpy as np
from sklearn.svm import SVC
from sklearn.metrics import classification_report
from sklearn import metrics
import warnings
warnings.filterwarnings("ignore")
from sklearn.preprocessing import LabelEncoder
# +
# Importing the train dataset
# Load the training split from a local (Windows) path.
salary_train = pd.read_csv("G:/data sceince/Assignments/SVM/SalaryData_Train(1).csv")
salary_train.head()
# +
# Importing the test dataset
salary_test = pd.read_csv("G:/data sceince/Assignments/SVM/SalaryData_Test(1).csv")
salary_test.head()
# -
# Quick structural sanity checks on both splits.
salary_train.info()
salary_test.info()
salary_train.shape
salary_train.tail()
# +
#Converting the Y variable into labels
# NOTE(review): the encoder is fit separately on test and train; for the
# binary Salary column the alphabetical mapping coincides, but refitting on
# test data is fragile in general.
label_encoder = LabelEncoder()
salary_test['Salary'] = label_encoder.fit_transform(salary_test.Salary)
salary_train['Salary'] = label_encoder.fit_transform(salary_train.Salary)
# +
# converting the categorical columns into dummy variables
salary_train = pd.get_dummies(salary_train)
salary_test = pd.get_dummies(salary_test)
# Robustness fix: one-hot encoding the two splits separately can produce
# different column sets/orders if a category is missing from one split.
# Align the test frame to the train columns, filling absent dummies with 0.
salary_train, salary_test = salary_train.align(salary_test, join='left', axis=1, fill_value=0)
# -
salary_train.dtypes
# +
# assigning the training data to x_train and y_train and test data to x_test and y_test
x_train = salary_train.drop('Salary', axis = 1)
y_train = salary_train['Salary']
x_test = salary_test.drop('Salary', axis = 1)
y_test = salary_test['Salary']
# -
# Linear-kernel SVM. NOTE: gamma is ignored by the linear kernel, so only C
# actually matters here.
svm = SVC(kernel = 'linear', gamma = 0.1, C = 1)
svm.fit(x_train,y_train)
preds = svm.predict(x_test)
preds
# Accuracy is symmetric in its arguments, so the (preds, y_test) order is
# harmless here; sklearn's convention is (y_true, y_pred).
accuracy = metrics.accuracy_score(preds,y_test)
accuracy
# Inference : Accuracy of our model is 79.64%
| Assign_SVM_Salary_data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_python3
# language: python
# name: conda_python3
# ---
# # Using Markov transition fields and network graphs to uncover time series behavior
# Markov transition fields (MTF) is a visualization technique to highlight behavior of time series. This notebook dives into how we build and interpret these fields. We will then further build on top of MTF by exploring network graphs interpretation.
#
# ## Initialization
# ---
# ### Notebook instance update
# This is a time series imaging analysis module:
# !pip install --upgrade tsia
# Restart notebook kernel:
from IPython.core.display import HTML
# Emitting this HTML in a Jupyter notebook triggers a kernel restart so the
# freshly upgraded package is picked up; it is a no-op outside Jupyter.
HTML("<script>Jupyter.notebook.kernel.restart()</script>")
# ### Imports
# +
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import sys
from matplotlib import gridspec
from numba import njit, prange
from pyts.image import MarkovTransitionField
import tsia.plot
import tsia.markov
import tsia.network_graph
# -
# %matplotlib inline
plt.style.use('Solarize_Light2')
# ## Loading data of interest
# ---
# ### Load tag time series
# +
DATA = 'data'
# Load the signal, parse ISO timestamps and index by time for plotting.
tag_df = pd.read_csv(os.path.join(DATA, 'signal-1.csv'))
tag_df['timestamp'] = pd.to_datetime(tag_df['timestamp'], format='%Y-%m-%dT%H:%M:%S.%f')
tag_df = tag_df.set_index('timestamp')
fig = plt.figure(figsize=(28,4))
plt.plot(tag_df, linewidth=0.5)
plt.show()
# -
# ## Markov Transition Fields (MTF)
# ---
# ### MTF overview
# The `pyts` package includes some time series imaging capabilities out of the box, like the **Markov Transition Fields**:
# +
# Discretize the signal into 8 quantile bins and compute a 48x48 MTF.
n_bins = 8
strategy = 'quantile'
X = tag_df.values.reshape(1, -1)
n_samples, n_timestamps = X.shape
mtf = MarkovTransitionField(image_size=48, n_bins=n_bins, strategy=strategy)
tag_mtf = mtf.fit_transform(X)
# -
fig = plt.figure(figsize=(5,4))
ax = fig.add_subplot(111)
_, mappable_image = tsia.plot.plot_markov_transition_field(mtf=tag_mtf[0], ax=ax, reversed_cmap=True)
plt.colorbar(mappable_image);
# ### Process decomposition
# In the following sections, we are going to decompose the way a Markov transition fields is built to better understand its properties and how it can be used to build our understanding of time series behavior:
#
# 1. Discretize the timeseries along the different values it can take
# 2. Build the Markov transition **matrix**
# 3. Compute transition probabilities
# 4. Compute the Markov transition **field**
# 5. Compute an aggregated MTF
# 6. Extracting metrics
# 7. Mapping transition probabilities back to the initial signals
# #### **1 -** Discretization
# Quantile-bin the signal: X_binned holds one bin index per timestamp.
X_binned, bin_edges = tsia.markov.discretize(tag_df)
# Each value `x[i]` of the timeseries is associated to one of the bin (quantile) we just computed:
X_binned
# The first and the last bin edges respectively correspond to the lowest and highest value taken by the signal. The intermediate bin edges are computed by the discretizer. This quantization process bins the different values taken by this signal in the following bins:
bin_edges
# Let's plot these bins over the time series to visualize how the values are discretized:
tsia.plot.plot_timeseries_quantiles(tag_df, bin_edges, label='signal-1')
plt.legend();
# #### **2 -** Build the Markov transition matrix
# Count bin-to-bin transitions between consecutive timestamps.
X_mtm = tsia.markov.markov_transition_matrix(X_binned)
X_mtm
# **How to read the Markov transition matrix?** Let's have a look at the first row: **`| 465 | 86 | 1 | 0 | 0 | 0 | 0 | 0 |`**.
#
# * The first cell tells us that 465 points that are in the bin 0 (with values between 82.43 and 118.42) are followed by a value that stays in this very bin.
# * The second cell tells us that 86 points that are in bin 0 transition to the next bin (with a value between 118.42 and 137.42)
# * ...
# * The eighth cell tells us that no point from bin 0 transition to the last bin
#
# Let's now have a look at the second row: **`| 80 | 405 | 63 | 2 | 0 | 2 | 0 | 0 |`**. The first cell tells us that 80 points that are in bin 1 (with a value between 118.42 and 137.42) are followed by a value that is in bin 0 (between 82.43 and 118.42)
#
# The diagonal captures the frequency at which a quantile transitions to itself (*self-transition frequency*).
# #### **3 -** Compute transition probabilities
# **Normalization**: we will now normalize each bin. This matrix contains now the transition probability on the magnitude axis.
# Row-normalize the counts into transition probabilities (shown as %).
X_mtm = tsia.markov.markov_transition_probabilities(X_mtm)
np.round(X_mtm * 100, 1)
# The first column correspond to the probabilities that a given value (part of a given bin) transitions to bin 0. The second column are probabilities that a given value transitions to bin 1, etc. For instance, if a given value `X[i]` is equal to **130.0**, it is between **118.42 and 137.42** and it's binned in bin 1. Given the matrix above, there is:
# * A **14.5%** chance that the next value will be between **82.43 and 118.42** (bin 0)
# * A **73.4%** chance to stay within the same range (between **118.42 and 137.42**, bin 1)
# * A **11.4%** chance that the next value will be between **137.42 and 156.78** (bin 2)
# * Etc.
# #### **4 -** Compute the Markov transition **field**
# The idea of the transition field is to represent the Markov transition probabilities sequentially to preserve the information in the time domain. The MTF generation process aligns each probability along the temporal order to build the MTF for the whole signal:
# +
@njit()
def _markov_transition_field(X_binned, X_mtm, n_timestamps, n_bins):
    """Spread the Markov transition probabilities along the time axis.

    X_binned: bin index of the signal at each timestamp
    X_mtm: n_bins x n_bins bin-to-bin transition probability matrix
    n_timestamps: number of samples in the signal
    n_bins: unused in the computation (kept in the signature)
    """
    X_mtf = np.zeros((n_timestamps, n_timestamps))
    # We loop through each timestamp twice to build a N x N matrix:
    # (without parallel=True on @njit, prange behaves like a plain range)
    for i in prange(n_timestamps):
        for j in prange(n_timestamps):
            # We align each probability along the temporal order: MTF(i,j)
            # denotes the transition probability of the bin 'i' to the bin
            # 'j':
            X_mtf[i, j] = X_mtm[X_binned[i], X_binned[j]]
    return X_mtf
X_mtf = _markov_transition_field(X_binned, X_mtm, n_timestamps, n_bins)
np.round(X_mtf * 100, 1)
# -
# **How to read the Markov transition field?** **`MTF[i,j]`** denotes the transition probability from the quantile **`q[i]`** to the quantile **`q[j]`**. Let's have a look at a couple values in the second row of this field:
# Inspect the first ten entries of the MTF's second row (as percentages).
second_row = np.round(X_mtf * 100, 1)[1]
second_row[0:10]
# Let's have a look at **`M[1,2]`**:
#
# * We look in which bin falls the signal at timestep **`x[1]`**: this is **`X_binned[1] = 2`**
# * We also look in which bin falls the signal at timestep **`x[2]`**: this is **`X_binned[2] = 2`**
# * We then look up into the Markov transition matrix to get the probability that a value from bin 2 stays into bin 2
# * This value is **68.8%**, hence **`M[1,2] = 68.8%`**
#
# What does this mean? Well, the transition that occurs between timestep `x[1]` and timestamp `x[2]` has an **68.8%** chance from happening when looking at the whole signal.
#
# Let's now have a look at **`M[1,6]`**:
#
# * We look in which bin falls the signal at timestep **`x[1]`**: this is **`X_binned[1] = 2`**
# * We also look in which bin falls the signal at timestep **`x[6]`**: this is **`X_binned[6] = 1`**
# * We then look up into the Markov transition matrix to get the probability that a value from bin 2 transitions into bin 1
# * This value is **10.7%**, hence **`M[1,6] = 10.7%`**
#
# The transition that happens between timestep `x[1]` and `x[6]` has a **10.7%** chance of happening when looking at the whole signal.
# Render the full (n_timestamps x n_timestamps) MTF as an image.
fig = plt.figure(figsize=(15,12))
ax = fig.add_subplot(1,1,1)
_, mappable_image = tsia.plot.plot_markov_transition_field(mtf=X_mtf, ax=ax, reversed_cmap=True)
plt.colorbar(mappable_image);
# #### **5 -** Compute an aggregated MTF
# To make the image size manageable and computation more efficient *(the above MTF is a matrix of dimension 4116 x 4116)*, we reduce the MTF size by averaging the pixels in each non-overlapping `m x m` patch with the blurring kernel `1/m²`. `m` is the image size and we set it, arbitrarily, to 48 above. That is, we aggregate the transition probabilities in each subsequence of length `m = 48` together. Let's compute an aggregated MTF accordingly:
# +
# Downsample the full MTF to image_size x image_size by averaging each
# non-overlapping window (blurring-kernel aggregation).
image_size = 48
window_size, remainder = divmod(n_timestamps, image_size)
if remainder == 0:
    X_amtf = np.reshape(
        X_mtf, (image_size, window_size, image_size, window_size)
    ).mean(axis=(1, 3))
else:
    # Needs to compute piecewise aggregate approximation in this case. This
    # is fully implemented in the pyts package
    # NOTE(review): this branch leaves X_amtf undefined, so the plotting
    # below would raise NameError when n_timestamps % image_size != 0.
    pass
# -
# We can now plot the aggregated MTF and find the initial results obtained with the **`pyts.image`** Python module:
fig = plt.figure(figsize=(5,4))
ax = fig.add_subplot(1,1,1)
_, mappable_image = tsia.plot.plot_markov_transition_field(mtf=X_amtf, ax=ax, reversed_cmap=True)
plt.colorbar(mappable_image);
# #### **6 -** Extracting metrics
# The diagonal of the MTF contains probabilities for the self-transitions:
# * Self-transition probabilities are probabilities to move from a quantile to the same one on the next timestep).
# * We can extract the characteristic of this distribution (mean and standard deviation).
# * The other diagonals of the MTF are harder to interpret but can still be plotted.
_ = tsia.plot.plot_mtf_metrics(X_amtf)
# #### **7 -** Mapping transition probabilities back to the initial signals
# One way we can map back these probabilities on the original signal, is to take the transition probabilities shown on the diagonal of the MTF:
# Color the original time series by the aggregated MTF diagonal values.
mtf_map = tsia.markov.get_mtf_map(tag_df, X_amtf, reversed_cmap=True)
_ = tsia.plot.plot_colored_timeseries(tag_df, mtf_map)
def plot_colored_timeseries(tag, image_size=96, colormap='jet'):
    """Load a tag's CSV, compute its MTF and plot field + colored signal.

    tag: signal name, resolved to '{DATA}/{tag}.csv'
    image_size: MTF resolution passed to MarkovTransitionField
    colormap: accepted but currently unused by the body
    Returns the fitted MTF array (shape [1, image_size, image_size]).
    Relies on the module-level globals DATA, n_bins and strategy.
    """
    # Loads the signal from disk:
    tag_df = pd.read_csv(os.path.join(DATA, f'{tag}.csv'))
    tag_df['timestamp'] = pd.to_datetime(tag_df['timestamp'], format='%Y-%m-%dT%H:%M:%S.%f')
    tag_df = tag_df.set_index('timestamp')
    # Build the MTF for this signal:
    X = tag_df.values.reshape(1, -1)
    mtf = MarkovTransitionField(image_size=image_size, n_bins=n_bins, strategy=strategy)
    tag_mtf = mtf.fit_transform(X)
    # Initializing figure:
    fig = plt.figure(figsize=(28, 4))
    gs = gridspec.GridSpec(1, 2, width_ratios=[1,4])
    # Plotting MTF:
    ax = fig.add_subplot(gs[0])
    ax.set_title('Markov transition field')
    _, mappable_image = tsia.plot.plot_markov_transition_field(mtf=tag_mtf[0], ax=ax, reversed_cmap=True)
    plt.colorbar(mappable_image)
    # Plotting signal:
    ax = fig.add_subplot(gs[1])
    ax.set_title(f'Signal timeseries for tag {tag}')
    mtf_map = tsia.markov.get_mtf_map(tag_df, tag_mtf[0], reversed_cmap=True, step_size=0)
    _ = tsia.plot.plot_colored_timeseries(tag_df, mtf_map, ax=ax)
    return tag_mtf
# Let's have a look at a coarse MTF (`image_size=8` makes it quite coarse) to better understand what we can extract from this Markov field:
# Collect per-signal MTF summary statistics, starting with signal-1 at a
# deliberately coarse resolution (image_size=8).
stats = []
mtf = plot_colored_timeseries('signal-1', image_size=8)
s = tsia.markov.compute_mtf_statistics(mtf[0])
s.update({'Signal': 'signal-1'})
stats.append(s)
# **What is the interpretation we can give to this plot?**
# * On average, the transition probability of the first section (in yellow) is around 19%: this means that the transitions we see in this section do not happen very often (19% of the time) when we look at the whole signal.
# * In contrast, the section in dark blue (the sixth one), have transitions that happen more often in this signal (around 50% of the time)
#
# **The blue section is closer to the normal behavior of this signal whereas the yellow section is diverging more from it.** Let's have a look at the results we can get with a less coarse MTF and also from other signals. Let's also plot the MTF metrics for each of these signals...
# Repeat the analysis at a finer resolution (image_size=48) for signals 2-7.
# Refactored from six copy-pasted cells into one loop; the processing order
# and the collected statistics are identical to the original.
for signal in [f'signal-{i}' for i in range(2, 8)]:
    mtf = plot_colored_timeseries(signal, image_size=48)
    _ = tsia.plot.plot_mtf_metrics(mtf[0])
    s = tsia.markov.compute_mtf_statistics(mtf[0])
    s.update({'Signal': signal})
    stats.append(s)
# Gather the per-signal statistics into a single summary table:
stats = pd.DataFrame(stats)
stats.set_index('Signal')
# ## Network graphs
# ---
# ### Overview
# From the MTF, we can generate a graph `G = (V, E)`: we have a direct mapping between vertex *V* and the time index *i*. From there, there is two possible encoding of interest:
# * **Flow encoding:** this representation help us observe where do the big information flow occurs
# * We map the flow of time to the vertex, using a color gradient from T0 to TN to color each node of the network graph
# * We use the MTF weight to color the edges between vertices
# * **Modularity encoding:** modularity is an important pattern in network analysis to identify specific local structures.
# * We map the module label (which *community ID*) to each vertex with a specific color attached to each community
# * We map the size of the vertices to a clustering coefficient
# * We map the edge color to the module label of the target vertex
#
# Let's start by loading a first signal and gets its MTF:
# +
# Reload signal-1 and recompute its 48x48 MTF for the network-graph section.
tag_df = pd.read_csv(os.path.join(DATA, 'signal-1.csv'))
tag_df['timestamp'] = pd.to_datetime(tag_df['timestamp'], format='%Y-%m-%dT%H:%M:%S.%f')
tag_df = tag_df.set_index('timestamp')
image_size = 48
X = tag_df.values.reshape(1, -1)
mtf = MarkovTransitionField(image_size=image_size, n_bins=n_bins, strategy=strategy)
tag_mtf = mtf.fit_transform(X)
# -
# ### Process decomposition
# In the following sections, we are going to decompose the way a network graph is built to better understand its properties and how it can be used to build our understanding of time series behavior:
#
# 1. Build the network graph
# 2. Compute the partitions and modularity and encode this information in the network graph representation
# 3. Plot the network graph
# 4. Map the partitions color back to the time series
# #### **1 -** Build the network graph
# We extract the graph from the MTF and plot a vanilla network graph from there:
# Build a graph whose edges are weighted by the MTF, then draw it unstyled.
G = tsia.network_graph.get_network_graph(tag_mtf[0])
_ = tsia.plot.plot_network_graph(G, title='Network graph')
# #### **2 -** Encode partitions and modularity into the network graph representation
# The above network graph does not give us a lot of new insights. We will search for the communities in this network graph with the **[Louvain method](https://en.wikipedia.org/wiki/Louvain_method)**: **modularity** is a scale value between −0.5 (non-modular clustering) and 1 (fully modular clustering) that measures the relative density of edges inside communities with respect to edges outside communities. Optimizing this value theoretically results in the best possible grouping of the nodes of a given network, however going through all possible iterations of the nodes into groups is impractical so heuristic algorithms are used. In the Louvain Method of community detection, first small communities are found by optimizing modularity locally on all nodes, then each small community is grouped into one node and the first step is repeated. This method is implemented in the `community` package (documented **[here](https://python-louvain.readthedocs.io/en/latest/)**) and we use it in the `tsia` package to encode our network graph:
# Compute the community (Louvain) encoding used to style the graph.
encoding = tsia.network_graph.get_modularity_encoding(G)
# On top of modularity and number of communities (or partitions), there are other statistics of interest we can compute from a network graph:
stats = tsia.network_graph.compute_network_graph_statistics(G)
stats
# #### **3 -** Plot the network graph
# Title the plot with the partition count and modularity score (bold via
# mathtext), then draw the community-encoded graph.
nb_partitions = stats['Partitions']
modularity = stats['Modularity']
title = rf'Partitions: $\bf{nb_partitions}$ - Modularity: $\bf{modularity:.3f}$'
_ = tsia.plot.plot_network_graph(G, title=title, encoding=encoding)
# **What is the interpretation we can give to this plot?** The modularity metric and the Louvain methods uncovers 4 communities in this signal. At this stage, it is difficult to understand how we can leverage this knowledge though...
# #### **4 -** Map the partition colors back to the time series
# Project the community colors back onto the original time series.
ng_map = tsia.network_graph.get_network_graph_map(tag_df, encoding, reversed_cmap=True)
_ = tsia.plot.plot_colored_timeseries(tag_df, ng_map)
# Each community detected in the network graphs has its own shapelet in the original time series. Let's now further build our intuition by drawing the same plot for the other signals from before:
def plot_communities_timeseries(tag, image_size=48, colormap='jet'):
    """Plot a signal's MTF network graph beside its community-coloured series.

    Parameters
    ----------
    tag : str
        Signal name; data is read from ``<DATA>/<tag>.csv``.
    image_size : int
        Markov transition field image size (number of aggregated bins).
    colormap : str
        Unused; kept for interface compatibility.

    Returns
    -------
    dict
        Network-graph statistics (partitions, modularity, ...).
    """
    # Load the signal and index it by timestamp:
    signal_df = pd.read_csv(os.path.join(DATA, f'{tag}.csv'))
    signal_df['timestamp'] = pd.to_datetime(signal_df['timestamp'], format='%Y-%m-%dT%H:%M:%S.%f')
    signal_df = signal_df.set_index('timestamp')

    # Markov transition field of the (single) flattened series:
    mtf = MarkovTransitionField(image_size=image_size, n_bins=n_bins, strategy=strategy)
    field = mtf.fit_transform(signal_df.values.reshape(1, -1))

    # Build the network graph and extract its community statistics:
    G = tsia.network_graph.get_network_graph(field[0])
    statistics = tsia.network_graph.compute_network_graph_statistics(G)
    nb_partitions = statistics['Partitions']
    modularity = statistics['Modularity']
    encoding = tsia.network_graph.get_modularity_encoding(G, reversed_cmap=True)
    ng_map = tsia.network_graph.get_network_graph_map(signal_df, encoding, reversed_cmap=True)

    # Network graph on the left (1/5 of the width), coloured series on the right:
    fig = plt.figure(figsize=(28, 4))
    gs = gridspec.GridSpec(1, 2, width_ratios=[1,4])
    title = rf'Partitions: $\bf{nb_partitions}$ - Modularity: $\bf{modularity:.3f}$'
    tsia.plot.plot_network_graph(G, ax=fig.add_subplot(gs[0]), title=title, reversed_cmap=True, encoding=encoding)
    tsia.plot.plot_colored_timeseries(signal_df, ng_map, ax=fig.add_subplot(gs[1]))
    return statistics
# +
signals = [f'signal-{i}' for i in range(1,7)]
stats = []
for signal in signals:
s = plot_communities_timeseries(signal)
s.update({'Signal': signal})
stats.append(s)
# -
# Features that can be extracted from network graphs:
stats = pd.DataFrame(stats)
stats.set_index('Signal')
# ## Conclusion and future work
# ---
# Mapping **Markov transition fields** transition probabilities back onto the time series signal appears like a great tool to provide feedback to end users about the behavior of a given signal. On the other hand, mapping network graphs communities back to original time series does not seem to bring much values: however, the features extracted from these networks graphs (see table above) will be interesting to use to automatically qualify the usability of the signals for a given use case. As one of the future area of investigation, we are interested into investigating how these tools could be applied to multivariate timeseries (especially highly dimensional ones as we can find in industrial settings with hundreds if not thousands of sensors per industrial equipment).
#
# ### Potential usage
# Markov transition fields and network graphs could be potentially used:
# * To compare training and validation dataset consistency (and detect concept drift).
# * To train semi-supervised anomaly prediction models: train a temporal CNN on normal signals, perform and inline MTF calculation every minute (for instance) and compare inference results with the trained model to detect signal behavior change.
# * To further explain why an unsupervised time series model found something of interest.
#
# ## References
# ---
# * <NAME>; <NAME> (2015), *Imaging Time-Series to Improve Classification and Imputation*, Arxiv
# * <NAME>.; <NAME> (2018) *Encoding temporal Markov dynamics in graph for visualizing and mining time series* Arxiv.
# | mtf-deep-dive.ipynb
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="lo88NNs-CBf9"
# # DataApplication Lab baseline
# ## 比赛: M5 时序预测
#
#
# 本 NoteBook 是 Kaggle M5 时序预测比赛的 Baseline,主要使用机器学习的建模方式进行时序的建模
# 和预测。
#
# - 特征工程来参考:https://www.kaggle.com/kneroma/-m5-first-public-notebook-under-0-50
# - groupKfold参考:https://www.kaggle.com/ragnar123/simple-lgbm-groupkfold-cv
#
#
# - **BaseLine步骤**:
# 1. 数据分析 EDA
# 2. 特征工程
# 3. 模型调参
# 4. stacking
#
# >**提示:**Code 和 Markdown 区域可通过 **Shift + Enter** 快捷键运行。此外,Markdown可以通过双击进入编辑模式。
#
# - 缺陷:
# 1. groupKfold参考kernel的线上分数是0.53,但是现在我们single model部分提交的线上结果分数达到4,要回头认真比对groupKfold
# 2. 在调参过程中也要更改folds=groupKfold。
#
# ---
# + [markdown] id="48Do5iL1CBf-"
# ---
# ## 1、数据分析
#
# 为了节约时间,我们直接对我们后面建模有用的结果进行分析,关于数据的详细分析可以参考 [EDA](https://www.kaggle.com/headsortails/back-to-predict-the-future-interactive-m5-eda)
#
# - 查看 sales 数据前几行
# - 查看 sales 数据聚合结果趋势
# - 查看 sales 数据标签分布
# + id="Fl8o_BitcAFg" colab={"base_uri": "https://localhost:8080/", "height": 71} outputId="6940b4ae-44fa-470b-8315-b3e628f9573e"
import numpy as np
from scipy import sparse
import pandas as pd
import lightgbm as lgb
import re
import string
import time
import seaborn as sns
import itertools
import sys
from sklearn import preprocessing, pipeline, metrics, model_selection
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
import gc
from datetime import datetime, timedelta
from sklearn.model_selection import GroupKFold
from sklearn import metrics
import matplotlib.pyplot as plt
pd.set_option('display.max_colwidth',100)
# %matplotlib inline
# + id="slPZTO4jDRNi" colab={"base_uri": "https://localhost:8080/", "height": 54} outputId="1072382a-3f9a-4829-de3c-89af5d171c86"
from google.colab import drive
drive.mount('/content/drive')
# + id="ebC2t9YSCiWi"
# !cp -r /content/drive/My\ Drive/colab/kaggle_M5/m5-forecasting-accuracy/ /content/
# + id="UVUNwzTkCBgG"
sale_data = pd.read_csv('/content/m5-forecasting-accuracy/sales_train_validation.csv')
# + id="jhBTjbGhCBgN" colab={"base_uri": "https://localhost:8080/", "height": 270} outputId="beb0c213-36ff-4c8f-bc45-e27e33d32104"
print(sale_data.shape)
sale_data.head(5)
# + id="lhWhp9OiCBgS"
day_data = sale_data[[f'd_{day}' for day in range(1,1914)]]
total_sum = np.sum(day_data,axis=0).values
# + id="SOT7WC4lCBgW" colab={"base_uri": "https://localhost:8080/", "height": 286} outputId="a2f0f3d7-90d5-4ed7-e7de-57fd1f44650f"
plt.plot(total_sum)
# + id="Ayk70PcKCBgZ" colab={"base_uri": "https://localhost:8080/", "height": 364} outputId="ddce7c39-2bdf-4a15-9014-b1362551fc1e"
plt.hist(day_data[day_data<100].values.reshape(-1),bins=100);
# + [markdown] id="ACgRPvRfCBgb"
# ---
# ## 2、特征工程
#
# 选定机器学习的建模方案,核心思想是对时间序列抽取窗口特征。
#
# <img src="机器学习建模.jpg" style="width:1100px;height:500px;float:center">
#
#
#
# 抽取窗口特征:
#
# - 前7天
# - 前28天
# - 前7天均值
# - 前28天均值
#
# 关联其他维度信息
#
# - 日期
# - 价格
# + id="WUHY2hQzVDoJ"
# helper functions to reduce memory
def reduce_mem_usage(df, verbose=True):
    """Shrink numeric columns of *df* to the narrowest dtype that holds them.

    Each int/float column is downcast to the first dtype (int8..int64,
    float16..float64) whose representable range strictly contains the
    column's min and max. The frame is modified in place and also returned.

    Parameters
    ----------
    df : pandas.DataFrame
        Frame to downcast in place.
    verbose : bool
        When True, print the memory saving achieved.
    """
    downcastable = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
    mem_before = df.memory_usage().sum() / 1024**2
    for name in df.columns:
        if df[name].dtypes not in downcastable:
            continue  # leave object / datetime / already-minimal columns alone
        lo = df[name].min()
        hi = df[name].max()
        if str(df[name].dtypes)[:3] == 'int':
            # First integer type whose bounds strictly contain [lo, hi]:
            for target in (np.int8, np.int16, np.int32, np.int64):
                limits = np.iinfo(target)
                if lo > limits.min and hi < limits.max:
                    df[name] = df[name].astype(target)
                    break
        else:
            # Same idea for floats, falling back to float64:
            for target in (np.float16, np.float32):
                limits = np.finfo(target)
                if lo > limits.min and hi < limits.max:
                    df[name] = df[name].astype(target)
                    break
            else:
                df[name] = df[name].astype(np.float64)
    mem_after = df.memory_usage().sum() / 1024**2
    if verbose:
        print('Mem. usage decreased to {:5.2f} Mb ({:.1f}% reduction)'.format(
            mem_after, 100 * (mem_before - mem_after) / mem_before))
    return df
# + id="vgRkyf1lM2eQ"
def create_train_data(train_start=750,test_start=1800,is_train=True):
    """Load and assemble the long-format M5 sales table.

    Parameters
    ----------
    train_start : int
        First day column (d_<train_start>) kept when building the training
        set; earlier days are dropped to save memory.
    test_start : int
        First day column kept when building the test set (is_train=False).
    is_train : bool
        When False, 2*28 empty future day columns (d_1914..d_1969) are
        appended so the melt produces rows covering the forecast horizon.

    Returns
    -------
    pandas.DataFrame
        One row per (item, day), merged with calendar and price information.
    """
    # Explicit dtypes keep the CSV loads memory-friendly.
    PRICE_DTYPES = {"store_id": "category", "item_id": "category", "wm_yr_wk": "int16","sell_price":"float32" }
    CAL_DTYPES={"event_name_1": "category", "event_name_2": "category", "event_type_1": "category",
            "event_type_2": "category", "weekday": "category", 'wm_yr_wk': 'int16', "wday": "int16",
            "month": "int16", "year": "int16", "snap_CA": "float32", 'snap_TX': 'float32', 'snap_WI': 'float32' }
    start_day = train_start if is_train else test_start
    numcols = [f"d_{day}" for day in range(start_day,1914)]
    catcols = ['id', 'item_id', 'dept_id','store_id', 'cat_id', 'state_id']
    SALE_DTYPES = {numcol:"float32" for numcol in numcols}
    SALE_DTYPES.update({col: "category" for col in catcols if col != "id"})
    # Load the price data
    price_data = pd.read_csv('/content/m5-forecasting-accuracy/sell_prices.csv',dtype=PRICE_DTYPES)
    print(f"price data shape is {price_data.shape}")
    # Load the calendar data
    cal_data = pd.read_csv('/content/m5-forecasting-accuracy/calendar.csv',dtype=CAL_DTYPES)
    print(f"calender data shape is {cal_data.shape}")
    # Load the sales data (only the selected day columns + id columns)
    sale_data = pd.read_csv('/content/m5-forecasting-accuracy/sales_train_validation.csv',dtype=SALE_DTYPES,usecols=catcols+numcols)
    print(f"sale data shape is {sale_data.shape}")
    # Encode every categorical column as small non-negative int16 codes.
    for col, col_dtype in PRICE_DTYPES.items():
        if col_dtype == "category":
            price_data[col] = price_data[col].cat.codes.astype("int16")
            price_data[col] -= price_data[col].min()
    cal_data["date"] = pd.to_datetime(cal_data["date"])
    for col, col_dtype in CAL_DTYPES.items():
        if col_dtype == "category":
            cal_data[col] = cal_data[col].cat.codes.astype("int16")
            cal_data[col] -= cal_data[col].min()
    for col in catcols:
        if col != "id":
            sale_data[col] = sale_data[col].cat.codes.astype("int16")
            sale_data[col] -= sale_data[col].min()
    # The submission horizon has no sales yet: add empty future day columns.
    if not is_train:
        for day in range(1913+1, 1913+ 2*28 +1):
            sale_data[f"d_{day}"] = np.nan
    # Wide -> long: one row per (item, day).
    sale_data = pd.melt(sale_data,
                  id_vars = catcols,
                  value_vars = [col for col in sale_data.columns if col.startswith("d_")],
                  var_name = "d",
                  value_name = "sales")
    print(f'sale data shape is {sale_data.shape} after melt')
    sale_data = sale_data.merge(cal_data, on= "d", copy = False)
    print(f'sale data shape is {sale_data.shape} after merging with cal_data')
    sale_data = sale_data.merge(price_data, on = ["store_id", "item_id", "wm_yr_wk"], copy = False)
    print(f'sale data shape is {sale_data.shape} after merging with price_data')
    return sale_data
# + id="z5E_pVj2M-3Y" colab={"base_uri": "https://localhost:8080/", "height": 221} outputId="7ae5c526-cf88-459e-9878-94ed9020194b"
sale_train_data = create_train_data(train_start=1500,is_train=True)
sale_test_data = create_train_data(is_train=False)
# + id="vaDE5P9enKRL" colab={"base_uri": "https://localhost:8080/", "height": 309} outputId="0453b832-a86f-42b3-f69c-46edad477001"
sale_train_data.tail()
# + id="P1EMWEx-nOCJ" colab={"base_uri": "https://localhost:8080/", "height": 309} outputId="49a4fce7-afc8-4fe6-d842-76932a1532d0"
sale_test_data.head()
# + id="7t1fRtdknRH5" colab={"base_uri": "https://localhost:8080/", "height": 309} outputId="0e8f41fd-e278-4758-d417-53736ea17819"
sale_test_data.tail()
# + id="yUpZQoofm_KC" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="038b4661-ad03-4dae-bad5-fb1889b1391a"
sale_data = pd.concat([sale_train_data,sale_test_data])
print ("Full Sale Data set created.")
# + id="yMsS3K6DNUFt" colab={"base_uri": "https://localhost:8080/", "height": 119} outputId="2b3856b7-2070-481e-a746-e6bdfd241293"
print(sale_data.shape)
sale_data.columns
# + id="ruHKToInNW8Z" colab={"base_uri": "https://localhost:8080/", "height": 309} outputId="3f3bf29a-8760-41f7-a1bf-a12740ac20e9"
sale_data.head()
# + id="P0WxSOIjhjZv" colab={"base_uri": "https://localhost:8080/", "height": 309} outputId="a91e064c-55ce-41c5-a1e2-1c5e11810272"
sale_data.tail()
# + id="jEf3nUC_CBge" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="572910d9-b124-4e84-c50f-90cda1eb3e72"
# More feature-extraction methods can be added here.
# Take sales from some days ago as lag features.
# lags = [7, 28]; the 28-day validation horizon has empty sales, so lag=7 cannot be used there.
lags = [28]
lag_cols = [f"lag_{lag}" for lag in lags]
# For the test set only one day of features is needed, which reduces computation.
# NOTE: feature generation must be identical for the train and test sets.
for lag, lag_col in zip(lags, lag_cols):
    sale_data[lag_col] = sale_data[["id","sales"]].groupby("id")["sales"].shift(lag)
# Rolling mean/std over the lagged series, per item id.
# wins = [7, 28]
# use the rolling windows from 'simple GroupKFold CV'
wins = [7, 15, 30, 90]
for win in wins :
    for lag,lag_col in zip(lags, lag_cols):
        sale_data[f"rmean_{lag}_{win}"] = sale_data[["id", lag_col]].groupby("id")[lag_col].transform(lambda x : x.rolling(win).mean())
        sale_data[f'rstd_{lag}_{win}'] = sale_data[['id', lag_col]].groupby('id')[lag_col].transform(lambda x: x.rolling(win).std())
sale_data = reduce_mem_usage(sale_data)
# Date features: keep existing columns as int16, derive any missing ones
# automatically through the datetime accessor.
date_features = {
    "wday": "weekday",
    "week": "weekofyear",
    "month": "month",
    "quarter": "quarter",
    "year": "year",
    "mday": "day",
    "dayofweek": "dayofweek",
    "dayofyear": "dayofyear"
}
for date_feat_name, date_feat_func in date_features.items():
    if date_feat_name in sale_data.columns:
        sale_data[date_feat_name] = sale_data[date_feat_name].astype("int16")
    else:
        sale_data[date_feat_name] = getattr(sale_data["date"].dt, date_feat_func).astype("int16")
# + id="RHXnvgpKcCbX" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="2d882c23-88e7-448c-9148-ccd0cd48062a"
sale_data.shape
# + id="khuV7uQVojU-" colab={"base_uri": "https://localhost:8080/", "height": 609} outputId="cb2e7b66-d2ce-4c1a-9267-c6c2b065ded6"
sale_data[(sale_data['date'] >= '2016-01-25') & (sale_data['date'] <= '2016-05-22')]
# + id="_d8h3MiscVkQ" colab={"base_uri": "https://localhost:8080/", "height": 153} outputId="b156b335-cee4-4d7c-a8f8-1a960ae6992a"
sale_data.columns
# + id="pvaNuGHBcb8B"
# 清洗数据,选择需要训练的数据
# sale_data.dropna(inplace=True)
cat_feats = ['item_id', 'dept_id','store_id', 'cat_id', 'state_id'] + ["event_name_1", "event_name_2", "event_type_1", "event_type_2"]
useless_cols = ["id", "date", "sales","d", "wm_yr_wk"]
# + id="-SyEHHrLePg2" colab={"base_uri": "https://localhost:8080/", "height": 85} outputId="ad5dfcfa-928c-47bb-dd67-69879e51b9eb"
print('-'*50)
print('\n')
print(f'Training model with {len(sale_data.columns)} features...')
# + id="73Uf2ruaWWwh"
X_train = sale_data[(sale_data['date'] >= '2016-01-25') & (sale_data['date'] <= '2016-04-24')]
y_train = X_train["sales"]
X_test = sale_data[(sale_data['date'] >= '2016-04-25') & (sale_data['date'] <= '2016-05-22')]
# X_test.drop(['sales'], axis=1, inplace=True)
train_cols = X_train.columns[~X_train.columns.isin(useless_cols)]
X_train = X_train[train_cols]
X_test = X_test[train_cols]
# + id="eZ3GcE6vCBgi" colab={"base_uri": "https://localhost:8080/", "height": 224} outputId="c3e6b4d2-4511-4488-dc51-32d41050def1"
X_train.head()
# + id="VLZvsytJCBgk" colab={"base_uri": "https://localhost:8080/", "height": 119} outputId="aedaee62-c96c-4d94-bb0e-105d815fc9c5"
y_train.head()
# + id="683-pr6SQlHY" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="79728cd6-4f4f-4247-cdca-736e4ffdec99"
print(X_train.shape)
print(y_train.shape)
# + id="WGZ91ur5WhI2"
X_train.reset_index(inplace=True, drop=True)
# + id="TZFuzp8mWg_i" colab={"base_uri": "https://localhost:8080/", "height": 224} outputId="3170e4e1-2dbf-4640-f19b-3358fc482d77"
X_train.head()
# + id="zXgiRmK5W1Gq"
y_train.reset_index(inplace=True, drop=True)
# + id="UYwcGVysW1Dl" colab={"base_uri": "https://localhost:8080/", "height": 119} outputId="6ea9e611-d7cc-4755-9c55-0cc0260e03f9"
y_train.head()
# + id="1Y3JZ5RmgyLN" colab={"base_uri": "https://localhost:8080/", "height": 224} outputId="b908f6cd-a90b-4109-f7c5-704d5d249761"
X_test.head()
# + id="88g5uYyLgyFx" colab={"base_uri": "https://localhost:8080/", "height": 224} outputId="775edd58-4c79-4672-d7a8-db4fa0ee45e3"
X_test.tail()
# + id="g_3GQGhQxVAo"
test = sale_data[(sale_data['date'] >= '2016-04-25') & (sale_data['date'] <= '2016-05-22')][['id', 'date', 'sales']]
# + id="BT8ogtTDxrvA" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="d1185b9f-8765-43bc-c543-43ed161d0e16"
test.head()
# + id="BctkULgXxxDt" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="940f9bae-5633-4cb8-a24b-40e5ba76a5dc"
test.tail()
# + id="CPwW-6dD0oHm"
del(sale_train_data,sale_test_data)
# + [markdown] id="hgsx2AGeXI6K"
# ---
#
# ## 3、模型调参
#
# + [markdown] id="T1cpYBSo0Rkx"
# ### LightGBM Manual tuning
# + id="dBqALb189mqE"
# define custom loss function
def custom_asymmetric_train(y_pred, y_true):
    """Asymmetric squared-error objective for LightGBM (``fobj`` protocol).

    Under-forecasting (label above prediction) is penalised 15% more than
    over-forecasting. Returns the gradient and hessian of the loss with
    respect to ``y_pred``; ``y_true`` is the lgb.Dataset carrying labels.
    """
    labels = y_true.get_label()
    residual = (labels - y_pred).astype("float")
    over_forecast = residual < 0
    grad = np.where(over_forecast, -2 * residual, -2 * residual * 1.15)
    hess = np.where(over_forecast, 2, 2 * 1.15)
    return grad, hess
# define custom evaluation metric
def custom_asymmetric_valid(y_pred, y_true):
    """Matching eval metric for the asymmetric objective (``feval`` protocol).

    Returns (name, mean asymmetric squared error, False); the trailing False
    tells LightGBM that lower values are better.
    """
    labels = y_true.get_label()
    residual = (labels - y_pred).astype("float")
    squared = residual ** 2
    loss = np.where(residual < 0, squared, squared * 1.15)
    return "custom_asymmetric_eval", np.mean(loss), False
# + id="SI3zknOfyM3V"
import copy
greater_is_better = False
# params = {
# "objective" : "tweedie",
# "metric" :"rmse",
# "force_row_wise" : True,
# "learning_rate" : 0.075,
# "sub_feature" : 0.8,
# "sub_row" : 0.75,
# "bagging_freq" : 1,
# "lambda_l2" : 0.1,
# "metric": ["rmse"],
# "nthread": 8,
# "tweedie_variance_power":1.1, # 老师发现1.2比较好。
# 'verbosity': 1,
# 'num_iterations' : 1500,
# 'num_leaves': 128,
# "min_data_in_leaf": 104,
# }
# lgb_metric = 'rmse'
lgb_metric = "custom_asymmetric_eval"
default_lgb_params = {}
# default_lgb_params["objective"] = "tweedie"
# default_lgb_params["tweedie_variance_power"] = 1.1
default_lgb_params["learning_rate"] = 0.1 #0.05
default_lgb_params["metric"] = lgb_metric
default_lgb_params["bagging_freq"] = 1
# default_lgb_params['colsample_bytree'] = 0.85
# default_lgb_params['colsample_bynode'] = 0.85
# default_lgb_params['min_data_per_leaf'] = 25
default_lgb_params["seed"] = 1234
params_lgb_space = {}
params_lgb_space['feature_fraction'] = [0.1, 0.3, 0.5, 0.7, 0.9]
params_lgb_space['num_leaves'] = [3, 7, 15, 31, 63, 127]
params_lgb_space['max_depth'] = [3, 7, 10, 15, 31, -1]
params_lgb_space['min_gain_to_split'] = [0, 0.1, 0.3, 1, 1.5, 2, 3]
params_lgb_space['bagging_fraction'] = [0.2, 0.4, 0.6, 0.8, 1]
params_lgb_space['min_sum_hessian_in_leaf'] = [0, 0.0001, 0.001, 0.1, 1, 3, 10]
params_lgb_space['lambda_l2'] = [0, 0.01, 0.1, 0.5, 1, 10]
params_lgb_space['lambda_l1'] = [0, 0.01, 0.1, 0.5, 1, 10]
# + id="oIATE-B9yRoW"
kfolds = 5
kf = model_selection.GroupKFold(n_splits=kfolds)
group = X_train['week'].astype(str) + '_' + X_train['year'].astype(str)
kf_ids = list(kf.split(X_train, y_train, group))
# + id="U-qdi-C4zJ9p" colab={"base_uri": "https://localhost:8080/", "height": 187} outputId="37657e18-7ca9-451a-9a08-25a8f9712e99"
kf_ids
# + id="vGjfOktqQ9S9"
from sklearn.model_selection import KFold,StratifiedKFold
kfolds = 5
random_state = 42
skf = StratifiedKFold(n_splits=kfolds, shuffle=True, random_state=random_state)
# + id="ytde836PXHN3" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="56aae09d-1ca5-414d-8658-c714b52e7b3b"
# folds = kf.split(X_train, y_train, group),
# folds=skf.split(X_train, y_train),
best_lgb_params = copy.copy(default_lgb_params)
for p in params_lgb_space:
print ("\n Tuning parameter %s in %s" % (p, params_lgb_space[p]))
params = best_lgb_params
scores = []
for v in params_lgb_space[p]:
print ('\n %s: %s' % (p, v), end="\n")
params[p] = v
lgb_cv = lgb.cv(params,
lgb.Dataset(X_train,
label=y_train,
categorical_feature=cat_feats
),
num_boost_round=100000,
fobj = custom_asymmetric_train,
feval = custom_asymmetric_valid,
nfold=kfolds,
stratified=False,
early_stopping_rounds=50,
verbose_eval=500)
# print(lgb_cv)
if greater_is_better:
best_lgb_score = min(lgb_cv['%s-mean' % (lgb_metric)])
else:
best_lgb_score = min(lgb_cv['%s-mean' % (lgb_metric)])
best_lgb_iteration = len(lgb_cv['%s-mean' % (lgb_metric)])
print (', best_score: %f, best_iteration: %d' % (best_lgb_score, best_lgb_iteration))
scores.append([v, best_lgb_score])
# best param value in the space
best_param_value = sorted(scores, key=lambda x:x[1],reverse=greater_is_better)[0][0]
best_param_score = sorted(scores, key=lambda x:x[1],reverse=greater_is_better)[0][1]
best_lgb_params[p] = best_param_value
print("Best %s is %s with a score of %f" %(p, best_param_value, best_param_score))
print('\n Best manually tuned parameters:', best_lgb_params)
# + [markdown] id="bPBGpLmuJpKl"
# Best feature_fraction is 0.9 with a score of 4.608439
#
# Best num_leaves is 63 with a score of 4.593394
#
# Best max_depth is 31 with a score of 4.593394
#
# Best min_gain_to_split is 0.1 with a score of 4.609980
#
# Best bagging_fraction is 1 with a score of 4.611675
#
# Best min_sum_hessian_in_leaf is 0 with a score of 4.611248
#
# Best lambda_l2 is 10 with a score of 4.605351
# + id="ccFXb9V4JpDV"
best_lgb_params = {'learning_rate': 0.05,
'metric': 'custom_asymmetric_eval',
'bagging_freq': 1,
'colsample_bytree': 0.85,
'colsample_bynode': 0.85,
'min_data_per_leaf': 25,
'seed': 1234,
'lambda_l1': 0.01,
'feature_fraction': 0.9,
'num_leaves': 63,
'max_depth': 31,
'min_gain_to_split': 0.1,
'bagging_fraction': 1,
'min_sum_hessian_in_leaf': 0,
'lambda_l2': 10
}
# + id="Mw00v637gMZQ"
# groupkfold cv
# Best manually tuned parameters: {'learning_rate': 0.05, 'metric': 'custom_asymmetric_eval',
#'bagging_freq': 1, 'colsample_bytree': 0.85, 'colsample_bynode': 0.85, 'min_data_per_leaf': 25,
# 'seed': 1234, 'lambda_l1': 0.01,
#'feature_fraction': 0.9, 'num_leaves': 63, 'max_depth': 31, 'min_gain_to_split': 0.1,
#'bagging_fraction': 1, 'min_sum_hessian_in_leaf': 0, 'lambda_l2': 10}
# + id="lEUjjV4Jdo8O" colab={"base_uri": "https://localhost:8080/", "height": 71} outputId="ac364525-c6e4-4b99-e02a-f80115f3b95a"
print ('\n Best manually tuned parameters:', best_lgb_params)
# + [markdown] id="gtXDBhc1dstV"
# ### Automated tuning with Bayesian Optimization
# + id="_u3cEgrWdqPy" colab={"base_uri": "https://localhost:8080/", "height": 258} outputId="126d9c88-5599-46b8-c3d9-8f7db452f646"
# !pip install bayesian-optimization
# + id="KwqD1JyRdqMe" colab={"base_uri": "https://localhost:8080/", "height": 391} outputId="9eb201d8-1519-46ad-bdd8-4cef862bb64e"
from bayes_opt import BayesianOptimization
def lgb_evaluate(
        num_leaves,
        max_depth,
        min_sum_hessian_in_leaf,
        min_gain_to_split,
        feature_fraction,
        bagging_fraction,
        lambda_l2,
        lambda_l1
):
    """Objective for BayesianOptimization: grouped LightGBM CV on a candidate.

    Runs lgb.cv with the proposed hyper-parameters and returns the negated
    best CV score, since BayesianOptimization always maximises its target.

    NOTE(review): relies on module-level globals — X_train, y_train, kf,
    group, lgb_metric, greater_is_better and the custom objective/metric.
    """
    params = dict()
    # Fixed settings shared by every trial:
    params["learning_rate"] = 0.05
    params["metric"] = lgb_metric
    params["bagging_freq"] = 1
    params['colsample_bytree'] = 0.85
    params['colsample_bynode'] = 0.85
    params['min_data_per_leaf'] = 25
    params["seed"] = 1234
    # Hyper-parameters proposed by the optimizer (tree sizes must be ints):
    params['num_leaves'] = int(num_leaves)
    params['max_depth'] = int(max_depth)
    params['min_sum_hessian_in_leaf'] = min_sum_hessian_in_leaf
    params['min_gain_to_split'] = min_gain_to_split
    params['feature_fraction'] = feature_fraction
    params['bagging_fraction'] = bagging_fraction
    params['lambda_l2'] = lambda_l2
    params['lambda_l1'] = lambda_l1
    lgb_cv = lgb.cv(params,
                    lgb.Dataset(X_train,
                                label=y_train
                                ),
                    num_boost_round=100000,
                    fobj = custom_asymmetric_train,
                    feval = custom_asymmetric_valid,
                    folds = kf.split(X_train, y_train, group),
                    nfold=5,
                    stratified=False,
                    early_stopping_rounds=50,
                    verbose_eval=False)
    # Bug fix: the original used min() in both branches; for a
    # greater-is-better metric the best CV value is the maximum.
    if greater_is_better:
        best_lgb_score = max(lgb_cv['%s-mean' % (lgb_metric)])
    else:
        best_lgb_score = min(lgb_cv['%s-mean' % (lgb_metric)])
    best_lgb_iteration = len(lgb_cv['%s-mean' % (lgb_metric)])
    print (', best_score: %f, best_iteration: %d' % (best_lgb_score, best_lgb_iteration))
    # Negate so that BayesianOptimization's maximisation minimises the loss.
    return -best_lgb_score
lgb_BO = BayesianOptimization(lgb_evaluate,
{
'num_leaves': (10, 20),
'max_depth': (2, 20),
'min_sum_hessian_in_leaf': (5, 15),
'min_gain_to_split': (0,0),
'feature_fraction': (0.2, 0.4),
'bagging_fraction': (0.8,1),
'lambda_l2': (5, 15),
'lambda_l1': (0.1, 5)
}
)
## I use 5, 20 to save time but you may want to change it to larger numbers,e.g. 8, 30
lgb_BO.maximize(init_points=5, n_iter=20)
# + id="BWVfJxn0dqK_" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="c7a0cbc9-cfd0-450a-b169-e5414f0653c3"
lgb_BO_scores = pd.DataFrame([p['params'] for p in lgb_BO.res])
lgb_BO_scores['score'] = [p['target'] for p in lgb_BO.res]
lgb_BO_scores = lgb_BO_scores.sort_values(by='score',ascending=False)
# + id="Hf08tWtac9Zt" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="4ec6f1bd-8674-451b-c2f5-cb305510425a"
lgb_BO_scores.head()
# + id="FIiYQ1F-dqH2"
lgb_BO_scores.to_csv("/content/drive/My Drive/colab/kaggle_M5/dataapplication/output/lgb_BO_scores.csv", index=False)
# + id="LgVy16flKzDj"
lgb_BO_scores = pd.read_csv("/content/drive/My Drive/colab/kaggle_M5/dataapplication/output/lgb_BO_scores.csv")
# + id="lSXGiV_ndqFW" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="b540bcb1-3caf-4642-def9-37d86d2b4729"
lgb_best_params = lgb_BO_scores.T.to_dict().get(lgb_BO_scores.index.values[0])
lgb_best_params['objective'] = "tweedie"
lgb_best_params["tweedie_variance_power"] = 1.1
lgb_best_params['learning_rate'] = 0.01 ## from 0.05 to 0.01
lgb_best_params['seed'] = 1234
lgb_best_params['metric'] = lgb_metric
lgb_best_params['bagging_freq'] = 1
lgb_best_params['num_leaves'] = int(lgb_best_params['num_leaves'])
lgb_best_params['max_depth'] = int(lgb_best_params['max_depth'])
print(lgb_best_params)
lgb_cv = lgb.cv(lgb_best_params,
lgb.Dataset(X_train,
label=y_train
),
num_boost_round=100000,
nfold=5,
stratified=True,
early_stopping_rounds=50,
verbose_eval=100)
if greater_is_better:
best_lgb_score = min(lgb_cv['%s-mean' % (lgb_metric)])
else:
best_lgb_score = min(lgb_cv['%s-mean' % (lgb_metric)])
best_lgb_iteration = len(lgb_cv['%s-mean' % (lgb_metric)])
print (', best_score: %f, best_iteration: %d' % (best_lgb_score, best_lgb_iteration))
# + id="Uw9xfTvIdqBw" colab={"base_uri": "https://localhost:8080/", "height": 272} outputId="5aa1f130-197c-4946-b3b2-5603cb285f70"
lgb_best_params
# + id="2MYwgHtmLTU-"
lgb_best_params = {'bagging_fraction': 0.9233320946989939,
'bagging_freq': 1,
'feature_fraction': 0.3788606347629591,
'lambda_l1': 4.888559071941389,
'lambda_l2': 5.073224584824621,
'learning_rate': 0.01,
'max_depth': 9,
'metric': 'rmse',
'min_gain_to_split': 0.0,
'min_sum_hessian_in_leaf': 11.07704576482789,
'num_leaves': 18,
'objective': 'tweedie',
'score': -1.9418723740899249,
'seed': 1234,
'tweedie_variance_power': 1.1}
best_lgb_iteration = 14125
# + [markdown] id="ONU-auQr0LvE"
# ### single model submission
#
# Now let's retrain the model with the learning rate 0.01 as well as the tuned iterations(num_boost_round) and generate the submission.
# + id="cUxzV2jldp97"
model = lgb.train(lgb_best_params,
lgb.Dataset(X_train,
label=y_train
),
num_boost_round=best_lgb_iteration)
# + id="CPGF_bRGdp5C" colab={"base_uri": "https://localhost:8080/", "height": 119} outputId="39ac1271-e2f8-4335-a776-68f98381f9a2"
test_data = create_train_data(is_train=False)
# + id="5GJl-SBc91mr"
date = datetime(2016,4, 25)
day = date + timedelta(days=0)
test_data = test_data[test_data.date >= day - timedelta(days=57)]
# + id="Jl8xDxW74O9d"
# test_data.head()
# + id="X4SeSOiL_7VK"
# test_data.tail()
# + id="TqAOb5F44O6t"
test_data = create_feature(test_data)
# + id="MBNycGdK-sdH" colab={"base_uri": "https://localhost:8080/", "height": 309} outputId="c47565f5-0482-4022-dd54-de4a42388ee6"
test_data.head()
# + id="w7rQgwmJ_gP4" colab={"base_uri": "https://localhost:8080/", "height": 309} outputId="8ca9a2d7-401c-4868-9e9c-f1ea0f9ebed6"
test_data.tail()
# + id="NqcW3Qx9_SRT" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="d14de116-e5b4-4817-fd6a-e4fdd14f7536"
test_data.shape
# + id="v9-PakFR4O4a"
X_test = test_data[(test_data['date'] >= '2016-04-25') & (test_data['date'] <= '2016-05-22')]
X_test = X_test[train_cols]
# + id="3UbGck6V9I2t" colab={"base_uri": "https://localhost:8080/", "height": 224} outputId="397bd349-d434-4bd9-a9ac-38cadb3ec437"
X_test.head()
# + id="LJLfYvw69Iz7" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="050b74f5-ce3e-4300-fdde-ac5da2241de0"
X_test.shape
# + id="eMUWvqa09Ixf"
preds = model.predict(X_test)
# + id="COtUJ7R9SHFj"
test = test_data[(test_data['date'] >= '2016-04-25') & (test_data['date'] <= '2016-05-22')]
test = test[['id', 'date', 'sales']]
test['sales'] = preds
# + id="EVbqf_i8SrOV"
test.to_csv('/content/drive/My Drive/colab/kaggle_M5/dataapplication/output/test.csv', index=False)
# + id="7OT32k5ySrCw" colab={"base_uri": "https://localhost:8080/", "height": 258} outputId="6604ed56-60f8-4ce3-8e09-d565a43e6427"
predictions = pd.pivot(test, index = 'id', columns = 'date', values = 'sales').reset_index()
# + id="GdQHbTOJYd3n" colab={"base_uri": "https://localhost:8080/", "height": 379} outputId="25705d65-00f3-43ea-84b0-f41a8679eeac"
predictions.head(10)
# + id="ZP20OWeySq_m" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="a22e21e2-eaca-445c-9f14-db3b94a91d94"
predictions.shape
# + id="7Dsr5LUdTHs6"
predictions.columns = ['id'] + ['F' + str(i + 1) for i in range(28)]
# read submission
submission = pd.read_csv('/content/m5-forecasting-accuracy/sample_submission.csv')
evaluation_rows = [row for row in submission['id'] if 'evaluation' in row]
evaluation = submission[submission['id'].isin(evaluation_rows)]
validation = submission[['id']].merge(predictions, on = 'id')
final = pd.concat([validation, evaluation])
final.to_csv('/content/drive/My Drive/colab/kaggle_M5/dataapplication/output/submission_finetune_random5fold.csv', index = False)
# + [markdown] id="hUHo0Q6lhsWU"
# ## 4、 Stacking
#
#
# + [markdown] id="sAwM5rbj0WRF"
# ### Level 1: 6 LightGBM models
#
# We will create out-out-fold predictions for 6 LightGBM models (5 automatically tuned models + 1 manually tuned model) and
# + id="UWnSQzwPtd7Z"
from sklearn.metrics import mean_squared_error
# + id="UrUP6RC6oyFo"
kf = model_selection.GroupKFold(n_splits=5)
group = X_train['week'].astype(str) + '_' + X_train['year'].astype(str)
kf_ids = list(kf.split(X_train, y_train, group))
# + id="fbzzrwU_qTrV" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="b0109326-b05c-41b4-8d6e-8a6866081d10"
kf_ids[0]
# + id="4vunMu3CjIyk"
def lgb_stack(params_list, X_train, y_train, X_test,
              kfolds, num_boost_round=10000000, early_stopping_rounds=0, fold='groupkfold', shuffle=True, randome_state=42):
    """Train each LightGBM parameter set with K-fold CV to build stacking features.

    For every parameter dict in ``params_list`` this produces out-of-fold
    predictions on the training set (one column per model) and the
    fold-averaged predictions on ``X_test``.

    Parameters
    ----------
    params_list : list of dict
        LightGBM parameter dictionaries, one per level-1 model.
    X_train, y_train : pandas.DataFrame / pandas.Series
        Training features and target; X_train must contain 'week' and 'year'
        columns when fold='groupkfold'.
    X_test : pandas.DataFrame
        Test features.
    kfolds : int
        Number of CV folds.
    num_boost_round : int
        Boosting rounds (upper bound when early stopping is enabled).
    early_stopping_rounds : int
        0 disables early stopping; otherwise passed to lgb.train.
    fold : str
        'stratified', 'groupkfold' (default) or anything else for plain KFold.
    shuffle : bool
        Shuffle flag for StratifiedKFold.
    randome_state : int
        Random seed (parameter name kept as-is for caller compatibility).

    Returns
    -------
    tuple of numpy.ndarray
        (train_blend_x, test_blend_x, blend_scores)
    """
    # Materialise numpy arrays BEFORE choosing the fold strategy.
    # Bug fix: the original referenced train_x / train_y inside the
    # 'stratified' and plain-KFold branches before defining them,
    # raising NameError for those fold types.
    train_x = X_train.values
    test_x = X_test.values
    train_y = y_train.values
    if fold == 'stratified':
        kf = model_selection.StratifiedKFold(
            n_splits=kfolds, random_state=randome_state, shuffle=shuffle)
        kf_ids = list(kf.split(train_x, train_y))
    elif fold == 'groupkfold':
        # Group by (week, year) so whole weeks never leak across folds.
        kf = model_selection.GroupKFold(n_splits=kfolds)
        group = X_train['week'].astype(str) + '_' + X_train['year'].astype(str)
        kf_ids = list(kf.split(X_train, y_train, group))
    else:
        kf = model_selection.KFold(n_splits=kfolds, random_state=randome_state)
        kf_ids = list(kf.split(train_x, train_y))
    train_blend_x = np.zeros((train_x.shape[0], len(params_list)))
    test_blend_x = np.zeros((test_x.shape[0], len(params_list)))
    blend_scores = np.zeros((kfolds, len(params_list)))
    print("Start blending.")
    for j, params in enumerate(params_list):
        print("Blending model", j+1, params)
        test_blend_x_j = np.zeros((test_x.shape[0]))
        for i, (train_ids, val_ids) in enumerate(kf_ids):
            start = time.time()
            print("Model %d fold %d" % (j+1, i+1))
            train_x_fold = train_x[train_ids]
            train_y_fold = train_y[train_ids]
            val_x_fold = train_x[val_ids]
            val_y_fold = train_y[val_ids]
            print(i, params)
            if early_stopping_rounds == 0:
                # Fixed number of rounds; no validation set needed.
                model = lgb.train(params,
                                  lgb.Dataset(train_x_fold, train_y_fold),
                                  num_boost_round=num_boost_round,
                                  verbose_eval=500
                                  )
                val_y_predict_fold = model.predict(val_x_fold)
                score = np.sqrt(mean_squared_error(val_y_fold, val_y_predict_fold))
                print("Score for Model %d fold %d: %f " % (j+1,i+1,score))
                blend_scores[i, j] = score
                train_blend_x[val_ids, j] = val_y_predict_fold
                test_blend_x_j = test_blend_x_j + model.predict(test_x)
                print("Model %d fold %d finished in %d seconds." % (j+1,i+1, time.time()-start))
            else:
                # Early stopping against the held-out fold; predictions use
                # the best iteration found.
                model = lgb.train(params,
                                  lgb.Dataset(train_x_fold, train_y_fold),
                                  valid_sets=[lgb.Dataset(val_x_fold,
                                                          val_y_fold,
                                                          )],
                                  valid_names=['valid'],
                                  num_boost_round=num_boost_round,
                                  early_stopping_rounds=early_stopping_rounds,
                                  verbose_eval=500
                                  )
                best_iteration = model.best_iteration
                # NOTE(review): assumes the eval metric is 'rmse'; adjust the
                # key if a custom metric name is configured.
                print(model.best_score['valid']['rmse'])
                val_y_predict_fold = model.predict(val_x_fold, num_iteration=best_iteration)
                score = np.sqrt(mean_squared_error(val_y_fold, val_y_predict_fold))
                print("Score for Model %d fold %d: %f " % (j+1,i+1,score))
                blend_scores[i, j] = score
                train_blend_x[val_ids, j] = val_y_predict_fold
                test_blend_x_j = test_blend_x_j + \
                    model.predict(test_x, num_iteration=best_iteration)
                print("Model %d fold %d finished in %d seconds." % (j+1,i+1, time.time()-start))
                print(time.time()-start)
        # Average the fold predictions for this model's test column.
        test_blend_x[:,j] = test_blend_x_j/kfolds
        print("Score for model %d is %f" % (j+1, np.mean(blend_scores[:, j])))
    return train_blend_x, test_blend_x, blend_scores
# + id="bJIh3qUdKho-" colab={"base_uri": "https://localhost:8080/", "height": 54} outputId="77abe3a3-e6d1-433b-f760-7060cc2f88a3"
lgb_params_list = []
lgb_metric = 'rmse'
for i in range(5):
lgb_param = lgb_BO_scores.iloc[i].to_dict()
lgb_param.pop('score')
lgb_param['max_depth'] = int(lgb_param['max_depth'])
lgb_param['num_leaves'] = int(lgb_param['num_leaves'])
lgb_param["objective"] = "tweedie"
lgb_param["tweedie_variance_power"] = 1.1
lgb_param['learning_rate'] = 0.01
lgb_param['metric'] = lgb_metric
lgb_param['seed'] = 42
lgb_params_list.append(lgb_param)
## Best manual-tuned parameters
lgb_params_list.append(best_lgb_params)
print(lgb_params_list)
# + id="jDNqzxABKhh2" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="5c64503c-101e-4d19-ceb5-4e86664b7d30"
import pickle
train_blend_x_lgb_01, test_blend_x_lgb_01, blend_scores_lgb_01 = lgb_stack(lgb_params_list,
X_train,
y_train,
X_test,
early_stopping_rounds=200,
kfolds=5)
pickle.dump(train_blend_x_lgb_01, open(
'/content/drive/My Drive/colab/kaggle_M5/dataapplication/output/train_blend_x_lgb_01.pkl', 'wb'))
pickle.dump(test_blend_x_lgb_01, open(
'/content/drive/My Drive/colab/kaggle_M5/dataapplication/output/test_blend_x_lgb_01.pkl', 'wb'))
# + id="I2W4yrWKwHFV" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="f3cb493a-f2a4-401e-d3e4-99e43061bf82"
np.sqrt(mean_squared_error(y_train,train_blend_x_lgb_01.mean(axis=1)))
# + id="Bw88lnqnwG3C" colab={"base_uri": "https://localhost:8080/", "height": 119} outputId="ade97a1b-218f-4511-bb39-4128efdee128"
[np.sqrt(mean_squared_error(y_train,train_blend_x_lgb_01[:,n])) for n in range(6)]
# + id="7ieG3tnyKha9" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="c1c99c8f-34ff-4e6a-ad2b-01f850f741fe"
train_blend_x_lgb_01.shape
# + [markdown] id="2pao2zF1zvjn"
# ### Level 2 stacking I : ridge regression
# We will be using the predictions (Level 1 features) to train another model as Level 2 which will then be used to make the actual predictions.
# + id="sx4MMw5ozvRP"
# Stacke level 1 features
train_blend_x = train_blend_x_lgb_01
test_blend_x = test_blend_x_lgb_01
# + id="w6CIeAF1zvPO"
from sklearn.metrics import make_scorer
def my_rmse(y_true, y_preds):
return np.sqrt(mean_squared_error(y_true, y_preds))
my_rmse_score = make_scorer(my_rmse, greater_is_better=False)
# + id="8M4ORaEezvMR"
from sklearn.linear_model import Ridge,ElasticNet, SGDRegressor
from sklearn.neural_network import MLPRegressor
from sklearn.model_selection import GridSearchCV
def search_model(train_x, train_y, est, param_grid, n_jobs, cv, refit=False):
##Grid Search for the best model
model = GridSearchCV(estimator = est,
param_grid = param_grid,
scoring = my_rmse_score,
verbose = 10,
n_jobs = n_jobs,
iid = True,
refit = refit,
cv = cv)
# Fit Grid Search Model
model.fit(train_x, train_y)
print("Best score: %0.3f" % model.best_score_)
print("Best parameters set:", model.best_params_)
print("Scores:", model.cv_results_)
return model
# + id="g3R7E5ruzvJD" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="afd48005-a330-45eb-8a5c-6b288cbfa8df"
param_grid = {
"alpha":[0.001,0.01,0.1,1,10,30,100]
}
model = search_model(train_blend_x,
y_train,
Ridge(),
param_grid,
n_jobs=1,
cv=kf.split(X_train, y_train, group),
refit=True)
print ("best alpha:", model.best_params_)
preds_ridge = model.predict(test_blend_x)
# + id="QKlNHcQTzvHK" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="27ee29a5-635c-4ad0-c873-5b6d8a4b5030"
preds_ridge
# + id="WQCB5qyazvD3"
ridge_test = test_data[(test_data['date'] >= '2016-04-25') & (test_data['date'] <= '2016-05-22')]
ridge_test = ridge_test[['id', 'date', 'sales']]
ridge_test ['sales'] = preds_ridge
# + id="-_TQhvxezvBy" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="0c639d8f-067e-4683-93c8-a5371a18b88e"
ridge_test.head()
# + id="ygBzP5C_zu-r"
ridge_predictions = pd.pivot(ridge_test, index = 'id', columns = 'date', values = 'sales').reset_index()
# + id="MbJv5-315KQO" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="bacdbd63-e578-4a93-eb68-6c4ee43ad38b"
ridge_predictions.shape
# + id="C1LTR4Dr5KOk"
ridge_predictions.columns = ['id'] + ['F' + str(i + 1) for i in range(28)]
# read submission
submission = pd.read_csv('/content/m5-forecasting-accuracy/sample_submission.csv')
evaluation_rows = [row for row in submission['id'] if 'evaluation' in row]
evaluation = submission[submission['id'].isin(evaluation_rows)]
validation = submission[['id']].merge(ridge_predictions, on = 'id')
ridge_final = pd.concat([validation, evaluation])
ridge_final.to_csv('/content/drive/My Drive/colab/kaggle_M5/dataapplication/output/sub_stacking_ridge.csv', index = False)
# + [markdown] id="GdUM18sS7ZUb"
# ### Level 2 stacking II: LightGBM regression
# Stacking can also be done by with both level 1 predictions and original features.
# + id="JgTDV8QI-4MX"
# len(X_train.values[0])
# + id="vYU0bTJf--bZ"
# train_blend_x_lgb_01
# + id="tJJDQr_ZHXh-"
# sparse.coo_matrix(train_blend_x_lgb_01)
# + id="kRKWGDsICQnq"
# sparse.hstack([X_train.values, sparse.coo_matrix(train_blend_x_lgb_01)]).tocsr()
# + id="4q4Brc_j5KL8" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="90ad507e-b7e3-4b91-9c40-539d2770b35e"
train_blend_x = sparse.hstack([X_train.values, sparse.coo_matrix(train_blend_x_lgb_01)]).tocsr()
test_blend_x = sparse.hstack([X_test.values, sparse.coo_matrix(test_blend_x_lgb_01)]).tocsr()
print (train_blend_x.shape, train_blend_x.shape)
# + id="OYR0NO7C5KId" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="69542cad-1790-47a2-b1af-0ac0ed384c56"
lgb_params = {'learning_rate': 0.05, 'metric': 'rmse',
'bagging_freq': 1, 'seed': 1234, 'objective': 'regression',
'num_leaves': 7, 'verbose': 1,
'max_depth': 5, 'min_gain_to_split': 0,
'feature_fraction': 0.1,
'bagging_fraction': 0.9,
'min_sum_hessian_in_leaf': 1,
'lambda_l2': 0, 'lambda_l1': 0
}
lgb_cv = lgb.cv(lgb_params,
lgb.Dataset(train_blend_x,
label=y_train
),
num_boost_round=100000,
nfold=5,
folds=kf.split(X_train, y_train, group),
early_stopping_rounds=50,
verbose_eval=500)
best_lgb_score = min(lgb_cv['rmse-mean'])
best_lgb_iteration = len(lgb_cv['rmse-mean'])
print(', best_score: %f, best_iteration: %d' %
(best_lgb_score, best_lgb_iteration))
# + id="5HXVzZN95KGY"
model = lgb.train(lgb_params,
lgb.Dataset(train_blend_x,
label=y_train
),
num_boost_round=best_lgb_iteration)
preds_lgb = model.predict(test_blend_x)
# + id="LSXWbT3U5KDa"
lgb_test = test_data[(test_data['date'] >= '2016-04-25') & (test_data['date'] <= '2016-05-22')]
lgb_test = lgb_test[['id', 'date', 'sales']]
lgb_test ['sales'] = preds_lgb
# + id="s2moX6qbI5vK" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="5e544596-cd2e-4e64-bb6c-703de5d7405b"
lgb_test.head()
# + id="QeTZoYNKJGM9"
lgb_predictions = pd.pivot(lgb_test, index = 'id', columns = 'date', values = 'sales').reset_index()
# + id="M3kjzhsPJGLU" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="6abff9ce-8857-4d44-b394-a2a0c369356c"
lgb_predictions.shape
# + id="ahbd42ocJGIY"
lgb_predictions.columns = ['id'] + ['F' + str(i + 1) for i in range(28)]
# read submission
submission = pd.read_csv('/content/m5-forecasting-accuracy/sample_submission.csv')
evaluation_rows = [row for row in submission['id'] if 'evaluation' in row]
evaluation = submission[submission['id'].isin(evaluation_rows)]
validation = submission[['id']].merge(lgb_predictions, on = 'id')
lgb_final = pd.concat([validation, evaluation])
lgb_final.to_csv('/content/drive/My Drive/colab/kaggle_M5/dataapplication/output/sub_stacking_lgb.csv', index = False)
# + [markdown] id="FRXkFQduJghD"
# ### Averaging level 2 predictions
# We can also create a quick ensemble by averaging level 2 predictions:
# + id="4A1prVCcJGGN"
preds_l2 = preds_ridge*0.5 + preds_lgb*0.5
# + id="Ob3iZjvcJGEk"
l2_test = test_data[(test_data['date'] >= '2016-04-25') & (test_data['date'] <= '2016-05-22')]
l2_test = l2_test[['id', 'date', 'sales']]
l2_test ['sales'] = preds_l2
# + id="4uEALdaJJGBr" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="3fe2c346-9518-455a-9603-6a4c2ec4d61a"
l2_test.head()
# + id="-uy_vtuEJF9b"
l2_predictions = pd.pivot(l2_test, index = 'id', columns = 'date', values = 'sales').reset_index()
# + id="z32Y3NGEI5tF"
l2_predictions.columns = ['id'] + ['F' + str(i + 1) for i in range(28)]
# read submission
submission = pd.read_csv('/content/m5-forecasting-accuracy/sample_submission.csv')
evaluation_rows = [row for row in submission['id'] if 'evaluation' in row]
evaluation = submission[submission['id'].isin(evaluation_rows)]
validation = submission[['id']].merge(l2_predictions, on = 'id')
l2_final = pd.concat([validation, evaluation])
l2_final.to_csv('/content/drive/My Drive/colab/kaggle_M5/dataapplication/output/sub_stacking_l2.csv', index = False)
| M5_baseline_groupKfold_custom_lossfunction_20200525.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Berkeley Institute for Data Science
#
# ## Jupyter Notebook Tips and Tricks
#
# By: [<NAME>](http://jonathanwhitmore.com/) -- Silicon Valley Data Science
#
# - Email: [<EMAIL>](mailto:<EMAIL>)
# - Twitter: [@jbwhitmore](https://twitter.com/jbwhitmore)
# - LinkedIn: [jonathanbwhitmore](https://www.linkedin.com/in/jonathanbwhitmore)
#
# All notebooks available: https://github.com/jbwhit/berkeley-jupyter-notebook
# ## Table of Contents
#
# 1. Overview
# 1. Tips and tricks
# 1. Visualization and code organization
# 1. Some basics
# 1. More basics
# 1. interactive splines
# 1. Pandas Plotting
# 1. SQL Example
# 1. R stuff
# 1. Extras
# ### Themes
#
# - Workflow matters
# - Make repeated actions as automatic as possible
# - Adhere to best practices as best you can (unless you have a really good reason not to).
# 1. You will have to collaborate on teams -- code quality matters.
# 1. Follow PEP 8 style conventions
# - Learn your tools
# - Don't try to take notes, just let this wash over you.
# - Not knowing what's possible is a cognitive blindspot.
# - These Notebooks will be available to you.
| deliver/00-Overview.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + colab={} colab_type="code" id="cSUXKo8XjX8U"
import cv2
import numpy as np
import os
import csv
import matplotlib.pyplot as plt
def extract_bv(img):
    """Extract a binary blood-vessel mask from a color fundus image.

    Pipeline: CLAHE on the green channel, alternating sequential
    open/close filtering at growing scales to estimate the background,
    background subtraction to enhance vessels, smoothing/morphological
    clean-up, then iterative mean-split (ISODATA-style) thresholding.

    Parameters:
        img: BGR fundus image (as loaded by cv2.imread).
    Returns:
        8-bit single-channel binary mask (values 0/255).
    """
    # The green channel typically carries the best vessel contrast.
    b, g, r = cv2.split(img)
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    contrast_enhanced_green_fundus = clahe.apply(g)
    # applying alternate sequential filtering (3 times closing opening)
    r1 = cv2.morphologyEx(contrast_enhanced_green_fundus, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5)), iterations=1)
    R1 = cv2.morphologyEx(r1, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5)), iterations=1)
    r2 = cv2.morphologyEx(R1, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11)), iterations=1)
    R2 = cv2.morphologyEx(r2, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11)), iterations=1)
    r3 = cv2.morphologyEx(R2, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (23, 23)), iterations=1)
    R3 = cv2.morphologyEx(r3, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (23, 23)), iterations=1)
    # Background minus enhanced image leaves the thin vessel structures.
    f4 = cv2.subtract(R3, contrast_enhanced_green_fundus)
    f5 = clahe.apply(f4)
    # Smooth and morphologically clean the vessel response.
    kernel = np.ones((3, 3), np.uint8)
    dilation = cv2.dilate(f5, kernel, iterations=1)
    blur = cv2.GaussianBlur(dilation, (3, 3), 0)
    kernels = np.ones((5, 5), np.uint8)
    opening = cv2.morphologyEx(blur, cv2.MORPH_OPEN, kernel)
    blur1 = cv2.GaussianBlur(opening, (5, 5), 0)
    full = cv2.morphologyEx(blur1, cv2.MORPH_OPEN, kernel)
    erosion_img = cv2.erode(full, kernel, iterations=1)
    dilation_img = cv2.dilate(erosion_img, kernel, iterations=1)
    erosion_img1 = cv2.erode(dilation_img, kernels, iterations=1)
    closing = cv2.morphologyEx(erosion_img1, cv2.MORPH_CLOSE, kernel)
    # BUGFIX (dead code removed): a fixed-threshold pass computed
    # `thresh2`/`final` here but their results were never read.
    g = closing
    # Iterative mean-split threshold: start at half the max intensity and
    # converge to the midpoint between the two class means.
    maxg = np.max(g)
    t = maxg / 2
    epst = 0.01
    while 1:
        # NOTE(review): assumes both sides of the split are non-empty; an
        # all-uniform image would make .mean() return NaN — confirm inputs.
        ml = g[g <= t].mean()
        mh = g[g > t].mean()
        t_new = (ml + mh) / 2
        if abs(t - t_new) < epst:
            break
        t = t_new
    thresh = int(t)
    ret, hee = cv2.threshold(g, thresh, 255, cv2.THRESH_BINARY)
    # Final open/close pass to drop specks and close small gaps.
    kernel1 = np.ones((5, 5), np.uint8)
    kernel2 = np.ones((5, 5), np.uint8)
    opening = cv2.morphologyEx(hee, cv2.MORPH_OPEN, kernel2)
    hee = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel1)
    return hee
def ShowResizedIm(img, windowname, scale):
    """Display an image in a resizable OpenCV window shrunk by `scale`.

    Parameters:
        img: image to display
        windowname: title of the display window
        scale: divisor applied to both display dimensions
                (e.g. scale=2 shows the image at half size)
    """
    height, width = img.shape[:2]
    # WINDOW_NORMAL makes the window user-resizable.
    cv2.namedWindow(windowname, cv2.WINDOW_NORMAL)
    cv2.resizeWindow(windowname, int(width / scale), int(height / scale))
    cv2.imshow(windowname, img)
import glob
#=========USER START================
# Input glob pattern and output folder for the batch run.
path = 'RAW_FUNDUS_INPUT/*.jpg'
save_path = 'bv namkao v2/'
#=========USER END================
# + colab={} colab_type="code" id="xC6rpeySjX8c"
# Collect every matching input image path.
image_list = []
for filename in glob.glob(path):
    image_list.append(filename)

# Extract the vessel mask from each image and post-process it.
for i in image_list:
    c = cv2.imread(i)
    #=========PUT YOUR CODE HERE==========
    #=====================================
    x = extract_bv(c)
    #x = cv2.bitwise_not(x)
    # Morphological clean-up of the raw vessel mask.
    kernel = np.ones((5,5),np.uint8)
    #x = cv2.erode(x,kernel,iterations = 1)
    x = cv2.dilate(x,kernel,iterations = 1)
    x = cv2.erode(x,kernel,iterations = 1)
    kernel1 = np.ones((3,3),np.uint8)
    kernel2 = np.ones((11,11),np.uint8)
    #x = cv2.morphologyEx(x, cv2.MORPH_OPEN, kernel2)
    x = cv2.dilate(x,kernel,iterations = 1)
    x = cv2.erode(x,kernel,iterations = 3)
    x = cv2.morphologyEx(x, cv2.MORPH_CLOSE, kernel2)
    x = cv2.dilate(x,kernel,iterations = 1)
    #ShowResizedIm(x,"หี",2)
    #cv2.waitKey(0)
    #cv2.destroyAllWindows()
    #break
    # NOTE(review): the output-filename reconstruction assumes Windows-style
    # "\\" separators in glob results — confirm on other platforms.
    cv2.imwrite(save_path+ i.replace(path.replace("/*.jpg","")+"\\",""),x)
# -
# -
| .ipynb_checkpoints/Blood extraction2v-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Lab 05 : Advantageous Actor-Critic (AAC) - demo
#
# +
import torch
import torch.nn as nn
torch.manual_seed(torch.randint(10000,())) # random seed for pythorch random generator
import time
import numpy as np
import os
import pickle
import gym
import matplotlib
import matplotlib.pyplot as plt
from IPython import display
from collections import namedtuple
import random
from itertools import count
class DotDict(dict):
    """A dict whose entries are also reachable as attributes (d.key)."""

    def __init__(self, **kwds):
        super().__init__()
        self.update(kwds)
        # Aliasing __dict__ to the mapping itself makes attribute access
        # and item access share the same storage.
        self.__dict__ = self
# -
# # Dataset
# +
#Env parameters
env_seed = 1
render = True # display on
render = False # display off (NOTE: this immediately overrides the line above)
#Initialize the environment with the same seed/initialization value
env = gym.make('CartPole-v0')
env.seed(env_seed)
#Reset the environment
state = env.reset()
print('init state:',state)
#Rollout one episode with a uniformly random policy until it terminates
for t in count():
    action = torch.LongTensor(1).random_(0,2).item() # randomly generated action=a in {0,1}
    state, reward, done, _ = env.step(action) # receive next state=s' and reward=r
    print('t=',t, 'action=',action, 'state=',np.array_str(state, precision=5), 'reward=',reward, 'done=',done )
    if render:
        env.render() # see the state
    if done:
        break
# -
# # Define the policy network
# +
# class of policy network
# class of policy network
class ActorCritic_NN(nn.Module):
    """Actor-critic network with three small two-layer MLP heads over the
    state: a policy head, an action-value (Q) head, and a state-value head.

    Args (via `net_parameters` dict):
        input_dim: state vector size.
        hidden_dim: hidden layer width (shared by all heads).
        output_dim: number of discrete actions.
    """

    def __init__(self, net_parameters):
        super(ActorCritic_NN, self).__init__()
        input_dim = net_parameters['input_dim']
        hidden_dim = net_parameters['hidden_dim']
        output_dim = net_parameters['output_dim']
        # policy network
        self.fc1_p = nn.Linear(input_dim, hidden_dim)
        self.fc2_p = nn.Linear(hidden_dim, output_dim)
        # action-value function network
        self.fc1_q_a = nn.Linear(input_dim, hidden_dim)
        self.fc2_q_a = nn.Linear(hidden_dim, output_dim)
        # state-value function network
        self.fc1_q_s = nn.Linear(input_dim, hidden_dim)
        self.fc2_q_s = nn.Linear(hidden_dim, 1)

    def forward_policy(self, x):
        """Return per-action probabilities (softmax over action scores)."""
        x = torch.relu(self.fc1_p(x))
        actions_score = self.fc2_p(x)
        actions_prob = torch.softmax(actions_score, dim=1)
        return actions_prob

    def forward_Q_a(self, x):
        """Return Q(s, a) scores, one per action."""
        x = torch.relu(self.fc1_q_a(x))
        Q_scores = self.fc2_q_a(x)  # scores over actions
        return Q_scores

    def forward_Q_s(self, x):
        """Return the scalar state value estimate for each state."""
        x = torch.relu(self.fc1_q_s(x))
        Q_scores = self.fc2_q_s(x)
        return Q_scores

    def select_action(self, state):  # select action w/ policy network
        """Sample an action from the current policy for `state`."""
        probs = self.forward_policy(state)  # probability of action a in state s
        bernoulli_sampling = torch.distributions.Categorical(probs)
        action = bernoulli_sampling.sample()  # sample action a with categorical sampling
        return action

    def loss(self, batch):
        """Actor-critic loss over a batch of rolled-out episodes.

        Sums three terms per episode: the policy-gradient loss weighted by
        the advantage (Q - V), an MSE on the bootstrapped Q target, and an
        MSE fitting V to the Monte-Carlo discounted return.
        """
        # NOTE(review): reads gamma from the notebook-global `opt_parameters`
        # rather than an attribute — confirm it is set before calling.
        gamma = opt_parameters['gamma']
        nb_episodes_per_batch = len(batch.states)
        batch_losses = []
        for episode in range(nb_episodes_per_batch):
            episode_states = torch.stack(batch.states[episode]).float()            # size=B x 4
            episode_next_states = torch.stack(batch.next_states[episode]).float()  # size=B x 4
            episode_actions = torch.stack(batch.actions[episode]).long()           # size=B
            # NOTE(review): rewards are negated here, while the discounted
            # return below uses the raw rewards — confirm the intended signs.
            episode_rewards = - torch.stack(batch.rewards[episode]).float()        # size=B
            episode_dones = torch.stack(batch.dones[episode]).float()              # size=B
            # Compute the discounted return at each time step (backwards).
            R = 0; rewards = []
            for r in batch.rewards[episode][::-1]:
                R = r + gamma * R
                rewards.insert(0, R)
            episode_discounted_rewards = torch.tensor(rewards).float()  # size=B
            episode_next_actions = self.select_action(episode_next_states)  # size=B
            Q = self.forward_Q_a(episode_states).gather(dim=1, index=episode_actions.unsqueeze(1))  # Qv(a|s), size=B x 1
            # One-step bootstrapped target; `dones` masks the terminal step.
            Q_target = episode_rewards.unsqueeze(1) + gamma * \
                self.forward_Q_a(episode_next_states).gather(dim=1, index=episode_next_actions.unsqueeze(1)) * episode_dones.unsqueeze(1)
            Q_state = self.forward_Q_s(episode_states)
            # BUGFIX: previously called the notebook-global `actorcritic_net`
            # here instead of `self`, breaking any other instance.
            logP = torch.log(self.forward_policy(episode_states).gather(dim=1, index=episode_actions.unsqueeze(1)))
            loss1 = (-logP * (Q - Q_state).detach()).mean()  # policy gradient with advantage baseline
            loss2 = nn.MSELoss()(Q, Q_target.detach())       # critic: Q toward bootstrapped target
            loss3 = nn.MSELoss()(Q_state, episode_discounted_rewards.unsqueeze(1).detach())  # V toward MC return
            loss = loss1 + loss2 + loss3
            batch_losses.append(loss)
        loss = torch.stack(batch_losses).mean()
        return loss
# class of rollout episodes
class Rollout_Episodes():
    """Rolls out batches of episodes in a gym environment using a given
    actor-critic network's policy, collecting per-step transitions."""

    def __init__(self,):
        super(Rollout_Episodes, self).__init__()

    def rollout_batch_episodes(self, env, opt_parameters, actorcritic_net, write_memory=True):
        """Roll out `nb_episodes_per_batch` episodes and collect transitions.

        Parameters:
            env: gym environment (must support seed/reset/step).
            opt_parameters: dict with 'nb_episodes_per_batch' and
                'env_seed' (tensor of per-episode seeds).
            actorcritic_net: provides select_action(state) for the policy.
            write_memory: unused, kept for interface compatibility.
        Returns:
            (batch_episode_lengths, batch) where batch is a DotDict of
            per-episode lists of states/actions/next_states/rewards/dones.
        """
        # storage structure of all episodes (w/ different lengths)
        nb_episodes_per_batch = opt_parameters['nb_episodes_per_batch']
        env_seeds = opt_parameters['env_seed']
        batch = DotDict()
        batch.states = []; batch.actions = []; batch.next_states = []
        batch.rewards = []; batch.dones = []
        batch_episode_lengths = []
        for episode in range(nb_episodes_per_batch):
            states = []; actions = []; next_states = []; rewards = []; dones = []
            env.seed(env_seeds[episode].item())  # start with random seed
            state = env.reset()                  # reset environment
            for t in range(1000):                # rollout one episode (capped)
                state_pytorch = torch.from_numpy(state).float().unsqueeze(0)     # state=s
                action = actorcritic_net.select_action(state_pytorch).item()     # select action=a from state=s
                next_state, reward, done, _ = env.step(action)                   # receive next state=s' and reward=r
                done_mask = 0.0 if done else 1.0
                states.append(torch.tensor(state))
                actions.append(torch.tensor(action))
                next_states.append(torch.tensor(next_state))
                rewards.append(torch.tensor(reward))
                dones.append(torch.tensor(done_mask))
                state = next_state
                if done:
                    batch_episode_lengths.append(t)
                    break
            else:
                # BUGFIX: an episode truncated at the step cap previously
                # recorded no length, leaving batch_episode_lengths shorter
                # than the number of episodes.
                batch_episode_lengths.append(t)
            batch.states.append(states)
            batch.actions.append(actions)
            batch.next_states.append(next_states)
            batch.rewards.append(rewards)
            batch.dones.append(dones)
        return batch_episode_lengths, batch
# network parameters
net_parameters = {}
net_parameters['input_dim'] = 4    # CartPole state vector size
net_parameters['hidden_dim'] = 128
net_parameters['output_dim'] = 2   # two discrete actions
# instantiate network
actorcritic_net = ActorCritic_NN(net_parameters)
print(actorcritic_net)
# instantiate rollout
rollout_policy_net = Rollout_Episodes()
# optimization parameters
opt_parameters = {}
opt_parameters['nb_episodes_per_batch'] = 3
# One random environment seed per episode in the batch.
opt_parameters['env_seed'] = torch.LongTensor(opt_parameters['nb_episodes_per_batch']).random_(1,10000)
# Smoke-test the rollout with the untrained network.
env = gym.make('CartPole-v0')
batch_episode_lengths, batch = rollout_policy_net.rollout_batch_episodes(env, opt_parameters, actorcritic_net)
#print('batch:',batch)
print('batch_episode_lengths:',batch_episode_lengths)
# -
# # Test forward pass
# +
# optimization parameters
opt_parameters = {}
opt_parameters['lr'] = 0.001
opt_parameters['nb_episodes_per_batch'] = 3
opt_parameters['env_seed'] = torch.LongTensor(opt_parameters['nb_episodes_per_batch']).random_(1,10000)
opt_parameters['gamma'] = 0.99  # discount factor used by the loss
batch_episode_lengths, batch = rollout_policy_net.rollout_batch_episodes(env, opt_parameters, actorcritic_net)
#print('batch:',batch)
print('batch_episode_lengths:',batch_episode_lengths)
# -
# # Test backward pass
# +
# Loss on the freshly rolled-out batch
loss = actorcritic_net.loss(batch)
print('loss:',loss)
# Backward pass: a single optimizer step to verify gradients flow.
lr = opt_parameters['lr']
optimizer = torch.optim.Adam(actorcritic_net.parameters(), lr=lr)
optimizer.zero_grad()
loss.backward()
optimizer.step()
# -
# # Train one epoch
# +
def train_one_epoch(env, actorcritic_net, opt_parameters):
    """
    train one epoch

    Rolls out `nb_batches_per_epoch` batches of episodes and takes one
    gradient step per batch.

    NOTE(review): relies on the notebook globals `optimizer` and
    `rollout_policy_net` — confirm both are initialized before calling.

    Returns:
        (mean loss per episode, mean episode length,
         list of running cumulative episode-length totals)
    """
    actorcritic_net.train()
    epoch_loss = 0
    nb_data = 0
    epoch_episode_length = 0
    epoch_episode_lengths = []
    nb_batches_per_epoch = opt_parameters['nb_batches_per_epoch']
    for iter in range(nb_batches_per_epoch):
        batch_episode_lengths, batch = \
            rollout_policy_net.rollout_batch_episodes(env, opt_parameters, actorcritic_net)
        loss = actorcritic_net.loss(batch)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        epoch_loss += loss.detach().item()
        nb_data += len(batch_episode_lengths)
        epoch_episode_length += torch.tensor(batch_episode_lengths).float().sum()
        # NOTE(review): this appends the *running cumulative* total, not the
        # per-batch length — confirm that is the intended bookkeeping.
        epoch_episode_lengths.append(epoch_episode_length)
    epoch_loss /= nb_data
    epoch_episode_length /= nb_data
    return epoch_loss, epoch_episode_length, epoch_episode_lengths
# -
# # Train NN
# +
# network parameters
net_parameters = {}
net_parameters['input_dim'] = 4
net_parameters['hidden_dim'] = 256
net_parameters['output_dim'] = 2
# instantiate network
actorcritic_net = ActorCritic_NN(net_parameters)
print(actorcritic_net)
# optimization parameters
opt_parameters = {}
opt_parameters['lr'] = 0.0005
opt_parameters['nb_episodes_per_batch'] = 1
opt_parameters['nb_batches_per_epoch'] = 50
opt_parameters['env_seed'] = torch.LongTensor(opt_parameters['nb_episodes_per_batch']).random_(1,10000)
opt_parameters['gamma'] = 0.99
optimizer = torch.optim.Adam(actorcritic_net.parameters(), lr=opt_parameters['lr'] )
# select maximum episode length to learn
env = gym.make('CartPole-v0')
env._max_episode_steps = 400 # 200 400
# Consider the task solved at 97.5% of the maximum episode length.
env.spec.reward_threshold = 0.975* env._max_episode_steps
print('env._max_episode_steps',env._max_episode_steps)
# train loop
all_epoch_lengths = []
start = time.time()
for epoch in range(500):
    # train one epoch
    epoch_train_loss, epoch_episode_length, epoch_episode_lengths = train_one_epoch(env, actorcritic_net, opt_parameters)
    # stop training when reward is high
    if epoch_episode_length > env.spec.reward_threshold:
        print('Training done.')
        print("Last episode length is {}, epoch is {}".
              format(epoch_episode_length, epoch))
        break
    # print intermediate info
    if not epoch%1:
        print('Epoch: {}, time: {:.4f}, train_loss: {:.4f}, episode_length: {:.4f}'.format(epoch, time.time()-start, epoch_train_loss, epoch_episode_length))
    # plot the learning curve after every epoch
    all_epoch_lengths.append(epoch_episode_length)
    if not epoch%1:
        plt.figure(2)
        plt.title('Training...')
        plt.xlabel('Epochs')
        plt.ylabel('Length of episodes batch')
        plt.plot(torch.Tensor(all_epoch_lengths).numpy())
        plt.pause(0.001)
        display.clear_output(wait=True)
# -
# Final plot
plt.figure(2)
plt.title('Training...')
plt.xlabel('Epochs')
plt.ylabel('Length of episodes batch')
plt.plot(torch.Tensor(all_epoch_lengths).numpy())
print("Last episode length is {}, epoch is {}".format(epoch_episode_length, epoch))
# # Run it longer
# +
# Roll out one episode with the trained policy under a much larger step cap.
env._max_episode_steps = 5000
state = env.reset() # reset environment
for t in range(env._max_episode_steps): # rollout one episode until it finishes or hits the cap
    state_pytorch = torch.from_numpy(state).float().unsqueeze(0) # state=s
    action = actorcritic_net.eval().select_action(state_pytorch).item()
    state, reward, done, _ = env.step(action) # receive next state=s' and reward=r
    env.render() # visualize state
    if done:
        print(t)
        break
# -
| codes/labs_lecture15/lab05_AAC/AAC_demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/medinadiegoeverardo/DS-Unit-2-Regression-Classification/blob/master/module4/4_medinadiego_assignment_regression_classification_4.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] colab_type="text" id="7IXUfiQ2UKj6"
# Lambda School Data Science, Unit 2: Predictive Modeling
#
# # Regression & Classification, Module 4
#
#
# ## Assignment
#
# - [ ] Watch Aaron's [video #1](https://www.youtube.com/watch?v=pREaWFli-5I) (12 minutes) & [video #2](https://www.youtube.com/watch?v=bDQgVt4hFgY) (9 minutes) to learn about the mathematics of Logistic Regression.
# - [ ] [Sign up for a Kaggle account](https://www.kaggle.com/), if you don’t already have one. Go to our Kaggle InClass competition website. You will be given the URL in Slack. Go to the Rules page. Accept the rules of the competition.
# - [ ] Do train/validate/test split with the Tanzania Waterpumps data.
# - [ ] Begin with baselines for classification.
# - [ ] Use scikit-learn for logistic regression.
# - [ ] Get your validation accuracy score.
# - [ ] Submit your predictions to our Kaggle competition. (Go to our Kaggle InClass competition webpage. Use the blue **Submit Predictions** button to upload your CSV file. Or you can use the Kaggle API to submit your predictions.)
# - [ ] Commit your notebook to your fork of the GitHub repo.
#
# ---
#
#
# ## Stretch Goals
#
# - [ ] Add your own stretch goal(s) !
# - [ ] Clean the data. For ideas, refer to [The Quartz guide to bad data](https://github.com/Quartz/bad-data-guide), a "reference to problems seen in real-world data along with suggestions on how to resolve them." One of the issues is ["Zeros replace missing values."](https://github.com/Quartz/bad-data-guide#zeros-replace-missing-values)
# - [ ] Make exploratory visualizations.
# - [ ] Do one-hot encoding. For example, you could try `quantity`, `basin`, `extraction_type_class`, and more. (But remember it may not work with high cardinality categoricals.)
# - [ ] Do [feature scaling](https://scikit-learn.org/stable/modules/preprocessing.html).
# - [ ] Get and plot your coefficients.
# - [ ] Try [scikit-learn pipelines](https://scikit-learn.org/stable/modules/compose.html).
#
# ---
#
# ## Data Dictionary
#
# ### Features
#
# Your goal is to predict the operating condition of a waterpoint for each record in the dataset. You are provided the following set of information about the waterpoints:
#
# - `amount_tsh` : Total static head (amount water available to waterpoint)
# - `date_recorded` : The date the row was entered
# - `funder` : Who funded the well
# - `gps_height` : Altitude of the well
# - `installer` : Organization that installed the well
# - `longitude` : GPS coordinate
# - `latitude` : GPS coordinate
# - `wpt_name` : Name of the waterpoint if there is one
# - `num_private` :
# - `basin` : Geographic water basin
# - `subvillage` : Geographic location
# - `region` : Geographic location
# - `region_code` : Geographic location (coded)
# - `district_code` : Geographic location (coded)
# - `lga` : Geographic location
# - `ward` : Geographic location
# - `population` : Population around the well
# - `public_meeting` : True/False
# - `recorded_by` : Group entering this row of data
# - `scheme_management` : Who operates the waterpoint
# - `scheme_name` : Who operates the waterpoint
# - `permit` : If the waterpoint is permitted
# - `construction_year` : Year the waterpoint was constructed
# - `extraction_type` : The kind of extraction the waterpoint uses
# - `extraction_type_group` : The kind of extraction the waterpoint uses
# - `extraction_type_class` : The kind of extraction the waterpoint uses
# - `management` : How the waterpoint is managed
# - `management_group` : How the waterpoint is managed
# - `payment` : What the water costs
# - `payment_type` : What the water costs
# - `water_quality` : The quality of the water
# - `quality_group` : The quality of the water
# - `quantity` : The quantity of water
# - `quantity_group` : The quantity of water
# - `source` : The source of the water
# - `source_type` : The source of the water
# - `source_class` : The source of the water
# - `waterpoint_type` : The kind of waterpoint
# - `waterpoint_type_group` : The kind of waterpoint
#
# ### Labels
#
# There are three possible values:
#
# - `functional` : the waterpoint is operational and there are no repairs needed
# - `functional needs repair` : the waterpoint is operational, but needs repairs
# - `non functional` : the waterpoint is not operational
#
# ---
#
# ## Generate a submission
#
# Your code to generate a submission file may look like this:
#
# ```python
# # estimator is your model or pipeline, which you've fit on X_train
#
# # X_test is your pandas dataframe or numpy array,
# # with the same number of rows, in the same order, as test_features.csv,
# # and the same number of columns, in the same order, as X_train
#
# y_pred = estimator.predict(X_test)
#
#
# # Makes a dataframe with two columns, id and status_group,
# # and writes to a csv file, without the index
#
# sample_submission = pd.read_csv('sample_submission.csv')
# submission = sample_submission.copy()
# submission['status_group'] = y_pred
# submission.to_csv('your-submission-filename.csv', index=False)
# ```
#
# If you're working locally, the csv file is saved in the same directory as your notebook.
#
# If you're using Google Colab, you can use this code to download your submission csv file.
#
# ```python
# from google.colab import files
# files.download('your-submission-filename.csv')
# ```
#
# ---
# + colab_type="code" id="o9eSnDYhUGD7" outputId="1ad38afd-b72e-4990-ed82-b8a8c638f240" colab={"base_uri": "https://localhost:8080/", "height": 1000}
import os, sys
# Detect whether this notebook is running inside Google Colab.
in_colab = 'google.colab' in sys.modules
# If you're in Colab...
if in_colab:
    # Pull files from Github repo
    os.chdir('/content')
    # !git init .
    # !git remote add origin https://github.com/LambdaSchool/DS-Unit-2-Regression-Classification.git
    # !git pull origin master
    # Install required python packages
    # !pip install -r requirements.txt
    # Change into directory for module
    os.chdir('module4')
# + colab_type="code" id="ipBYS77PUwNR" colab={}
# Ignore this Numpy warning when using Plotly Express:
# FutureWarning: Method .ptp is deprecated and will be removed in a future version. Use numpy.ptp instead.
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning, module='numpy')
# + colab_type="code" id="QJBD4ruICm1m" colab={}
# Read the Tanzania Waterpumps data
# train_features.csv : the training set features
# train_labels.csv : the training set labels
# test_features.csv : the test set features
# sample_submission.csv : a sample submission file in the correct format
import pandas as pd

# Load the Tanzania waterpumps splits and the sample submission.
train_features = pd.read_csv('../data/waterpumps/train_features.csv')
train_labels = pd.read_csv('../data/waterpumps/train_labels.csv')
test_features = pd.read_csv('../data/waterpumps/test_features.csv')
sample_submission = pd.read_csv('../data/waterpumps/sample_submission.csv')

# Sanity-check the expected shapes before doing any work.
assert train_features.shape == (59400, 40)
assert train_labels.shape == (59400, 2)
assert test_features.shape == (14358, 40)
assert sample_submission.shape == (14358, 2)
# + id="HK_f2ZiIA2CT" colab_type="code" outputId="2919363f-3efb-4648-8c19-c0fe52521799" colab={"base_uri": "https://localhost:8080/", "height": 422}
train_features.head()
# + colab_type="code" outputId="e316c6d1-5d49-44b7-b966-f85c1e9d79e0" id="c-XWQLEYu3Ik" colab={"base_uri": "https://localhost:8080/", "height": 201}
train_labels.head() # undo later
# + id="w9mayNQoapet" colab_type="code" colab={}
mode_map = {'functional': 0, 'non functional': 1, 'functional needs repair': 2}
train_labels['status_group'] = train_labels['status_group'].replace(mode_map)
# train_labels.head()
# functional: 0
# non functional: 1
# functional needs repair: 2
# + colab_type="code" id="2Amxyx3xphbb" colab={}
from sklearn.model_selection import train_test_split
# Split features and labels with the SAME random_state so the two splits
# assign identical rows to train/validation; the merge on 'id' below
# re-attaches each label set to its matching feature set.
x_training, x_validation = train_test_split(train_features, random_state=10)
# + id="nuZeFLeibYYj" colab_type="code" colab={}
# train_labels.status_group.value_counts(normalize=True)
# + [markdown] id="Ex-FFD3vSMIE" colab_type="text"
# ### Adding y_variable to training and validation
# + id="RvCkpdWs6Gmi" colab_type="code" colab={}
# splitting train_labels to add them to x_training and x_validation!
# + id="bBq2l0SX5r7J" colab_type="code" colab={}
training_y_labels, validation_y_labels = train_test_split(train_labels, random_state=10)
# + id="_6SX0s9etuRj" colab_type="code" colab={}
x_training = x_training.merge(training_y_labels, on='id')
x_validation = x_validation.merge(validation_y_labels, on='id')
# + id="WPcJPmVRmpM1" colab_type="code" outputId="cf8f27c7-703a-496e-b90d-f88408e3637e" colab={"base_uri": "https://localhost:8080/", "height": 404}
x_training.head()
# + id="gh90pZQT6I-2" colab_type="code" colab={}
# both have the same shape as their corresponding sets
# + id="KIp_hsB96n7e" colab_type="code" outputId="873939de-d36d-4628-9d9a-defc7fd28d03" colab={"base_uri": "https://localhost:8080/", "height": 49}
# Confirm the label splits line up row-for-row with the feature splits.
print(training_y_labels.shape)
print(validation_y_labels.shape)
# + id="g4fY139pTB9n" colab_type="code" outputId="d2a6f7b9-acfe-404f-a69c-290cc53c1ae5" colab={"base_uri": "https://localhost:8080/", "height": 49}
print(x_training.shape)
print(x_validation.shape)
# + [markdown] id="erG6i95ciVZo" colab_type="text"
# ### Baselines
# + id="qxQHqznZj-X2" colab_type="code" outputId="0da75da5-5ce3-441c-f658-19ae55951abd" colab={"base_uri": "https://localhost:8080/", "height": 49}
y_train = x_training['status_group']
y_train.mode()
# + id="6GLI_RL_Y4bQ" colab_type="code" outputId="a0abe9ed-e06a-4946-dd37-ed9a44a8bbdf" colab={"base_uri": "https://localhost:8080/", "height": 32}
from sklearn.metrics import mean_absolute_error
# baseline: always predict the most frequent class.
# NOTE(review): MAE on an integer-encoded categorical target is not a
# meaningful classification metric (the print below acknowledges this);
# accuracy is computed in the next cell.
majority_class = y_train.mode()[0]
y_pred = [majority_class] * len(y_train) # both have to be in Series
mae = mean_absolute_error(y_train, y_pred) # for it to work
print('MAE. Not accuracy metric: ' + str(mae))
# + id="2MFkfrBjmljw" colab_type="code" outputId="8dc0dd25-8d1e-4d54-9ae6-cd0fa332d8e9" colab={"base_uri": "https://localhost:8080/", "height": 422}
x_validation.head()
# + id="y8nEf1hwh-mT" colab_type="code" outputId="7fef65f4-b0e8-4499-c24c-6c584bd4b312" colab={"base_uri": "https://localhost:8080/", "height": 49}
from sklearn.metrics import accuracy_score
# Training accuracy of the majority-class baseline.
accuracy_s = accuracy_score(y_train, y_pred)
print('Training accuracy score: ', str(accuracy_s))
# how much does it differ from validation dataset?
y_val = x_validation['status_group']
# FIX: take the first mode value (a scalar). .mode() returns a Series, so the
# original made y_predict a list of Series objects; this mirrors the training
# cell above, which correctly used .mode()[0].
majority_class_2 = y_val.mode()[0]
y_predict = [majority_class_2] * len(y_val)
ac_v = accuracy_score(y_val, y_predict)
print('Validation accuracy score: ', str(ac_v))
# + [markdown] id="RZm-QqbDk9NO" colab_type="text"
# ### Linear Reg
# + id="_qJlvbicsh53" colab_type="code" outputId="9af04b64-d7ea-4e90-9e22-38b9d367d470" colab={"base_uri": "https://localhost:8080/", "height": 98}
features = ['id', 'construction_year', 'longitude', 'latitude']
x_training[features].isnull().sum()
# 0 null values, no need to use imputer for lin reg?
# + id="dsYG5ggepiha" colab_type="code" outputId="86de9d2a-27eb-40b3-d219-c211b26bee66" colab={"base_uri": "https://localhost:8080/", "height": 49}
from sklearn.linear_model import LinearRegression
# NOTE(review): fitting a linear REGRESSION on the integer-encoded class
# labels is exploratory only; the logistic regression below is the real model.
model_linear = LinearRegression()
# not using imputer, encoder here
# reminder!
# y_train = x_training['status_group']
# y_val = x_validation['status_group']
features = ['id', 'construction_year', 'longitude', 'latitude']
x_train = x_training[features] # these for now
x_val = x_validation[features]
# In Lecture, training feature was used (population)
# y_train = y_training['status_group']
# y_validation = y_validation['status_group']
model_linear.fit(x_train, y_train) # not train_labels
model_linear.predict(x_val)
# + id="DKEX1Vgsth93" colab_type="code" outputId="9ddbf201-4076-42e6-d97c-00b57fada104" colab={"base_uri": "https://localhost:8080/", "height": 32}
model_linear.coef_
# + [markdown] id="UKyfMVAO0pIp" colab_type="text"
# ### Logistic, Imputing, and encoding
# + id="eo56H8LOn9nI" colab_type="code" outputId="9831f0f4-d64e-41fc-cd2e-6a2a3fe7786b" colab={"base_uri": "https://localhost:8080/", "height": 962}
x_training.describe(include=['O']).T
# + id="Ih8xosTauegp" colab_type="code" colab={}
# reducing training column cardinality:
# keep each column's 50 most frequent values and lump everything else into 'N/A'
# so that one-hot encoding stays tractable.
date_recorded_top = x_training['date_recorded'].value_counts()[:50].index
x_training.loc[~x_training['date_recorded'].isin(date_recorded_top), 'date_recorded'] = 'N/A'
funder_top = x_training['funder'].value_counts()[:50].index
x_training.loc[~x_training['funder'].isin(funder_top), 'funder'] = 'N/A'
ward_top = x_training['ward'].value_counts()[:50].index
x_training.loc[~x_training['ward'].isin(ward_top), 'ward'] = 'N/A'
installer_top = x_training['installer'].value_counts()[:50].index
x_training.loc[~x_training['installer'].isin(installer_top), 'installer'] = 'N/A'
scheme_name_top = x_training['scheme_name'].value_counts()[:50].index
x_training.loc[~x_training['scheme_name'].isin(scheme_name_top), 'scheme_name'] = 'N/A'
# + id="TfpZIWgl1fm7" colab_type="code" colab={}
# reducing validation column cardinality (mirrors the training reduction above)
# FIX: the first block here was a copy-paste duplicate of the 'funder'
# reduction (run twice); it should reduce 'date_recorded' to match training.
date_recorded_top_v = x_validation['date_recorded'].value_counts()[:50].index
x_validation.loc[~x_validation['date_recorded'].isin(date_recorded_top_v), 'date_recorded'] = 'N/A'
funder_top_v = x_validation['funder'].value_counts()[:50].index
x_validation.loc[~x_validation['funder'].isin(funder_top_v), 'funder'] = 'N/A'
ward_top_v = x_validation['ward'].value_counts()[:50].index
# FIX: previously filtered with the training-set 'ward_top' instead of 'ward_top_v'.
x_validation.loc[~x_validation['ward'].isin(ward_top_v), 'ward'] = 'N/A'
installer_top_v = x_validation['installer'].value_counts()[:50].index
x_validation.loc[~x_validation['installer'].isin(installer_top_v), 'installer'] = 'N/A'
scheme_name_top_v = x_validation['scheme_name'].value_counts()[:50].index
x_validation.loc[~x_validation['scheme_name'].isin(scheme_name_top_v), 'scheme_name'] = 'N/A'
# + id="MxlxN4X-0uha" colab_type="code" colab={}
# dropping those with extremely high cardinality
# (free-text columns with far too many unique values to one-hot encode)
to_drop = ['wpt_name', 'subvillage']
x_training = x_training.drop(to_drop, axis=1)
x_validation = x_validation.drop(to_drop, axis=1)
# + id="nh52FNxqyB3I" colab_type="code" colab={}
# tried to write a function..
# def changing_cardinality(column, number_of_card, placeholder):
# filtered = df[column].value_counts()[:number_of_card].index
# changed = [df.loc[~df[column].isin(filtered), column] == placeholder]
# return changed
# x_training['date_recorded'] = x_training['date_recorded'].apply(changing_cardinality('date_recorded', 50, 'N/A'))
# + id="6eJCEYUBo9Gp" colab_type="code" outputId="654ddf01-faba-43eb-9e09-c0fe323d7f7d" colab={"base_uri": "https://localhost:8080/", "height": 243}
x_training.describe(include=['O'])
# + id="YPRVYXrbosUS" colab_type="code" colab={}
# all_columns = x_training.describe(include=['O'])
# total_col_list = list(all_columns.columns)
# Categorical features to feed the encoder (chosen by hand from describe() above).
total_col_list = ['date_recorded', 'funder', 'installer', 'region', 'ward', 'recorded_by', 'scheme_management','scheme_name', 'water_quality', 'quality_group', 'quantity',
                  'quantity_group', 'source', 'source_type', 'source_class', 'waterpoint_type', 'waterpoint_type_group']
# numerics
numerics = ['longitude', 'latitude', 'region_code', 'district_code', 'population', 'construction_year']
total_col_list.extend(numerics)
# + id="OKQTe1CV1NHi" colab_type="code" colab={}
# total_col_list
# + id="pk-_T7ZlkHlm" colab_type="code" outputId="1332fb44-0bfa-41f5-d4ba-62dda97aea17" colab={"base_uri": "https://localhost:8080/", "height": 32}
from sklearn.linear_model import LogisticRegression
import category_encoders as ce
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LogisticRegressionCV
from sklearn.preprocessing import StandardScaler
# model_log = LogisticRegressionCV(cv=5, n_jobs=-1, random_state=42)
model_log = LogisticRegression(solver='lbfgs', multi_class='auto', max_iter=300)
imputer = SimpleImputer()
encoder = ce.OneHotEncoder(use_cat_names=True)
scaler = StandardScaler()
# reminder!
# y_train = x_training['status_group']
# y_val = x_validation['status_group']
features = total_col_list
x_train = x_training[features]
x_val = x_validation[features]
# Preprocessing: encode -> impute -> scale.
# fit_transform is used on training data only; validation reuses the
# fitted transformers so no information leaks from validation into training.
x_train_encoded = encoder.fit_transform(x_train)
x_train_imputed = imputer.fit_transform(x_train_encoded)
x_train_scaled = scaler.fit_transform(x_train_imputed)
x_val_encoded = encoder.transform(x_val)
x_val_imputed = imputer.transform(x_val_encoded)
x_val_scaled = scaler.transform(x_val_imputed)
model_log.fit(x_train_scaled, y_train)
print('Validation accuracy score', model_log.score(x_val_scaled, y_val))
# + id="lBy2HV5r6GNa" colab_type="code" outputId="b751c9aa-5c64-4741-a25d-af3063d0f662" colab={"base_uri": "https://localhost:8080/", "height": 32}
# NOTE(review): test_features never received the top-50 cardinality reduction
# applied to train/validation above; confirm the encoder's handling of the
# extra unseen categories is acceptable.
X_test = test_features[features]
X_test_encoded = encoder.transform(X_test)
X_test_imputed = imputer.transform(X_test_encoded)
X_test_scaled = scaler.transform(X_test_imputed)
y_pred = model_log.predict(X_test_scaled)
print(y_pred)
# + id="iqyjvL9R7V8K" colab_type="code" colab={}
submission = sample_submission.copy()
submission['status_group'] = y_pred
# + id="rYEYmPqr7uDG" colab_type="code" colab={}
# Map the integer predictions back to the original class names for Kaggle.
mode_map = {0: 'functional', 1: 'non functional', 2: 'functional needs repair'}
submission['status_group'] = submission['status_group'].replace(mode_map)
# + id="yl5bqWtX7tKA" colab_type="code" colab={}
submission.to_csv('medinadiegokaggle.csv', index=False)
from google.colab import files
files.download('medinadiegokaggle.csv')
| module4/4_medinadiego_assignment_regression_classification_4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Facebook Prophet
# FbProphet is a robust library for time series data analysis and forecasting, developed by Facebook's core data science team. It is based on the Generalized Additive Model (GAM). There are 3 major components of it:
# - Trend Component {<b>g(t)</b>}
# - Seasonal Component {<b>s(t)</b>}
# - holiday component {<b>h(t)</b>}
#
# <b>y(t)=g(t)+s(t)+h(t)+E</b>, E=error caused by unusual changes not accommodated by the model
import pandas as pd
from fbprophet import Prophet
data=pd.read_csv("data.csv")
data.head()
# Prophet requires exactly two columns: 'ds' (datestamp) and 'y' (value).
data=data[['Date','close']]
data.columns=['ds','y']
data.ds=pd.to_datetime(data.ds)
data.head()
data.shape
data.isna().sum()
import matplotlib.pyplot as plt
plt.figure(figsize=(12,8))
plt.plot(data.set_index(['ds']))
# ## Smoothing the curve by resampling it at weekly frequency.
# Resample to weekly means to smooth out day-to-day noise.
data.set_index(['ds'],inplace=True)
data.y=data.y.resample("W").mean()
data.dropna(inplace=True)
data.head(10)
plt.figure(figsize=(12,8))
plt.plot(data)
# ### Columns names should be 'ds' and 'y' to be prophet training compatible. Here, ds represent datestamp and y represents training value
# Restore 'ds' as a regular column (Prophet cannot read it from the index).
data['ds']=data.index
data.head()
#Prophet model
# Built-in yearly seasonality is disabled and replaced by a custom
# higher-order (fourier_order=10) yearly component below.
model=Prophet(n_changepoints=35,
yearly_seasonality=False,
weekly_seasonality=False,
daily_seasonality=False,
changepoint_prior_scale=0.4).add_seasonality(
name='yearly',
period=365.25,
fourier_order=10)
model.fit(data)
# ## Inference
# Forecast 60 weeks beyond the end of the training data.
future=model.make_future_dataframe(periods=60,freq="W")
forecast=model.predict(future)
fig=model.plot(forecast) #The model seems to fit well with our data
# #### Graphs below give good insights of our data.
fig2=model.plot_components(forecast)
# ## In cells below, lets divide our data into training and test set, and see how our model performs on test data.
data.shape
# Hold out everything after row 300 as a test period.
data_train=data.iloc[:300].copy()
data_train.tail()
model2=Prophet(n_changepoints=35,
yearly_seasonality=False,
weekly_seasonality=False,
daily_seasonality=False,
changepoint_prior_scale=0.4).add_seasonality(
name='yearly',
period=365.25,
fourier_order=10)
model2.fit(data_train)
future=model2.make_future_dataframe(periods=79,freq="W") #test count=79
# FIX: forecast with model2 (trained on the first 300 rows only). The original
# called model.predict, i.e. the model fitted on the FULL dataset, which leaked
# the test period into the evaluation.
forecast=model2.predict(future)
fig3=model2.plot(forecast) ##The model worked well for test data, graphs look similar to the original one.
| FbProphet_2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
#combat(dat, batch, mod, numCovs=None)
# +
from sklearn import decomposition
from sklearn import datasets
import matplotlib as mpl
import matplotlib.cm as cm
import combat as cb
from scipy.spatial.distance import pdist
np.random.seed(314)
def make_pca_plot(X, y, axis, title=""):
    """Scatter-plot the first two principal components of X, colored by batch label y.

    Draws a circle at each batch's centroid and returns the total pairwise
    distance between batch centroids (a rough measure of batch separation).
    """
    # reduce gene matrix to 2 dimensions for plotting
    pca = decomposition.PCA(n_components=2)
    pca.fit(X)
    X_trans = pca.transform(X)
    # find nice colors and plot
    norm = mpl.colors.Normalize(vmin=np.min(y), vmax=np.max(y))
    # FIX: cm.spectral was renamed nipy_spectral in matplotlib 2.0 and removed
    # in 2.2; the old name raises AttributeError on modern matplotlib.
    m = cm.ScalarMappable(norm=norm, cmap=cm.nipy_spectral)
    axis.scatter(X_trans[:, 0], X_trans[:, 1], c=m.to_rgba(y), edgecolor='none')
    # find batch centers and plot them
    all_batch_reps = []
    for val in np.unique(y):
        # FIX: removed the redundant np.ix_ re-assignment that immediately
        # overwrote this boolean-mask selection with the same rows.
        Z = X_trans[y == val, :]
        rep = np.mean(Z, axis=0)
        all_batch_reps.append(rep)
        axis.add_artist(plt.Circle(rep, 5, color=m.to_rgba(val)))
    axis.set_title(title)
    # FIX: bare legend(...) was an undefined name (NameError); call it on the axis.
    axis.legend(numpoints=1)
    # get total distance between all batch representatives
    all_batch_reps = np.array(all_batch_reps)
    return np.sum(pdist(all_batch_reps))
# Load expression matrix and phenotype table, run ComBat batch correction,
# then compare PCA plots (and centroid distances) before vs. after correction.
data = pd.read_csv("bladder-expr.txt", sep="\t")
pheno = pd.read_csv("bladder-pheno.txt", sep="\t")
corr = cb.combat(data, pheno["batch"])
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(12,6))
total_batch_dist = make_pca_plot(data.values.T, pheno["batch"], ax1, "Before Batch Correction")
total_batch_dist_corr = make_pca_plot(corr.values.T, pheno["batch"], ax2, "After Batch Correction")
# A smaller distance after correction indicates the batches mix better.
print(total_batch_dist)
print(total_batch_dist_corr)
plt.show()
# -
| hello_genomics/test pycombat.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # The role of notebooks
#
# Notebooks are great for illustrations and examples that at the same time serve as integration tests.
# In this library template, notebooks will be executed with pytest (thus on every
# commit in your CI/CD pipeline). The results of the executions will be saved to the docs directory and converted to
# static websites through nbconvert. The static websites are then added to the documentation under the
# _Guides and Tutorials_ section. These websites will be deployed to gitlab/github pages on push to develop.
# For azure-devops, the CI pipeline currently does not include a deployment of the pages - pull requests are welcome :)
# ## Before running the notebook
#
# Install the library and its dependencies with, if you haven't done so already
# ```
# pip install -e .
# ```
# from the root directory. You can also execute this command directly in the notebook but will need to reload the
# kernel afterwards
# + pycharm={"name": "#%%\n"}
# Here an illustration of your library
# NOTE: {{cookiecutter.project_name}} is a template placeholder rendered when
# the cookiecutter template is instantiated; this line is not runnable as-is.
from {{cookiecutter.project_name}}.sample_package.sample_module import hello_stranger
hello_stranger()
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Interactive Documentation
#
# Note that since notebooks are rendered to html+javascript, you can embed interactive components like maps, videos and
# widgets into your documentation, as long as the interaction does not require re-execution of cells.
# Below an example of an interactive map created with plotly
# + pycharm={"name": "#%%\n"}
# slightly adjusted example from https://plotly.com/python/lines-on-mapbox/
import pandas as pd
import plotly.express as px
# Fetch a small public dataset and restrict it to two states for a compact demo.
us_cities = pd.read_csv("https://raw.githubusercontent.com/plotly/datasets/master/us-cities-top-1k.csv")
us_cities = us_cities.query("State in ['New York', 'Ohio']")
fig = px.scatter_mapbox(us_cities, lat="lat", lon="lon", color="State", zoom=3, height=300)
fig.update_layout(mapbox_style="stamen-terrain", mapbox_zoom=4, mapbox_center_lat = 41,
margin={"r":0,"t":0,"l":0,"b":0})
fig.show()
# + pycharm={"name": "#%%\n"}
| {{cookiecutter.project_name}}/notebooks/library_example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:udacity]
# language: python
# name: conda-env-udacity-py
# ---
# +
# analyse data from csv. how can I improve it?
# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
# -
# smooth function, adapted from scipy formula at http://scipy-cookbook.readthedocs.io/items/SignalSmooth.html
def smooth(x,window_len=11,window='hanning'):
    """smooth the data using a window with requested size.
    This method is based on the convolution of a scaled window with the signal.
    The signal is prepared by introducing reflected copies of the signal
    (with the window size) in both ends so that transient parts are minimized
    in the beginning and end part of the output signal.
    input:
        x: the input signal
        window_len: the dimension of the smoothing window; should be an odd integer
        window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'
            flat window will produce a moving average smoothing.
    output:
        the smoothed signal
    example:
    t=linspace(-2,2,0.1)
    x=sin(t)+randn(len(t))*0.1
    y=smooth(x)
    see also:
    numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve
    scipy.signal.lfilter
    TODO: the window parameter could be the window itself if an array instead of a string
    NOTE: length(output) != length(input), to correct this: return y[(window_len/2-1):-(window_len/2)] instead of just y.
    """
    # Reflect window_len-1 samples at each end to damp boundary transients.
    s=np.r_[x[window_len-1:0:-1],x,x[-1:-window_len:-1]]
    if window == 'flat':
        w=np.ones(window_len,'d')
    else:
        # FIX: look the window function up with getattr instead of
        # eval('np.'+window+'(...)') -- eval on a caller-supplied string is
        # unsafe and obscures errors; getattr raises a clear AttributeError
        # for an unknown window name.
        w=getattr(np, window)(window_len)
    y=np.convolve(w/w.sum(),s,mode='valid')
    return pd.Series(y)
# The simulator's driving_log.csv has no header row, so name the columns here.
# NOTE(review): 'Break' is presumably a misspelling of 'Brake', but it is the
# column name used throughout -- renaming would break downstream references.
columns = ['Center Image','Left Image','Right Image','Steering Angle','Throttle', 'Break', 'Speed']
df = pd.read_csv('driving_log.csv', names=columns)
df.columns
df.describe()
# +
from datetime import datetime
def to_time(s):
    """Parse the capture timestamp embedded in a center-camera image path."""
    stamp = s.replace('/Users/lucasosouza/Documents/CarND/P3/IMG/center_', '').replace('.jpg', '')
    return datetime.strptime(stamp, '%Y_%m_%d_%H_%M_%S_%f')
def seconds_in_timedelta(td):
    # Return the seconds component of the timedelta (0-86399); note this is
    # NOT total_seconds() -- full days are not included.
    return td.seconds
# Sanity check: a zero timedelta yields 0 seconds.
seconds_in_timedelta(to_time(df.iloc[0, 0]) - to_time(df.iloc[0, 0]))
# -
# Derive a timestamp per frame from the image filename, then the gap (in whole
# seconds) to the previous frame.
# NOTE(review): fillna(value=0) fills the first row's NaT with integer 0,
# which seconds_in_timedelta handles only if pandas coerces it -- verify.
df['time'] = df['Center Image'].apply(to_time)
df['time_diff'] = (df['time'] - df['time'].shift(1)).fillna(value=0).apply(seconds_in_timedelta)
plt.ylim((0,1))
df['time_diff'].plot()
## need to divide into groups
# A new recording group starts wherever there is a time gap between
# consecutive frames (time_diff > 0).
# FIX/IDIOM: vectorized with cumsum instead of the original iterrows() loop,
# which iterated in Python and used DataFrame.ix (removed in pandas 1.0).
# Equivalent: the counter started at 1 and incremented on each positive gap.
df['group'] = (df['time_diff'] > 0).cumsum() + 1
df.head()
# Frames per recording group, sorted by frequency.
vc = df['group'].value_counts()
len(vc), vc.mean()
vc[5:7]
# 131 recordings, with an average of 134 frames each (that is about 13 seconds average)
binwidth=100
plt.ylim(0,5)
plt.hist(vc, bins=range(min(vc), max(vc) + binwidth, binwidth));
# df[df['group']==vc.iloc[0]]['Steering Angle']
plt.figure(figsize=(12,6))
# FIX: DataFrame.ix was deprecated and removed in pandas 1.0; .loc is the
# drop-in replacement for boolean-mask indexing.
df.loc[df['group']==vc.index[0]]['Steering Angle'].plot()
# sns.lmplot(data=df.loc[df['group']==vc.index[0], :], y='Steering Angle', x='time_diff')
plt.figure(figsize=(12,6))
df.loc[df['group']==vc.index[1]]['Steering Angle'].plot()
angles = df.loc[df['group']==vc.index[1]]['Steering Angle']
plt.figure(figsize=(12,6))
# NOTE(review): vc[1] is ambiguous (label vs position) on a value_counts
# Series; confirm the intended count is vc.iloc[1] or vc[vc.index[1]].
pd.Series(smooth(angles, window_len=100))[:vc[1]].plot()
# ### Converting the data and exporting
# +
## steps
## convert data for each group
## save it in a separate column
## save it in pickle file
# -
smooth_window_len=30
# Smooth each recording group independently; groups too short for the window
# keep their raw steering angles.
# FIX: DataFrame.ix was removed in pandas 1.0 -- replaced with .loc throughout
# (identical semantics for boolean-mask indexing and assignment).
for group, count in zip(vc.index, vc):
    if count>smooth_window_len/2:
        smoothed = smooth(df.loc[df['group']==group, 'Steering Angle'], window_len=smooth_window_len)[:count]
        df.loc[df['group']==group, 'Smoothed Angle'] = smoothed
    else:
        df.loc[df['group']==group, 'Smoothed Angle'] = df.loc[df['group']==group, 'Steering Angle']
    #print(df.loc[df['group']==group, 'Smoothed Angle'].shape, smoothed.shape) # = smooth
df[['Smoothed Angle', 'Steering Angle']].describe()
plt.figure(figsize=(12,6))
smooth(df.loc[(df['group']==vc.index[2]) & (df['Steering Angle']<0), 'Steering Angle'], window_len=10).plot()
plt.figure(figsize=(12,6))
smooth(df.loc[(df['group']==vc.index[3]) & (df['Steering Angle']>0), 'Steering Angle'], window_len=10).plot()
sns.violinplot(df.loc[(df['Steering Angle'] < .9) & (df['Steering Angle'] > -.9), 'Steering Angle'])
# cut sharp angles == all angles above .9
sns.violinplot(df.loc[df['Steering Angle'] != 0, 'Steering Angle'])
| P3-Behavorial Cloning/dataAnalysisv2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Calculate trajectory properties
# #### List of tasks accomplished in this Jupyter Notebook:
# - Add the size of each larvae into the trajectory CSV file
# - Convert pixel locations to mm
# - Zero out any off-screen values to the limits of the behavior arena
# - Calculate instantaneous speed in mm per second
# - Calculate instantaneous angle (from horizontal, counterclockwise, in degrees)
# - Calculate instantaneous change in heading (angle)
# - Calculate the predicted concentration of chemical in the arena experienced by the larva at each time point
# - Calculate the change in concentration relative to the previous timestep
import numpy as np
import pandas as pd
import scipy.interpolate
import os
# +
def get_larva_size(animal, master_df):
    """Look up the body length (mm) recorded for `animal` in the master table."""
    match = master_df[master_df["animal_ID"] == animal]
    # Exactly one row should carry this animal ID
    assert len(match) == 1
    lengths = match["larvae_length_mm"].values
    # ...and that row should hold a single length measurement
    assert lengths.size == 1
    return lengths[0]
def hypotenuse(x1, y1, x0, y0):
    """Straight-line distance between the points (x0, y0) and (x1, y1)."""
    return np.hypot(x1 - x0, y1 - y0)
def get_angle(x1, y1, x0, y0):
    """Heading of the vector (x0, y0) -> (x1, y1): degrees counterclockwise from horizontal."""
    return np.degrees(np.arctan2(y1 - y0, x1 - x0))
def get_angle_delta(ang1, ang0):
    """Signed heading change from ang0 to ang1, wrapped to the smallest-magnitude turn.

    Returns NaN if either heading is missing (e.g. the first frame of a
    trajectory has no defined heading).
    """
    # FIX: test for NaN numerically instead of the fragile str(...) == 'nan' hack.
    if np.isnan(ang1) or np.isnan(ang0):
        return np.nan
    diff = ang1 - ang0
    # Wrap so that e.g. -170 -> 170 is reported as a -20 degree turn.
    if diff >= 180:
        diff -= 360
    elif diff <= -180:
        diff += 360
    return diff
def get_angle_delta_vector(angles1, angles0):
    """Element-wise heading change for two aligned sequences of angles."""
    return [get_angle_delta(a1, a0) for a1, a0 in zip(angles1, angles0)]
def get_bin(x, y):
    """Return the 1-mm spatial bin label ("bin_<n>") for an (x, y) position.

    The 80 x 30 mm arena is tiled row-major (index = row*80 + column), with
    positions past the far edges clamped into the last row/column.
    """
    col = min(79, np.floor(x))
    row = min(29, np.floor(y))
    return "bin_" + str(int(row * 80 + col))
def get_bin_vector(xs, ys):
    """Vectorized get_bin over paired coordinate sequences."""
    return [get_bin(x, y) for x, y in zip(xs, ys)]
def get_concentration(bin_text, frame, ref_df):
    """Linearly interpolate the concentration for one bin at a given frame."""
    frames = ref_df["frames"].values
    conc = ref_df[bin_text].values
    # Clamp to the last recorded frame so we never query past the data.
    frame = min(max(frames), frame)
    interpolator = scipy.interpolate.interp1d(frames, conc, kind='linear')
    return interpolator(frame)
def get_concentration_vector(bin_texts, frames, ref_df):
    """Vectorized get_concentration over paired bin/frame sequences."""
    return [get_concentration(b, f, ref_df) for b, f in zip(bin_texts, frames)]
# +
master_df = pd.read_csv("./data/experiment_IDs/cleaned_static_data.csv")
master_df = master_df[master_df['dead'] != 'yes']
animals = master_df["animal_ID"].values
# Dataframe to use in finding the concentration values
ref_df = pd.read_csv("./data/fluorescein/bin_concentration_by_time_no_larvae.csv")
fps = 2 # frames per second in video
sh = 1 # direction in which to shift vector for delta calculations
walldist = 10 # maximum mm away from wall to count as "next to wall"
for animal in animals:
    aID = animal[:9]
    pos = animal[10:]
    read = "./data/trajectories/video_csvs/"
    save = "./data/trajectories/video_calculations/"
    for val in ["A", "E"]:
        readname = read + aID + "-" + val + "-" + pos + ".csv"
        savename = save + aID + "-" + val + "-" + pos + ".csv"
        if not os.path.isfile(savename):
            try:
                df = pd.read_csv(readname)
                # Add the size of each larvae into the trajectory CSV file
                size = get_larva_size(animal, master_df)
                df['larvae_length_mm'] = size
                # Convert pixel locations to mm using known arena dimensions
                # (80 x 30 mm arena)
                df['pos_x_mm'] = 80*df['position_x']/df['pixel_width']
                df['pos_y_mm'] = 30*df['position_y']/df['pixel_height']
                # Zero out any off-screen values to the limits of the behavior arena
                df['pos_x_mm'] = df['pos_x_mm'].clip(lower=0, upper=80)
                df['pos_y_mm'] = df['pos_y_mm'].clip(lower=0, upper=30)
                # Calculate instantaneous speed in mm per second
                # Multiply distance by fps to get speed per second
                df["speed_mm_s"] = hypotenuse(df['pos_x_mm'], df['pos_y_mm'],
                                              df['pos_x_mm'].shift(sh), df['pos_y_mm'].shift(sh))*fps
                # Calculate speed in body lengths per second
                df["speed_BL"] = df['speed_mm_s']/size
                # Calculate instantaneous angle (from horizontal, counterclockwise, in degrees)
                df["angle_counterclock"] = get_angle(df['pos_x_mm'], df['pos_y_mm'],
                                                     df['pos_x_mm'].shift(sh), df['pos_y_mm'].shift(sh))
                # Calculate instantaneous change in heading (angle)
                df["angle_delta"] = get_angle_delta_vector(df["angle_counterclock"],
                                                           df["angle_counterclock"].shift(1))
                # Get the unique bin ID per time frame
                df["bin_ID"] = get_bin_vector(df["pos_x_mm"], df["pos_y_mm"])
                # Calculate the expected concentration
                df["concentration"] = get_concentration_vector(df["bin_ID"], df["frames"], ref_df)
                df["concentration_delta"] = df["concentration"] - df["concentration"].shift(1)
                # Calculate if larvae are moving
                df["moving"] = df["speed_mm_s"] >= 1
                # Calculate if larvae is executing a turn
                df["turn"] = abs(df["angle_delta"]) >= 30
                df.to_csv(savename, index=None)
            except Exception as err:
                # FIX: bare 'except:' also swallowed KeyboardInterrupt/SystemExit
                # and hid the failure reason; report the file and the error.
                print(readname)
                print(err)
print("--- All files finished ---")
# -
| 2_calculate_trajectory_properties.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <a href="https://githubtocolab.com/giswqs/geemap/blob/master/examples/notebooks/21_export_map_to_html_png.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab"/></a>
#
# Uncomment the following line to install [geemap](https://geemap.org) if needed.
# +
# # !pip install geemap
# -
# # Google Earth Engine Python Tutorials
#
# * GitHub: https://github.com/giswqs/geemap
# * Notebook examples: https://github.com/giswqs/geemap/blob/master/examples/README.md#tutorials
# * Video tutorials: https://www.youtube.com/playlist?list=PLAxJ4-o7ZoPccOFv1dCwvGI6TYnirRTg3
#
#
# **Tutorial 21 - How to export Earth Engine maps as HTML and images**
# ## Import libraries
import ee
import geemap
# ## Video tutorial on YouTube
# Embeds the companion tutorial video in the notebook output.
geemap.show_youtube('h0pz3S6Tvx0')
# ## Update the geemap package
#
# If you run into errors with this notebook, please uncomment the line below to update the [geemap](https://github.com/giswqs/geemap#installation) package to the latest version from GitHub.
# Restart the Kernel (Menu -> Kernel -> Restart) to take effect.
# +
# geemap.update_package()
# -
# ## Create an interactive map
# Display the map widget with toolbar and layer controls enabled.
Map = geemap.Map(toolbar_ctrl=True, layer_ctrl=True)
Map
# +
# Add Earth Engine dataset
dem = ee.Image('USGS/SRTMGL1_003')
landcover = ee.Image("ESA/GLOBCOVER_L4_200901_200912_V2_3").select('landcover')
landsat7 = ee.Image('LE7_TOA_5YEAR/1999_2003')
states = ee.FeatureCollection("TIGER/2018/States")
# Set visualization parameters.
vis_params = {
    'min': 0,
    'max': 4000,
    'palette': ['006633', 'E5FFCC', '662A00', 'D8D8D8', 'F5F5F5'],
}
# Add Earth Engine layers to Map
# (name, shown, opacity are the trailing addLayer arguments)
Map.addLayer(dem, vis_params, 'SRTM DEM', True, 0.5)
Map.addLayer(landcover, {}, 'Land cover')
Map.addLayer(
    landsat7,
    {'bands': ['B4', 'B3', 'B2'], 'min': 20, 'max': 200, 'gamma': 1.5},
    'Landsat 7',
)
Map.addLayer(states, {}, "US States")
# -
# ## Exporting maps as HTML
#
# You can either click the camera icon on toolbar to export maps or use the following script.
import os
download_dir = os.path.join(os.path.expanduser('~'), 'Downloads')
# IDIOM: exist_ok avoids the check-then-create race of the original
# os.path.exists() + os.makedirs() pair.
os.makedirs(download_dir, exist_ok=True)
html_file = os.path.join(download_dir, 'my_map.html')
Map.to_html(outfile=html_file, title='My Map', width='100%', height='880px')
# ## Exporting maps as PNG/JPG
#
# Make sure you click the fullscreen button on the map to maximum the map.
# Capture the current (fullscreen) map view to PNG and JPG screenshots.
png_file = os.path.join(download_dir, 'my_map.png')
Map.to_image(outfile=png_file, monitor=1)
jpg_file = os.path.join(download_dir, 'my_map.jpg')
Map.to_image(outfile=jpg_file, monitor=1)
| examples/notebooks/21_export_map_to_html_png.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: qiskitdevl
# language: python
# name: qiskitdevl
# ---
# # Quantum Teleportation
# This notebook demonstrates quantum teleportation. We first use Qiskit's built-in simulator to test our quantum circuit, and then try it out on a real quantum computer.
# ## The concept
# Alice wants to send quantum information to Bob. Specifically, suppose she wants to send the state
# $\vert\psi\rangle = \alpha\vert0\rangle + \beta\vert1\rangle$
# to Bob. This entails passing on information about $\alpha$ and $\beta$ to Bob.
#
# There exists a theorem in quantum mechanics which states that you cannot simply make an exact copy of an unknown quantum state. This is known as the no-cloning theorem. As a result of this we can see that Alice can't simply generate a copy of $\vert\psi\rangle$ and give the copy to Bob. Copying a state is only possible with a classical computation.
#
# However, by taking advantage of two classical bits and entanglement, Alice can transfer the state $\vert\psi\rangle$ to Bob. We call this teleportation as at the end Bob will have $\vert\psi\rangle$ and Alice won't anymore. Let's see how this works in some detail.
# ## How does quantum teleportation work?
# **Step 1**: Alice and Bob create an entangled pair of qubits and each one of them holds on to one of the two qubits in the pair.
#
# The pair they create is a special pair called a Bell pair. In quantum circuit language, the way to create a Bell pair between two qubits is to first transfer one of them to the Bell basis ($|+\rangle$ and $|-\rangle$) by using a Hadamard gate, and then to apply a CNOT gate onto the other qubit controlled by the one in the Bell basis.
#
# Let's say Alice owns $q_1$ and Bob owns $q_2$ after they part ways.
#
# **Step 2**: Alice applies a CNOT gate on $q_1$, controlled by $\vert\psi\rangle$ (the qubit she is trying to send Bob).
#
# **Step 3**: Next, Alice applies a Hadamard gate to $|\psi\rangle$, and applies a measurement to both qubits that she owns - $q_1$ and $\vert\psi\rangle$.
#
# **Step 4**: Then, it's time for a phone call to Bob. She tells Bob the outcome of her two qubit measurement. Depending on what she says, Bob applies some gates to his qubit, $q_2$. The gates to be applied, based on what Alice says, are as follows :
#
# 00 $\rightarrow$ Do nothing
#
# 01 $\rightarrow$ Apply $X$ gate
#
# 10 $\rightarrow$ Apply $Z$ gate
#
# 11 $\rightarrow$ Apply $ZX$ gate
#
# *Note that this transfer of information is classical.*
#
# And voila! At the end of this protocol, Alice's qubit has now teleported to Bob.
# ## How will we test this result on a real quantum computer?
# In this notebook, we will give Alice a secret state $\vert\psi\rangle$. This state will be generated by applying a series of unitary gates on a qubit that is initialized to the ground state, $\vert0\rangle$. Go ahead and fill in the secret unitary that will be applied to $\vert0\rangle$ before passing on the qubit to Alice.
secret_unitary = 'hz'
# If the quantum teleportation circuit works, then at the output of the protocol discussed above will be the same state passed on to Alice. Then, we can undo the applied secret_unitary (by applying its conjugate transpose), to yield the $\vert0\rangle$ that we started with.
#
# We will then do repeated measurements of Bob's qubit to see how many times it gives 0 and how many times it gives 1.
# ### What do we expect?
# In the ideal case, and assuming our teleportation protocol works, we will always measure 0 from Bob's qubit because we started off with $|0\rangle$.
#
# In a real quantum computer, errors in the gates will cause a small fraction of the results to be 1. We'll see how it looks.
# ## 1. Simulating the teleportation protocol
# make the imports that are necessary for our work
import qiskit as qk
from qiskit import ClassicalRegister, QuantumRegister, QuantumCircuit
from qiskit import execute, Aer
from qiskit import IBMQ
from qiskit.tools.visualization import plot_histogram
# simple function that applies a series of unitary gates from a given string
def apply_secret_unitary(secret_unitary, qubit, quantum_circuit, dagger):
    """Apply the gates named in *secret_unitary* to *qubit* on *quantum_circuit*.

    With *dagger* falsy the string is applied right-to-left (building U);
    with *dagger* truthy it is applied left-to-right with 't' replaced by
    its adjoint 'tdg' — x/y/z/h are self-inverse, so this yields U-dagger.
    """
    gate_for = {
        'x': quantum_circuit.x,
        'y': quantum_circuit.y,
        'z': quantum_circuit.z,
        'h': quantum_circuit.h,
        't': quantum_circuit.tdg if dagger else quantum_circuit.t,
    }
    sequence = secret_unitary if dagger else secret_unitary[::-1]
    for gate_name in sequence:
        gate_for[gate_name](qubit)
# +
# Create the quantum circuit: 3 qubits, 3 classical bits (one per qubit measurement).
q = QuantumRegister(3)
c = ClassicalRegister(3)
qc = QuantumCircuit(q, c)
''' Qubit ordering as follows (classical registers will just contain measured values of the corresponding qubits):
q[0]: qubit to be teleported (Alice's first qubit. It was given to her after the application of a secret unitary
which she doesn't know)
q[1]: Alice's second qubit
q[2]: Bob's qubit, which will be the destination for the teleportation
'''
# Apply the secret unitary that we are using to generate the state to teleport. You can change it to any unitary
apply_secret_unitary(secret_unitary, q[0], qc, dagger = 0)
qc.barrier()
# Next, generate the entangled pair between Alice and Bob (Remember: Hadamard followed by CX generates a Bell pair)
qc.h(q[1])
qc.cx(q[1], q[2])
qc.barrier()
# Next, apply the teleportation protocol.
qc.cx(q[0], q[1])
qc.h(q[0])
qc.measure(q[0], c[0])
qc.measure(q[1], c[1])
# Bob's classically-conditioned corrections are applied here as plain
# (deferred) cx/cz gates rather than c_if operations -- equivalent in the
# counts since q0/q1 were already measured.
qc.cx(q[1], q[2])
qc.cz(q[0], q[2])
qc.barrier()
'''
In principle, if the teleportation protocol worked, we have q[2] = secret_unitary|0>
As a result, we should be able to recover q[2] = |0> by applying the reverse of secret_unitary
since for a unitary u, u^dagger u = I.
'''
apply_secret_unitary(secret_unitary, q[2], qc, dagger=1)
qc.measure(q[2], c[2])
# -
# It's always a good idea to draw the circuit that we have generated in code. Let's draw it below.
qc.draw(output='mpl')
# +
# Noiseless simulation: every count should have c2 == 0 if teleportation worked.
backend = Aer.get_backend('qasm_simulator')
job_sim = execute(qc, backend, shots=1024)
sim_result = job_sim.result()
measurement_result = sim_result.get_counts(qc)
print(measurement_result)
plot_histogram(measurement_result)
# -
# **Note that the results on the x-axis in the histogram above are ordered as $c_2c_1c_0$. We can see that only results where $c_2 = 0$ appear, indicating that the teleporation protocol has worked.**
# ## 2. Teleportation on a real quantum computer
# You will now see how the teleportation algorithm works on a real quantum computer. Recall that we need one qubit for $\vert\psi\rangle$, one qubit for Alice, and one qubit for Bob, for a total of three qubits.
# First, see what devices we are allowed to use by loading our saved accounts
# Load the saved IBMQ credentials and list the available backends.
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
# +
# get the least-busy backend at IBM and run the quantum circuit there
from qiskit.providers.ibmq import least_busy
backend = least_busy(provider.backends(simulator=False))
job_exp = execute(qc, backend=backend, shots=8192)
exp_result = job_exp.result()
exp_measurement_result = exp_result.get_counts(qc)
print(exp_measurement_result)
plot_histogram(exp_measurement_result)
# -
# Counts keys are bit strings ordered c2 c1 c0, so result[0] is Bob's qubit
# (c2); any '1' there is an error relative to the ideal |0> outcome.
error_rate_percent = sum([exp_measurement_result[result] for result in exp_measurement_result.keys() if result[0]=='1']) \
                    * 100./ sum(list(exp_measurement_result.values()))
print("The experimental error rate : ", error_rate_percent, "%")
| content/ch-algorithms/teleportation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="bDRpQj9bbGWH" colab={"base_uri": "https://localhost:8080/"} outputId="f8d32d6c-cc14-441f-f5b7-c3bdd8c75e9c"
# Mount Google Drive so the .mat dataset can be read from it.
from google.colab import drive
drive.mount('/content/drive')
# + id="2PcJMKv0l4Z8"
# ! pip install pyts
# + id="bpBOHVmfUoJF"
import scipy.io
import matplotlib.pyplot as plt
# DATA.mat holds EEG recordings: H = healthy subjects, S = schizophrenia
# subjects, CHAN = channel labels, Fs = sampling rate (shapes printed below).
data = scipy.io.loadmat('/content/drive/My Drive/DATA.mat')
# + id="q5KNAwWVVkMm" outputId="3dfbcb94-b58a-4c3c-dcf6-12bd4632fb44" colab={"base_uri": "https://localhost:8080/"}
data.keys()
# + id="HmUC-tPuVwf_" outputId="8ffa85e6-15ce-4968-eaa0-d399c0acac80" colab={"base_uri": "https://localhost:8080/"}
H = data['H']
print(H.shape)
S = data['S']
print(S.shape)
CHAN = data['CHAN']
print(CHAN.shape)
Fs = data['Fs']
print(Fs.shape)
# + id="nHTiE4YRgL7f" outputId="03651d22-b84c-4c3b-9de7-599e5566a711" colab={"base_uri": "https://localhost:8080/"}
# Flatten the MATLAB cell-array wrapper to a 1-D object array of 14 subjects;
# each element is a (channels x samples) recording.
H = H.reshape(14)
H.shape
print(H.shape)
print(H[0].shape)
print(H[0][0].shape)
# + id="jUcyDOLiiQS3" outputId="0e1c9f8d-0992-41fd-ed6c-2df18acd0e9f" colab={"base_uri": "https://localhost:8080/"}
S = S.reshape(14)
print(S.shape)
print(S[0].shape)
print(S[0][0].shape)
# + id="n7vSFCw4r9Aj"
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import ImageGrid
from pyts.image import GramianAngularField
from pyts.image import MarkovTransitionField


def _gaf_images(subjects, method):
    """Return Gramian Angular Field images for every channel of every subject.

    Parameters
    ----------
    subjects : 1-D object array of 14 recordings, each (channels x samples).
    method : 'summation' (GASF) or 'difference' (GADF), passed to pyts.

    Each channel signal is truncated to samples 1:15000 and reshaped to the
    (n_samples, n_timestamps) layout pyts expects; one 112x112 image is
    produced per channel per subject.
    """
    transformer = GramianAngularField(image_size=112, method=method)
    images = []
    for subject in range(14):      # 14 subjects per group
        for channel in range(19):  # 19 EEG channels each
            signal = subjects[subject][channel, 1:15000].reshape(1, -1)
            images.append(transformer.fit_transform(signal))
    return images


# REFACTOR: the four near-identical loops (healthy/schizo x summation/
# difference) are collapsed into one helper so the truncation window and
# image size stay consistent. Output ordering is preserved: GASF images
# followed by GADF images for each group.
normal = _gaf_images(H, 'summation')
# + id="6injaSm3C6-R"
normal += _gaf_images(H, 'difference')
# + id="zt4YSns2lTg1"
schizo = _gaf_images(S, 'summation')
# + id="VyTgoPTBDNX-"
schizo += _gaf_images(S, 'difference')
# + id="f_i42nW3DWIk" outputId="d0d4a326-2ff3-4702-e857-494d33f7c851" colab={"base_uri": "https://localhost:8080/"}
#create labels
# 532 = 14 subjects x 19 channels x 2 GAF methods per group; 0 = healthy, 1 = schizophrenia.
import numpy as np
n = np.zeros((532,))
s = np.ones((532,))
labels = np.concatenate((n,s), axis = 0)
print(len(labels))
print(np.unique(labels))
# + id="5RDZxXrhl4N5" outputId="ce6d59a2-a08f-428d-bd32-70421e0e9e0f" colab={"base_uri": "https://localhost:8080/"}
# Concatenate the images in the same order as the labels: all normal, then all schizo.
data = []
for img in normal:
    data.append(img)
for img in schizo:
    data.append(img)
len(data)
# + id="yc4GB4-2fHop"
# Reshape to the (N, H, W, channels) layout expected by Conv2D.
data = np.array(data)
data = data.reshape(-1, 112, 112, 1)
labels = np.array(labels)
# + id="LKR7BB_E5Zm8" outputId="60f43432-11f1-475c-ef82-c950e19471a0" colab={"base_uri": "https://localhost:8080/"}
#split data into train and test sets
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(data, labels, test_size=0.2,
                                                    random_state=42,
                                                    shuffle=True,
                                                    stratify=labels)
print(X_train.shape)
print(X_test.shape)
# + id="tjDN5X7aewwR"
import tensorflow as tf
import cv2


def gabor_filter(x):
    """Convolve a batch of single-channel images with a fixed 3x3 Gabor kernel.

    The kernel is built by OpenCV and reshaped to the (H, W, in, out) layout
    that tf.nn.conv2d expects; 'VALID' padding shrinks each spatial dimension.
    """
    images = tf.cast(x, dtype=tf.float32)
    raw_kernel = cv2.getGaborKernel(
        ksize=(3, 3), sigma=1.0, theta=0, lambd=5.0, gamma=0.02)
    kernel = tf.cast(raw_kernel, dtype=tf.float32)
    kernel = kernel[:, :, tf.newaxis, tf.newaxis]
    return tf.nn.conv2d(images, kernel, strides=[1, 1, 1, 1], padding='VALID')


# Wrap as a Keras layer so the fixed filter can sit first in a Sequential model.
gabor_layer = tf.keras.layers.Lambda(gabor_filter)
# + colab={"base_uri": "https://localhost:8080/"} id="9IdC2eloeyqZ" outputId="ade816b5-6d04-4901-a963-5145135c94ce"
#define model
# CNN binary classifier over 112x112 single-channel GAF images, with the
# fixed Gabor filter as a non-trainable first layer.
import tensorflow as tf
from numpy import mean
from numpy import std
from scipy.io import loadmat
import numpy as np
from pandas import read_csv
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import LSTM
from tensorflow.keras.layers import TimeDistributed
from tensorflow.keras.layers import Conv1D, Conv2D
from tensorflow.keras.layers import MaxPooling1D, MaxPooling2D
from tensorflow.keras.utils import to_categorical
from matplotlib import pyplot
from sklearn.model_selection import StratifiedKFold, train_test_split
from tensorflow.keras.callbacks import EarlyStopping
from sklearn.metrics import confusion_matrix
from tensorflow.keras.utils import plot_model
from tensorflow.keras.layers import Input, Dense, LSTM, MaxPooling1D, Conv1D
from tensorflow.keras.models import Model
from tensorflow.keras import initializers
import cv2
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Input(shape=(112, 112, 1)))
model.add(gabor_layer)
# Convolutional stack: widths 8 -> 16 -> 32 -> 64 with interleaved pooling
# and dropout for regularization.
model.add(Conv2D(8, kernel_size=(3, 3), strides=(1, 1),
                 activation='relu'))
model.add(Conv2D(8, kernel_size=(2, 2), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(16, (2, 2), activation='relu'))
model.add(Conv2D(16, (2, 2), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (2, 2), activation='relu'))
model.add(Conv2D(32, (2, 2), activation='relu'))
# model.add(fft_layer)
model.add(Dropout(0.25))
model.add(Conv2D(64, (2, 2), activation='relu'))
# model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dropout(0.5))
model.add(Dense(32, activation='relu'))
# Single sigmoid unit: binary (healthy vs schizophrenia) output.
model.add(Dense(1, activation='sigmoid'))
model.summary()
# from keras.utils import plot_model
# plot_model(model)
# + id="anrF_5h0Ez9p" outputId="b1f5605d-0014-4528-e92b-06800a147149" colab={"base_uri": "https://localhost:8080/"}
from tensorflow import keras
opt = keras.optimizers.Adam(learning_rate=0.001)
#compiling the CNN
model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])
# NOTE(review): 200 epochs with no early stopping; validation uses the test
# split directly, so the reported val metrics are not a held-out estimate.
history = model.fit(X_train, Y_train, epochs=200, batch_size=4, verbose=1, validation_data=(X_test, Y_test))
# + id="6cuoyXSJL6VC" outputId="902d9607-9ef3-4d76-83bf-f51ab27ef223" colab={"base_uri": "https://localhost:8080/"}
from sklearn.metrics import classification_report
import numpy as np
# Round the sigmoid outputs to 0/1 class predictions before scoring.
y_pred = np.around(model.predict(X_test))
rep = classification_report(Y_test, y_pred)
print(rep)
| colab/train_wavelet.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: bmcs_env
# language: python
# name: bmcs_env
# ---
# # 5.1 Bond behavior governed by damage
#
# * Define a bond-slip law governed by damage and loading history using unloading.
# * What is different in comparison to elastic-plastic models?
# * For what materials is unloading to zero realistic?
# * Example of a CFRP behavior
# %matplotlib widget
import matplotlib.pyplot as plt
import ibvpy.api as ib
# ## Construct the bond slip model
# First show a multilinear bond-slip law interactively, then rebuild the
# model with default data for the damage study.
bs1 = ib.MATSBondSlipMultiLinear(s_data="0,0.1,0.2,0.8", tau_data="0,8,4,1")
bs1.interact()
bs1 = ib.MATSBondSlipMultiLinear()
bs1.sim.tline.step=0.0005
bs1
# Use the Jirasek damage function and inspect/plot it.
bs1.mats_eval.omega_fn_type = 'jirasek'
bs1.mats_eval
bs1.mats_eval.omega_fn.trait_set(s_f = 0.001, plot_max=0.05)
bs1.mats_eval.omega_fn
bs1.mats_eval.omega_fn.plot(plt.axes())
# Cyclic, symmetric loading with increasing amplitude over 10 cycles.
bs1.w_max = 0.0015
bs1.loading_scenario.trait_set(loading_type='cyclic',
                               amplitude_type='increasing',
                               loading_range='symmetric',
                               )
bs1.loading_scenario.trait_set(number_of_cycles=10,
                               maximum_loading=1,
                               unloading_ratio =0.0)
bs1.loading_scenario.plot(plt.axes())
plt.grid(True)
bs1.sim.run()
ax = plt.axes()
ax.grid(True)
bs1.hist.plot_Pw(ax,1)
# ### Evolution of the damage
# FIX: `BondSlipModel` was referenced unqualified and is never imported in
# this notebook, so this cell raised NameError. It presumably lives in
# ibvpy.api like the other model classes used above -- TODO confirm the
# exact class name in the installed ibvpy version.
bs2 = ib.BondSlipModel(mats_eval_type='damage')
bs2.sim.tline.step = 0.0005
bs2
# Switch to the Abaqus damage function and plot its shape.
bs2.mats_eval.omega_fn_type = 'abaqus'
bs2.mats_eval
bs2.mats_eval.omega_fn
bs2.mats_eval.omega_fn.plot(plt.axes())
# Cyclic symmetric loading, 8 cycles of increasing amplitude.
bs2.loading_scenario.trait_set(loading_type='cyclic',
                               amplitude_type='increasing',
                               loading_range='symmetric',
                               )
bs2.loading_scenario.trait_set(number_of_cycles=8,
                               maximum_loading=0.0025,
                               unloading_ratio =0.0)
bs2.sim.run()
bs2.loading_scenario.plot(plt.axes())
plt.grid(True)
ax = plt.axes()
ax.grid(True)
bs2.hist.plot_Pw(ax,1)
# ## Tasks and Questions
# ### How to identify a damage function given an identified bond-slip curve experiment?
# ### Test the Abaqus damage function and the meaning of its parameters
| tour5_damage_bond/5_2_Damage.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %load_ext autoreload
# %autoreload 2
# %env CUDA_DEVICE_ORDER=PCI_BUS_ID
# %env CUDA_VISIBLE_DEVICES=2
import numpy as np
import matplotlib.pyplot as plt
from tqdm.autonotebook import tqdm
import pandas as pd
from cuml.manifold.umap import UMAP as cumlUMAP
from avgn.utils.paths import DATA_DIR, most_recent_subdirectory, ensure_dir
from avgn.signalprocessing.create_spectrogram_dataset import flatten_spectrograms
# ### load data
DATASET_ID = 'batsong_segmented'
df_loc = DATA_DIR / 'syllable_dfs' / DATASET_ID / 'fruitbat.pickle'
syllable_df = pd.read_pickle(df_loc)
syllable_df[:3]
np.shape(syllable_df.spectrogram.values[0])
# ### project
# Normalize each spectrogram to [0, 1] by its own max, flatten to vectors,
# then embed with GPU UMAP.
specs = list(syllable_df.spectrogram.values)
specs = [i/np.max(i) for i in tqdm(specs)]
specs_flattened = flatten_spectrograms(specs)
np.shape(specs_flattened)
min_dist = 0.5
cuml_umap = cumlUMAP(min_dist = min_dist)
embedding = cuml_umap.fit_transform(specs_flattened)
syllable_df['umap'] = list(embedding)
fig, ax = plt.subplots()
ax.scatter(embedding[:,0], embedding[:,1], s=1, color='k', alpha = 0.005)
ax.set_xlim([-8,8])
ax.set_ylim([-8,8])
# ### Save
ensure_dir(DATA_DIR / 'embeddings' / DATASET_ID / 'full')
# File name encodes the min_dist hyperparameter used for this projection.
syllable_df.to_pickle(DATA_DIR / 'embeddings' / DATASET_ID / (str(min_dist) + '_full.pickle'))
| notebooks/02.5-make-projection-dfs/higher-spread/.ipynb_checkpoints/batsong-umap-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Vizualization of Spectrum Assignments
import numpy as np
import matplotlib.pyplot as plt
# Columns: [0] transition energy Ep - E0, [1] Franck-Condon factor.
spectrum = np.loadtxt("Task2Code/V3.spec.out")
# +
plt.title('FCF versus Spectral Intensity : Spectrum')
plt.xlabel('Ep - E0')
plt.ylabel('FCF')
plt.plot(spectrum[:,0],spectrum[:,1], color='blue')
# -
sticks = np.loadtxt("Task2Code/V3.sticks.out")
# +
from matplotlib.collections import LineCollection

# Stick spectrum: one vertical segment per transition, from the baseline up
# to its Franck-Condon factor at the transition energy.
fig, ax = plt.subplots()
ax.set_xlim(left=-50, right=max(sticks[:, 0]) + 100)
ax.set_ylim(0, max(sticks[:, 1]) + 0.1)
segs = [[(energy, 0.0), (energy, fcf)]
        for energy, fcf in zip(sticks[:, 0], sticks[:, 1])]
plt.title('FCF versus Spectral Intensity: Sticks')
plt.xlabel('Ep - E0')
plt.ylabel('FCF')
line_segments = LineCollection(segs, linewidths=(3, 3, 3, 3))
ax.add_collection(line_segments)
plt.show()
| Project_3_Franck_Condon_Factors/Task2_viz.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn import preprocessing
# ICU occupancy dataset; 'Occupancy' is the binary target.
df= pd.read_csv(r'C:\Users\jhaar\Downloads\ICU_DATA__11.csv')
df.shape
df
df.describe()
df.info()
import seaborn as sns
# Visual check for missing values.
sns.heatmap(df.isnull())
df.corr()['Occupancy']
y=df['Occupancy']
x=df.drop(['Occupancy'],axis=1)
from sklearn.model_selection import train_test_split
x_train,x_test,y_train,y_test=train_test_split(x,y,random_state=0,test_size=0.2)
# Baseline model: logistic regression.
from sklearn.linear_model import LogisticRegression
classifier=LogisticRegression(random_state=0,solver='lbfgs')
classifier.fit(x_train,y_train)
from sklearn.metrics import accuracy_score
# BUG FIX: `score` was referenced without ever being assigned (NameError).
# Compute the logistic-regression test-set accuracy it was meant to display.
pred = classifier.predict(x_test)
score = accuracy_score(y_test, pred)
score
from sklearn.neighbors import KNeighborsClassifier
# Sweep k from 1 to 10 and record test accuracy for each.
list_1=[]
for i in range(1,11):
    knn=KNeighborsClassifier(n_neighbors=i)
    knn.fit(x_train,y_train)
    pred_1=knn.predict(x_test)
    scores=accuracy_score(y_test,pred_1)
    list_1.append(scores)
import matplotlib.pyplot as plt
plt.plot(range(1,11),list_1)
plt.xlabel('k values')
plt.ylabel('accuracy scores')
plt.show()
# +
#from the given figure k=3 gives best accuracy score
# -
print(max(list_1))
from sklearn.ensemble import RandomForestClassifier
# Random forest with default hyperparameters for comparison.
rfc=RandomForestClassifier()
rfc.fit(x_train,y_train)
pred_2=rfc.predict(x_test)
score_2=accuracy_score(y_test,pred_2)
score_2
# +
#from all the model random forest classifier gives the best accuracy score
# -
# Side-by-side view of actual vs predicted occupancy on the test set.
new_df=pd.DataFrame({'actual':y_test,
                     'predicted':pred_2})
new_df
| ICU BED PRECDICTION (1).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="view-in-github"
# <a href="https://colab.research.google.com/github/zerotodeeplearning/ztdl-masterclasses/blob/master/notebooks/Sentiment_Classification_with_Recurrent_Neural_Networks.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] colab_type="text" id="2bwH96hViwS7"
# #### Copyright 2020 Catalit LLC.
# + colab={} colab_type="code" id="bFidPKNdkVPg"
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] colab_type="text" id="DvoukA2tkGV4"
# # Sentiment Classification with Recurrent Neural Networks
# + cellView="both" colab={"base_uri": "https://localhost:8080/", "height": 136} colab_type="code" id="Pg-dOwjgius9" outputId="c3c0da53-3c77-4665-ea87-373dd504cdfb"
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import gzip
import os
from sklearn.model_selection import train_test_split
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
# -
# Data loading and prepping is the same as in the [Word Embeddings class](https://github.com/zerotodeeplearning/ztdl-masterclasses#word-embeddings).
url = "https://raw.githubusercontent.com/zerotodeeplearning/ztdl-masterclasses/master/data/"
# +
# Download the gzipped Rotten Tomatoes review files (cached by Keras).
pos_path = tf.keras.utils.get_file(
    'rotten_tomatoes_positive_reviews.txt',
    url + 'rotten_tomatoes_positive_reviews.txt.gz',
    extract=True)
neg_path = tf.keras.utils.get_file(
    'rotten_tomatoes_negative_reviews.txt',
    url + 'rotten_tomatoes_negative_reviews.txt.gz',
    extract=True)
with gzip.open(pos_path) as fin:
    pos_rev = fin.readlines()
pos_rev = [r.decode('utf-8') for r in pos_rev]
with gzip.open(neg_path) as fin:
    neg_rev = fin.readlines()
neg_rev = [r.decode('utf-8') for r in neg_rev]
# Labels: 1 = positive review, 0 = negative review.
docs = np.array(pos_rev + neg_rev)
y = np.array([1]*len(pos_rev) + [0]*len(neg_rev))
docs_train, docs_test, y_train, y_test = train_test_split(docs, y, test_size=0.15, random_state=0)
# -
max_features = 20000
# +
# Fit the tokenizer vocabulary on the training documents only.
tokenizer = Tokenizer(
    num_words=max_features,
    filters='!"#$%&()*+,-./:;<=>?@[\\]^_`\'{|}~\t\n',
    lower=True,
    split=" ",
    char_level=False,
    oov_token=None,
    document_count=0,
)
tokenizer.fit_on_texts(docs_train)
# -
seq_train = tokenizer.texts_to_sequences(docs_train)
seq_test =tokenizer.texts_to_sequences(docs_test)
# +
# Pad/truncate every review to a fixed length of 58 tokens.
maxlen=58
X_train = pad_sequences(seq_train, maxlen=maxlen)
X_test = pad_sequences(seq_test, maxlen=maxlen)
# -
# -
# ### Exercise 1
#
# Let's build a model that leverages recurrent layers to classify sentiment.
#
# - Define a new `Sequential` model that uses `LSTM` or `GRU` layers after the `Embedding` layer
# - Start with the simplest model possible and gradually increase the complexity
# - Train the model and compare the performance of the models developed in the [Word Embeddings class](https://github.com/zerotodeeplearning/ztdl-masterclasses#word-embeddings) with this one.
#
# Your code will look like:
#
# ```python
# model = Sequential([
# Embedding(# YOUR CODE HERE
# # YOUR CODE HERE
# ])
# ```
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, Dense, Dropout, LSTM, GRU
| notebooks/Sentiment_Classification_with_Recurrent_Neural_Networks.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
def mine():
    """Download two 17k.com book pages and save each as ccc<i>.html on the desktop.

    Network/file I/O only; a failed HTTP request propagates from requests.get.
    """
    import requests  # hoisted out of the loop (was re-imported every iteration)

    urls = [
        'http://www.17k.com/book/2397656.html',
        'http://www.17k.com/book/2689736.html',
    ]
    # Browser-like User-Agent so the site serves the normal page.
    header = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3423.2 Safari/537.36'}
    for i, url in enumerate(urls):  # iterate the list directly instead of range-indexing
        response = requests.get(url, headers=header)
        # FIX: mode 'w' instead of 'a' -- append mode duplicated the whole
        # page into the same file on every re-run.
        with open('C:\\Users\\dell\\Desktop\\ccc' + str(i) + '.html', 'w', encoding='utf-8') as f:
            f.write(response.text)
        print(i)


mine()
| text5.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import argparse, os
import cv2
import numpy as np
import imageio
import time
def savePic(picture, fileName, extention, outPath):
    """Write *picture* (an image array) to outPath/fileName.extention via OpenCV.

    Failures are reported to stdout but not raised, preserving the original
    best-effort behavior when converting a whole folder.
    """
    outPath = outPath + fileName + '.' + extention  # combines the path with the name and extention of the file
    print(outPath)
    try:
        # imageio.imwrite(outPath,picture,format=extention)  # old way
        cv2.imwrite(outPath, picture)  # saves Pictures
    # FIX: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; narrow to Exception.
    except Exception:
        print('Failed while saving picture: '+fileName+' to '+ outPath+' sorry :(') #writes an error
        print('--------------------')
def readPicture(picturepath):
    """Load the image at *picturepath* without any conversion.

    cv2.IMREAD_UNCHANGED is important so the result keeps its original
    channel layout, i.e. shape (x, y, channels).
    """
    image = cv2.imread(picturepath, cv2.IMREAD_UNCHANGED)
    # alternative: image = imageio.imread(picturepath)
    return image
def RGBtoYUV(img): #changeing the img picture from RGB- to YUV-Color space
    # NOTE(review): cv2.imread returns BGR-ordered data, but COLOR_RGB2YUV is
    # used here, so the chroma planes come out with swapped meaning; the
    # calling loop appears to compensate by reading u from channel 2 and v
    # from channel 1 -- confirm before changing either side in isolation.
    pictureYUV = cv2.cvtColor(img, cv2.COLOR_RGB2YUV)
    return pictureYUV
# Interactive configuration: input folder, input extension, output extension.
path = input('Path to pictures who should be converted defaut: ./hdrInput/: ') or './hdrInput/'
if not os.path.exists(path):
    os.mkdir(path)
inputextention = input('What fileextention do the to converting pictures have? [default: png]') or 'png'
# Count only the files whose name ends with the chosen extension.
allFilesInDir = sum(1 for f in os.listdir(path) if f.endswith('.'+inputextention)) #summ all ending with extention
print('There are: '+str(allFilesInDir)+' files with the extention '+inputextention+' in the folder')
outputextention = input('Please type in the output format default: png ') or 'png'
####This file creates Y.png U.png and V.png cromagan pictures
#TO DO normal Pic to YUV einzelbildern
# BUG FIX: the original while-loop started at index 1 (skipping the first
# file), indexed os.listdir(path) up to allFilesInDir inclusive (IndexError
# or a wrong file when the folder also holds non-matching entries), and
# never filtered the iterated names by extension. Iterate the matching
# files directly instead; output names still use a 1-based counter.
matchingFiles = sorted(f for f in os.listdir(path) if f.endswith('.' + inputextention))
for i, name in enumerate(matchingFiles, start=1):
    picpath = path + name  # combining filename and path
    print(picpath)  # prints the path and filename
    pic = readPicture(picpath)  # imports the picture and saves it in pic as matrix
    print('waring, Lossy RGB to YU-V conversion')
    yuvPic = RGBtoYUV(pic)  # converts the picture to YUV
    u = (yuvPic[:, :, 2])  # orders the color channels to the right output value
    v = (yuvPic[:, :, 1])
    y = (yuvPic[:, :, 0])
    namePic = (name.split('.')[0])  # cuts out the extention
    if (inputextention == 'png'):
        outputpath = './sdrOut/'
    if (inputextention != 'png'):
        outputpath = './hdrOut/'
    savePic(u, (str(i)+'-u_'+namePic), outputextention, outputpath)  # saves final U singel channel Picture
    savePic(v, (str(i)+'-v_'+namePic), outputextention, outputpath)  # saves final v singel channel Picture
    savePic(y, (str(i)+'-y_'+namePic), outputextention, outputpath)  # saves final Y singel channel Picture
    print(name.split('.')[0])
| convertPicturesToYUV.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Random Forest model for Healthcare Fraud Detection
# ### <NAME>, <NAME> and <NAME>
#
# This notedbook describes development of a random forest classification model to detect potentially fraudulent healthcare providers.
# Import pandas package and scikitlearn metrics reports. Read in file created by provider_inout_mods. Also read in the mean residual by provider created by notebook 'Provider_claim_data_regression' and is described elsewhere.
import pandas as pd
from sklearn.metrics import confusion_matrix, classification_report
# Provider-level feature table plus per-provider regression residuals
# produced by the 'Provider_claim_data_regression' notebook.
x_train_inout_mod = pd.read_csv('x_train_inout_mod.csv')
provider_reimb_residuals=pd.read_csv('provider_groups_residual.csv')
provider_reimb_residuals =provider_reimb_residuals.drop(columns=['Unnamed: 0'])
provider_reimb_residuals.columns
# Rename columns in provider_reimb_residuals and drop extra column brought in with file read-in. Merge the two files on provider to combine the data.
provider_reimb_residuals.columns=['Provider','MeanResidualReimbursement','logOPAnnualReimbursement',\
    'logIPAnnualReimbursement','$PerClaimDay','total_claim']
x_train_inout_mod = x_train_inout_mod.drop(columns = 'Unnamed: 0')
x_train_inout_mod = pd.merge(x_train_inout_mod,provider_reimb_residuals,on='Provider')
# We also bring in data from the market basket analysis, which found a higher fraction of diabetes and ischemic heart patients for fraudulent providers. We include that as a feature in this model.
diabetes_frac = pd.read_csv('diabetes_frac.csv')
diabetes_frac.columns
diabetes_frac = diabetes_frac.drop(columns = ['Unnamed: 0'])
x_train_inout_mod = pd.merge(x_train_inout_mod,diabetes_frac, on='Provider')
# Confirm all columns are numeric, except 'Provider;.
import numpy as np
# Sanity check: only 'Provider' should be non-numeric.
x_train_inout_mod.select_dtypes(exclude='number').columns
# Move 'PotentialFraud" label data to target array. Drop from features matrix in next cell.
x_train_inout_mod.columns
# We create the target or response array and confirm here that we have the same number of fraudulent providers across the training dataset.
y = x_train_inout_mod['PotentialFraud']
y.value_counts()
# We drop the 'PotentialFraud' column since it is the target column.
X = x_train_inout_mod.drop(columns = ['PotentialFraud'])
# Import test_train_split from sklearn and split matrices into training and test sets for validation.
from sklearn import model_selection as ms
X_train, X_test, y_train, y_test = ms.train_test_split(X, y,
                                                       test_size=0.20, random_state=42)
# Create provider_claim matrices to be able to merge later to perform cost calculations. Scale train and test columns from original X matrix. Model will be scaled from this.
# +
# Min-max scale train and test using the column ranges of the FULL feature
# matrix X. NOTE(review): this uses pre-split statistics, so a small amount
# of information from the test rows leaks into the scaling -- confirm this
# is intentional.
X = X.drop(columns=['Provider','total_claim'])
provider_claim_trn=X_train[['Provider','total_claim']]
X_train=X_train.drop(columns=['Provider','total_claim'])
X_train=(X_train-X.min(axis=0))/(X.max(axis=0)-X.min(axis=0))
print(X_train.shape)
provider_claim_test=X_test[['Provider','total_claim']]
X_test=X_test.drop(columns=['Provider','total_claim'])
X_test=(X_test-X.min(axis=0))/(X.max(axis=0)-X.min(axis=0))
print(X_test.shape)
# -
# Confirm there are no more NAs.
X_test=X_test.fillna(0)
c = np.sum(X_test.isnull())
c[c>0]
# Import ensamble model and create instance of random forest model. Run first instance. Previous trials with weighting that running with class_weight equal to balanced and then slightly underweighting with the sample_weight option in the fit gave better results.
# rfparams_dict = {}
from sklearn import ensemble
# Initial random forest: balanced class weights plus a mild (0.9) underweight
# on fraud samples, which earlier trials found to perform better.
randomForest = ensemble.RandomForestClassifier()
randomForest.set_params(class_weight = 'balanced',random_state=42, n_estimators=110, max_features=15, \
    min_samples_leaf = 12, min_samples_split=3,criterion='gini',oob_score=True)
sample_weight = np.array([1 if x==0 else 0.9 for x in y_train])
randomForest.fit(X_train, y_train,sample_weight=sample_weight) # fit
print(confusion_matrix(y_test, randomForest.predict(X_test)))
print(classification_report(y_test, randomForest.predict(X_test)))
randomForest
# Run a cross-validation grid search to optimize parameter settings.
# %%time
from sklearn.model_selection import GridSearchCV
# 5-fold grid search over tree count, split criterion, leaf/split sizes and
# feature subsampling, scored by weighted F1.
grid_para_forest = [{
    "n_estimators": range(80,151,25),
    "criterion": ["gini",'entropy'],
    "min_samples_leaf": range(12,31,5),
    "min_samples_split": range(2,9,2),
    "random_state": [42],
    'max_features':range(8,21,4)}]
grid_search_forest = GridSearchCV(randomForest, grid_para_forest, scoring='f1_weighted', cv=5, n_jobs=3)
grid_search_forest.fit(X_train, y_train)
bst_prm = grid_search_forest.best_params_
# Refit on the best parameters (without the 0.9 fraud underweight) and score.
randomForest.set_params(class_weight = 'balanced',min_samples_split=bst_prm['min_samples_split'],random_state=42,
                        n_estimators=bst_prm['n_estimators'], max_features=bst_prm['max_features'], \
                        criterion = bst_prm['criterion'], min_samples_leaf = bst_prm['min_samples_leaf'])
randomForest.fit(X_train, y_train,sample_weight=None)
print(confusion_matrix(y_test, randomForest.predict(X_test)))
print(classification_report(y_test, randomForest.predict(X_test)))
# We print out the set of best parameters and compare their performance against the prior 'naive' model. We see the F1 score has dropped slightly. We also see the model parameter selection has tended more toward overfitting with the smallest number of samples per leaf and samples per split chosen. We notice the grid search chose entropy loss.
print(bst_prm)
print(confusion_matrix(y_test, randomForest.predict(X_test)))
print(classification_report(y_test, randomForest.predict(X_test)))
# We choose to stay with the original parameters (e.g. 'gini' loss function, instead of entropy) and other selections. We know that the performance of the random forest is also dependent on the random number generator. To introduce a measure of noise into the model training we fit the model for various values of the random state, and then save the F1 score, the confusion matrix, and a dataframe of labeled feature importances for each iteration, to allow a more representative view of feature importances.
sample_weight = np.array([1 if x==0 else 0.9 for x in y_train])
# Refit 8 times with random random_state values to sample the variability of
# the forest; keep per-run confusion matrix, fraud F1 (characters 148:152 of
# the text report), and a sorted feature-importance frame, keyed by the
# random seed used.
rndm_score_dict = {}
for i in range(8):
    rnint = np.random.randint(0,1000000)
    randomForest.set_params(bootstrap=True, ccp_alpha=0.0, class_weight='balanced',
                            criterion='gini', max_depth=None, max_features=15,
                            max_leaf_nodes=None, max_samples=None,
                            min_impurity_decrease=0.0, min_impurity_split=None,
                            min_samples_leaf=12, min_samples_split=3,
                            min_weight_fraction_leaf=0.0, n_estimators=110,
                            n_jobs=None, oob_score=True, random_state=rnint, verbose=0,
                            warm_start=False)
    randomForest.fit(X_train, y_train,sample_weight=sample_weight)
    print(confusion_matrix(y_test, randomForest.predict(X_test)))
    print(classification_report(y_test, randomForest.predict(X_test)))
    rndm_score_dict[rnint]=[confusion_matrix(y_test, randomForest.predict(X_test)),\
        ''.join([classification_report(y_test, randomForest.predict(X_test))[x] for x in range(148,152)]),\
        pd.DataFrame(list(zip(X_train.columns, randomForest.feature_importances_))).sort_values(by = 1, ascending=False)]
# Here we calculate a composite confusion matrix (easier for me to read) to understand the true range of likely performance in classification. We see an average F1 score of 0.64, and identification of 80 of the 105 fraudulent providers in the test set.
import statistics


def _med_std(values):
    """Return (median, standard deviation) for a list of scores or matrix cells."""
    return statistics.median(values), np.std(values)


# Composite (median +/- std) confusion matrix and F1 across the random-state
# runs above. rndm_score_dict[key] = [confusion matrix, F1 string, importances].
# The original repeated the median/std extraction twelve times; factoring it
# into _med_std leaves every printed value unchanged.
med_true_neg, std_true_neg = _med_std([rndm_score_dict[x][0][0][0] for x in rndm_score_dict.keys()])
med_false_pos, std_false_pos = _med_std([rndm_score_dict[x][0][0][1] for x in rndm_score_dict.keys()])
med_false_neg, std_false_neg = _med_std([rndm_score_dict[x][0][1][0] for x in rndm_score_dict.keys()])
med_true_pos, std_true_pos = _med_std([rndm_score_dict[x][0][1][1] for x in rndm_score_dict.keys()])
med_f1, std_f1 = _med_std([float(rndm_score_dict[x][1]) for x in rndm_score_dict.keys()])
print(' median, std F1 score for fraud ',(med_f1,std_f1))
print(' true neg false pos')
print((med_true_neg,std_true_neg),(med_false_pos,std_false_pos))
print(' false neg true pos')
print((med_false_neg,std_false_neg),(med_true_pos,std_true_pos))
# Here we calculate the average feature importance across all the random number iterations, from the feature importance dataframes created in each iteration. We then view the bottom 20 (lowest feature importance) features for the model.
# Average feature importance across the random-state iterations.
# BUG FIX: the merge chain was seeded with rndm_score_dict[187403], a
# hard-coded key, but the keys are freshly drawn random ints every run, so
# that lookup raises KeyError almost surely; seed from whichever key exists.
_first_key = next(iter(rndm_score_dict))
RF_Feature_Imp_Ave = rndm_score_dict[_first_key][2][[0]]
for key in rndm_score_dict.keys():
    RF_Feature_Imp_Ave = pd.merge(RF_Feature_Imp_Ave,rndm_score_dict[key][2], on=0)
RF_Feature_Imp_Ave['RF_Feature_Imp_Ave']=RF_Feature_Imp_Ave.mean(axis=1)
RF_Feature_Imp_Ave = RF_Feature_Imp_Ave.sort_values(by='RF_Feature_Imp_Ave', ascending=False)
# NOTE(review): the '1_x'/'1_y' labels come from pandas' merge-suffix naming
# across eight merges -- verify against the pandas version in use.
RF_Feature_Imp_Ave = RF_Feature_Imp_Ave.drop(columns=['1_x','1_y','1_x','1_y','1_y','1_y'])
RF_Feature_Imp_Ave.tail(20)
# We did use the RFECV (recursive feature elimination with cross-validation) but found in several instances that it would remove features that had been quite important in the feature importance tables created in the prior step. For that reason we removed this step and reduced the features by simply removing the bottom 25 features with the lowest average feature importance.
# +
# # %%time
# sample_weight = np.array([1 if x==0 else 0.9 for x in y_train])
# randomForest.set_params(bootstrap=True, ccp_alpha=0.0, class_weight='balanced',
# criterion='gini', max_depth=None, max_features=15,
# max_leaf_nodes=None, max_samples=None,
# min_impurity_decrease=0.0, min_impurity_split=None,
# min_samples_leaf=12, min_samples_split=3,
# min_weight_fraction_leaf=0.0, n_estimators=110,
# n_jobs=None, oob_score=True, random_state=rnint, verbose=0,
# warm_start=False)
# from sklearn.feature_selection import RFECV
# rfecv = RFECV(randomForest, step=1, min_features_to_select=15, cv=3, scoring='f1_weighted', verbose=0, \
# n_jobs=3)
# rfecv = rfecv.fit(X_train, y_train)
# a = [X_train.columns[i] for i in range(len(X_train.columns)) if rfecv.support_[i]]
# rfminfeatures = rfecv.estimator_
# lilx_train = X_train[a]
# rfminfeatures.fit(lilx_train, y_train)
# lilx_test= X_test[a]
# print(' 0 1 predicted is columns')
# print(confusion_matrix(y_test, rfminfeatures.predict(lilx_test)))
# print(classification_report(y_test, rfminfeatures.predict(lilx_test)))
# -
# Persist the averaged importances for later reference.
RF_Feature_Imp_Ave.to_csv('rf_feature_importance.csv')
# Here we identify the features (bottom 25 by average feature importance) to be removed from the reduced feature model.
a = RF_Feature_Imp_Ave.tail(25)
# Column 0 holds the feature names.
drop_list = list(a[0])
drop_list
# We remove the bottom 25 features and then iterate across multiple random numbers to generate an average F1 score, average confusion matrix and average feature importance for the reduced model. We see the average F1 score for the reduced feature model remains unchanged, as does the average confusion matrix performance.
# +
# Drop the 25 least-important features and re-fit across random states.
X_train_reduced = X_train.drop(columns=drop_list)
X_test_reduced = X_test.drop(columns=drop_list)
# Slightly down-weight the fraud class (0.9 vs 1.0) during fitting.
sample_weight = np.array([1 if x==0 else 0.9 for x in y_train])
rndm_score_red_dict = {}
for i in range(8):
    rnint = np.random.randint(0,1000000)
    randomForest.set_params(bootstrap=True, ccp_alpha=0.0, class_weight='balanced',
                            criterion='gini', max_depth=None, max_features=15,
                            max_leaf_nodes=None, max_samples=None,
                            min_impurity_decrease=0.0, min_impurity_split=None,
                            min_samples_leaf=12, min_samples_split=3,
                            min_weight_fraction_leaf=0.0, n_estimators=110,
                            n_jobs=None, oob_score=True, random_state=rnint, verbose=0,
                            warm_start=False)
    randomForest.fit(X_train_reduced, y_train,sample_weight=sample_weight)
    # NOTE(review): chars 148:152 of the classification_report text are assumed
    # to be the class-1 F1 -- fragile; confirm if the report layout changes.
    rndm_score_red_dict[rnint]=[confusion_matrix(y_test, randomForest.predict(X_test_reduced)),\
        ''.join([classification_report(y_test, randomForest.predict(X_test_reduced))[x] for x in range(148,152)]),\
        pd.DataFrame(list(zip(X_train_reduced.columns, randomForest.feature_importances_))).sort_values(by = 1, ascending=False)]


def _med_std(values):
    """Return (median, standard deviation) for a list of scores or matrix cells."""
    return statistics.median(values), np.std(values)


# Composite (median +/- std) confusion matrix and F1 for the reduced model.
# The original repeated the median/std extraction twelve times; the helper
# removes that duplication without changing any printed value.
med_true_neg, std_true_neg = _med_std([rndm_score_red_dict[x][0][0][0] for x in rndm_score_red_dict.keys()])
med_false_pos, std_false_pos = _med_std([rndm_score_red_dict[x][0][0][1] for x in rndm_score_red_dict.keys()])
med_false_neg, std_false_neg = _med_std([rndm_score_red_dict[x][0][1][0] for x in rndm_score_red_dict.keys()])
med_true_pos, std_true_pos = _med_std([rndm_score_red_dict[x][0][1][1] for x in rndm_score_red_dict.keys()])
med_f1, std_f1 = _med_std([float(rndm_score_red_dict[x][1]) for x in rndm_score_red_dict.keys()])
print('Metrics for reduced random forest on test set, minus bottom 25 features')
print(len(X_train_reduced.columns))
print(' median, std F1 score for fraud ',(med_f1,std_f1))
print(' true neg false pos')
print((med_true_neg,std_true_neg),(med_false_pos,std_false_pos))
print(' false neg true pos')
print((med_false_neg,std_false_neg),(med_true_pos,std_true_pos))
print('metrics for train set with reduced features')
print(confusion_matrix(y_train, randomForest.predict(X_train_reduced)))
print(classification_report(y_train, randomForest.predict(X_train_reduced)))
# -
# We now calculate the average feature importance across all the random iterations, and find the Range of Claim Durations, the number of claims, the range of reimbursements and the number of patients are the most important features in this model. These are roughly in accordance with the other tree-based models we've examined, including gradient boost, adaboost and logit boost.
# Average feature importance for the reduced model across the random runs.
# BUG FIX: the merge chain was seeded with rndm_score_red_dict[653683], a
# hard-coded key, but keys are fresh random ints each run, so that lookup
# raises KeyError almost surely; seed from whichever key exists instead.
_first_red_key = next(iter(rndm_score_red_dict))
RF_Red_Feature_Imp_Ave = rndm_score_red_dict[_first_red_key][2][[0]]
for key in rndm_score_red_dict.keys():
    RF_Red_Feature_Imp_Ave = pd.merge(RF_Red_Feature_Imp_Ave,rndm_score_red_dict[key][2], on=0)
RF_Red_Feature_Imp_Ave['RF_Feature_Imp_Ave']=RF_Red_Feature_Imp_Ave.mean(axis=1)
RF_Red_Feature_Imp_Ave = RF_Red_Feature_Imp_Ave.sort_values(by='RF_Feature_Imp_Ave', ascending=False)
# NOTE(review): the '1_x'/'1_y' labels come from pandas' merge-suffix naming
# across repeated merges -- verify against the pandas version in use.
RF_Red_Feature_Imp_Ave = RF_Red_Feature_Imp_Ave.drop(columns=['1_x','1_y','1_x','1_y','1_y','1_y'])
RF_Red_Feature_Imp_Ave.to_csv('RF_Red_Feature_Imp_Ave.csv')
RF_Red_Feature_Imp_Ave.head(20)
# NOTE(review): the two outputs below have no .csv extension -- presumably
# intentional; confirm before relying on them downstream.
X_train_reduced.to_csv('rf_reduced_feature_set')
y_train.to_csv('rf_reduced_label_set')
# Finally we attempt to develop a cost model to quantify the relative performance of each model. We read in the total claims data since we have decided to measure the dollar amount of claims of the fraudulent providers and the amount of that money that this model has identified as reimbursed to fraudulent providers.
# Per-provider claim totals (training split) for the cost model below.
data = pd.read_csv('./data/combinedData.csv')
# Sum the money reimbursed to all providers, to be able to quantify the amount of money reimbursed to fraudulent providers.
data = data[data['Set']=='Train']
data1 = data.groupby('Provider').agg('sum')['InscClaimAmtReimbursed'].reset_index()
data1.columns=['Provider','TotalClaim']
data1
# NOTE(review): provider_claim_test is defined in an earlier cell not shown
# here -- presumably the per-provider claim totals for the test split; verify.
provider_claim_test.columns
# The model presented is slightly different from this one, but essentially we attempt to acknowledge a cost associated with all investigations, and impose an extra cost for false positive identifications of innocent providers as fraudulent. We attempted to maximize the amount of money identified as from fraudulent providers, while also trying to maximize the ratio of the recovered money to the amount spent to get that money.
# +
# Cost model: a per-investigation cost for every predicted-fraud provider, an
# extra legal cost for false positives, against the recovered fraudulent claims.
a = pd.DataFrame({'actual':y_test,'predict':randomForest.predict(X_test_reduced),'total_claim': provider_claim_test['total_claim']})
print(confusion_matrix(y_test, randomForest.predict(X_test_reduced)))
totalclaims = np.sum(a['total_claim'])
totaldefrauded=100*np.sum(a[a['actual']==1]['total_claim'])/totalclaims
print('total claims for test set are ${:,.0f}'.format(totalclaims))
print('total fraudulent claims are %i' %totaldefrauded,'% of total claims')
# Investigation cost: $100K per provider flagged as fraudulent.
totalcost=100*np.sum(a[a['predict']==1]['predict'])*100000/totalclaims
print('total investigation cost at 100K per %i' %totalcost,'% of total claims')
# BUG FIX: '&' binds tighter than '==', so the original mask
# (a['predict']==1) & a['actual']==0 parsed as ((a['predict']==1) & a['actual']) == 0
# and selected the wrong rows; both compound conditions are now parenthesized.
totalfalsepos=100*np.sum(a[(a['predict']==1) & (a['actual']==0)]['predict'])*100000/totalclaims
print('total legal costs for false positives at 100K per are %i' %totalfalsepos,'% of total claims')
totalrecovered=100*np.sum(a[(a['predict']==1) & (a['actual']==1)]['total_claim'])/totalclaims
print('total recovered claims are %i' %totalrecovered,'% of total claims')
print('total net benefit of model as Pct of total claims is %i' %(totalrecovered-(totalcost+totalfalsepos)),'% of total claims')
# -
# NOTE(review): duplicate of the earlier save of the same frame; harmless.
RF_Feature_Imp_Ave.to_csv('rf_feature_importance.csv')
| doug/Random_forest_model_for_healthcare_fraud_detection.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Phonological
#
# Learning phonological representations isn't as far from learning semantic representations as often thought. The data for both come in superficially linear but underlyingly hierarchically structured sequences. In the case of phonological representations, the feature representations are more agreed upon. For this reason, I want to learn and evaluate phonological representations using similar methods.
#
# [PHOIBLE](http://phoible.org/) is a great resource. It includes feature representations for over 2000 segments. More info on the features is [here](https://github.com/phoible/dev/tree/master/raw-data/FEATURES).
# !wget -q -O raw_phonological_features.tsv https://raw.githubusercontent.com/phoible/dev/master/raw-data/FEATURES/phoible-segments-features.tsv
import pandas as pd
import numpy as np
# Load the raw PHOIBLE feature table; transpose so segments become the columns' index.
raw = pd.read_csv('raw_phonological_features.tsv', sep='\t', index_col=0).T
raw.head()

# I need to change the values in the dataframe from strings to ints. Some
# values are combinations of pluses and minuses. I'm not entirely sure what
# that means, but for now I'm going to treat them all as a 0.
np.unique(raw.values)

# '+' -> 1; '-' and '0' -> 0; every mixed +/- combination also collapses to 0.
mapping = {'+': 1, '-': 0, '0': 0}
mapping.update(dict.fromkeys(['+,-', '+,-,+', '+,-,+,-', '+,-,-', '-,+', '-,+,+', '-,+,-'], 0))
raw.replace(mapping, inplace=True)
raw.to_csv('phonological_features.csv')
| semrep/data/evaluation/phonological/phonological.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
#
# Visualizing 4D probabilistic atlas maps
# =======================================
#
# This example shows how to visualize probabilistic atlases made of 4D images.
# There are 3 different display types:
#
# 1. "contours", which means maps or ROIs are shown as contours delineated by colored lines.
#
# 2. "filled_contours", maps are shown as contours same as above but with fillings inside the contours.
#
# 3. "continuous", maps are shown as just color overlays.
#
# A colorbar can optionally be added.
#
# The :func:`nilearn.plotting.plot_prob_atlas` function displays each map
# with each different color which are picked randomly from the colormap
# which is already defined.
#
# See `plotting` for more information to know how to tune the parameters.
#
#
# +
# Load 4D probabilistic atlases
from nilearn import datasets
# Harvard Oxford Atlas
harvard_oxford = datasets.fetch_atlas_harvard_oxford('cort-prob-2mm')
harvard_oxford_sub = datasets.fetch_atlas_harvard_oxford('sub-prob-2mm')
# Multi Subject Dictionary Learning Atlas
msdl = datasets.fetch_atlas_msdl()
# Smith ICA Atlas and Brain Maps 2009
smith = datasets.fetch_atlas_smith_2009()
# ICBM tissue probability
icbm = datasets.fetch_icbm152_2009()
# Allen RSN networks
allen = datasets.fetch_atlas_allen_2011()
# Pauli subcortical atlas
subcortex = datasets.fetch_atlas_pauli_2017()
# Visualization
from nilearn import plotting
# Display name -> probabilistic map image(s) for each fetched atlas.
atlas_types = {'Harvard_Oxford': harvard_oxford.maps,
               'Harvard_Oxford sub': harvard_oxford_sub.maps,
               'MSDL': msdl.maps, 'Smith 2009 10 RSNs': smith.rsn10,
               'Smith2009 20 RSNs': smith.rsn20,
               'Smith2009 70 RSNs': smith.rsn70,
               'Smith2009 20 Brainmap': smith.bm20,
               'Smith2009 70 Brainmap': smith.bm70,
               'ICBM tissues': (icbm['wm'], icbm['gm'], icbm['csf']),
               'Allen2011': allen.rsn28,
               'Pauli2017 Subcortical Atlas': subcortex.maps,
               }
# One figure per atlas, sorted by name for a stable display order.
for name, atlas in sorted(atlas_types.items()):
    plotting.plot_prob_atlas(atlas, title=name)
# An optional colorbar can be set
plotting.plot_prob_atlas(smith.bm10, title='Smith2009 10 Brainmap (with'
                         ' colorbar)',
                         colorbar=True)
print('ready')
plotting.show()
# -
for name, atlas in sorted(atlas_types.items()):
| experiments/.ipynb_checkpoints/0.Atlas-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Introduction
# ### What's version control?
#
# Version control is a tool for __managing changes__ to a set of files.
#
# There are many different __version control systems__:
#
# - Git
# - Mercurial (`hg`)
# - CVS
# - Subversion (`svn`)
# - ...
# ### Why use version control?
#
# - Better kind of __backup__.
# - Review __history__ ("When did I introduce this bug?").
# - Restore older __code versions__.
# - Ability to __undo mistakes__.
# - Maintain __several versions__ of the code at a time.
# Git is also a __collaborative__ tool:
#
# - "How can I share my code?"
# - "How can I submit a change to someone else's code?"
# - "How can I merge my work with Sue's?"
#
# ### Git != GitHub
#
# - __Git__: version control system tool to manage source code history.
#
# - __GitHub__: hosting service for Git repositories.
# ### How do we use version control?
#
# Do some programming, then commit our work:
#
# `my_vcs commit`
#
# Program some more.
#
# Spot a mistake:
#
# `my_vcs rollback`
#
# Mistake is undone.
# ### What is version control? (Team version)
#
# Sue | James
# ------------------ |------
# `my_vcs commit` | ...
# ... | Join the team
# ... | `my_vcs checkout`
# ... | Do some programming
# ... | `my_vcs commit`
# `my_vcs update` | ...
# Do some programming|Do some programming
# `my_vcs commit` | ...
# `my_vcs update` | ...
# `my_vcs merge` | ...
# `my_vcs commit` | ...
# ### Scope
#
# This course will use the `git` version control system, but much of what you learn will be valid with other version control
# tools you may encounter, including subversion (`svn`) and mercurial (`hg`).
# ## Practising with Git
# ### Example Exercise
#
# In this course, we will use, as an example, the development of a few text files containing a description of a topic of your choice.
#
# This could be your research, a hobby, or something else. In the end, we will show you how to display the content of these files as a very simple website.
# ### Programming and documents
#
# The purpose of this exercise is to learn how to use Git to manage program code you write, not simple text website content, but we'll just use these text files instead of code for now, so as not to confuse matters with trying to learn version control while thinking about programming too.
#
# In later parts of the course, you will use the version control tools you learn today with actual Python code.
# ### Markdown
#
# The text files we create will use a simple "wiki" markup style called [markdown](http://daringfireball.net/projects/markdown/basics) to show formatting. This is the convention used in this file, too.
#
# You can view the content of this file in the way Markdown renders it by looking on the [web](https://github.com/UCL/ucl_software_carpentry/blob/master/git/git_instructions.md), and compare the [raw text](https://raw.github.com/UCL/ucl_software_carpentry/master/git/git_instructions.md).
# ### Displaying Text in this Tutorial
#
# This tutorial is based on use of the Git command line. So you'll be typing commands in the shell.
# To make it easy for me to edit, I've built it using Jupyter notebook.
# Commands you can type will look like this, using the %%bash "magic" for the notebook.
# + attributes={"classes": [" Bash"], "id": ""} language="bash"
# echo some output
# -
# with the results you should see below.
# In this document, we will show the new content of an edited document like this:
# %%writefile somefile.md
Some content here
# But if you are following along, you should edit the file using a text editor.
# On windows, we recommend [Notepad++](https://notepad-plus-plus.org).
# On mac, we recommend [Atom](https://atom.io)
# ### Setting up somewhere to work
# + attributes={"classes": [" Bash"], "id": ""} language="bash"
# rm -rf learning_git/git_example # Just in case it's left over from a previous class; you won't need this
# mkdir -p learning_git/git_example
# cd learning_git/git_example
# -
# I just need to move this Jupyter notebook's current directory as well:
import os
# Remember where the notebook started so paths can be rebuilt relative to it.
top_dir = os.getcwd()
top_dir
git_dir = os.path.join(top_dir, 'learning_git')
git_dir
working_dir=os.path.join(git_dir, 'git_example')
# Change the notebook process's cwd so later %%bash cells run inside the repo.
os.chdir(working_dir)
# ## Solo work
#
# ### Configuring Git with your name and email
#
# First, we should configure Git to know our name and email address:
# + attributes={"classes": [" Bash"], "id": ""} language="bash"
# git config --global user.name "jack89roberts"
# git config --global user.email "<EMAIL>"
# -
# ### Initialising the repository
#
# Now, we will tell Git to track the content of this folder as a git "repository".
# + attributes={"classes": [" Bash"], "id": ""} language="bash"
# pwd # Note where we are standing-- MAKE SURE YOU INITIALISE THE RIGHT FOLDER
# git init
# -
# As yet, this repository contains no files:
# + language="bash"
# ls
# + attributes={"classes": [" Bash"], "id": ""} language="bash"
# git status
| ch02git/01Intro.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# # License
# ***
# Copyright 2017 <NAME>, <EMAIL>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# # Monotonic Gradient Boosting using XGBoost
# ***
# http://xgboost.readthedocs.io/en/latest//tutorials/monotonic.html
# Monotonicity is an important facet of interpretability. Monotonicity constraints ensure that the modeled relationship between inputs and the target moves in only one direction, i.e. as an input increases the target can only increase, or as an input increases the target can only decrease. Such monotonic relationships are usually easier to explain and understand than non-monotonic relationships.
# ## Preliminaries: imports, start h2o, load and clean data
# imports
import h2o
from h2o.estimators.xgboost import H2OXGBoostEstimator
import matplotlib.pyplot as plt
# %matplotlib inline
import numpy as np
import pandas as pd
import xgboost as xgb
# start h2o
h2o.init()
# Clear any objects left in the cluster from earlier runs.
h2o.remove_all()
# #### Load and prepare data for modeling
# load clean data
path = '../../03_regression/data/train.csv'
frame = h2o.import_file(path=path)
# assign target and inputs
y = 'SalePrice'
# Every column except the target and the row id is a model input.
X = [name for name in frame.columns if name not in [y, 'Id']]
# #### Monotonic constraints are easier to understand for numeric inputs without missing values
# +
# determine column types
# impute
# Partition the inputs by h2o column type: categoricals ('enum') vs numerics,
# then median-impute the numeric columns in place.
reals, enums = [], []
for col_name, col_type in frame.types.items():
    if col_name not in X:
        continue
    (enums if col_type == 'enum' else reals).append(col_name)
_ = frame[reals].impute(method='median')
# -
# split into training and validation
# Split into training and validation with a fixed seed for reproducibility.
train, valid = frame.split_frame([0.7], seed=12345)
# Tuple of +1 (monotone increasing) constraints, one per numeric input, for
# xgboost's monotone_constraints parameter. (Replaces the original detour
# through np.ones(...).tolist(); the resulting tuple is identical.)
mono_constraints = tuple(1 for _ in reals)
# ## Train a monotonic predictive model
# * In this XGBoost GBM all the modeled relationships between the inputs and the target are forced to be monotonically increasing.
# #### Log transform for better regression results and easy RMSLE in XGBoost
# +
# Check log transform - looks good
# %matplotlib inline
# Visual sanity check of the log-transformed target distribution.
train['SalePrice'].log().as_data_frame().hist()
# Execute log transform
# (plain RMSE on the log target then behaves like RMSLE on raw prices)
train['SalePrice'] = train['SalePrice'].log()
valid['SalePrice'] = valid['SalePrice'].log()
# Peek at the first few transformed target values.
print(train[0:3, 'SalePrice'])
# -
# #### Train XGBoost with monotonicity Constraints
# +
# Mean of the (log) target, used as the boosting starting point (base_score).
ave_y = train['SalePrice'].mean()[0]
# XGBoost uses SVMLight data structure, not Numpy arrays or Pandas data frames
dtrain = xgb.DMatrix(train.as_data_frame()[reals],
                     train.as_data_frame()['SalePrice'])
dvalid = xgb.DMatrix(valid.as_data_frame()[reals],
                     valid.as_data_frame()['SalePrice'])
# tuning parameters
params = {
    'objective': 'reg:linear',   # NOTE(review): renamed 'reg:squarederror' in newer xgboost -- confirm version
    'booster': 'gbtree',
    'eval_metric': 'rmse',       # RMSE on the log target == RMSLE on raw prices
    'eta': 0.005,
    'subsample': 0.1,
    'colsample_bytree': 0.8,
    'max_depth': 5,
    'reg_alpha' : 0.01,
    'reg_lambda' : 0.0,
    'monotone_constraints':mono_constraints,  # +1 (increasing) for every numeric input
    'base_score': ave_y,
    'silent': 0,                 # NOTE(review): 'silent' is deprecated in newer xgboost
    'seed': 12345,
}
# watchlist is used for early stopping
watchlist = [(dtrain, 'train'), (dvalid, 'eval')]
# train model
xgb_model1 = xgb.train(params,
                       dtrain,
                       1000,
                       evals=watchlist,
                       early_stopping_rounds=50,
                       verbose_eval=True)
# -
# #### Plot variable importance
_ = xgb.plot_importance(xgb_model1)
# ## Examine monotonic behavior with partial dependence and ICE
# * Partial dependence is used to view the global, average behavior of a variable under the monotonic model.
# * ICE is used to view the local behavior of a single instance and single variable under the monotonic model.
# * Overlaying partial dependence onto ICE in a plot is a convenient way to validate and understand both global and local monotonic behavior.
# #### Helper function for calculating partial dependence
def par_dep(xs, frame, model, resolution=20, bins=None):
    """Creates Pandas dataframe containing partial dependence for a single variable.

    Args:
        xs: Variable (column name) for which to calculate partial dependence.
        frame: H2OFrame for which to calculate partial dependence.
        model: XGBoost model for which to calculate partial dependence.
        resolution: The number of points across the domain of xs for which to
            calculate partial dependence (ignored when bins is supplied).
        bins: Optional explicit sequence of xs values at which to evaluate.

    Returns:
        Pandas dataframe with columns [xs, 'partial_dependence'].
    """
    # don't show progress bars for parse
    h2o.no_progress()

    # cache the original column so the caller's frame can be restored at the end
    col_cache = h2o.deep_copy(frame[xs], xid='col_cache')

    # determine values at which to calculate partial dependence
    # (BUG FIX: 'bins == None' replaced by 'bins is None' -- '==' triggers
    # element-wise comparison for array-like bins and is un-idiomatic)
    if bins is None:
        min_ = frame[xs].min()
        max_ = frame[xs].max()
        by = (max_ - min_)/resolution
        bins = np.arange(min_, max_, by)

    # calculate partial dependence by setting the column of interest to a
    # constant and averaging the model's predictions over the frame
    rows = []
    for j in bins:
        frame[xs] = j
        dframe = xgb.DMatrix(frame.as_data_frame())
        par_dep_i = h2o.H2OFrame(model.predict(dframe).tolist())
        rows.append({xs: j, 'partial_dependence': par_dep_i.mean()[0]})

    # build the result in one shot (DataFrame.append was removed in pandas 2.0)
    par_dep_frame = pd.DataFrame(rows, columns=[xs, 'partial_dependence'])

    # return input frame to original cached state
    frame[xs] = h2o.get_frame('col_cache')

    return par_dep_frame
# #### Calculate partial dependence for 3 important variables
# Partial dependence for three important inputs, computed on the validation
# split so the curves reflect held-out data.
par_dep_OverallCond = par_dep('OverallCond', valid[reals], xgb_model1)
par_dep_GrLivArea = par_dep('GrLivArea', valid[reals], xgb_model1)
par_dep_LotArea = par_dep('LotArea', valid[reals], xgb_model1)
# #### Helper function for finding decile indices
def get_quantile_dict(y, id_, frame):
    """Returns the percentiles of a column y as the indices for another column id_.

    Args:
        y: Column in which to find percentiles.
        id_: Id column that stores indices for percentiles of y.
        frame: H2OFrame containing y and id_.

    Returns:
        Dictionary mapping percentile (0, 99, 10, 20, ..., 90) to the id_
        value of the row sitting at that percentile of y.
    """
    ranked = frame.as_data_frame()
    ranked.sort_values(y, inplace=True)
    ranked.reset_index(inplace=True)

    n_rows = ranked.shape[0]
    step = n_rows // 10

    # Endpoints first (0th row and last row stand in for the 0th/99th
    # percentile), then each interior decile by positional index.
    out = {0: ranked.loc[0, id_],
           99: ranked.loc[n_rows - 1, id_]}
    for pct in range(10, 100, 10):
        out[pct] = ranked.loc[(pct // 10) * step, id_]
    return out
# #### Calculate deciles of SaleProce
# Map each SalePrice percentile (0, 10, ..., 90, 99) to a row Id.
quantile_dict = get_quantile_dict('SalePrice', 'Id', valid)
# #### Calculate values for ICE
# +
# Evaluate each percentile row's ICE curve on the same x-grid as the average
# (partial dependence) curve, so both can share one plot per variable.
bins_OverallCond = list(par_dep_OverallCond['OverallCond'])
bins_GrLivArea = list(par_dep_GrLivArea['GrLivArea'])
bins_LotArea = list(par_dep_LotArea['LotArea'])
for i in sorted(quantile_dict.keys()):
    col_name = 'Percentile_' + str(i)
    # par_dep on a single-row frame == the ICE curve for that row
    par_dep_OverallCond[col_name] = par_dep('OverallCond',
                                            valid[valid['Id'] == int(quantile_dict[i])][reals],
                                            xgb_model1,
                                            bins=bins_OverallCond)['partial_dependence']
    par_dep_GrLivArea[col_name] = par_dep('GrLivArea',
                                          valid[valid['Id'] == int(quantile_dict[i])][reals],
                                          xgb_model1,
                                          bins=bins_GrLivArea)['partial_dependence']
    par_dep_LotArea[col_name] = par_dep('LotArea',
                                        valid[valid['Id'] == int(quantile_dict[i])][reals],
                                        xgb_model1,
                                        bins=bins_LotArea)['partial_dependence']
# -
# #### Plot Partial Dependence and ICE
# +
# OverallCond
fig, ax = plt.subplots()
# ICE curves: one line per SalePrice percentile row (all columns except the average).
par_dep_OverallCond.drop('partial_dependence', axis=1).plot(x='OverallCond', colormap='gnuplot', ax=ax)
# Average (partial dependence) curve overlaid as a thick red line.
par_dep_OverallCond.plot(title='Partial Dependence and ICE for OverallCond',
                         x='OverallCond',
                         y='partial_dependence',
                         style='r-',
                         linewidth=3,
                         ax=ax)
_ = plt.legend(bbox_to_anchor=(1.05, 0),
               loc=3,
               borderaxespad=0.)
# +
# GrLivArea
fig, ax = plt.subplots()
par_dep_GrLivArea.drop('partial_dependence', axis=1).plot(x='GrLivArea', colormap='gnuplot', ax=ax)
par_dep_GrLivArea.plot(title='Partial Dependence and ICE for GrLivArea',
                       x='GrLivArea',
                       y='partial_dependence',
                       style='r-',
                       linewidth=3,
                       ax=ax)
_ = plt.legend(bbox_to_anchor=(1.05, 0),
               loc=3,
               borderaxespad=0.)
# +
# LotArea
fig, ax = plt.subplots()
par_dep_LotArea.drop('partial_dependence', axis=1).plot(x='LotArea', colormap='gnuplot', ax=ax)
par_dep_LotArea.plot(title='Partial Dependence and ICE for LotArea',
                     x='LotArea',
                     y='partial_dependence',
                     style='r-',
                     linewidth=3,
                     ax=ax)
_ = plt.legend(bbox_to_anchor=(1.05, 0),
               loc=3,
               borderaxespad=0.)
# -
# #### Shutdown H2O
h2o.cluster().shutdown(prompt=True)
| 10_model_interpretability/src/mono_xgboost.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Jaztinn/OOP-58002/blob/main/OOP_Concepts_2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="OIQjDXrTTq8j"
# Class with Multiple Objects
# + id="OFTVl-mPUDDs"
class Birds:
    """Demo of one class producing many independent object instances."""

    def __init__(self, bird_name):
        self.bird_name = bird_name

    def flying_birds(self):
        """Announce that this bird flies."""
        print(f"{self.bird_name} flies above clouds")

    def non_flying_birds(self):
        """Announce that this bird is the Philippine national bird."""
        print(f"{self.bird_name} is the national bird of the Philippines")


# Three distinct objects built from the same class.
vulture, crane, emu = (Birds(n) for n in ("Griffon Vulture", "Common Crane", "Emu"))
for flyer in (vulture, crane):
    flyer.flying_birds()
emu.non_flying_birds()
# + [markdown] id="-37AaSCrUkGb"
# Encapsulation with Private Attributes
# + colab={"base_uri": "https://localhost:8080/"} id="9wq6rAHqUokV" outputId="d6a0bdaf-21fd-4528-d5fb-fdd675b1ee8a"
class foo:
    """Toy encapsulation demo: operands live in 'private' attributes."""

    def __init__(self, a, b):
        self._a = a
        self._b = b

    def add(self):
        """Return the sum of the two stored operands."""
        return self._a + self._b


foo_object = foo(3, 4)
#foo_object.add()
# Assigning the *public* name 'a' creates a brand-new attribute; the private
# '_a' used by add() is untouched, so add() still returns 7.
foo_object.a = 6
foo_object.add()
# + colab={"base_uri": "https://localhost:8080/", "height": 132} id="OHxO2iYsVAZE" outputId="80ad55cd-2695-4600-8145-78341fd5f8d4"
# BUG FIX: the class statement was corrupted ('ass Counter:'), a SyntaxError,
# and the final print referenced a non-existent attribute '_current' (the
# attribute defined by __init__ is 'current').
class Counter:
    """A simple counter demonstrating mutable per-instance state."""

    def __init__(self):
        self.current = 0

    def increment(self):
        """Advance the counter by one."""
        self.current += 1

    def value(self):
        """Return the current count."""
        return self.current

    def reset(self):
        """Set the counter back to zero."""
        self.current = 0


counter = Counter()
#counter.increment()
#counter.increment()
#counter.increment()
print(counter.current)
#print(counter.value())
# + [markdown] id="Am_a5Y82VCza"
# ## Inheritance
# + colab={"base_uri": "https://localhost:8080/"} id="AC3nDmU2VHeJ" outputId="ee201191-e07f-418c-c55a-bec81f4893e4"
class Person:  # parent class
    """Base class carrying a first and second name."""

    def __init__(self, fname, sname):
        self.fname = fname
        self.sname = sname

    def printname(self):
        """Print the full name, space-separated."""
        full_name = (self.fname, self.sname)
        print(*full_name)


x = Person("<NAME>", "Espares")
x.printname()


class Teacher(Person):
    """Child class: inherits Person's behaviour unchanged."""
    pass


y = Teacher("Maria", "Sayo")
y.printname()
# + [markdown] id="89_5KBSqVTU2"
# # Polymorphism
# + colab={"base_uri": "https://localhost:8080/"} id="Vdt0DuuAVVS0" outputId="4129223e-595d-4e81-dab6-70fe5c6c8c38"
class RegularPolygon:
    """Base class: stores the common side length of a regular polygon."""

    def __init__(self, side):
        self._side = side


class Square(RegularPolygon):
    """Four equal sides; area = side * side."""

    def area(self):
        return self._side * self._side


class EquilateralTriangle(RegularPolygon):
    """Three equal sides; area = side * side * 0.433 (~ sqrt(3)/4)."""

    def area(self):
        return self._side * self._side * 0.433


# Same method name, different behaviour per subclass: polymorphism.
obj1 = Square(4)
obj2 = EquilateralTriangle(3)
print(obj1.area())
print(obj2.area())
# + [markdown] id="QmyQlNovVkxC"
# # Application 1
# 30% Prelim + 30% Midterm + 40% Final =
# Sem Grade
#
# Create a Python program that displays the name of three students (Student 1, Student 2, Student 3) and their grades.
# Create a class name Person and attributes - std1, std2, std3, pre, mid, fin.
# Compute the average of each term grade using Grade() method.
# Information about student's grades must be hidden from others.
# + colab={"base_uri": "https://localhost:8080/"} id="mvN0dLlUV6fS" outputId="6b964c0e-bc6f-4151-a43d-28b810b1ef5e"
#Application 1
#Application 1
class Person:
    """One student's name parts and term grades.

    Grades are stored in underscore-prefixed ('private') attributes so they
    stay hidden from casual outside access; Grade() is the only reader.
    """

    def __init__(self, std1, std2, std3, pre, mid, fin):
        self._std1 = std1
        self._std2 = std2
        self._std3 = std3
        self._pre = pre
        self._mid = mid
        self._fin = fin

    def Grade(self):
        """Print the term grades and the weighted semestral grade
        (30% prelim + 30% midterm + 40% final)."""
        rule = "_______________________________"
        print(rule)
        print("Student Name =", self._std1, self._std2, self._std3)
        print(rule)
        print("Prelim Grade =", int(self._pre))
        print("Midterm Grade =", int(self._mid))
        print("Final Grade =", int(self._fin))
        print(rule)
        print("Semestral Grade =", int(self._pre*.30) + int(self._mid*.30) + int(self._fin*.40))
# Build the three student records, then look up the entered surname
# (either capitalisation) and print that student's grade report.
student1 = Person("<NAME>", "", "", 89, 90, 87)
student2 = Person("", "<NAME>", "", 96, 94, 90)
student3 = Person("", "", "<NAME>", 85, 84, 84)
name = input("Enter your Last name: ")
if name in ("Luffy", "luffy"):
    student1.Grade()
elif name in ("Espares", "espares"):
    student2.Grade()
elif name in ("Ball", "ball"):
    student3.Grade()
else:
    print("\n!!No Database for this Student!!")
| OOP_Concepts_2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python_AI_CV
# language: python
# name: cv_ml_kr_skl_torch_tf
# ---
import numpy as np
import torch
import matplotlib.pyplot as plt
# %matplotlib inline
from itertools import product
# ## CV核心基础WEEK3 :经典机器学习(一)
# ### Pipeline:
# 1. 监督学习与非监督学习
# 2. 第一个可训练的监督学习模型:线性回归模型的3类解法
# 3. 使用线性模型,解决字符分类问题
# 4. 逻辑回归模型
#
# ### 作业:
# * 编写计算机视觉的第1版程序:用线性回归模型,解决数字图片分类问题,
# * 要求:用pytorch 的auto_grad功能。
#
# #### 步骤:
# 1. 生成10张图片,对应0,1,2,3,4,5,6,7,8,9.
# 2. 对这10张图片提取特征x。
# 3. 用一个线性判别器f(x)来决策输出结果y。
# 4. 判别器的训练要使用梯度下降法,写代码的时候要用到pytorch 的auto_grad功能。
#
#
#
# #### 达到作用:
# * 当x是 “0”图片对应的特征时,y=f(x)=0
# ...
# * 当x是 “9”图片对应的特征时,y=f(x)=9
# ___
#
# _可参考代码:_
# - /week3/recognize_computer_vision_linear_model.py,线性模型解决图片识别问题课程代码
# - /week3/how_to_use_auto_grad.py,测试pytorch auto_grad使用方法
# - /week3/data_display.ipynb 数据显示
# - /week3/week2作业答案课堂讲解.ipynb
# - /week3/auto_grad使用时的注意事项.ipynb
# - /week3/auto_grad形式的梯度下降.ipynb
# - /week3/running_jupyter.pdf , jupyter运行命令
# - jupyter常用效率快捷键:https://zhuanlan.zhihu.com/p/143919082
#
def generate_data():
    """Build the ten 6x6 binary digit images (0-9) and their labels.

    Returns:
        (image_data, image_label): a list of ten 6x6 integer tensors and
        the matching list of digit labels [0, 1, ..., 9].
    """
    # Original note (translated): this function generates the image
    # matrices for the ten digits 0-9.
    image_data=[]
    num_0 = torch.tensor(
    [[0,0,1,1,0,0],
    [0,1,0,0,1,0],
    [0,1,0,0,1,0],
    [0,1,0,0,1,0],
    [0,0,1,1,0,0],
    [0,0,0,0,0,0]])
    image_data.append(num_0)
    num_1 = torch.tensor(
    [[0,0,0,1,0,0],
    [0,0,1,1,0,0],
    [0,0,0,1,0,0],
    [0,0,0,1,0,0],
    [0,0,1,1,1,0],
    [0,0,0,0,0,0]])
    image_data.append(num_1)
    num_2 = torch.tensor(
    [[0,0,1,1,0,0],
    [0,1,0,0,1,0],
    [0,0,0,1,0,0],
    [0,0,1,0,0,0],
    [0,1,1,1,1,0],
    [0,0,0,0,0,0]])
    image_data.append(num_2)
    num_3 = torch.tensor(
    [[0,0,1,1,0,0],
    [0,0,0,0,1,0],
    [0,0,1,1,0,0],
    [0,0,0,0,1,0],
    [0,0,1,1,0,0],
    [0,0,0,0,0,0]])
    image_data.append(num_3)
    num_4 = torch.tensor(
    [
    [0,0,0,0,1,0],
    [0,0,0,1,1,0],
    [0,0,1,0,1,0],
    [0,1,1,1,1,1],
    [0,0,0,0,1,0],
    [0,0,0,0,0,0]])
    image_data.append(num_4)
    num_5 = torch.tensor(
    [
    [0,1,1,1,0,0],
    [0,1,0,0,0,0],
    [0,1,1,1,0,0],
    [0,0,0,0,1,0],
    [0,1,1,1,0,0],
    [0,0,0,0,0,0]])
    image_data.append(num_5)
    num_6 = torch.tensor(
    [[0,0,1,1,0,0],
    [0,1,0,0,0,0],
    [0,1,1,1,0,0],
    [0,1,0,0,1,0],
    [0,0,1,1,0,0],
    [0,0,0,0,0,0]])
    image_data.append(num_6)
    num_7 = torch.tensor(
    [
    [0,1,1,1,1,0],
    [0,0,0,0,1,0],
    [0,0,0,1,0,0],
    [0,0,0,1,0,0],
    [0,0,0,1,0,0],
    [0,0,0,0,0,0]])
    image_data.append(num_7)
    num_8 = torch.tensor(
    [[0,0,1,1,0,0],
    [0,1,0,0,1,0],
    [0,0,1,1,0,0],
    [0,1,0,0,1,0],
    [0,0,1,1,0,0],
    [0,0,0,0,0,0]])
    image_data.append(num_8)
    num_9 = torch.tensor(
    [[0,0,1,1,1,0],
    [0,1,0,0,1,0],
    [0,0,1,1,1,0],
    [0,1,0,0,1,0],
    [0,0,0,0,1,0],
    [0,0,0,0,0,0]])
    image_data.append(num_9)
    image_label=[0,1,2,3,4,5,6,7,8,9]
    return image_data,image_label
def get_feature(x):
    """Flatten a 6x6 digit image into a (1, 36) float32 feature row."""
    return x.float().reshape(1, 36)
def model(feature, weights):
    """Linear model with bias: append a constant 1 to the feature row
    (making it 1x37) and matrix-multiply by the (37, 1) weight column."""
    bias = torch.ones(1, 1)
    augmented = torch.cat((feature, bias), dim=1)
    return torch.mm(augmented, weights)
loss_fn = torch.nn.MSELoss()  # mean-squared-error loss used during training
# The bare string below is a notebook-style note; translated it reads:
# "apply automatic differentiation to each image".
'''
对每幅图像做自动求导
'''
def linearRegression(image_data, image_labels, weights, epochs=1000, lr=0.005):
    """Train the linear digit classifier by full-batch gradient descent.

    Args:
        image_data: list of 6x6 digit-image tensors (see generate_data).
        image_labels: list of integer labels, one per image.
        weights: (37, 1) float tensor with requires_grad=True
                 (36 pixel weights plus one bias weight).
        epochs: passes over the whole image set (default 1000, matching
                the original hard-coded value).
        lr: gradient-descent step size (default 0.005, as before).

    Returns:
        (weights, loss_all): the trained weight tensor and the per-epoch
        mean MSE as plain Python floats.
    """
    n_images = len(image_data)
    loss_all = []
    for epoch in range(epochs):
        loss = 0
        for i in range(n_images):
            feature = get_feature(image_data[i])
            y = model(feature, weights)
            label = torch.tensor(image_labels[i]).view(1, 1).float()
            loss += loss_fn(y, label)
        loss /= float(n_images)
        loss.backward()
        # Update outside the autograd graph, then clear the gradient for
        # the next epoch.
        with torch.no_grad():
            weights -= weights.grad * lr
            weights.grad.zero_()
        print("epoch=%s,loss=%s,weights=%s" % (epoch, loss, weights.view(-1)))
        # .item() detaches the scalar from the autograd graph; appending
        # the raw tensor kept every epoch's graph alive in memory and can
        # break plotting the loss curve with matplotlib.
        loss_all.append(loss.item())
    return weights, loss_all
if __name__ == "__main__":
    # (37, 1) weight column: 36 pixel features plus one bias term.
    weights = torch.randn((37,1), dtype=torch.float32, requires_grad=True)
    image_data, image_label = generate_data()
    # Print the image for digit 0.
    print("数字0对应的图片是:")
    print(image_data[0])
    print("-"*20)
    # Print the image for digit 8.
    print("数字8对应的图片是:")
    print(image_data[8])
    print("-"*20)
    # Train the model and plot the loss curve.
    weights , loss_all = linearRegression(image_data, image_label, weights)
    plt.figure()
    plt.plot(loss_all)
    plt.show()
    print("对每张图片进行识别")
    for i in range(0, 10):
        x = image_data[i]
        feature = get_feature(x)
        # Adding 0.5 before floor() rounds the raw regression output to
        # the nearest integer class.
        y = torch.floor((model(feature, weights) + 0.5)).int()
        print("图像[%s]得分类结果是:[%s],它得特征是[%s]"%(i,y,feature))
| week3/homework_week3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Trying to add multiple enemies.
import pygame
import random

pygame.init()

# Main window: 500x480 pixels; horizontal movement is clamped to screen_width.
win = pygame.display.set_mode((500,480))
pygame.display.set_caption("Goblin's Field")
screen_width = 480
bg = pygame.image.load("resources/bg.jpg")
clock = pygame.time.Clock()

# Sound effects and looping background music (-1 == loop forever).
bulletSound = pygame.mixer.Sound("resources/bullet.wav")
hitSound = pygame.mixer.Sound("resources/hit.wav")
music = pygame.mixer.music.load("resources/music.mp3")
pygame.mixer.music.play(-1)
class Player():
    """The player character: drawing, walk animation, knock-back on hit,
    and the game-over screen."""

    # Walk-cycle frames, loaded once at class-definition time.
    walkRight = [pygame.image.load("resources/R1.png"), pygame.image.load("resources/R2.png"), pygame.image.load("resources/R3.png"), pygame.image.load("resources/R4.png"), pygame.image.load("resources/R5.png"), pygame.image.load("resources/R6.png"), pygame.image.load("resources/R7.png"), pygame.image.load("resources/R8.png"), pygame.image.load("resources/R9.png")]
    walkLeft = [pygame.image.load("resources/L1.png"), pygame.image.load("resources/L2.png"), pygame.image.load("resources/L3.png"), pygame.image.load("resources/L4.png"), pygame.image.load("resources/L5.png"), pygame.image.load("resources/L6.png"), pygame.image.load("resources/L7.png"), pygame.image.load("resources/L8.png"), pygame.image.load("resources/L9.png")]

    def __init__(self, x, y, w, h):
        self.x = x
        self.y = y
        self.h = h
        self.w = w
        self.vel = 10
        self.isJump = False
        self.jumpCount = 10
        self.left = False
        self.right = False
        self.walkCount = 0
        self.standing = True
        self.hitbox = (self.x+20, self.y+8, 28, 60)

    def draw(self, win):
        """Blit the appropriate animation frame and refresh the hitbox."""
        if self.walkCount+1 >= 27:
            self.walkCount = 0
        if not self.standing:
            if self.left:
                win.blit(self.walkLeft[self.walkCount//3], (self.x,self.y))
                self.walkCount += 1
            elif self.right:
                win.blit(self.walkRight[self.walkCount//3], (self.x,self.y))
                self.walkCount += 1
        else:
            if self.right:
                win.blit(self.walkRight[0], (self.x,self.y))
            else:
                win.blit(self.walkLeft[0], (self.x,self.y))
        self.hitbox = (self.x+20, self.y+8, 28, 60)

    def hit(self):
        """Knock the player back to the start and flash a -2 penalty."""
        self.x = 60
        # BUG FIX: was `self.walkcount = 0` (lowercase c), which created a
        # new unused attribute instead of resetting the animation counter.
        self.walkCount = 0
        font1 = pygame.font.SysFont("comicsans",100)
        text = font1.render("-2" , 1, (255,0,0))
        win.blit(text, ((250-text.get_width()/2),200))
        pygame.display.update()
        i = 0
        while i <= 5:
            pygame.time.delay(10)
            i += 1
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    i = 251
                    pygame.quit()

    def game_over(self):
        """Show the final score for a moment, then quit pygame."""
        font_over = pygame.font.SysFont("comicsans",40)
        text_over = font_over.render("Game Over! Score: " + str(score), 1, (0,0,0))
        win.blit(text_over, ((250-text_over.get_width()/2),250-text_over.get_height()/2))
        pygame.display.update()
        i = 0
        while i <= 500:
            pygame.time.delay(10)
            i += 1
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    i = 251
                    pygame.quit()
                else:
                    # NOTE(review): this quits on *any* non-QUIT event during
                    # the game-over delay — presumably "press anything to
                    # exit", but worth confirming.
                    pygame.quit()
class Projectile():
    """A bullet: a filled circle that travels horizontally."""

    def __init__(self, x, y, radius, color, facing):
        self.x = x
        self.y = y
        self.radius = radius
        self.color = color
        # facing is +1 (fired right) or -1 (fired left).
        self.facing = facing
        self.vel = facing * 15  # signed horizontal speed

    def draw(self, win):
        """Render the bullet on the given surface."""
        pygame.draw.circle(win, self.color, (self.x, self.y), self.radius)
class Enemy:
    """A goblin that patrols horizontally between its start and end x
    positions, with a health bar drawn above its hitbox."""

    # Walk-cycle frames, shared by all goblins (loaded once).
    walkRight = [pygame.image.load("resources/R1E.png"), pygame.image.load("resources/R2E.png"), pygame.image.load("resources/R3E.png"), pygame.image.load("resources/R4E.png"), pygame.image.load("resources/R5E.png"), pygame.image.load("resources/R6E.png"), pygame.image.load("resources/R7E.png"), pygame.image.load("resources/R8E.png"), pygame.image.load("resources/R9E.png"), pygame.image.load("resources/R10E.png"), pygame.image.load("resources/R11E.png")]
    walkLeft = [pygame.image.load("resources/L1E.png"), pygame.image.load("resources/L2E.png"), pygame.image.load("resources/L3E.png"), pygame.image.load("resources/L4E.png"), pygame.image.load("resources/L5E.png"), pygame.image.load("resources/L6E.png"), pygame.image.load("resources/L7E.png"), pygame.image.load("resources/L8E.png"), pygame.image.load("resources/L9E.png"), pygame.image.load("resources/L10E.png"), pygame.image.load("resources/L11E.png")]
    # Class-wide counters: goblins ever spawned / goblins still visible.
    enemy_count = 0
    visible_goblin = 10

    def __init__(self, x, y, w, h, end):
        self.x = x
        self.y = y
        self.h = h
        self.w = w
        self.start = 50
        self.end = end
        self.walkCount = 0
        self.vel = 7
        self.path = [self.start, self.end]  # patrol bounds [left, right]
        self.hitbox = (self.x+13, self.y, 40, 60)
        self.health = 10
        self.visible = True
        Enemy.enemy_count += 1

    def draw(self, win):
        """Advance the patrol, blit the current frame, and draw the
        health bar (red background, blue = remaining health)."""
        self.move()
        if self.visible:
            if self.walkCount+1 >= 33:
                self.walkCount = 0
            if self.vel > 0:  # moving right
                win.blit(self.walkRight[self.walkCount // 3], (self.x, self.y))
                self.walkCount += 1
            else:  # moving left
                win.blit(self.walkLeft[self.walkCount // 3], (self.x, self.y))
                self.walkCount += 1
            self.hitbox = (self.x+13, self.y, 40, 60)
            pygame.draw.rect(win, (255,0,0), (self.hitbox[0], self.hitbox[1]-20,50,8))
            pygame.draw.rect(win, (0,0,255), (self.hitbox[0], self.hitbox[1]-20,self.health*5,8))

    def move(self):
        """Walk toward the current end of the patrol path; reverse (and
        restart the walk cycle) at either bound."""
        if self.vel > 0:
            if self.x + self.vel < self.path[1]:
                self.x += self.vel
            else:
                self.vel *= -1
                self.walkCount = 0
        else:
            # vel is negative here, so self.x - self.vel == x + |vel|.
            if self.x - self.vel > self.path[0]:
                self.x += self.vel
            else:
                self.vel *= -1
                self.walkCount = 0

    def hit(self):
        """Take one point of damage; on death, hide this goblin and
        remove it from the module-level `enemies` list."""
        if self.health > 1:
            self.health -= 1
        else:
            self.visible = False
            Enemy.visible_goblin -= 1
            # NOTE(review): mutates the global `enemies` list, which is
            # being iterated when hit() is called from the bullet loop —
            # confirm this can't skip elements.
            enemies.remove(self)
# Global game state: the player, fire-rate cooldown, score, and HUD font.
man = Player(50,400,54,64)
shootLoop = 0
score = 0
font = pygame.font.SysFont("comicsans", 30, True, True)
def reDrawGameWindow():
    """Redraw one frame: background, score HUD, player, goblins, bullets.

    Reads the module-level win, bg, font, score, man, enemies, bullets.
    """
    win.blit(bg, (0,0))
    text = font.render("Score: " + str(score), 1, (0,0,0))
    win.blit(text, (20,20))
    man.draw(win)
    for goblin in enemies:
        goblin.draw(win)
    for bullet in bullets:
        bullet.draw(win)
    pygame.display.update()
bullets = []
run = True
enemies = []
enemyLoop = 0

while run:
    clock.tick(27)  # cap at 27 FPS; speeds/animation counters assume this rate

    # Spawn timer: once started, count 110 frames before allowing the
    # next goblin; at most 11 goblins are ever created.
    if enemyLoop > 0:
        enemyLoop += 1
    if enemyLoop > 110:
        enemyLoop = 0
    if Enemy.enemy_count <11 and enemyLoop == 0:
        enemies.append(Enemy(random.randrange(50,451),405,64,64,450))
        enemyLoop += 1

    # All goblins spawned and none left visible -> game over.
    if Enemy.enemy_count == 10 and Enemy.visible_goblin == 0:
        man.game_over()
        run = False
        break

    # Player/goblin collision: axis-aligned rectangle overlap test.
    for goblin in enemies:
        if goblin.visible:
            if man.hitbox[1] < goblin.hitbox[1] + goblin.hitbox[3] and man.hitbox[1] + man.hitbox[3]> goblin.hitbox[1]:
                if man.hitbox[0] + man.hitbox[2] > goblin.hitbox[0] and man.hitbox[0] < goblin.hitbox[0] + goblin.hitbox[2]:
                    man.hit()
                    hitSound.play()
                    score -= 2

    # Fire-rate cooldown: a few frames between shots.
    if shootLoop > 0:
        shootLoop += 1
    if shootLoop > 3:
        shootLoop = 0

    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            run = False
            break

    # Bullet/goblin collisions and bullet movement.
    # NOTE(review): `bullets` (and, via goblin.hit(), `enemies`) are
    # mutated while being iterated — a classic skipped-element hazard;
    # confirm this is acceptable for the game.
    for bullet in bullets:
        for goblin in enemies:
            if goblin.visible:
                if bullet.y - bullet.radius < goblin.hitbox[1] + goblin.hitbox[3] and bullet.y + bullet.radius > goblin.hitbox[1]:
                    if bullet.x + bullet.radius > goblin.hitbox[0] and bullet.x -bullet.radius < goblin.hitbox[0] + goblin.hitbox[2]:
                        goblin.hit()
                        hitSound.play()
                        if bullet in bullets:
                            bullets.remove(bullet)
                        score += 1
        # Move the bullet; drop it once it leaves the screen.
        if bullet.x < 500 and bullet.x > 0:
            bullet.x += bullet.vel
        else:
            bullets.remove(bullet)

    keys = pygame.key.get_pressed()

    # Shooting: at most 5 bullets at once, fired in the facing direction.
    if keys[pygame.K_SPACE] and shootLoop == 0:
        if man.left:
            facing = -1
        else:
            facing = 1
        if len(bullets) < 5:
            bullets.append(Projectile(round(man.x + man.w//2), round(man.y + man.h//2), 3, (0,0,0), facing))
            bulletSound.play()
        shootLoop = 1

    # Horizontal movement, clamped to the playfield.
    if keys[pygame.K_LEFT] and (man.x-man.vel) > 0:
        man.x -= man.vel
        man.left = True
        man.right = False
        man.standing = False
    elif keys[pygame.K_RIGHT] and (man.x+man.h+man.vel) < screen_width:
        man.x += man.vel
        man.left = False
        man.right = True
        man.standing = False
    else:
        man.standing = True
        man.walkCount = 0

    # Jump: parabolic arc driven by jumpCount counting 10 down to -10.
    if not man.isJump:
        if keys[pygame.K_UP]:
            man.isJump = True
            man.walkCount = 0
    else:
        if man.jumpCount >= -10:
            neg = 1
            if man.jumpCount < 0:
                neg = -1
            man.y -= (man.jumpCount ** 2) *0.5 * neg
            man.jumpCount -= 1
        else:
            man.isJump = False
            man.jumpCount = 10

    reDrawGameWindow()

pygame.quit()
# -
| 11. Multiple Goblins & Final Code.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Deep Neural Network in TensorFlow
# In this notebook, we adapt our [Keras Deep Net](https://github.com/the-deep-learners/deep-learning-illustrated/blob/master/notebooks/deep_net_in_keras.ipynb) to TensorFlow.
# [](https://colab.research.google.com/github/the-deep-learners/deep-learning-illustrated/blob/master/notebooks/deep_net_in_tensorflow.ipynb)
# #### Load dependencies
import tensorflow as tf
from tensorflow.python.keras.datasets import mnist
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import Dense, Dropout
from tensorflow.python.keras.layers import BatchNormalization
from tensorflow.python.keras.optimizers import SGD
from tensorflow.python.keras.utils import to_categorical
# #### Load data
# MNIST: 60k training and 10k validation images of handwritten digits.
(X_train, y_train), (X_valid, y_valid) = mnist.load_data()
# #### Preprocess data
# Flatten each 28x28 image into a 784-vector and scale pixels to [0, 1].
X_train = X_train.reshape(60000, 784).astype('float32')
X_valid = X_valid.reshape(10000, 784).astype('float32')
X_train /= 255
X_valid /= 255
# One-hot encode the digit labels for the 10-way softmax output.
n_classes = 10
y_train = to_categorical(y_train, n_classes)
y_valid = to_categorical(y_valid, n_classes)
# #### Design neural network architecture
# +
# Three 64-unit ReLU hidden layers, each batch-normalized; dropout before
# the 10-way softmax output layer.
model = Sequential()
model.add(Dense(64, activation='relu', input_shape=(784,)))
model.add(BatchNormalization())
model.add(Dense(64, activation='relu'))
model.add(BatchNormalization())
model.add(Dense(64, activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.2))
model.add(Dense(10, activation='softmax'))
# -
model.summary()
# NOTE(review): the 'adam' optimizer is used here even though SGD is
# imported above (unused) — presumably intentional; confirm.
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(X_train, y_train, batch_size=128, epochs=2, verbose=1, validation_data=(X_valid, y_valid))
| notebooks/deep_net_in_tensorflow.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Week 2
#
# As explained in the [Before week 1: How to take this class](https://nbviewer.org/github/suneman/socialdata2022/blob/main/lectures/How_To_Take_This_Class.ipynb) notebook, each week of this class is an Jupyter notebook like this one. In order to follow the class, you simply start reading from the top, following the instructions.
#
# Hint: you can ask us - Anna or any of the friendly Teaching Assistants - for help at any point if you get
# stuck!
#
# **New Info**: Remember that this week is also the time to learn a bit about how the assignments and the final project work. So if you haven't already, check out the [Before week 2: Assignments and Final Project](https://github.com/suneman/socialdata2022/blob/main/lectures/Assignments_And_Final_Project.ipynb) notebook.
# ## Overview
#
# Today's lecture has 3 parts.
# * First we'll give you an introduction to data visualization with a little data visualization exercise and a video from Sune.
# * As the main event, we will work with crime-data and generate a large number of interesting and informative plots.
# * Finally - in the last part - we'll play around with visualizing the geo-data contained in the CSV file.
# ## Part 1: A little visualization exercise
#
# Start by downloading these four datasets: [Data 1](https://raw.githubusercontent.com/suneman/socialdata2022/main/files/data1.tsv), [Data 2](https://raw.githubusercontent.com/suneman/socialdata2022/main/files/data2.tsv), [Data 3](https://raw.githubusercontent.com/suneman/socialdata2022/main/files/data3.tsv), and [Data 4](https://raw.githubusercontent.com/suneman/socialdata2022/main/files/data4.tsv). The format is `.tsv`, which stands for _tab separated values_.
# As you will later realize, these are famous datasets!
# Each file has two columns (separated using the tab character). The first column is $x$-values, and the second column is $y$-values.
#
# It's ok to just download these files to disk by right-clicking on each one, but if you use Python and `urllib` or `urllib2` to get them, I'll really be impressed. If you don't know how to do that, I recommend opening up Google and typing "download file using Python" or something like that. When interpreting the search results remember that _stackoverflow_ is your friend.
#
# Now, to the exercise:
#
# > *Exercise 1.1*:
# >
# > * Using the `numpy` function `mean`, calculate the mean of both $x$-values and $y$-values for each dataset.
# > * Use python string formatting to print precisely two decimal places of these results to the output cell. Check out [this _stackoverflow_ page](http://stackoverflow.com/questions/8885663/how-to-format-a-floating-number-to-fixed-width-in-python) for help with the string formatting.
# > * Now calculate the variance for all of the various sets of $x$- and $y$-values, by using the `numpy` function `var`. Print it to three decimal places.
# > * Use `numpy` to calculate the [Pearson correlation](https://en.wikipedia.org/wiki/Pearson_product-moment_correlation_coefficient) between $x$- and $y$-values for all four data sets (also print to three decimal places).
# > * The next step is use _linear regression_ to fit a straight line $f(x) = a x + b$ through each dataset and report $a$ and $b$ (to two decimal places). An easy way to fit a straight line in Python is using `scipy`'s `linregress`. It works like this
# > ```
# > from scipy import stats
# > slope, intercept, r_value, p_value, std_err = stats.linregress(x,y)
# >```
# > * Comment on the results from the previous steps. What do you observe?
# > * Finally, it's time to plot the four datasets using `matplotlib.pyplot`. Use a two-by-two [`subplot`](http://matplotlib.org/examples/pylab_examples/subplot_demo.html) to put all of the plots nicely in a grid and use the same $x$ and $y$ range for all four plots. And include the linear fit in all four plots. (To get a sense of what I think the plot should look like, you can take a look at my version [here](https://raw.githubusercontent.com/suneman/socialdata2022/main/files/anscombe.png).)
# > * Explain - in your own words - what you think my point with this exercise is (see below for tips on this).
#
#
# Get more insight in the ideas behind this exercise by reading [here](https://en.wikipedia.org/wiki/Anscombe%27s_quartet). Here you can also get an explanation of why the datasets are actually famous - I mean they have their own Wikipedia page!!
#
# And the video below generalizes in the coolest way imaginable. It's a treat, but don't watch it until **after** you've done the exercises - and read the Wikipedia page. **Note:** Uncomment the line in the cell below to watch the video!
import pandas as pd


def read_dataset(num):
    """Load ../files/data<num>.tsv into a DataFrame with columns x and y.

    A proper `def` replaces the original lambda assignment (PEP 8 E731),
    which also gives the function a useful name in tracebacks.
    """
    path = "../files/data{0}.tsv".format(num)
    return pd.read_csv(path, sep='\t', header=None, names=["x", "y"])


# The four Anscombe-style datasets, data1.tsv .. data4.tsv.
data = [read_dataset(i) for i in range(1, 5)]
# Exercise 1: Mean of x-values and y-values in each dataset
for i in range(4):
    mean = data[i].mean()
    # data[i] was loaded from data{i+1}.tsv, so label the output i + 1;
    # column-name access replaces deprecated positional Series indexing.
    print(f"data{i + 1}.tsv\tx: {mean['x']:.2f}, y: {mean['y']:.2f}")
# Exercise 1: Variance of x-values and y-values in each dataset
for i in range(4):
    variance = data[i].var()
    # The exercise asks for the variance to *three* decimal places
    # (the original printed only two), and data[i] holds data{i+1}.tsv.
    print(f"data{i + 1}.tsv\tx: {variance['x']:.3f}, y: {variance['y']:.3f}")
def pearson(dataset):
    """Pearson correlation between the x and y columns of *dataset*.

    Implements r = cov(x, y) / (std(x) * std(y)).
    """
    std = dataset.std()
    # .loc["x", "y"] replaces the deprecated positional ["x"][1] lookup.
    covariance = dataset.cov().loc["x", "y"]
    # The denominator must be the *product* of the standard deviations;
    # the original `cov / std_x * std_y` divided by std_x and then
    # multiplied by std_y (operator precedence bug).
    return covariance / (std["x"] * std["y"])
# Exercise 1: Pearson correlation between x-values and y-values in each dataset
for i in range(4):
    p = pearson(data[i])
    # data[i] came from data{i+1}.tsv, so label accordingly.
    print(f"data{i + 1}.tsv{p:10.3f}")
# Exercise 1: Linear regression on each dataset
from scipy import stats

for i in range(4):
    slope, intercept, r_value, p_value, std_err = stats.linregress(data[i]["x"], data[i]["y"])
    # data[i] came from data{i+1}.tsv; report the fitted line f(x) = a*x + b.
    print(f"data{i + 1}.tsv\tf(x)={slope:.2f}*x + {intercept:.2f}")
# Up until this point the simple descriptive statistics of these datasets produced the same results.
# +
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter

# 2x2 grid: one panel per dataset, with identical axis ranges so the
# four distributions are directly comparable.
fig, ax = plt.subplots(2, 2, figsize=(10, 7), constrained_layout=True)
for x in range(0, 2):
    for y in range(0, 2):
        n = x + y * 2  # panel (row y, column x) shows dataset index n (0-3)
        subplot = ax[y, x]
        subplot.plot(data[n]["x"], data[n]["y"], 'ko')
        # A degree-1 polynomial fit is the least-squares line a*x + b.
        linear_fit = np.poly1d(np.polyfit(data[n]["x"], data[n]["y"], 1))
        fit_range = np.arange(4, 16)
        subplot.plot(fit_range, linear_fit(fit_range), color="red")
        subplot.title.set_text(f"Data {n + 1}")
        subplot.set_xlabel("x")
        subplot.set_ylabel("y")
        subplot.set_xlim(0, 20)
        subplot.set_ylim(2, 14)
        subplot.xaxis.set_major_formatter(FormatStrFormatter('%.0f'))
plt.show()
# -
# **Purpose of Exercise 1:** Demonstrate how simple metrics computed on a dataset are often not enough to fully describe its distribution. This highlights the importance of visualization.
from IPython.display import YouTubeVideo
#Uncomment the following line to watch the video
# NOTE(review): the call below is already active despite the comment above.
YouTubeVideo("DbJyPELmhJc",width=800, height=450)
# Now that you get a better sense of why data visualization is an important and powerful tool, you are ready to get a small intro on the topic! Again, don't watch the video until **after** you've done exercise 1.1
#
# [](https://www.youtube.com/watch?v=9D2aI30AMhM)
# > *Excercise 1.2:* Questions for the lecture
# > * What is the difference between *data* and *metadata*? How does that relate to the GPS tracks-example?
# > * Sune says that the human eye is a great tool for data analysis. Do you agree? Explain why/why not. Mention something that the human eye is very good at. Can you think of something that [is difficult for the human eye](http://cdn.ebaumsworld.com/mediaFiles/picture/718392/84732652.jpg). Explain why your example is difficult.
# > * Simpson's paradox is hard to explain. Come up with your own example - or find one online.
# > * In your own words, explain the difference between *exploratory* and *explanatory* data analysis.
#
# **Data** is a collection of facts.
# **Metadata** provides information about other data and helps make sense of datasets.
#
# The human eye is a good tool for data analysis because it enables us to quickly grasp the main properties of a dataset. Using our eyes we can also easily identify extreme cases and some patterns.
# However, small details and randomness are difficult to analyse with only our eyes, as we can get lost in the details.
#
# **Simpson's paradox example:**
# In the case of the UC Berkeley gender bias, the overall aggregated data showed that men were more likely to be admitted to the university than women. However, the pooled and corrected data showed a small but statistically significant bias in favor of women.
#
# **Exploratory data analysis** is used to spot patterns, formulate hypotheses and obtain insight about the data. Exploratory analysis precedes explanatory analysis.
#
# **Explanatory data analysis** is an important tool to represent knowledge and communicate more effectively.
# ## Part 2: Visualizing patterns in the data
#
# Visualizing data is a powerful technique that helps us exploiting the human eye, and make complex patterns easier to identify.
#
# Let's see if we can detect any interesting patterns in the big crime-data file from San Francisco you downloaded last week. We'll again only look at the focus-crimes.
# The 14 "focus crime" categories analysed below, plus weekday names in
# calendar order (used as an ordered Categorical later).
focuscrimes = set(['WEAPON LAWS', 'PROSTITUTION', 'DRIVING UNDER THE INFLUENCE', 'ROBBERY', 'BURGLARY', 'ASSAULT', 'DRUNKENNESS', 'DRUG/NARCOTIC', 'TRESPASS', 'LARCENY/THEFT', 'VANDALISM', 'VEHICLE THEFT', 'STOLEN PROPERTY', 'DISORDERLY CONDUCT'])
cats = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
# > *Exercise 2.1*: More temporal patterns. Last time we plotted the development over time (how each of the focus crimes changed over time, year-by-year). Today we'll start by looking at the developments across the months, weekdays, and across the 24 hours of the day.
# >
# > **Note:** restrict yourself to the dataset of *entire years*.
# >
# > * *Weekly patterns*. Basically, we'll forget about the yearly variation and just count up what happens during each weekday. [Here's what my version looks like](https://raw.githubusercontent.com/suneman/socialdata2022/main/files/weekdays.png). Some things make sense - for example `drunkenness` and the weekend. But there are some aspects that were surprising to me. Check out `prostitution` and mid-week behavior, for example!?
# > * *The months*. We can also check if some months are worse by counting up number of crimes in Jan, Feb, ..., Dec. Did you see any surprises there?
# > * *The 24 hour cycle*. We can also forget about weekday and simply count up the number of each crime-type that occurs in the dataset from midnight to 1am, 1am - 2am ... and so on. Again: Give me a couple of comments on what you see.
# > * *Hours of the week*. But by looking at just 24 hours, we may be missing some important trends that can be modulated by week-day, so let's also check out the 168 hours of the week. So let's see the number of each crime-type Monday night from midninght to 1am, Monday night from 1am-2am - all the way to Sunday night from 11pm to midnight.
#
data = pd.read_csv("Police_Department_Incident_Reports__Historical_2003_to_May_2018.csv")
data["Date"] = pd.to_datetime(data["Date"], format="%m/%d/%Y")
# Keep only whole calendar years (2003-2017), as the exercise requires.
complete_years = data.loc[(data["Date"] >= "2003-01-01") & (data["Date"] < "2018-01-01")]
complete_years.head()
# +
#Weekly patterns
import matplotlib.pyplot as plt

# NOTE(review): this cell uses the full `data` frame, not the
# `complete_years` subset the exercise asks for — confirm intent.
fig, axes = plt.subplots(7, 2, figsize=(10, 18), sharex=True)
fig.tight_layout(w_pad=3)
figidx = 0
for c in sorted(focuscrimes):
    category_data = data.loc[data["Category"] == c]
    # Ordered Categorical so value_counts() returns Monday..Sunday order
    # rather than frequency order.
    n = pd.Categorical(category_data['DayOfWeek'], categories=cats, ordered=True)
    daily_crimes = n.value_counts()
    y = int(figidx / 7)  # grid column (0 or 1)
    x = int(figidx % 7)  # grid row (0-6)
    m = daily_crimes.values.max()
    subplot = axes[x, y]
    subplot.bar(daily_crimes.index.categories, daily_crimes.values, 0.6)
    subplot.tick_params(axis='x', labelrotation=90)
    subplot.text(0.05, 0.9, c, transform=subplot.transAxes)
    subplot.set_ylabel("Crime count")
    subplot.set_ylim(None, m * 1.4)  # headroom for the category label
    figidx += 1
plt.setp(axes[-1, :], xlabel="Day of week")
fig.suptitle("Number of crimes per week-day by category")
fig.subplots_adjust(top=0.90)
plt.show()
# -
# Month names in calendar order; attach them as an ordered Categorical so
# later groupbys come out January..December rather than alphabetically.
month_cats = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December']
data['MonthOfYear'] = pd.Categorical(data['Date'].dt.month_name(), categories=month_cats, ordered=True)
# +
# One 7x2 grid of bar charts: crimes per month for each focus category.
fig, axes = plt.subplots(7, 2, figsize=(10, 18), sharex=True)
fig.tight_layout(w_pad=3)
figidx = 0
for c in sorted(focuscrimes):
    category_data = data.loc[data["Category"] == c]
    # Grouping on the ordered Categorical keeps calendar month order.
    monthly_crimes = category_data.groupby('MonthOfYear').size()
    y = int(figidx / 7)  # grid column (0 or 1)
    x = int(figidx % 7)  # grid row (0-6)
    m = monthly_crimes.values.max()
    subplot = axes[x, y]
    subplot.bar(monthly_crimes.index, monthly_crimes.values, 0.6)
    subplot.tick_params(axis='x', labelrotation=90)
    subplot.text(0.05, 0.9, c, transform=subplot.transAxes)
    subplot.set_ylabel("Crime count")
    subplot.set_ylim(None, m * 1.4)  # headroom for the category label
    figidx += 1
plt.setp(axes[-1, :], xlabel="Month")
fig.suptitle("Number of crimes per month of year by category")
fig.subplots_adjust(top=0.90)
plt.show()
# -
# The above plots illustrate the number of crimes per month of the year by category. Overall, February seems to have the lowest amount of crimes in many categories. Crime rate seem to also drop during June.
# Parse the HH:MM time strings into datetimes so the hour can be extracted.
data['Time'] = pd.to_datetime(data['Time'])
# Generate one "H:00 - H:59" axis label per hour of the day.
labels = [f"{hour}:00 - {hour}:59" for hour in range(24)]
# +
# One 7x2 grid of bar charts: crimes per hour of day for each focus category.
fig, axes = plt.subplots(7, 2, figsize=(10, 18), sharex=True)
fig.tight_layout(w_pad=3)
figidx = 0
for c in sorted(focuscrimes):
    category_data = data.loc[data["Category"] == c]
    # value_counts() orders by frequency, and sort_index() is NOT in-place
    # (the original discarded its return value, so bars were drawn in
    # frequency order against hour-of-day labels). reindex() also fills
    # hours with zero crimes so the bar count always matches the 24 labels.
    hourly_crimes = category_data['Time'].dt.hour.value_counts().reindex(range(24), fill_value=0)
    y = int(figidx / 7)  # grid column (0 or 1)
    x = int(figidx % 7)  # grid row (0-6)
    m = hourly_crimes.values.max()
    subplot = axes[x, y]
    subplot.bar(labels, hourly_crimes.values, 0.6)
    subplot.tick_params(axis='x', labelrotation=90)
    subplot.text(0.05, 0.9, c, transform=subplot.transAxes)
    subplot.set_ylabel("Crime count")
    subplot.set_ylim(None, m * 1.4)
    figidx += 1
plt.setp(axes[-1, :], xlabel="Hour")
fig.suptitle("Number of crimes per hour of the day by category")
fig.subplots_adjust(top=0.90)
plt.show()
# -
# The highest crime rates are after midnight and before 7:00.
# However, each category's distribution differs. For example assault rates only slightly fall during the day, (from midnight to 19:00), while disorderly conduct rates significantly decrease after 2:00.
# NOTE(review): `c` here is left over from the previous cell's loop, so
# these exploratory tables cover only the *last* focus crime iterated
# above — confirm that is intended.
category_data = data.loc[data["Category"] == c]
week_hour_crimes = category_data.groupby(['DayOfWeek', 'Time']).size().reset_index()
week_hour_crimes.head()
# Hour-of-week key: (weekday - 1) * 24 + hour of day.
d = category_data.groupby((category_data['Date'].dt.dayofweek - 1) * 24 + category_data['Time'].dt.hour).size()
d.value_counts().sort_index()
# +
# One stacked column of 14 bar charts, one per focus category.
# NOTE(review): the exercise asks for the 168 hours of the week, but this
# cell plots the 24-hour cycle per category — confirm intent.
fig, axes = plt.subplots(14, 1, figsize=(20, 18), sharex=True)
fig.tight_layout()
# enumerate() supplies the subplot row index; the original
# `for c, idx in sorted(focuscrimes)` tried to unpack each category
# *string* into two names and raised ValueError.
for idx, c in enumerate(sorted(focuscrimes)):
    category_data = data.loc[data["Category"] == c]
    # sort_index() is not in-place; reindex() also fills empty hours with 0
    # so the bar values line up with the 24 hour labels.
    hourly_crimes = category_data['Time'].dt.hour.value_counts().reindex(range(24), fill_value=0)
    m = hourly_crimes.values.max()
    # subplots(14, 1) returns a 1-D axes array, so index with a single
    # subscript (the original axes[idx, 1] raised IndexError).
    subplot = axes[idx]
    subplot.bar(labels, hourly_crimes.values, 0.6)
    subplot.tick_params(axis='x', labelrotation=90)
    subplot.text(0.05, 0.9, c, transform=subplot.transAxes)
    subplot.set_ylabel("Crime count")
    subplot.set_ylim(None, m * 1.4)
plt.setp(axes[-1], xlabel="Hour")
fig.suptitle("Number of crimes per hour of the day by category")
fig.subplots_adjust(top=0.90)
plt.show()
# -
# The next thing we'll be looking into is how crimes break down across the 10 districts in San Francisco.
#
# > *Exercise 2.2*: The types of crime and how they take place across San Francisco's police districts.
# >
# > * So now we'll be combining information about `PdDistrict` and `Category` to explore differences between SF's neighborhoods. First, simply list the names of SF's 10 police districts.
# > * Which has the most crimes? Which has the most focus crimes?
# > * Next, we want to generate a slightly more complicated graphic. I'm interested to know if there are certain crimes that happen much more in certain neighborhoods than what's typical. Below I describe how to get that plot going:
# > - First, we need to calculate the relative probabilities of seeing each type of crime in the dataset as a whole. That's simply a normalized version of [this plot](https://raw.githubusercontent.com/suneman/socialdata2022/main/files/CrimeOccurrencesByCategory.png). Let's call it `P(crime)`.
# > - Next, we calculate that same probability distribution _but for each PD district_, let's call that `P(crime|district)`.
# > - Now we look at the ratio `P(crime|district)/P(crime)`. That ratio is equal to 1 if the crime occurs at the same level within a district as in the city as a whole. If it's greater than one, it means that the crime occurs _more frequently_ within that district. If it's smaller than one, it means that the crime is _rarer within the district in question_ than in the city as a whole.
# > - For each district plot these ratios for the 14 focus crimes. My plot looks like this
# > 
# > - Comment on the top crimes in _Tenderloin_, _Mission_, and _Richmond_. Does this fit with the impression you get of these neighborhoods on Wikipedia?
#
# **Comment**. Notice how much awesome datascience (i.e. learning about interesting real-world crime patterns) we can get out by simply counting and plotting (and looking at ratios). Pretty great, right? However, when generating meaningful visualizations, we need to be wary of *perceptual errors*. We'll have a look at this in the final exercise while also having fun with some geodata!
# Quick sanity check of the crime DataFrame before moving on to the geodata section.
data.head()
# ## Part 3: Visualizing geodata with Plotly
#
# So visualizing geodata used to be difficult, but with `Plotly` things have gotten easier.
#
# Like matplotlib, Plotly is an [open-source data visualization library](https://plotly.com/python/), but it's aimed at making interactive visualizations that can be rendered in a web browser (or jupyter notebook). You can read about it and learn how to install it [here](https://plotly.com/python/getting-started/).
#
# That means that we can easily draw on the fact that the crime data has lots of exciting geo-data attached. The map we're going to be creating is called a **[choropleth map](https://en.wikipedia.org/wiki/Choropleth_map)** (more on these later), which is basically a map, where we color in shapefiles (more on this below) based on some value that we care about. We'll take our inspiration from Plotly's gentle intro to [Choropleth maps](https://plotly.com/python/mapbox-county-choropleth/)
#
# The thing we want to look into is the SF police districts, shown below (image stolen from [this page](https://hoodline.com/2015/07/citywide-sfpd-redistricting-to-take-effect-sunday/)).
#
# 
#
# But because we are cool programmers, we want to create our own maps, **with our own information on them**. Let's do it!
#
# > *Exercise 3a*: Let's plot a map with some random values in it.
# >
# > What we need to do to get going is to create some random data. Below is a little dictionary with a random value for each district that you can use if you want your plots to look like mine.
# One random value per SFPD district, used as placeholder data for the
# demo choropleth in Exercise 3a (same values as the reference plots).
randomdata = {
    'CENTRAL': 0.8903601342256143,
    'SOUTHERN': 0.8642882941363439,
    'BAYVIEW': 0.925634097746596,
    'MISSION': 0.7369022697287458,
    'PARK': 0.9864113307070926,
    'RICHMOND': 0.5422239624697017,
    'INGLESIDE': 0.5754056712571605,
    'TARAVAL': 0.5834730737348696,
    'NORTHERN': 0.08148199528212985,
    'TENDERLOIN': 0.37014287986350447,
}
# > *Exercise 3a* continued:
# >
# > For this exercise, we'll use the random values above and we'll also need some *shape-files*.
# > [Shapefiles can have many different formats](https://en.wikipedia.org/wiki/Shapefile). Because we are brilliant teachers and all-round stand-up people, we are sharing the shapefiles as [`geojson`](https://en.wikipedia.org/wiki/GeoJSON), which is an easy-to-use format for shapefiles based on `json`.
# >
# > * Download the SFPD District shapefiles **[here](https://raw.githubusercontent.com/suneman/socialdata2022/main/files/sfpd.geojson)**
# > * Now that you have the shapefiles, you can follow the example here: https://plotly.com/python/mapbox-county-choropleth/ but with the following modifications:
# > * In the example the `id` is a so-called FIPS code. In our case the `id` is the `DISTRICT`
# > * You will have to convert the dictionary of random values I included above to a Pandas dataframe with the right column headings.
# >    * The data used in the example has a range between zero and 12. Your data is between $[0,1]$. So you'll need to modify the plotting command to account for that change.
# > * You should also change the map to display the right zoom level.
# > * And the map should center on San Francisco's `lat` and `lon`.
# > * Now you can create your map.
#
# Mine looks something like this.
#
# 
#
# You're encouraged to play around with other settings, color schemes, etc.
# > *Exercise 3b:* But it's crime-data. Let's do something useful and **visualize where it is safest to leave your car on a Sunday**.
# >
# > Take a moment to congratulate yourself. You now know how to create cool plots!
# > * Now, we can focus on our main goal: *determine the districts where you should (and should not) leave your car on Sundays*. (Or stated differently, count up the number of thefts.)
# > * To do so, first:
# > * Filter the crime dataset by the `DayOfWeek` category and also choose the appropriate crime category.
# > * Aggregate data by police district.
# > * To create the plot, remember that your range of data-values is different from before, so you'll have to change the plotly command a bit.
# > * **Based on your map and analysis, where should you park the car for it to be safest on a Sunday? And where's the worst place?**
# >   * Using visualizations can help us uncover powerful data-patterns. However, when designing visualizations, we need to be aware of several illusions that can lead viewers to misinterpret the data we are showing (i.e. *perceptual errors*):
# >       * Try to change the range of data-values in the plot above. Is there a way to make the difference between districts less evident?
# > * Why do you think perceptual errors are a problem? Try to think of a few examples. You can have a look at this [article](https://www.businessinsider.com/fox-news-obamacare-chart-2014-3?r=US&IR=T) to get some inspiration.
# > * *Try this for Extra credit:*
# > * Create plots for the same crime type, but different days, and comment on the results.
| lectures/Week2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import json
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from mpl_toolkits.mplot3d import Axes3D
import matplotlib
dataset = "athletes"  # which graph dataset to analyse
key_idx = -1          # index into `keys` below (-1 -> "cluster_influencer")
# +
keys = ["random", "cluster_random", "influencer","cluster_influencer"]
# Node feature table for the chosen dataset/seeding strategy, indexed by node id.
# The "label" column is dropped here; labels are loaded separately below.
df_name = "outputs/%s_%s.csv"%(dataset, keys[key_idx])
df = pd.read_csv(df_name, index_col="index")
df = df.drop(["label"], axis=1)
print(df.shape)
# Cluster means, node embedding coordinates, and the graph edge list.
df_means = pd.read_csv("master/outputs/cluster_means/%s_means.csv"%dataset)
df_emb = pd.read_csv("master/outputs/embeddings/%s_embedding.csv"%dataset)
df_edges = pd.read_csv("master/data/%s_edges.csv"%dataset, index_col="node_1")
# Cluster assignments: JSON mapping of node id (string) -> cluster label.
with open('master/outputs/assignments/%s.json'%dataset) as json_file:
    labels = json.load(json_file)
# Convert the mapping to a DataFrame with a sorted integer index so it aligns
# with the other tables loaded above.
df_label = pd.DataFrame({"index" : list(labels.keys()), "label" : list(labels.values())})
df_label["index"] = df_label["index"].astype(int)
df_label = df_label.set_index("index")
df_label = df_label.sort_index()
print(df_label.shape)
n_class = len(np.unique(df_label.label))  # number of distinct cluster labels
# -
# Scree plot: cumulative explained variance of a full PCA fit on the embedding,
# plus the share captured by the first three components.
pca = PCA().fit(df_emb)
plt.plot(np.cumsum(pca.explained_variance_ratio_))
print("explained var %.2f"%sum(pca.explained_variance_ratio_[:3]))
# Project the embedding down to 3 components for the 3D animation.
pca = PCA(3)
projected = pca.fit_transform(df_emb)
df_emb = pd.DataFrame(projected, index=df_emb.index)
# +
# Sort points by distance from the origin; the animation below plots rows in
# this order, so the point cloud grows outward from the centre.
df_emb["norm"] = np.linalg.norm(df_emb, axis=1)
df_emb["label"] = df_label.label
df_emb.sort_values("norm", inplace=True)
df_emb.head()
# -
# Re-number rows in the new (sorted) order; keep the labels aside and strip the
# helper columns so df_emb again holds only coordinates (plus the old index).
df_emb = df_emb.reset_index()
df_label = df_emb[["label"]]
df_emb = df_emb.drop(["norm", "label"], axis=1)
# One matplotlib colour per cluster (cycled if n_class exceeds the palette),
# plus black ("k") appended as an extra sentinel colour.
colors_choice = ["C0", "C1", "C2", "C3", "C4", "C8", "C9"]
colors = [colors_choice[i%len(colors_choice)] for i in range(n_class)]
colors.append("k")
df_label.head()
# +
# Animation timing: the full point cloud is revealed over `duration` seconds
# at `fps` frames per second.
duration = 3
fps = 48
columns = df.columns
end = len(df_emb)
# end = duration * fps
batch = int(end / (duration * fps))
print("batch", batch)
# One cumulative point count per frame, one batch at a time, clamped so the
# final frame never runs past the last row of the embedding.
frames = [min(end - 1, (j + 1) * batch) for j in range(end // batch + 1)]
alpha = 0.05
# +
# Static preview of the animation's first frame (i = 0 -> an empty scatter).
fig = plt.figure(figsize=(8, 5))
ax = Axes3D(fig)  # NOTE(review): direct Axes3D(fig) is deprecated in newer
                  # matplotlib; fig.add_subplot(projection='3d') is preferred.
i = 0
col = df[columns[-1]]
labels = df_label.label.values.copy()
# labels[col] = len(colors)
ax.cla()
# Columns 1..3 of df_emb are the PCA coordinates (column 0 is the old index
# that reset_index restored earlier).
ax.scatter(df_emb.iloc[:i, 1], df_emb.iloc[:i, 2], df_emb.iloc[:i, 3],
           c=labels[:i], cmap=matplotlib.colors.ListedColormap(colors), alpha=alpha)
# Fixed, slightly asymmetric axis limits to frame the point cloud.
limit = 14
ax.set_xlim(-limit + 3, limit)
ax.set_ylim(-limit, limit)
ax.set_zlim(-limit + 10, limit - 3)
ax._axis3don = False  # hide the 3D axes (private matplotlib attribute)
ax.w_xaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))  # white pane background
# +
def update(i):
    """Draw one animation frame: scatter the first `i` embedded points.

    `i` comes from `frames` (cumulative point counts), so each successive
    frame reveals one more batch of points, coloured by cluster label.
    """
    if i % batch == 0:
        print('frame %i'%i, end="\r")  # lightweight progress indicator
    ax.cla()
    ax.scatter(df_emb.iloc[:i, 1], df_emb.iloc[:i, 2], df_emb.iloc[:i, 3],
               c=labels[:i], cmap=matplotlib.colors.ListedColormap(colors), alpha=alpha)
    # cla() resets the axes, so re-apply the fixed framing every frame.
    ax.set_xlim(-limit + 3, limit)
    ax.set_ylim(-limit, limit)
    ax.set_zlim(-limit + 10, limit - 3)
    ax._axis3don = False
    ax.w_xaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))
    return ax
anim = FuncAnimation(fig, update, frames=frames, interval=1000 / fps)
# Render to GIF; the 'imagemagick' writer requires ImageMagick to be installed.
fname = '%s_%is_%ifps_alpha_%.2f_end%i.gif'%(df_name, duration, fps, alpha, end)
anim.save(fname, dpi=80, writer='imagemagick')
print(fname)
plt.close()
# -
| case_study/gemsec/infection_animation_artwork2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/alvaritoguil1/RentaVble/blob/main/Derivados_01.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="XNnlG--tcsBu"
# # Python en Finanzas
# ## Iniciando Python
# Lo habitual es hacer un hola mundo cuando se comienza a programar en un lenguaje.
# + colab={"base_uri": "https://localhost:8080/"} id="VspoJcCVdjJT" outputId="a6c18249-64b8-43b0-a4e7-3538f77e1bb5"
print("hola mundo")
# + [markdown] id="36xR6ps4dqwy"
# #Variables
# + colab={"base_uri": "https://localhost:8080/"} id="lJfrbxfvdtWi" outputId="bd166fb2-2464-405e-b3a3-a8457af45a4d"
# Assign an integer literal, then inspect its value and its type.
x = 5
for shown in (x, type(x)):
    print(shown)
| Derivados_01.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Now that you can create your own line charts, it's time to learn about more chart types!
#
# > By the way, if this is your first experience with writing code in Python, you should be _very proud_ of all that you have accomplished so far, because it's never easy to learn a completely new skill! If you stick with the micro-course, you'll notice that everything will only get easier (while the charts you'll build will get more impressive!), since the code is pretty similar for all of the charts. Like any skill, coding becomes natural over time, and with repetition.
#
# In this tutorial, you'll learn about **bar charts** and **heatmaps**.
#
# # Set up the notebook
#
# As always, we begin by setting up the coding environment. (_This code is hidden, but you can un-hide it by clicking on the "Code" button immediately below this text, on the right._)
# + _kg_hide-input=true _kg_hide-output=true
# Environment setup: pandas, matplotlib (inline) and seaborn.
import pandas as pd
pd.plotting.register_matplotlib_converters()
import matplotlib.pyplot as plt
# %matplotlib inline
import seaborn as sns
print("Setup Complete")
# -
# # Select a dataset
#
# In this tutorial, we'll work with a dataset from the US Department of Transportation that tracks flight delays.
#
# Opening this CSV file in Excel shows a row for each month (where `1` = January, `2` = February, etc) and a column for each airline code.
#
# 
#
# Each entry shows the average arrival delay (in minutes) for a different airline and month (all in year 2015). Negative entries denote flights that (_on average_) tended to arrive early. For instance, the average American Airlines flight (_airline code: **AA**_) in January arrived roughly 7 minutes late, and the average Alaska Airlines flight (_airline code: **AS**_) in April arrived roughly 3 minutes early.
#
# # Load the data
#
# As before, we load the dataset using the `pd.read_csv` command.
# +
# Path of the file to read
flight_filepath = "data/flight_delays.csv"
# Read the file into a variable flight_data; "Month" becomes the row index.
# No parse_dates=True here since the months are plain integers, not dates.
flight_data = pd.read_csv(flight_filepath, index_col="Month")
# -
# You may notice that the code is slightly shorter than what we used in the previous tutorial. In this case, since the row labels (from the `'Month'` column) don't correspond to dates, we don't add `parse_dates=True` in the parentheses. But, we keep the first two pieces of text as before, to provide both:
# - the filepath for the dataset (in this case, `flight_filepath`), and
# - the name of the column that will be used to index the rows (in this case, `index_col="Month"`).
#
# # Examine the data
#
# Since the dataset is small, we can easily print all of its contents. This is done by writing a single line of code with just the name of the dataset.
# Print the data
flight_data
# # Bar chart
#
# Say we'd like to create a bar chart showing the average arrival delay for Spirit Airlines (_airline code: **NK**_) flights, by month.
# +
# Set the width and height of the figure
plt.figure(figsize=(10,6))
# Add title
plt.title("Average Arrival Delay for Spirit Airlines Flights, by Month")
# Bar chart showing average arrival delay for Spirit Airlines flights by month
# (x: the "Month" row index, y: the 'NK' airline column)
sns.barplot(x=flight_data.index, y=flight_data['NK'])
# Add label for vertical axis
plt.ylabel("Arrival delay (in minutes)")
# -
# The commands for customizing the text (title and vertical axis label) and size of the figure are familiar from the previous tutorial. The code that creates the bar chart is new:
#
# ```python
# # Bar chart showing average arrival delay for Spirit Airlines flights by month
# sns.barplot(x=flight_data.index, y=flight_data['NK'])
# ```
# It has three main components:
# - `sns.barplot` - This tells the notebook that we want to create a bar chart.
# - _Remember that `sns` refers to the [seaborn](https://seaborn.pydata.org/) package, and all of the commands that you use to create charts in this course will start with this prefix._
# - `x=flight_data.index` - This determines what to use on the horizontal axis. In this case, we have selected the column that **_index_**es the rows (in this case, the column containing the months).
# - `y=flight_data['NK']` - This sets the column in the data that will be used to determine the height of each bar. In this case, we select the `'NK'` column.
#
# > **Important Note**: You must select the indexing column with `flight_data.index`, and it is not possible to use `flight_data['Month']` (_which will return an error_). This is because when we loaded the dataset, the `"Month"` column was used to index the rows. **We always have to use this special notation to select the indexing column.**
#
# # Heatmap
#
# We have one more plot type to learn about: **heatmaps**!
#
# In the code cell below, we create a heatmap to quickly visualize patterns in `flight_data`. Each cell is color-coded according to its corresponding value.
# +
# Set the width and height of the figure
plt.figure(figsize=(14,7))
# Add title
plt.title("Average Arrival Delay for Each Airline, by Month")
# Heatmap showing average arrival delay for each airline by month;
# annot=True prints each cell's numeric value on the chart
sns.heatmap(data=flight_data, annot=True)
# Add label for horizontal axis
plt.xlabel("Airline")
# -
# The relevant code to create the heatmap is as follows:
# ```python
# # Heatmap showing average arrival delay for each airline by month
# sns.heatmap(data=flight_data, annot=True)
# ```
# This code has three main components:
# - `sns.heatmap` - This tells the notebook that we want to create a heatmap.
# - `data=flight_data` - This tells the notebook to use all of the entries in `flight_data` to create the heatmap.
# - `annot=True` - This ensures that the values for each cell appear on the chart. (_Leaving this out removes the numbers from each of the cells!_)
#
# _What patterns can you detect in the table? For instance, if you look closely, the months toward the end of the year (especially months 9-11) appear relatively dark for all airlines. This suggests that airlines are better (on average) at keeping schedule during these months!_
#
# # What's next?
#
# Create your own visualizations with a **[coding exercise](https://www.kaggle.com/kernels/fork/2951537)**!
# ---
#
#
#
#
# *Have questions or comments? Visit the [Learn Discussion forum](https://www.kaggle.com/learn-forum/161291) to chat with other Learners.*
| 19-Day-Kaggle-Competition/bar-charts-and-heatmaps.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # WXYZ
# > _Experimental widgets. Probably don't expect them to keep working right now._
#
# Some examples of using `ipywidgets` and JupyterLab a little roughly.
# + language="html"
# <h1><a href="https://jupyter.org" data-commandlinker-command="notebook:restart-run-all" >Restart & Run All</a></h1>
# -
import importnb
# importnb lets the example notebooks under ./Examples be imported as modules.
with importnb.Notebook():
    from Examples import (
        Hello_Worlds, JSON_LD_Playground, JSON_Tricks, Dock_Panel, Full_Screen, SVG, DataGrid,
        StyleGrid_I, StyleGrid_II, SelectGrid, JSON_Schema_Form, Terminal
    )
# # [Hello Worlds](./Examples/Hello%20Worlds.ipynb)
# Transform data into markup, with various interactive stops along the way.
Hello_Worlds.hello_worlds  # bare expressions like this render the widget as cell output
# # [JSON Tricks](./Examples/JSON%20Tricks.ipynb)
# Interactively learn about JSON Schema and JSON Pointer.
# + jupyter={"source_hidden": true}
JSON_Tricks.json_tricks
# -
# ## [JSON-LD Playground](./Examples/JSON-LD%20Playground.ipynb)
# A recreation of the [JSON-LD Playground](https://json-ld.org/playground/).
# + jupyter={"source_hidden": true}
JSON_LD_Playground.jsonld_playground
# -
# ## [Dock Panel](./Examples/Dock%20Panel.ipynb)
# A partial data model for the Phosphor [Dock Panel](http://phosphorjs.github.io/examples/dockpanel/).
# + jupyter={"source_hidden": true}
Dock_Panel.resizable_dock
# -
# ### Dock Pop
# Like a `DockPanel`, but just puts stuff into the JupyterLab main area.
# + jupyter={"source_hidden": true}
Dock_Panel.make_a_slider
# -
# ## [Full Screen](./Examples/Full%20Screen.ipynb)
# Full screen widgets on <kbd>Click</kbd> or <kbd>Ctrl+Click</kbd>, rebooted from `ipylayoutwidgets`.
# + jupyter={"source_hidden": true}
Full_Screen.sine
# + jupyter={"source_hidden": true}
Full_Screen.dock
# -
# <a name="svg"/>
#
# # [SVG](./Examples/SVG.ipynb)
# Another crossover from `ipylayoutwidgets`. Put some widgets where you want them on an SVG loaded from a file made in [Inkscape](https://inkscape.org/), or [some other source](https://svgwrite.readthedocs.io).
# + jupyter={"source_hidden": true}
Full_Screen.Fullscreen([SVG.svg])
# -
# # [DataGrid](./Examples/DataGrid.ipynb)
# The Phosphor DataGrid.
# + jupyter={"source_hidden": true}
dg1 = DataGrid.make_grid()
dg1
# -
# ### Custom Cell Renderers
# WIP, but usable (maybe)
# + jupyter={"source_hidden": true}
dg2 = StyleGrid_II.make_style_grid_ii()
dg2
# -
# ### SelectGrid
# Very WIP.
# + jupyter={"source_hidden": true}
pg3 = SelectGrid.make_select_grid()
pg3
# -
# # [JSON Schema Form](./Examples/JSON+Schema+Form.ipynb)
# The playground factory returns two widgets; only fb1 is displayed here.
f1, fb1 = JSON_Schema_Form.make_a_json_schema_form_playground()
fb1
# # [Terminal](./Examples/Terminal.ipynb)
term = Terminal.make_a_fancy_terminal_demo()
term
| notebooks/index.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from IPython.display import HTML
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation
# %matplotlib notebook
# Emit JS/CSS that hides this notebook's input cells by default (with a link
# to toggle them back on) and centres PNG outputs. The HTML object is the last
# expression in the cell, so Jupyter renders it as the cell's output.
# The payload string is runtime content and must stay exactly as-is.
HTML('''<script>
code_show=true;
function code_toggle() {
if (code_show){
$('div.input').hide();
} else {
$('div.input').show();
}
code_show = !code_show
}
$( document ).ready(code_toggle);
</script>
The raw code for this Jupyter notebook is by default hidden for easier reading.
To toggle on/off the raw code, click <a href="javascript:code_toggle()">here</a>.
<style>
.output_png {
display: table-cell;
text-align: center;
vertical-align: middle;
}
</style>
''')
# -
# 
# <h1><center>Module 01: Introduction </center></h1>
#
# In my experience, (geophysical) digital signal processing (or "DigSig") represents one of the most important sequences of steps that we geophysicists use when taking geophysical data recorded in the field and generating our desired geophysical analysis outputs (i.e., models, images) that are used in publications, reports and theses.
#
# However, there are many challenges that early-career geophysicists face when starting out on this path. In particular, with geophysical data (like in other disciplines) there are many many many different **processing strategies** and **algorithms** to choose from, the **order of processing workflow** must be developed, and this usually involves selecting values for scores of different **parameters**. Is there a right or wrong way to process the data? How does one establish a judicious, well thought out, and computationally efficient workflow? In many ways it might seem to someone coming in (and indeed the same is felt by many other geoscientists that use our end products!) that the following must be true:
#
# $$
# \boxed{\begin{array}{c}\mathrm{Field}\\\mathrm{Geophysical}\\\mathrm{Data}\end{array}}
# \Longrightarrow
# \boxed{\begin{array}{c}\mathrm{Magic\,Black}\\\mathrm{Box\,of\, Geophysical}\\\mathrm{Data\,Processing}\end{array}}
# \Longrightarrow
# \boxed{\begin{array}{c}\mathrm{Top-notch}\\\mathrm{Geophysical}\\\mathrm{Result}\end{array}}
# $$
#
# **Figure 1. What digital signal processing might seem to some at this point. Just say NO! to Black Box**
#
# One of the major goals of this class is to strip away some of the **magic black box** nature of geophysical data processing, and give you a better conceptual idea of how to analyze your geophysical data. Ideally, this will help you to design better data processing workflows that allow us to achieve our geophysical data processing goals (e.g., noise removal, signal enhancement, interpretable geophysical result).
#
# In the sections below I present two examples that (hopefully!) show the value of applying digital signal processing to geophysical field data.
# ## CASC93 Seismic Experiment
#
# The goal of the CASC93 experiment was to obtain recordings of distant (so-called teleseismic) earthquakes that would provide us with some idea about the lithospheric structure in a seismically quiescent zone located in west-central Oregon. The acquisition of this data set on a linear array of 40 three-component seismometers created a fantastic opportunity to apply the concepts of 2D migration to study the top 120 km of lithosphere!
#
#
# (a) CASC93 Experiment Location | (b) Earthquake Locations
# - | -
# <img src="Fig/EQMIG1.png" width="450"> | <img src="Fig/EQMIG1B.png" width="400">
#
# **Figure 2. (a) Location of the CASC93 seismic experiment involving 40 three-component broadband seismic stations deployed over 300 km in Oregon. (b) Locations of the large earthquakes (i.e., > 5.5 Mb) used for imaging that originated $30^\circ <\Delta<90^\circ$ from array.**
#
# <img src="Fig/EQMIG2.png" width="400">
#
# **Figure 3. Example of a processed earthquake arrival recorded on a multicomponent seismic array. Recorded earthquake signals were subject to much digital signal processing: automated windowing (based on short-time versus long-time signal magnitudes estimates), bandpass filtering (within the source signal band), polarity filtering (to account for directivity of source), estimation and deconvolution of source signatures (to estimate the impulse response), and regularization into a regular grid for plotting (for ease of applying migration operators)**.
#
# <img src="Fig/EQMIG4.png" width="500">
#
# **Figure 4. Stack of all recorded earthquakes after imaging+inversion. The interpretation of the structure shown in this figure is the Juan de Fuca Plate subducting beneath the North American continental plate. Note that this final step involved applying full-wavefield migration, which is also a digital filter.**
# ## Microseismic Velocity Inversion
#
# You have no doubt heard about the link between subsurface fluid injection activities (i.e., fracking) and induced (micro)seismicity in prolific shale formations of very low permeability. In most cases, the magnitudes of induced microseismic events are quite low (< 1.0 Mw), and it is helpful for the operators to know where and when these events occur because they provide important details as to how the stimulation increases fault connectivity and overall permeability.
#
# There are now a number of specialized microseismic operators that deploy tens-to-hundreds of multi-component seismic stations (see Figure 4) that are sufficiently sensitive to record **very weak microseismic events** (e.g., $-1.25<Mw<0.25$) occurring at 1.0-2.0 km depth.
#
# <img src="Fig/MICRO00.png" width="500">
#
# **Figure 5. Example field deployment of 192 3C sensors over the Marcellus Shale formation in Ohio. The overall area of investigation is 6$\times$6 km$^2$, while the white box shows the 1.5$\times$1.25 km$^2$ area of subsurface stimulated at approximately 2.0 km depth.**
#
# Two windowed raw microseismic events | Events processed into P- and S-wave through DSP
# - | -
# <img src="Fig/MICRO1.png" width="500"> | <img src="Fig/MICRO2.png" width="350">
#
# **Figure 6. (a) Example of a weak (upper panels) and a very weak (lower panels) microseismic event, where the three subpanels represent the vertical, easting and northing components. (b) The same two events after significant digital signal processing: data windowing (again based on long-term versus short-term signal averages), wave-mode separation (to isolate P- and S-wave contributions), band-pass filtering (to within expected range), envelope estimation (to handle S-wave polarities), and second pass of bandpass filtering (to return signals to zero mean).**
#
# Usually, these data sets are acquired in frontier exploration areas that have no 3D (or even 2D) seismic data, which are essential for generating information on the local 3D velocity structure. (In fact, commonly the only velocity constraint in the area is a single borehole located a few kilometers away that might only be logged in the interval of interest!) Thus, the question is: **how can you locate the source of a very noisy microseismic event if you do not know the 3D velocity structure and you do not know at what time and where it occurred?**
#
# When at the University of Western Australia (UWA), my PhD student <NAME> and I developed a method to take the processed microseismic signals and go through a process of **adjoint-state inversion** to jointly estimate the 3D velocity structure and optimize microseismic event locations. Figure 6 shows the results before (left two panels) and after (right two panels) applying the velocity inversion method. The upper and lower panels show the P- and S-wave velocity structure, respectively. Overall, through the inversion process there is a significant improvement in the consistency of event locations - and potentially imaging of a stimulated fault.
#
# Before Inversion | After Inversion
# - | -
# <img src="Fig/MICRO3.png" width="400"> | <img src="Fig/MICRO4.png" width="400">
#
# **Figure 7. The initial (a) P-wave and (b) S-wave velocity structure used to generate initial location estimates (white stars). The (c) P-wave and (d) S-wave velocity structure used to generate the final location estimates. Much more consistent locations are now observed, and (almost) all events fall within the expected interval. The quality of the final result is heavily dependent on the digital signal processing choices made during the preprocessing stages.**
#
# # Course Outline
#
# This introductory course on geophysical digital signal processing covers a lot of fundamental mathematical and numerical topics that you will need in your career as a geophysicist or more broadly when playing a quantitative role in the physical sciences.
#
# ### Module 01 - Introduction
#
# The purpose of this module is to emphasize how fundamental DSP has been to your instructor in his career, and to highlight the key topics that will be investigated in this course.
#
# ### Module 02 - Terminology of Digital Signal Processing
#
# This short module introduces some key tems used DSP.
#
# ### Module 03 - Complex Numbers
#
# This module provides a refresher on the manipulation of complex numbers and functions, which are fundamental building blocks for the material developed later on in the course.
#
# ### Module 04 - Fourier Series
#
# This module starts to build up the Fourier machinery by examining continuous, periodic time series. We calculate Fourier spectra that are a very helpful tool for examining signals in a different light.
#
# ### Module 05 - 1D Fourier Transforms
#
# In this module we examine both the analytical and numerical properties of the 1D Fourier transform of continuous (non-periodic) time series. We also look at Fourier transform of a number of important analytic signals as well as that of instrument, earthquakes and seismic data.
#
# ### Module 06 - 2D Fourier Transforms
#
# This module extends the 1D Fourier analysis presented in Module 5 to 2D images. Concepts of 2D spatial filtering are introduced. We investigate the wavenumber structure of 2D magnetic and gravity data sets.
#
# ### Module 07 - Linear Time Invariant (LTI) systems
#
# This module introduces the concepts of linear time-invariant (LTI) systems, including the operations of convolution, correlation, autocorrelation and deconvolution.
#
# ### Module 08 - Discrete Time Signals
#
# This module introduces a number of important concepts regarding discrete time series including: continuous vs discrete vs digital signals; deterministic vs stochastic signals; and characteristics of LTI systems.
#
# ### Module 09 - Digital Sampling and Reconstruction
#
# This module explores the key consequences of performing digital sampling (i.e., aliasing), discusses strategies to avoid aliasing, and how to reconstruct analog signals from properly sampled time series.
#
# ### Module 10 - Discrete Fourier Transform
#
# This module looks at how we can take 1D/2D Fourier transforms that we defined in a continuous fashion in the modules above and looks at how they can be posed in discrete systems.
#
# ### Module 11 - Windows and Spectrograms
#
# This module looks at how we can go beyond Fourier transforms that span the entire time series to those that provide more information on the "local" frequency information.
#
# ### Module 12 - Z Transforms
#
# This module looks at how we can generalize Discrete Fourier Transforms to more generalized and abstract spaces.
#
# ### Module 13 - Practical Filtering
#
# We finish the course by looking at how Z-Transforms can be used to set up FIR and IIR filters that can be used to do a whole bunch of useful things such as lowpass, highpass, bandpass and bandreject filtering of 1D signals.
| 01_Introduction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import fsolve  # NOTE(review): unused in this chunk — possibly needed later
plt.style.use('fivethirtyeight')
from matplotlib import rc
plt.rc('text', usetex=True)  # render labels with LaTeX (requires a TeX installation)
plt.rc('font', family='sans')
# + tags=["hide-input"]
# Embed the lecture video in which the three equations of motion are derived.
from IPython.lib.display import YouTubeVideo
YouTubeVideo('Je-qT2FQrrM')
# -
# # Solving equations of motion
#
# In this notebook, you will plot the solutions to three 1-DOF equations of motion. Each starts at the origin with an initial velocity, $v = 5~m/s$. The three equations of motion and solutions are derived in the video above
#
# |system | equation of motion | solution|
# |---|---|------|
# |a.| $m\ddot{x} = -\mu mg$ |$\rightarrow x(t) = v_0t - \frac{\mu gt^2}{2}$|
# |b.| $m\ddot{x} = -b \dot{x}$| $\rightarrow x(t) = \frac{v_0 m}{b}\left(1 - e^{-\frac{b}{m} t}\right)$ |
# |c.| $m\ddot{x} = -k x$| $\rightarrow x(t) = \frac{v_0}{\omega}\sin\omega t$|
# ## Coulomb friction on a sliding block
#
# This first example, has a small trick. The acceleration is constant, $-\mu g$, until the velocity is zero. At this point, the block stops moving. To solve for $x(t)$
#
# - calculate $x(t)$ and $v(t)$ if acceleration is constant
# - set the values of $v(t)<0$ to 0
# - set the values of $x(t)$ given $v(t)=0$ as the maximum $x$
#
# Here, $\mu=0.3$ and m = 0.5 kg
# Coulomb friction: constant deceleration mu*g until the block stops, then it stays put.
t = np.linspace(0, 3)
xa = 5*t - 0.5*0.3*9.81*t**2  # x(t) = v0*t - mu*g*t^2/2 with v0 = 5 m/s, mu = 0.3, g = 9.81 m/s^2
va = 5 - 0.3*1*9.81*t  # v(t) = v0 - mu*g*t
va[va < 0] = 0  # once the block stops it cannot move again (friction, not a spring)
xa[va == 0] = xa.max()  # freeze the position at its maximum after the stop
plt.plot(t, xa)
plt.xlabel('time (s)')
plt.ylabel('position (m)')
# ## Viscous friction
#
# This second example has an exponentially decaying speed. This type of motion is common in door dampers and shock absorbers. The faster the object moves, the faster it decelerates.
#
# - $v(t) = v_0 e^{-\frac{b}{m}t}$
# - $x(t) = \frac{v_0 m}{b}\left(1 - e^{-\frac{b}{m} t}\right)$
#
# Here, b = 1 kg/s and m = 0.5 kg
m = 0.5  # mass (kg)
b = 1  # viscous damping coefficient (kg/s)
xb = 5*m/b*(1-np.exp(-b/m*t))  # x(t) = v0*m/b * (1 - e^(-(b/m) t)) with v0 = 5 m/s
plt.plot(t, xb)
plt.xlabel('time (s)')
plt.ylabel('position (m)')
# ## Linear spring and the harmonic oscillator
#
# This third example is a [harmonic oscillator](https://en.wikipedia.org/wiki/Harmonic_oscillator). Any object that has a restoring force e.g. a spring attached to a mass, a pendulum swinging, object hanging from a rubber band. The harmonic oscillator is described by the general equation
#
# $\ddot{x} = -\omega^2 x$
#
# where $\omega = \sqrt{\frac{k}{m}}$ for a spring mass. Here, $k=2~N/m$ and m=0.5 kg.
w = np.sqrt(2/0.5)  # natural frequency omega = sqrt(k/m) with k = 2 N/m, m = 0.5 kg
xc = 5/w*np.sin(w*t)  # x(t) = (v0/omega) * sin(omega*t) with v0 = 5 m/s
plt.plot(t, xc)
plt.xlabel('time (s)')
plt.ylabel('position (m)');
# ## Wrapping up - comparing all three examples
#
# You have plotted three solutions
#
# 1. sliding with friction
# 2. viscous friction
# 3. harmonic oscillator
#
# Now, you can plot all three together.
plt.plot(t, xa, label = 'friction')
plt.plot(t, xb, label = 'viscous')
plt.plot(t, xc, label = 'harmonic')
plt.legend();
plt.xlabel('time (s)')
plt.ylabel('position (m)');
# Some similarities between the three plots
#
# - each plot begins at 0 m
# - each plot has the same initial slope
#
# Some differences between the three plots
#
# - the friction and viscous friction have a final position, but the harmonic plot continues to move
# - the blue friction plot has two distinct functions: $\propto t^2$ and $\propto constant$, but the other plots are continuous functions
| module_03/plotting-solutions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
import xgboost as xgb
import pickle
from scipy.stats import zscore
from sklearn.model_selection import train_test_split
# -
def preProcess(data):
    """Feature engineering for the NYC taxi trip data, applied in place.

    Adds pickup-datetime components, raw coordinate deltas and a Haversine
    great-circle distance, encodes ``store_and_fwd_flag`` as 0/1, and
    converts ``trip_duration`` from seconds to rounded whole minutes.

    Parameters
    ----------
    data : pandas.DataFrame
        Must contain ``store_and_fwd_flag``, ``pickup_datetime`` (datetime
        dtype), pickup/dropoff latitude/longitude columns, and
        ``trip_duration`` in seconds. Modified in place; returns None.
    """
    # Encode store_and_fwd_flag: 'N' -> 0, anything else -> 1.
    # (The original wrapped a lambda in another lambda; apply the
    # predicate directly.)
    data["store_and_fwd_flag"] = data["store_and_fwd_flag"].apply(lambda x: 0 if x == 'N' else 1)
    # Break the pickup timestamp into calendar / time-of-day features.
    data['mm_pickup'] = data.pickup_datetime.dt.month
    data['dow_pickup'] = data.pickup_datetime.dt.weekday
    data['day_pickup'] = data.pickup_datetime.dt.day
    data['hour_pickup'] = data.pickup_datetime.dt.hour
    data['min_pickup'] = data.pickup_datetime.dt.minute
    # Raw coordinate deltas (degrees) between dropoff and pickup.
    data['latitude_difference'] = data['dropoff_latitude'] - data['pickup_latitude']
    data['longitude_difference'] = data['dropoff_longitude'] - data['pickup_longitude']
    # Convert trip_duration from seconds to whole minutes (Python round,
    # i.e. banker's rounding at .5 -- kept for backward compatibility).
    data['trip_duration'] = data['trip_duration'].apply(lambda x: round(x / 60))
    # Haversine formula: shortest ("as-the-crow-flies") distance over the
    # earth's surface between the two points, in kilometers.
    # source: https://www.movable-type.co.uk/scripts/latlong.html
    R = 6371  # mean Earth radius in km
    phi1 = data['dropoff_latitude'] * np.pi / 180
    phi2 = data['pickup_latitude'] * np.pi / 180
    d_lat = np.abs(data['latitude_difference'] * np.pi / 180)
    d_long = np.abs(data['longitude_difference'] * np.pi / 180)
    a = np.sin(d_lat / 2) ** 2 + np.cos(phi1) * np.cos(phi2) * np.sin(d_long / 2) ** 2
    c = 2 * np.arctan2(np.sqrt(np.abs(a)), np.sqrt(np.abs(1 - a)))
    data["trip_distance"] = R * c
def costFunction(preds, dtrain):
    """Custom RMSE evaluation metric for XGBoost.

    XGBoost invokes ``feval(preds, dtrain)`` where ``preds`` is the model
    output array and ``dtrain`` is the evaluation DMatrix. The original
    parameter names (``y_actual``/``y_estimated``) had these two roles
    swapped -- ``get_label()`` was called on the argument named
    ``y_estimated`` -- which made the body misleading. RMSE is symmetric,
    so the computed value was (and remains) unchanged.

    source https://github.com/dmlc/xgboost/blob/master/demo/guide-python/custom_objective.py
    source https://stackoverflow.com/questions/55001509/typeerror-when-writing-my-own-evaluation-metric-for-xgboost-in-python

    Parameters
    ----------
    preds : numpy.ndarray
        Predicted values (here: log1p-transformed durations).
    dtrain : xgboost.DMatrix
        Evaluation data; true labels are read via ``get_label()``.

    Returns
    -------
    tuple
        (metric name, RMSE between predictions and labels).
    """
    labels = dtrain.get_label()
    assert preds.shape == labels.shape
    return 'my-error', np.sqrt(np.square(preds - labels).mean())
def XGBModel(X, y):
    """Train an XGBoost regressor for taxi trip duration.

    Input: passenger count, coordinates, pickup datetime, store_and_fwd_flag
    Output: trip duration

    The model is fit on log1p(duration): RMSLE is not natively supported
    by XGBoost, but plain RMSE on the log-transformed target behaves like
    RMSLE on the original scale.
    """
    # Hold out 20% as a test set, then a further 20% of the training
    # portion as a validation set. Fixed seeds make the split
    # reproducible outside this function.
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 2020)
    X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size = 0.2, random_state = 2020)
    #Dimension-tracker 1
    #print(type(X_train), type(y_test), X_train.shape, X_test.shape, y_train.shape, y_test.shape, X_val.shape, y_val.shape)
    # Gradient-boosted tree hyperparameters.
    booster_params = {
        'booster': 'gbtree',
        'objective': 'reg:squarederror',
        'max_depth': 14,
        'subsample': 0.9,
        'eta': 0.05,
        'colsample_bytree': 0.7,
        'colsample_bylevel': 0.7,
        'n_jobs': 4,
    }
    # Training and validation sets with log1p-transformed labels.
    train_matrix = xgb.DMatrix(data = X_train, label = np.log1p(y_train))
    val_matrix = xgb.DMatrix(data = X_val, label = np.log1p(y_val))
    #Dimension-tracker 2
    #print(train_matrix.num_col(), train_matrix.num_row(), val_matrix.num_col(), val_matrix.num_row())
    # Sets whose error is reported after each boosting round.
    watchlist = [(val_matrix, 'eval'), (train_matrix, 'train')]
    # Train for a fixed 1000 boosting rounds, reporting the custom RMSE
    # metric on both watchlist sets.
    booster = xgb.train(params = booster_params, dtrain = train_matrix, num_boost_round = 1000,
                        evals = watchlist, feval = costFunction, verbose_eval = True)
    return booster
# +
taxiDB = pd.read_csv(filepath_or_buffer = 'train.csv', delimiter = ',', engine = 'c', low_memory = True, infer_datetime_format = True, parse_dates=[2,3])
preProcess(taxiDB)
#dividing data to test and train dataset
X = taxiDB.drop(['id', 'vendor_id', 'pickup_datetime', 'dropoff_datetime', 'trip_duration'], axis = 1)
y = taxiDB['trip_duration']
#train model to data
model = XGBModel(X, y)
# -
#Saving the model as a pickle file
filename = "xgb_model.sav"
pickle.dump(model, open(filename, 'wb'))
# +
#Testing cells for error calculation
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 2020)
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size = 0.2, random_state = 2020)
#finding estimated values
y_estimated = np.exp(model.predict(xgb.DMatrix(X_test))) - 1
# +
# Error calculations: compare estimated (S) against actual (A) durations,
# both in minutes, on the held-out test set.
S = y_estimated
A = y_test.to_numpy(dtype = float)
error = S - A
#1. Mean absolute deviation
MAD = sum(abs(error)) / len(A)
print("Mean Absolute Deviation is:", MAD, "minutes")
#2. Mean square error
# NOTE(review): MSE is in squared units (minutes^2), not minutes -- the
# printed label misstates the units.
MSE = sum(error**2) / len(A)
print("Mean Square Error is:", MSE, "minutes")
#3. Mean absolute percentage error
# NOTE(review): signed errors are summed here, so over- and
# under-estimates cancel; a true MAPE would use abs(error). Also fails
# if any actual duration A is 0. Confirm intended.
MAPE = sum(np.divide(error, A)) / len(A) * 100
print("Mean Absolute Percentage Error is:", MAPE, "%")
#4. bias
# Sum (not mean) of signed errors, per the formula below.
bias = sum(error)
print("bias is:", bias, "minutes")
#5. Root mean square percentage error
RMSPE = np.sqrt(np.mean(np.square((A - S) / A))) * 100
print("Root Mean Square Percentage Error is:", RMSPE, "%")
# -
print(taxiDB.head())
print(X.head())
| xgboost.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] toc=true
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"><li><span><a href="#Reference" data-toc-modified-id="Reference-1"><span class="toc-item-num">1 </span>Reference</a></span></li></ul></div>
# -
# <img src='./img/intel-logo.jpg' width=50%, Fig1>
#
# # OpenCV 기초강좌
#
# <font size=5><b>06. Face Emotion Detection <b></font>
#
# <div align='right'>성 민 석 (Minsuk Sung)</div>
# <div align='right'>류 회 성 (Hoesung Ryu)</div>
#
# <img src='./img/OpenCV_Logo_with_text.png' width=20%, Fig2>
#
#
# ---
# +
import cv2
import sys

# Haar cascade classifier (profile-face model shipped with OpenCV).
cascPath = "./data/haarcascades/haarcascade_profileface.xml"
faceCascade = cv2.CascadeClassifier(cascPath)

# Capture frames from the default webcam (device 0).
video_capture = cv2.VideoCapture(0)

while True:
    ret, frame = video_capture.read()
    # BUG FIX: guard against a failed read (camera missing/disconnected).
    # The original passed frame=None straight into cvtColor and crashed.
    if not ret:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30),
    )

    # Draw a rectangle and label on every detection.
    # NOTE(review): the 'EYE' label and the y+10 rectangle offset look like
    # leftovers from a different example -- confirm they are intended.
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y+10), (x+w, y+h), (0, 255, 0), 2)
        cv2.putText(frame,'EYE',(x,y),cv2.FONT_HERSHEY_SIMPLEX,2,(0, 255, 0),2)

    cv2.imshow('Video', frame)

    # Quit when the user presses 'q'.
    if cv2.waitKey(10) & 0xFF == ord('q'):
        break

video_capture.release()
cv2.destroyAllWindows()
# -
# ## Reference
# - https://github.com/parulnith/Face-Detection-in-Python-using-OpenCV/blob/master/Face%20Detection%20with%20OpenCV-Python.ipynb
# - https://realpython.com/face-detection-in-python-using-a-webcam/
# - https://pysource.com/2019/03/12/face-landmarks-detection-opencv-with-python/
| opencv/.ipynb_checkpoints/OpenCV Lecture06. Example - Face Detection-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
names=["Tomás", "Pauline", "Pablo", "Bjork","Alan","Juana"]
woman=[False,True,False,False,False,True]
ages=[32,33,28,30,32,27]
country=["Chile", "Senegal", "Spain", "Norway","Peru","Peru"]
education=["Bach", "Bach", "Master", "PhD","Bach","Master"]
education #testing
import pandas
from pandas import DataFrame
data = {'names': names, 'woman': woman, 'ages': ages, 'country': country, 'education': education}
data
friends = DataFrame.from_dict(data)
friends
# Who is the oldest person in this group of friends?
friends[friends.ages == max(friends.ages)]
# +
# How many people are 32?
len(friends[friends.ages == 32])
#c. Do some research and find the answer using query() and min()
#Solve this in a new Jupyter notebook, and then upload it to GitHub. Name the notebook as 'hw_data_structures'.
# -
# How many are not Peruvian? (use two different codes)
# BUG FIX: the original filtered with == 'Peru', which counts the
# Peruvians rather than everyone else. Two equivalent ways:
len(friends) - len(friends[friends.country == 'Peru'])  # code 1: complement of the Peru count
len(friends[friends.country != 'Peru'])  # code 2: filter directly on != 'Peru'
# Who is the person with the highest level of education?
friends[friends.education == 'PhD']
# what is the sex of the oldest person in the group?
friends[friends.ages == max(friends.ages)].woman
# +
### Homework
# If you have the query:
# where is the youngest male in the group from?
# my own answer
male = friends[(friends.woman == False)]
male[male.ages == min(male.ages)]
# -
#a. Find the answer using sort_values()
male.sort_values('ages', ascending=True).head(1)
#b. Do some research and find the answer using where() and min()
male.where(male.ages == min(male.ages))
| ex_data_structures.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import ffmpeg # Library to work with audio and video files, here used to extract audio
import subprocess # To make subprocess call using terminal
import os
from ibm_watson import SpeechToTextV1
from ibm_watson.websocket import RecognizeCallback, AudioSource
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
# +
# Extracts audio from video
# ffmpeg -i input_filename -ab bitrate -ar frequency -vn output_filename
# TODO: Change input_filename.mp4 to file to be converted
# If your output is '127' that means command not found and likely ffmpeg has not been correctly installed.
command = 'ffmpeg -i input_filename.mp4 -ab 160k -ar 44100 -vn output_filename.wav'
subprocess.call(command, shell=True)
# -
# TODO: Insert the api_key and url as strings as listed on your IBM Cloud account (Watson Speech to Text service)
api_key = ''
url = ''
# Setups service
authenticator = IAMAuthenticator(api_key)
stt = SpeechToTextV1(authenticator=authenticator)
stt.set_service_url(url)
# Compresses audio
command = 'ffmpeg -i output_filename.wav -vn -ar 44100 -ac 2 -b:a 192k output_filename.mp3'
subprocess.call(command, shell=True)
# Splits audio into smaller and more manageable files
command = 'ffmpeg -i output_filename.mp3 -f segment -segment_time 1800 -c copy audio_file_%03d.mp3'
subprocess.call(command, shell=True)
files = []
for filename in os.listdir('.'):
if filename.startswith('audio_file_') and filename.endswith('.mp3'):
files.append(filename)
files.sort()
files
# Model list: https://cloud.ibm.com/apidocs/speech-to-text#listmodels
# Japanese models: ja-JP_BroadbandModel, ja-JP_NarrowbandModel
# JP_Broadband seems to capture the speech more accurately
results = []
for filename in files:
with open(filename, 'rb') as f:
res = stt.recognize(audio=f, content_type= 'audio/mp3', model='ja-JP_BroadbandModel', continuous=True,
inactivity_timeout=1200).get_result()
results.append(res)
results[0] # Shows speech to text results for the first audio file
text = []
for file in results:
for result in file['results']:
text.append(result['alternatives'][0]['transcript'].rstrip() + '.\n')
# +
# Split the joined transcript into whitespace-separated tokens so the
# per-token cleanup rules below can be applied.
transcript_list = ''.join(text).split()

# Drop the '.' appended at chunk boundaries from the end of every token.
for n, i in enumerate(transcript_list):
    if i.endswith(u'.'):
        transcript_list[n] = i[:-1]

for n, i in enumerate(transcript_list):
    # Light text cleaning below: re-insert Japanese sentence punctuation
    # after common polite sentence endings and normalize filler tokens.
    if i == u'ですね':
        transcript_list[n] = u'ですね。'
    if i == u'はい':
        transcript_list[n] = u'はい、'
    if i.endswith(u'ます'):
        transcript_list[n] = i.replace(u'ます', u'ます。')
    if i.endswith(u'ましょう'):
        transcript_list[n] = i.replace(u'ましょう', u'ましょう。')
    if i == u'ね' and transcript_list[n-1] == u'です':
        transcript_list[n] = u'ね。'
    if i == u'ねえ':
        transcript_list[n] = u'ね。'
    if i.startswith('D_'): # Dropoffs in speech. Often in places like ええと and まあー
        transcript_list[n] = ''
# -
transcript = ''.join(transcript_list)
transcript = transcript.replace(u'ます。ので', 'ますので、')
transcript
# Saves transcript variable as 'トランスクリプト_long.txt'
with open('トランスクリプト_long.txt', 'w') as out:
out.writelines(transcript)
| video-to-text-longfiles.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # License
# ***
# Copyright (C) 2018 <NAME>, <EMAIL>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ***
# ## Simple multilayer perception (MLP) example
# imports
import urllib.request as urllib2
import numpy as np
import pandas as pd
# #### Set simple hyperparameters
LEARN_RATE = 0.005
ITERATIONS = 600
HIDDEN_UNITS = 30
# #### Fetch simple Iris dataset
# +
# load and preprocess Iris data set
# easy binomial classification task: separate Setosa irises from Versicolor irises
# fetch data from UCI repository
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
connection = urllib2.urlopen(url)
raw = connection.read()
# initialize empty X and y arrays
# (100 rows = Setosa + Versicolor samples; 4 feature columns)
X = np.zeros((100, 4))
y = np.zeros((100, 1))
# load iris data into X and y arrays
row_idx = 0
# raw is bytes; str(raw)[2:-5] strips the b'...' wrapper and trailing
# characters, so rows are split on the literal '\n' escape sequences
for line in str(raw)[2:-5].split('\\n'):
    # encode the class label in-line: Setosa -> 1, Versicolor -> 0
    line = line.replace('Iris-setosa', '1').replace('Iris-versicolor', '0')
    line = line.split(',')
    # remove Virginica irises from data set
    if line[-1] != 'Iris-virginica':
        line = np.asarray(line)
        X[row_idx, :] = line[:-1]
        y[row_idx, :] = line[-1]
        row_idx += 1
print('Data inputs:\n', X)
print('\n')
print('Data target:\n', y)
# -
# #### Training routine
# +
# very simple MLP routine
# with logistic activation for hidden and output layer
# set random seed
# always do this when working with random numbers
np.random.seed(12345)
# randomly initialize our weights with mean 0 (uniform in [-0.5, 0.5))
hidden_weights = np.random.random((4, HIDDEN_UNITS)) - 0.5 # 4 X HIDDEN_UNITS weights in hidden layer
output_weights = np.random.random((HIDDEN_UNITS, 1)) - 0.5 # HIDDEN_UNITS X 1 weights in output layer
print('MLP architecture is: 4 input units -> %d hidden units -> 1 output units.' % HIDDEN_UNITS)
print()
print('There are %d hidden weights to optimize.' % (4 * HIDDEN_UNITS))
print('Initial hidden weights:\n', hidden_weights)
print()
print('There are %d output weights to optimize.' % HIDDEN_UNITS)
print('Initial output weights:\n', output_weights)
# initialize empty pandas DataFrame to hold iteration scores
iter_frame = pd.DataFrame(columns=['Iteration', 'Error'])
# activation function
def logistic_activation_function(weights_times_inputs):
    """Elementwise logistic sigmoid: squashes any real input into (0, 1)."""
    z = weights_times_inputs
    return 1 / (1 + np.exp(-z))
# training loop
for iteration in range(0, ITERATIONS):

    ### feed-forward phase ##########
    # run data through input, hidden, and output layers
    input_layer = X
    hidden_layer = logistic_activation_function(np.dot(input_layer, hidden_weights))
    output_layer = logistic_activation_function(np.dot(hidden_layer, output_weights))

    if iteration == 0:
        print('\nInitial yhat:\n', output_layer)
        print()
        print('Training ...')

    ### evaluate error function ##########
    # Binary cross-entropy / logloss: -(y*log(p) + (1-y)*log(1-p)).
    # BUG FIX: the original used "+ (1 - y)*np.log(1 - output_layer)",
    # flipping the sign of the second term and misreporting the error.
    # The gradient below is derived independently, so training itself
    # was not affected -- only the reported/plotted error values.
    output_logloss_error = -y * np.log(output_layer) - (1 - y) * np.log(1 - output_layer)

    if ((iteration + 1) % 100) == 0:
        print('Iteration %4i, Error: %5.2f' % (iteration + 1, np.sum(output_logloss_error)))

    # record iteration and error
    iter_frame = iter_frame.append({'Iteration': iteration,
                                    'Error': np.sum(output_logloss_error)},
                                   ignore_index=True)

    ### back-propogation phase ##########
    # back-propagate error from output layer to hidden layer:
    # each weight's output delta and input activation are multiplied to
    # find the gradient of the weights, per the chain rule
    output_loss_gradient = output_layer - y  # logloss derivative
    output_layer_gradient = output_layer * (1 - output_layer)  # output sigmoid derivative
    output_input = hidden_layer  # linear combo derivative
    output_total_gradient = output_input.T.dot(output_loss_gradient * output_layer_gradient)

    hidden_loss_gradient = output_loss_gradient.dot(output_weights.T)  # backprop error/logloss derivative
    hidden_layer_gradient = hidden_layer * (1 - hidden_layer)  # hidden sigmoid derivative
    hidden_input = input_layer  # linear combo derivative
    hidden_total_gradient = hidden_input.T.dot(hidden_loss_gradient * hidden_layer_gradient)

    ### update weights based on gradient ##########
    # update weights in direction that minimizes error using layerwise gradients
    # (input layer is never updated, b/c it is the data itself)
    # scale by learning rate
    output_weights -= LEARN_RATE * output_total_gradient
    hidden_weights -= LEARN_RATE * hidden_total_gradient

print('Maximum iterations reached, done.')
# -
# #### Analyze results
y_yhat_frame = pd.DataFrame(columns = ['y', 'yhat'])
y_yhat_frame['y'] = y.reshape(-1)
y_yhat_frame['yhat'] = output_layer.reshape(-1)
y_yhat_frame
# %matplotlib inline
_ = iter_frame.plot(kind='line', x='Iteration', y='Error', title='Iteration Chart')
| 05_neural_networks/src/py_part_5_basic_mlp_example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# + [markdown] deletable=true editable=true
# # Load and preprocess 2000 data
#
# We will, over time, look over other years. Our current goal is to explore the features of a single year.
#
# ---
# + deletable=true editable=true
# %pylab --no-import-all inline
import pandas as pd
# + [markdown] deletable=true editable=true
# ## Load the data.
#
# ---
#
# If this fails, be sure that you've saved your own data in the prescribed location, then retry.
# -
file = "../data/interim/2000data.dta"
df_rawest = pd.read_stata(file)
# + deletable=true editable=true
good_columns = [#'campfin_limcorp', # "Should gov be able to limit corporate contributions"
'V000523', # Your own party identification
'V000694', # Abortion
'V001531', # Moral Relativism
'V001530', # "Newer" lifetyles
'V001533', # Moral tolerance
'V001532', # Traditional Families
'V001481', # Gay Job Discrimination
'V000748', # Gay Adoption
'V000727', # Gay Military Service
'V000609', # National health insurance
'V000620', # Guaranteed Job
'V000550', # Services/Spending
'V000674a', # Affirmative Action -- 1-5; 7 is other
'V001508',
'V001511',
'V001509',
'V001510',
]
df_raw = df_rawest[good_columns]
# + [markdown] deletable=true editable=true
# ## Clean the data
# ---
# + deletable=true editable=true
def convert_to_int(s):
    """Turn ANES data entry into an integer.

    >>> convert_to_int("1. Govt should provide many fewer services")
    1
    >>> convert_to_int("2")
    2
    """
    try:
        return int(s.partition('.')[0])
    except ValueError:
        # BUG FIX: the original referenced the ``warnings`` module without
        # importing it anywhere in this notebook, so an unparseable string
        # raised NameError instead of warning. Import locally to keep the
        # cell self-contained.
        import warnings
        warnings.warn("Couldn't convert: " + s)
        return np.nan
    except AttributeError:
        # Non-string input (already numeric, or NaN): pass through unchanged.
        return s
def negative_to_nan(value):
    """Map ANES negative sentinel codes to missing.

    ANES encodes various non-answers (for instance, a question that does
    not pertain to the respondent) as negative numbers; any value >= 0
    passes through unchanged.
    """
    return np.nan if value < 0 else value
def lib1_cons2_neutral3(x):
    """Rearrange questions where 3 is neutral: 1 stays 1, others shift by -3."""
    if x == 1:
        return x
    return x - 3
def liblow_conshigh(x):
    """Reorder questions where the liberal response is low, by negating."""
    return x * -1
def not_informative_to_nan(x):
    """Convert ANES non-informative codes to missing.

    ANES encodes various non-answers (e.g. question not applicable to
    the respondent) as 8, 9, and 0; those become NaN, everything else
    passes through unchanged.
    """
    return x if x not in {8, 9, 0} else np.nan
df = df_raw.applymap(convert_to_int)
df.rename(inplace=True, columns=dict(zip(
good_columns,
["PartyID",
"Abortion",
"MoralRelativism",
"NewerLifestyles",
"MoralTolerance",
"TraditionalFamilies",
"GayJobDiscrimination",
"GayAdoption",
"GayMilitaryService",
"NationalHealthInsurance",
"StandardOfLiving",
"ServicesVsSpending",
"AffirmativeAction",
"RacialWorkWayUp",
"RacialGenerational",
"RacialDeserve",
"RacialTryHarder",
]
)))
# Recode every non-party column: map ANES non-informative codes (8, 9, 0)
# to NaN before flipping any scales.
non_pid_columns = list(df.columns)
non_pid_columns.remove('PartyID')
df[non_pid_columns] = df[non_pid_columns].applymap(not_informative_to_nan)
# Code so that liberal is lower numbers
df.loc[:, 'PartyID'] = df.PartyID.apply(lambda x: np.nan if x >= 7 else x) # 7: other minor party, 8: apolitical, 9: NA
df.loc[:, 'Abortion'] = df.Abortion.apply(lambda x: np.nan if x in {7, 8, 9, 0} else -x) # drop non-answers, then negate
df.loc[:, 'NewerLifestyles'] = df.NewerLifestyles.apply(lambda x: -x) # negate to flip direction; NOTE(review): original note said "1: tolerance, 7: not" -- confirm against codebook
df.loc[:, 'TraditionalFamilies'] = df.TraditionalFamilies.apply(lambda x: -x) # negate to flip direction
df.loc[:, 'ServicesVsSpending'] = df.ServicesVsSpending.apply(lambda x: -x) # negate to flip direction
df.loc[:, 'RacialTryHarder'] = df.RacialTryHarder.apply(lambda x: -x) # negate to flip direction
df.loc[:, 'RacialWorkWayUp'] = df.RacialWorkWayUp.apply(lambda x: -x) # negate to flip direction
# + deletable=true editable=true
print("Variables now available: df")
# + deletable=true editable=true
df_rawest.V000523.value_counts()
# -
df.PartyID.value_counts()
# + deletable=true editable=true
df.head()
# -
df.to_csv("../data/processed/2000.csv")
| notebooks/1.0-adm-load-data-2000.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Drill - Describing Data
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
# -
# ## 1. "Greg was 14, Marcia was 12, Peter was 11, Jan was 10, Bobby was 8, and Cindy was 6 when they started playing the Brady kids on The Brady Bunch. <NAME> was 8 years old when he joined the show.
#
# - ## What are the mean, median, and mode of the kids' ages when they first appeared on the show?
#
# - ## What are the variance, standard deviation, and standard error?"
# ## Measures of Central Tendency - Mean, Median, and Mode
brady_bunch = pd.DataFrame()
brady_bunch['name'] = ['Greg', 'Marcia', 'Peter', 'Jan', 'Bobby', 'Cindy', 'Oliver']
brady_bunch['age'] = [14, 12, 11, 10, 8, 6, 8]
# ### Mean
def df_mean(df, column):
    """Return the arithmetic mean of *column* in *df*."""
    return df[column].mean()
df_mean(brady_bunch, 'age')
# ### Median
def df_median(df, column):
    """Return the median of *column* in *df*."""
    return df[column].median()
df_median(brady_bunch, 'age')
# ### Mode(s)
def df_modes(df, column):
    """Return the most frequent value(s) of *column* as a list.

    Returns None when every value is unique, i.e. there is no
    meaningful mode.
    """
    values, counts = np.unique(df[column], return_counts=True)
    highest = counts[np.argmax(counts)]
    modes = [value for value, count in zip(values, counts) if count == highest]
    if len(modes) == len(set(df[column])):
        modes = None
    return modes
df_modes(brady_bunch, 'age')
# ## Measures of Variance - Variance, Standard Deviation, and Standard Error
# ### Variance
def df_var(df, column):
    """Return the population variance (ddof=0) of *column* in *df*."""
    return df[column].var(ddof=0)
df_var(brady_bunch, 'age')
# ### Standard Deviation
def df_std(df, column):
    """Return the population standard deviation (ddof=0) of *column*."""
    return df[column].std(ddof=0)
brady_bunch_age_std = df_std(brady_bunch, 'age')
brady_bunch_age_std
# ### Standard Error
def df_sterr(df, column):
    """Return the standard error of the mean of *column*.

    Computed as population std (ddof=0) / sqrt(n - 1), which is
    algebraically identical to the usual sample std (ddof=1) / sqrt(n).
    """
    n = len(df[column])
    population_std = df[column].std(ddof=0)
    return population_std / np.sqrt(n - 1)
df_sterr(brady_bunch, 'age')
# ## DataFrame.describe()
brady_bunch.describe()
# -------------------------------------------------------------------------------------------------------------------------
# ## 2. Using these estimates, if you had to choose only one estimate of central tendency and one estimate of variance to describe the data, which would you pick and why?
# ### Descibing the data with:
# - ### Measures of Central Tendency - _mean_
# - ### Measures of Variance - _standard deviation_
#
# ### I would use the _mean_ and the _standard deviation_ to describe the data, because together, they provide a quick overview of how the datapoints are distributed.
#
# ### For this sample, the average (mean) age was almost 10 years old, and all ages in the dataset fall within two (2) standard deviations from the mean.
# -------------------------------------------------------------------------------------------------------------------------
# ## 3. Next, Cindy has a birthday. Update your estimates- what changed, and what didn't?
brady_bunch.loc[brady_bunch['name'] == 'Cindy', ['age']] = 7
brady_bunch
brady_bunch.describe()
print('New mean: ', df_mean(brady_bunch, 'age'))
print('New median: ', df_median(brady_bunch, 'age'))
print('New mode: ', df_modes(brady_bunch, 'age'))
print('New variance: ', df_var(brady_bunch, 'age'))
print('New standard deviation: ', df_std(brady_bunch, 'age'))
print('New standard error: ', df_sterr(brady_bunch, 'age'))
# ### The mean, variance, standard deviation, and standard error changed when the dataframe was updated with Cindy's new age. The median and mode remained the same.
# -------------------------------------------------------------------------------------------------------------------------
# ## 4. Nobody likes <NAME>. Maybe the network should have used an even younger actor. Replace <NAME> with 1-year-old Jessica, then recalculate again. Does this change your choice of central tendency or variance estimation methods?
# DataFrame after updating Cindy's age
brady_bunch
# Replace Oliver with Jessica
brady_bunch[brady_bunch['name'] == 'Oliver'] = ('Jessica', 1)
# DataFrame afer replacing Oliver with Jessica
brady_bunch
print('New mean: ', df_mean(brady_bunch, 'age'))
print('New median: ', df_median(brady_bunch, 'age'))
print('New mode: ', df_modes(brady_bunch, 'age'))
print('New variance: ', df_var(brady_bunch, 'age'))
print('New standard deviation: ', df_std(brady_bunch, 'age'))
print('New standard error: ', df_sterr(brady_bunch, 'age'))
# ### By changing one of the show's characters (and including their age in calculations), the mean was significantly affected and is no longer as close to the median. I would use the _median_ and _standard deviation_ to describe the data.
# ----------------------------------------------------------------------------------------------------------------------
# ## 5. On the 50th anniversary of The Brady Bunch, four different magazines asked their readers whether they were fans of the show. The answers were: TV Guide 20% fans Entertainment Weekly 23% fans Pop Culture Today 17% fans SciPhi Phanatic 5% fans
#
# ## Based on these numbers, what percentage of adult Americans would you estimate were Brady Bunch fans on the 50th anniversary of the show?
#
# The statistic for "SciPhi Phanatic" does not seem to be representative
# of the population at large as that magazine's sampling would seem to favor fans
# of only one specific genre of entertainment. Including this data in statistical
# calculations for the popularity of the TV show would likely insert some degree of bias.
bb_fans = pd.DataFrame()
bb_fans['magazine'] = ['TV Guide', 'Entertainment Weekly', 'Pop Culture Today']
bb_fans['percentage'] = [20, 23, 17]
bb_fans.mean()
# ### I would estimate that 20% of adult Americans were fans of The Brady Bunch TV show.
| thinkful/data_science/my_progress/intro_data_science_fundamentals/unit_3_drills/Unit_3_-_Lesson_1_-_Drill_-_Describing_Data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="zzZbP0LM6m5z"
# # Extractive QA with Elasticsearch
#
# txtai is datastore agnostic, the library analyzes sets of text. The following example shows how extractive question-answering can be added on top of an Elasticsearch system.
# + [markdown] id="xk7t5Jcd6reO"
# # Install dependencies
#
# Install `txtai` and `Elasticsearch`.
# + id="0y1UA4-q-YdA"
# %%capture
# Install txtai and elasticsearch python client
# !pip install git+https://github.com/neuml/txtai elasticsearch
# Download and extract elasticsearch
# !wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.10.1-linux-x86_64.tar.gz
# !tar -xzf elasticsearch-7.10.1-linux-x86_64.tar.gz
# !chown -R daemon:daemon elasticsearch-7.10.1
# + [markdown] id="nKWz-C5gCJy8"
# Start an instance of Elasticsearch directly within this notebook.
# + id="3ZfJeWbM6wmj"
import os
from subprocess import Popen, PIPE, STDOUT

# If issues are encountered with this section, ES can be manually started as follows:
# ./elasticsearch-7.10.1/bin/elasticsearch

# Start and wait for server.  Elasticsearch refuses to run as root, so the child
# process drops privileges to uid 1 before exec — presumably the "daemon" user the
# install directory was chowned to above (TODO confirm uid 1 == daemon on this image).
server = Popen(['elasticsearch-7.10.1/bin/elasticsearch'], stdout=PIPE, stderr=STDOUT, preexec_fn=lambda: os.setuid(1))

# Give the server time to come up before issuing any requests (notebook shell magic).
# !sleep 30
# + [markdown] id="TWEn4w68-D1y"
# # Download data
#
# This example is going to work off a subset of the [CORD-19](https://www.semanticscholar.org/cord19) dataset. COVID-19 Open Research Dataset (CORD-19) is a free resource of scholarly articles, aggregated by a coalition of leading research groups, covering COVID-19 and the coronavirus family of viruses.
#
# The following download is a SQLite database generated from a [Kaggle notebook](https://www.kaggle.com/davidmezzetti/cord-19-slim/output). More information on this data format, can be found in the [CORD-19 Analysis](https://www.kaggle.com/davidmezzetti/cord-19-analysis-with-sentence-embeddings) notebook.
# + id="8tVrIqSq-KBa"
# %%capture
# !wget https://github.com/neuml/txtai/releases/download/v1.1.0/tests.gz
# !gunzip tests.gz
# !mv tests articles.sqlite
# + [markdown] id="hSWFzkCn61tM"
# # Load data into Elasticsearch
#
# The following block copies rows from SQLite to Elasticsearch.
# + id="So-OBvUT61QD" colab={"base_uri": "https://localhost:8080/"} outputId="9647b8f8-8471-41bf-ccfa-a75306665638"
import sqlite3

import regex as re  # third-party `regex`: needed for the variable-length lookbehind below
from elasticsearch import Elasticsearch, helpers

# Connect to ES instance
es = Elasticsearch(hosts=["http://localhost:9200"], timeout=60, retry_on_timeout=True)

# Connection to database file
db = sqlite3.connect("articles.sqlite")
cur = db.cursor()

# Elasticsearch bulk buffer
buffer = []
rows = 0

# Select tagged sentences without a NLP label. NLP labels are set for non-informative sentences.
cur.execute("SELECT s.Id, Article, Title, Published, Reference, Name, Text FROM sections s JOIN articles a on s.article=a.id WHERE (s.labels is null or s.labels NOT IN ('FRAGMENT', 'QUESTION')) AND s.tags is not null")
for row in cur:
    # Build dict of name-value pairs for fields
    article = dict(zip(("id", "article", "title", "published", "reference", "name", "text"), row))

    name = article["name"]

    # Only process certain document sections: skip background/discussion/introduction/
    # reference headings.  The lookbehind keeps "discussion" sections whose heading is
    # preceded by "results" (e.g. combined results-and-discussion sections).
    if not name or not re.search(r"background|(?<!.*?results.*?)discussion|introduction|reference", name.lower()):
        # Bulk action fields: reuse the sqlite row id as the ES document id
        article["_id"] = article["id"]
        article["_index"] = "articles"

        # Buffer article
        buffer.append(article)

        # Increment number of articles processed
        rows += 1

        # Bulk load every 1000 records
        if rows % 1000 == 0:
            helpers.bulk(es, buffer)
            buffer = []

            print("Inserted {} articles".format(rows), end="\r")

# Flush any records still sitting in the buffer after the final partial batch
if buffer:
    helpers.bulk(es, buffer)

print("Total articles inserted: {}".format(rows))
# + [markdown] id="X5RO-VNwzMAo"
# # Query data
#
# The following runs a query against Elasticsearch for the terms "risk factors". It finds the top 5 matches and returns the corresponding documents associated with each match.
#
#
# + id="ucd9mwSfFTMm" colab={"base_uri": "https://localhost:8080/", "height": 348} outputId="b21d6aff-6abe-48f5-9914-7b7fb8472adb"
import pandas as pd

from IPython.display import display, HTML

# Show full cell contents when rendering the results table
pd.set_option("display.max_colwidth", None)

# Full-text search: top 5 hits for "risk factors", returning only the fields we display.
# NOTE: `query` is reused by a later cell, so its value must stay as-is.
query = {
    "_source": ["article", "title", "published", "reference", "text"],
    "size": 5,
    "query": {
        "query_string": {"query": "risk factors"}
    }
}

# Collect (title, published, reference, matching text) for each hit
hits = es.search(index="articles", body=query)["hits"]["hits"]
results = [
    (hit["_source"]["title"], hit["_source"]["published"],
     hit["_source"]["reference"], hit["_source"]["text"])
    for hit in hits
]

df = pd.DataFrame(results, columns=["Title", "Published", "Reference", "Match"])
display(HTML(df.to_html(index=False)))
# + [markdown] id="ylxOKji1-9_K"
# # Derive columns with Extractive QA
#
# The next section uses Extractive QA to derive additional columns. For each article, the full text is retrieved and a series of questions are asked of the document. The answers are added as a derived column per article.
# + id="mwBTrCkcOM_H"
# %%capture
from txtai.embeddings import Embeddings
from txtai.pipeline import Extractor

# Create embeddings model, backed by sentence-transformers & transformers
embeddings = Embeddings({"path": "sentence-transformers/nli-mpnet-base-v2"})

# Create extractor instance using qa model designed for the CORD-19 dataset.
# NOTE(review): presumably the embeddings rank candidate text and the QA model
# extracts answer spans — confirm against the txtai Extractor documentation.
extractor = Extractor(embeddings, "NeuML/bert-small-cord19qa")
# + id="Yv75Lh-cOpL9" colab={"base_uri": "https://localhost:8080/", "height": 400} outputId="adee88e1-02bf-4a20-febb-6d2c170a63f9"
# Template query: up to 1000 sections of a single article, ordered by section id.
# The "article" term value (None here) is filled in per lookup.
document = {
    "_source": ["id", "name", "text"],
    "size": 1000,
    "query": {
        "term": {"article": None}
    },
    "sort" : ["id"]
}
def sections(article):
    """Return the searchable section texts for a single article.

    Queries Elasticsearch for up to 1000 sections belonging to ``article``
    (ordered by section id) and keeps only informative sections, mirroring
    the heading filter applied at indexing time.

    :param article: article id to look up
    :return: list of section text strings
    """
    rows = []

    # Build a per-call search body.  BUG FIX: the original used document.copy(),
    # a *shallow* copy, so assigning into search["query"]["term"] mutated the
    # nested dict shared with the module-level `document` template.  A fresh
    # nested "query" dict avoids that aliasing entirely.
    search = {**document, "query": {"term": {"article": article}}}

    for result in es.search(index="articles", body=search)["hits"]["hits"]:
        source = result["_source"]
        name, text = source["name"], source["text"]

        # Only process certain document sections (same heading filter used
        # when loading data into Elasticsearch)
        if not name or not re.search(r"background|(?<!.*?results.*?)discussion|introduction|reference", name.lower()):
            rows.append(text)

    return rows
results = []

# Re-run the earlier "risk factors" search (`query` from the previous cell); for
# each hit, run the QA extractor over the article's full section text and append
# the extracted answers as extra columns.
for result in es.search(index="articles", body=query)["hits"]["hits"]:
    source = result["_source"]

    # Use QA extractor to derive additional columns.
    # NOTE(review): tuple layout assumed to be (column name, similarity query,
    # question, snippet flag) — confirm against txtai's Extractor API docs.
    answers = extractor([("Risk factors", "risk factor", "What are names of risk factors?", False),
                         ("Locations", "city country state", "What are names of locations?", False)], sections(source["article"]))

    # answer[1] is the extracted answer text for each question
    results.append((source["title"], source["published"], source["reference"], source["text"]) + tuple([answer[1] for answer in answers]))

df = pd.DataFrame(results, columns=["Title", "Published", "Reference", "Match", "Risk Factors", "Locations"])
display(HTML(df.to_html(index=False)))
| examples/06_Extractive_QA_with_Elasticsearch.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from sklearn.model_selection import train_test_split
import pandas as pd
import tensorflow as tf
import tensorflow_hub as hub
from datetime import datetime
import bert
from bert import run_classifier
from bert import optimization
from bert import tokenization
from tensorflow import keras
import os
import re
# +
# Set the output directory for saving model file
# Optionally, set a GCP bucket location
OUTPUT_DIR = '../models'

DO_DELETE = False    # wipe any previous model output before training
USE_BUCKET = False   # write to a GCS bucket instead of local disk
BUCKET = 'BUCKET_NAME'

if USE_BUCKET:
    OUTPUT_DIR = 'gs://{}/{}'.format(BUCKET, OUTPUT_DIR)
    from google.colab import auth
    auth.authenticate_user()

if DO_DELETE:
    try:
        tf.gfile.DeleteRecursively(OUTPUT_DIR)
    except Exception:
        # Directory may simply not exist yet; treat deletion as best-effort.
        # (Narrowed from a bare `except:`, which would also swallow
        # KeyboardInterrupt and SystemExit.)
        pass

tf.gfile.MakeDirs(OUTPUT_DIR)
print('***** Model output directory: {} *****'.format(OUTPUT_DIR))
# +
# Load all files from a directory in a DataFrame.
def load_directory_data(directory):
    """Read every review file in *directory* into a DataFrame.

    Each row holds the file's full text ("sentence") and the rating parsed
    from the IMDB filename pattern <id>_<rating>.txt ("sentiment", a string).
    """
    data = {}
    data["sentence"] = []
    data["sentiment"] = []
    # Raw string and pre-compiled pattern: the original plain literal
    # "\d+_(\d+)\.txt" contains invalid escape sequences (DeprecationWarning
    # today, a SyntaxError in future Python), and compiling once hoists the
    # regex work out of the per-file loop.
    rating_pattern = re.compile(r"\d+_(\d+)\.txt")
    for file_path in os.listdir(directory):
        # tf.gfile supports both local paths and GCS paths transparently
        with tf.gfile.GFile(os.path.join(directory, file_path), "r") as f:
            data["sentence"].append(f.read())
            data["sentiment"].append(rating_pattern.match(file_path).group(1))
    return pd.DataFrame.from_dict(data)
# Merge positive and negative examples, add a polarity column and shuffle.
def load_dataset(directory):
    """Build one shuffled DataFrame of labeled reviews from an aclImdb split."""
    labeled_frames = []
    for subdir, polarity in (("pos", 1), ("neg", 0)):
        frame = load_directory_data(os.path.join(directory, subdir))
        frame["polarity"] = polarity
        labeled_frames.append(frame)
    # sample(frac=1) shuffles all rows; reset_index discards the old ordering
    return pd.concat(labeled_frames).sample(frac=1).reset_index(drop=True)
# -
# Load the IMDB train/test splits (expects the extracted aclImdb tree on disk).
train = load_dataset(os.path.join("../data/", "aclImdb", "train"))
test = load_dataset(os.path.join("../data/", "aclImdb", "test"))

# Subsample to keep fine-tuning fast.  NOTE(review): no random_state is set,
# so the 5000-row subset differs on every run — confirm reproducibility is
# not required here.
train = train.sample(5000)
test = test.sample(5000)

DATA_COLUMN = 'sentence'   # column holding the review text
LABEL_COLUMN = 'polarity'  # 0 = negative, 1 = positive
label_list = [0, 1]        # label vocabulary handed to BERT's classifier
# +
# Use the InputExample class from BERT's run_classifier code to create examples from the data.
# guid is passed as None (unused here); text_b stays None because this is a
# single-sentence classification task, not a sentence-pair task.
train_InputExamples = train.apply(lambda x: bert.run_classifier.InputExample(guid=None,
                                                                             text_a = x[DATA_COLUMN],
                                                                             text_b = None,
                                                                             label = x[LABEL_COLUMN]), axis = 1)

test_InputExamples = test.apply(lambda x: bert.run_classifier.InputExample(guid=None,
                                                                           text_a = x[DATA_COLUMN],
                                                                           text_b = None,
                                                                           label = x[LABEL_COLUMN]), axis = 1)
# +
# This is a path to an uncased (all lowercase) version of BERT
BERT_MODEL_HUB = "https://tfhub.dev/google/bert_uncased_L-12_H-768_A-12/1"

def create_tokenizer_from_hub_module():
    """Get the vocab file and casing info from the Hub module.

    Builds a throwaway TF1 graph and session solely to evaluate the module's
    "tokenization_info" tensors, then constructs a FullTokenizer from the
    resulting vocab file path and lowercasing flag.
    """
    with tf.Graph().as_default():
        bert_module = hub.Module(BERT_MODEL_HUB)
        tokenization_info = bert_module(signature="tokenization_info", as_dict=True)
        with tf.Session() as sess:
            vocab_file, do_lower_case = sess.run([tokenization_info["vocab_file"],
                                                  tokenization_info["do_lower_case"]])

    return bert.tokenization.FullTokenizer(vocab_file=vocab_file, do_lower_case=do_lower_case)

tokenizer = create_tokenizer_from_hub_module()
# -
# Quick sanity check of the WordPiece tokenizer output.
tokenizer.tokenize("This here's an example of using the BERT tokenizer")

# +
# We'll set sequences to be at most 128 tokens long.
MAX_SEQ_LENGTH = 128

# Convert our train and test features to InputFeatures that BERT understands
# (numeric features padded/truncated to MAX_SEQ_LENGTH — see bert.run_classifier).
train_features = bert.run_classifier.convert_examples_to_features(train_InputExamples,
                                                                  label_list,
                                                                  MAX_SEQ_LENGTH,
                                                                  tokenizer)
test_features = bert.run_classifier.convert_examples_to_features(test_InputExamples,
                                                                 label_list,
                                                                 MAX_SEQ_LENGTH,
                                                                 tokenizer)
# -
# -
| notebooks/Sentiment Analysis Example using BERT.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import rescomp as rc
import scipy as sp
import numpy as np
from scipy.interpolate import CubicSpline
from matplotlib import pyplot as plt
plt.rcParams["figure.figsize"] = [10, 5]
# # Evolution of Reservoir Fixed Points with Time
#
# This notebook will investigate the following ideas:
# * Plot the movement of the fixed points along with the orbit of the reservoir nodes
# - Use a training signal that is easy to understand
# - Show the effect of different kinds of readin matrixes on the movement of fixed points
# - Measure the effectiveness of the different readins on linear independence of the signals
# - Show a link between linear independence and learning ability
# - We want to know the derivative of the fixed point.
# +
# Hyperparameters for a tiny reservoir computer driven by the Lorenz system.
# res_sz is the node count (used below to size initial guesses); activ_f is a
# sigmoid.  Remaining keys are rescomp.ResComp hyperparameters — see the
# rescomp documentation for their exact semantics.
LORENZPRMS = {
    "res_sz" : 3,
    "activ_f" : lambda x: 1/(1 + np.exp(-1*x)),
    "gamma" : 19.1,
    "mean_degree" : 2.0,
    "ridge_alpha" : 6e-7,
    "sigma" : 0.063,
    "spect_rad" : 8.472
}
rcomp = rc.ResComp(**LORENZPRMS, sparse_res=False)
# -
# Short Lorenz orbit as the driving signal; spline lets us evaluate u at any t.
t, U = rc.orbit("lorenz", duration=5, trim=True)
u = CubicSpline(t, U)

rstar = []
# Random initial guess for the first solve.  (The original also assigned
# np.ones(...) here, which was immediately overwritten — dead store removed.)
# NOTE(review): no seed is set, so the first guess differs per run.
r0 = np.random.rand(LORENZPRMS["res_sz"])
for ti in t:
    # Instantaneous fixed point: solve res_f(ti, r, u) = 0, warm-starting each
    # solve from the previous solution so the branch is tracked continuously.
    F = lambda r: rcomp.res_f(ti, r, u)
    fp = sp.optimize.fsolve(F, r0)
    rstar.append(fp)
    r0 = fp
rstar = np.vstack(tuple(rstar))

# Reservoir node trajectories when driven by the same signal
Udrive = rcomp.internal_state_response(t, U, rstar[0])
# ## Sigmoid (3 Nodes)
# N trims leading samples from the plot; 0 shows the whole time range.
N = 0
plt.plot(t[N:], rstar[N:,:], 'k', alpha=0.6)      # tracked fixed points (black)
plt.plot(t[N:], Udrive[N:,:], 'cyan', alpha=0.8)  # driven node states (cyan)
plt.title("3 Nodes. Fixed points in black.")
plt.show()
# +
# Same hyperparameters as LORENZPRMS, but with a 10-node reservoir.
LORENZPRMS2 = {
    "res_sz" : 10,
    "activ_f" : lambda x: 1/(1 + np.exp(-1*x)),
    "gamma" : 19.1,
    "mean_degree" : 2.0,
    "ridge_alpha" : 6e-7,
    "sigma" : 0.063,
    "spect_rad" : 8.472
}
rcomp = rc.ResComp(**LORENZPRMS2, sparse_res=False)
# -
# Longer Lorenz orbit for the 10-node reservoir; spline for continuous evaluation.
t, U = rc.orbit("lorenz", duration=20, trim=True)
u = CubicSpline(t, U)

# Track the instantaneous fixed point through time: solve res_f(t, r, u) = 0 at
# each sample, warm-starting every solve from the previous solution.
fixed_points = []
guess = np.ones(LORENZPRMS2["res_sz"])
for time_i in t:
    guess = sp.optimize.fsolve(lambda r: rcomp.res_f(time_i, r, u), guess)
    fixed_points.append(guess)
rstar = np.vstack(fixed_points)

# Reservoir node trajectories when driven by the same signal
Udrive = rcomp.internal_state_response(t, U, rstar[0])
# ## Sigmoid (10 Nodes)
# +
plt.plot(t, rstar, 'k', alpha=0.6)     # tracked fixed points (black)
plt.plot(t, Udrive, 'cyan', alpha=0.8) # driven node states (cyan)
plt.title("10 Nodes. Fixed points in black.")
plt.show()
# -

# Train on the orbit, then predict starting from the first training point and
# overlay the prediction on the true signal.
rcomp.train(t, U)
pre = rcomp.predict(t, U[0])

plt.plot(t, U, c='gray')                # true Lorenz orbit
plt.plot(t, pre, c="blue", alpha=0.7)   # reservoir prediction
plt.title("Prediction with Sigmoid")
plt.show()
print("Error: ", rc.system_fit_error(t, pre, "lorenz", order=2))
#
# +
# 10-node reservoir with sin(x) activation and a much smaller spect_rad
# (1.5 vs 8.472 in the sigmoid configurations).
LORENZPRMS3 = {
    "res_sz" : 10,
    "activ_f" : np.sin,
    "gamma" : 19.1,
    "mean_degree" : 2.0,
    "ridge_alpha" : 6e-7,
    "sigma" : 0.063,
    "spect_rad" : 1.5
}
rcomp = rc.ResComp(**LORENZPRMS3, sparse_res=False)
# -
# Short Lorenz orbit for the sin-activation reservoir.
t, U = rc.orbit("lorenz", duration=5, trim=True)
u = CubicSpline(t, U)

# Continuation of the instantaneous fixed point, starting from the zero vector
# and warm-starting each fsolve from the previous solution.
fp_track = []
current = np.zeros(LORENZPRMS3["res_sz"])
for time_i in t:
    residual = lambda r: rcomp.res_f(time_i, r, u)
    current = sp.optimize.fsolve(residual, current)
    fp_track.append(current)
rstar = np.vstack(fp_track)

# Reservoir node trajectories when driven by the same signal
Udrive = rcomp.internal_state_response(t, U, rstar[0])
# ## Sin(x) (10 Nodes)
# +
plt.plot(t, rstar, 'k', alpha=0.6)     # tracked fixed points (black)
plt.plot(t, Udrive, 'cyan', alpha=0.8) # driven node states (cyan)
plt.title("10 Nodes. Fixed points in black. Sin(x) activation")
plt.show()
# -

# ## Sin(x) prediction

# Train the sin-activation reservoir and compare its closed-loop prediction
# against the true orbit, starting from the first training point.
rcomp.train(t, U)
pre = rcomp.predict(t, U[0])
plt.plot(t, U, c='gray')               # true Lorenz orbit
plt.plot(t, pre, c="blue", alpha=0.7)  # reservoir prediction
plt.title("Prediction with sin(x)")
plt.show()
print(rc.system_fit_error(t, pre, "lorenz", order=2))
rc.plot3d(pre)
# ## Sigmoid (50 Nodes)
# +
# 50-node sigmoid reservoir; all other hyperparameters match LORENZPRMS2.
LORENZPRMS4 = {
    "res_sz" : 50,
    "activ_f" : lambda x: 1/(1 + np.exp(-1*x)),
    "gamma" : 19.1,
    "mean_degree" : 2.0,
    "ridge_alpha" : 6e-7,
    "sigma" : 0.063,
    "spect_rad" : 8.472
}
rcomp = rc.ResComp(**LORENZPRMS4, sparse_res=False)
# -
# Long Lorenz orbit for the 50-node reservoir.
t, U = rc.orbit("lorenz", duration=20, trim=True)
u = CubicSpline(t, U)

# Follow the instantaneous fixed point along the drive signal, warm-starting
# each root solve from the previous fixed point.
track = []
seed = np.ones(LORENZPRMS4["res_sz"])
for time_i in t:
    seed = sp.optimize.fsolve(lambda r: rcomp.res_f(time_i, r, u), seed)
    track.append(seed)
rstar = np.vstack(track)

# Reservoir node trajectories when driven by the same signal
Udrive = rcomp.internal_state_response(t, U, rstar[0])
# ## Sigmoid (50 Nodes)
# +
# Fixed-point tracks (black) overlaid on the driven node states (cyan).
plt.plot(t, rstar, 'k', alpha=0.6)
plt.plot(t, Udrive, 'cyan', alpha=0.8)
# Label fix: res_sz is 50 in LORENZPRMS4 (the original title said "20 Nodes").
plt.title("50 Nodes. Fixed points in black.")
plt.show()
# -
#
# ## Sigmoid prediction
# Train the 50-node reservoir and compare its closed-loop prediction against
# the true orbit, starting from the first training point.
rcomp.train(t, U)
pre = rcomp.predict(t, U[0])

plt.plot(t, U, c='gray')               # true Lorenz orbit
plt.plot(t, pre, c="blue", alpha=0.7)  # reservoir prediction
plt.title("Prediction with Sigmoid")
plt.show()
print("Error: ", rc.system_fit_error(t, pre, "lorenz", order=2))
rc.plot3d(pre)
# ## Hypothesis:
# Linear independence does not make for better learning
| Notebooks/FixedPointOrbits.ipynb |