code
stringlengths 38
801k
| repo_path
stringlengths 6
263
|
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python(scRFE1)
# language: python
# name: scrfe1
# ---
import pandas as pd
from anndata import read_h5ad
# Load the liver droplet single-cell dataset (hard-coded local path —
# adjust for your machine).
adata = read_h5ad('/Users/madelinepark/Downloads/Liver_droplet.h5ad')
from scRFE.scRFE import scRFE
# +
# set(adata.obs['cell_ontology_class'])
# +
# scRFE1 = scRFE(adata, classOfInterest = 'age', nEstimators = 10, Cv=3)
# +
# scRFEage = scRFE(adata, classOfInterest = 'age', nEstimators = 10, Cv=3)
# +
# from scRFE.scRFE import scRFE
# -
# Run scRFE (recursive feature elimination) split by cell type, using a
# small random forest (10 estimators) and 3-fold cross-validation.
# NOTE(review): the subscript below implies scRFE returns an indexable
# result — confirm against the scRFE API documentation.
scRFEpypi = scRFE(adata, classOfInterest = 'cell_ontology_class', nEstimators = 10, Cv=3)
# Inspect the first element of the result.
scRFEpypi[0]
|
scripts/practiceScripts/scRFE-Duplicate-Jul24.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.7 ('pytorch')
# language: python
# name: python3
# ---
import torch
# NOTE(review): original comment (Korean) asked why this behaves differently
# when run as a .py script vs. inside a Jupyter notebook.
from utils.imagenet_dataset import TinyImagenet200
# +
# Root directory of the Tiny ImageNet-200 dataset.
data_root = './data/tiny-imagenet-200'
# Bug fix: `data_root` was defined but never used — the constructor was
# called with no arguments (the call spanned two lines with an empty body,
# suggesting the argument was lost).
# TODO(review): confirm TinyImagenet200's first positional parameter is the
# dataset root path.
dataset = TinyImagenet200(
    data_root,
)
|
tiny-imagenet200-train-test.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="P9bJCDjdlgG6" colab_type="text"
# # **Tame Your Python**
#
# Let's see how we can classify emails based on their contents
#
# `Leggo`
#
# + id="aQwc0re5mFld" colab_type="code" colab={}
# Third-party imports for numerics, plotting, and data handling.
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
# Apply seaborn's white-grid theme to all subsequent plots.
sns.set_style('whitegrid')
# + id="_5S81I3mXJD0" colab_type="code" colab={}
def plot_dataset(X):
    """Scatter-plot the first two columns of a 2-D sample array and show it."""
    xs, ys = X[:, 0], X[:, 1]
    plt.scatter(xs, ys)
    plt.show()
# + id="-F0QPdiu7-fD" colab_type="code" colab={}
def visual(c, X):
    """Cluster X into c groups with k-means and scatter-plot each cluster.

    Parameters
    ----------
    c : int
        Number of clusters passed to KMeans.
    X : array-like of shape (n_samples, 2)
        Points to cluster; only the first two columns are plotted.
    """
    from sklearn.cluster import KMeans
    cluster_object = KMeans(n_clusters = c, init = 'k-means++')
    y_pred = cluster_object.fit_predict(X)
    # Bug fix: 'turqoise' was misspelled — 'turquoise' is the valid matplotlib
    # color name; the typo raised ValueError once 14+ clusters were requested.
    colors = ['red', 'green', 'blue', 'cyan', 'black', 'yellow', 'magenta', 'brown', 'orange', 'silver', 'goldenrod', 'olive', 'dodgerblue', 'turquoise']
    clusters = np.unique(y_pred)
    print(clusters)
    for i in clusters:
        # Bug fix: wrap the color index so requesting more clusters than
        # palette entries no longer raises IndexError (colors repeat instead).
        plt.scatter(X[y_pred == i, 0], X[y_pred == i, 1], s = 10,
                    c = colors[i % len(colors)], label = 'Cluster' + str(i))
    plt.title('Clusters')
    plt.xlabel('X1')
    plt.ylabel('X2')
    plt.legend()
    plt.show()
# + id="NUD9-aXg7zrO" colab_type="code" colab={}
def visual_elbow(X):
    """Plot within-cluster sum of squares (WCSS) for k = 1..19 (elbow method)."""
    from sklearn.cluster import KMeans
    ks = range(1, 20)
    # Fit one k-means model per candidate k and record its inertia (WCSS).
    wcss = [KMeans(n_clusters = k, init = 'k-means++').fit(X).inertia_ for k in ks]
    plt.scatter(ks, wcss)
    plt.title('The Elbow Method')
    plt.xlabel('Number of clusters')
    plt.ylabel('WCSS')
    plt.show()
    plt.clf()
# + [markdown] id="keG44qpdQiVA" colab_type="text"
# ## Get the dataset
# + id="18dgY56EWjRm" colab_type="code" colab={}
# Number of samples per synthetic dataset.
n = 1000
from sklearn.datasets import make_moons, make_blobs, make_circles, make_s_curve
# Generate several synthetic 2-D datasets with different cluster geometries.
X_moons, y_moons = make_moons(n_samples = n, noise=0.1)
X_blobs, y_blobs = make_blobs(n_samples = n, n_features = 2)
X_circles, y_circles = make_circles(n_samples=n, noise=0.1, factor = 0.5)
# NOTE: make_s_curve returns 3-D points; later plots use only its first two columns.
X_scurve, y_scurve = make_s_curve(n_samples=n, noise = 0.1)
# Uniformly random points (no cluster structure at all).
X_random = np.random.random([n, 2])
# Linear transformation producing anisotropically distributed blobs.
transformation = [[0.80834549, -0.83667341], [-0.20887718, 0.85253229]]
X_aniso = np.dot(X_blobs, transformation)
# + id="DlMRwTHmWwr8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 265} outputId="e61e0476-c3d5-46b4-d41c-2b07403d0ef5"
# Moons: two interleaving half-circles — non-convex clusters.
plot_dataset(X_moons)
# + id="HPjA8Jq6eDB7" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 313} outputId="aedc0260-6014-47fa-fba0-ae725fc75ebb"
visual_elbow(X_moons)
# + id="XDEHoQEveEPT" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 313} outputId="a7fad2e7-0af2-4619-c78e-53a61223909f"
visual(10, X_moons)
# + id="5jjPOG4sXkpY" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 265} outputId="95fcadf5-4bf3-4393-dd39-056a9ed4fb5c"
# Blobs: isotropic Gaussian clusters — the easy case for k-means.
plot_dataset(X_blobs)
# + id="tEIkWOnEewjC" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 591} outputId="481de4ff-fe90-41b7-e353-2fb97b2d7c14"
visual_elbow(X_blobs)
visual(3, X_blobs)
# + id="z4GaxV_PYNZG" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 265} outputId="069fce15-a633-4920-f60a-9cb594ea86b2"
# Circles: concentric rings — k-means cannot separate these well.
plot_dataset(X_circles)
# + id="u_ra8sjehoYE" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 591} outputId="7a706d64-4279-4762-8afd-03c607f0eaff"
visual_elbow(X_circles)
visual(3, X_circles)
# + id="l0scEMuiZBVM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 265} outputId="6f681fc2-96ab-47fd-b35b-12256130ea48"
# S-curve is 3-D; plot only its first two columns.
plot_dataset(X_scurve[:,0:2])
# + id="S-1QhMkGhuD5" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 591} outputId="bcb59a93-0970-407a-c950-86026ef9831a"
visual_elbow(X_scurve)
visual(2, X_scurve)
# + id="g0tHqns2ZvFs" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 265} outputId="7ca2bad3-3709-4655-e9fb-efdd37e4453d"
# Uniform random points: the elbow plot should show no clear elbow.
plot_dataset(X_random)
# + id="D2_KjNEnh5Lh" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 591} outputId="42dadf63-b70f-41a9-9480-5b2db5c8bfa8"
visual_elbow(X_random)
visual(3, X_random)
# + id="SWrSoma0bNIj" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 265} outputId="6fc30ce8-fa8c-4fc9-a6e2-356baf4df98a"
# Anisotropic blobs: stretched clusters that challenge k-means' spherical assumption.
plot_dataset(X_aniso)
# + id="dmLNCuomh7l-" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 591} outputId="3bfcf56e-8e5b-498b-8bb2-88bf2b5f5712"
visual_elbow(X_aniso)
visual(3, X_aniso)
# + id="h-B0EAiBh9Z7" colab_type="code" colab={}
|
MachineLearning_DataScience/Demo81_Clustering_KMeans_VisualAid.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Data Analysis and Visualisation
# Three popular modules used for data analysis in Python.
#
# [Pandas](https://pandas.pydata.org/)
#
# Pandas is an open source, BSD-licensed library providing high-performance, easy-to-use data structures and data analysis tools for the Python programming language.
#
# [NumPy](https://numpy.org/)
#
# NumPy is the fundamental package for scientific computing with Python. It contains among other things: a powerful N-dimensional array object sophisticated (broadcasting) functions tools for integrating C/C++ and Fortran code useful linear algebra, Fourier transform, and random number capabilities
#
# [Matplotlib](https://matplotlib.org/)
#
# Matplotlib is a Python 2D plotting library which produces publication quality figures in a variety of hardcopy formats and interactive environments across platforms. Matplotlib can be used in Python scripts, the Python and IPython shells, the Jupyter notebook, web application servers, and four graphical user interface toolkits.
#
# We will focus on `pandas` which uses `numpy` and `matplotlib` internally.
import pandas as pd
help(pd)
# ## Data Structures
# A `Series` is a list of values. Like a row in a spreadsheet. However, a `Series` has an index (first row).
# Build a labeled Series: values 1,3,5,7,9 under index labels 'a'..'e'.
s = pd.Series([1, 3, 5, 7, 9], index=list('abcde'))
# Display the Series (the notebook echoes the bare expression).
s
# Select a single label; the list form returns a one-element Series.
s[['a']]
# Select several labels at once.
s[['a', 'b']]
# A `DataFrame` is a 2-dimension data structure with rows and columns. It is like a spreadsheet. A `DataFrame` also has an index.
# The easiest way to create a `DataFrame` is to read in a `csv` or `spreadsheet`.
# Read a CSV into a `DataFrame`, using the first column as the index and
# forcing every column to 64-bit integers.
# NOTE: requires 'data.csv' in the current working directory.
df = pd.read_csv('data.csv', index_col=0, dtype='int64')
# Look at the `DataFrame`
df
# Check the shape of a `DataFrame` — (rows, columns)
df.shape
# Information about the `DataFrame` such as memory usage
df.info()
# Lookup first 10 rows
df[0:10]
# Lookup last 10 rows
df[-10:]
# Lookup rows by integer *position* with `iloc`; label-based lookup would use `df.loc`.
df.iloc[[999993,999996]]
# Lookup one column
df['A']
# Lookup multiple columns
df[['A', 'C']]
# Filter rows by `query` expression
df.query('A > 111895686 and A < 111899999')
# Cumulative sum first 1000 rows
df[0:1000].cumsum()
# Calculate percentage change first 1000 rows
df[0:1000].pct_change()
# Divide everything by 10
df = df / 10
# Multiply everything by 10
df = df * 10
# Add 10 to everything
df = df + 10
# Subtract 10 from everything
df = df - 10
# Descriptive statistics
df.describe()
# Plot one column
# %matplotlib inline
df['A'].plot()
# Plot two columns
df[['A', 'C']].plot()
# Plot the first 100000 rows of two columns
df[['A', 'C']][0:100000].plot()
# Plot distribution of one column
df['A'].hist(bins=1000)
|
18 - Data Analysis and Visualisation.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # "Self-Supervised Monocular Depth Estimation in Autonomous Driving"
# > "In this post, I will review self-supervised monocular depth estimation methods."
#
# - toc: True
# - branch: master
# - badges: true
# - comments: true
# - categories: [deep learning, autonomous driving, jupyter]
# - image: images/some_folder/your_image.png
# - hide: false
# - search_exclude: true
# It is easy for humans to estimate depth in a scene, but what about machines? Typically, robots and self-driving cars use LiDAR sensors to gauge the depth of a scene. However, LiDAR is an expensive sensor that is beyond the reach of many personal vehicles. Robo-Taxis may be reasonable in business models that provide service across a city, but not for personal vehicles. As a result, some companies are using camera-only approaches to infer depth information from monocular images. I will discuss some of the state-of-the-art approaches for monocular depth estimation in this post.
#
# [Several approaches are usually used for depth estimation](https://arxiv.org/pdf/2003.06620.pdf):
#
# - Geometry-based methods: Geometric constraints are used to recover 3D structures from images. In 3D reconstruction and Simultaneous Localization and Mapping (SLAM) , structure from motion (SfM) is an effective method of estimating 3D structures from a series of 2D image sequences. The accuracy of depth estimation depends heavily on exact feature matching and high-quality image sequences. SfM suffers from monocular scale ambiguity as well. Stereo vision matching is also capable of recovering 3D structures of scenes from two viewpoints. It simulates the way human eyes work by using two cameras, and a cost function is used to calculate disparity maps of images. Due to the calibration of the transformation between the two cameras, the scale factor is incorporated into depth estimation during stereo vision matching.
#
# - Sensor-based methods: This approach uses sensors such as RGB-D and LiDAR. There are several disadvantages to this method, such as cost, power consumption, and sensor size.
#
# - Deep learning-based methods: The pixel-level depth map can be recovered from a single image in an end-to-end manner based on deep learning. It can be done in supervised, semi-supervised, or self-supervised manner.
#
# In this post, we just consider the self-supervised methods in which the geometric constraints between frames are regarded as the supervisory signal during the training process. There are several types of self-supervised learning methods for estimating depth using images, such as stereo-based and monocular videos. Using methods based on monocular videos presents its own challenges. Along with estimating depth, the model also requires estimating ego-motion between pairs of temporal images during training. The process involves training a pose estimation network, which takes a finite sequence of frames as input and outputs the corresponding camera transformations. Stereo data, however, make the camera-pose estimation a one-time offline calibration but may introduce occlusion and texture-copy artifacts.
#
# One of the interesting use cases for depth estimation is to use it as an auxiliary task for end-to-end policy learning. It can lead to better representation learning and help the policy to learn some information about the geometric and the depth of the scene. Other tasks, such as optical flow, semantic segmentation, object detection, motion prediction, etc, can also be used to improve representation learning. For example, the following image shows a model from Wayve.ai, a self-driving car company in the UK working on end-to-end autonomous driving, which tries to use multi-task learning to improve representation learning and driving policy learning.
# 
# *[source](https://wayve.ai/blog/driving-intelligence-with-end-to-end-deep-learning/)*
#
# There are also some works that try to learn multiple tasks jointly. Sometimes there is some information in other related tasks that help the networks to learn better. For example, the following work tries to learn optical flow, motion segmentation, camera motion estimation, and depth estimation together:
#
# 
# *[source](https://openaccess.thecvf.com/content_CVPR_2019/papers/Ranjan_Competitive_Collaboration_Joint_Unsupervised_Learning_of_Depth_Camera_Motion_Optical_CVPR_2019_paper.pdf)*
#
# Or the following work that tries to learn optical flow and depth together:
#
# 
# *[source](https://openaccess.thecvf.com/content_ECCV_2018/papers/Yuliang_Zou_DF-Net_Unsupervised_Joint_ECCV_2018_paper.pdf)*
#
# Here in this post, we just want to understand how depth estimation works, and then it would be more straightforward to mix it with other tasks. Maybe in the future, I write other blog posts on other techniques and tasks. In the rest of this post, I will review two related papers for self-supervised monocular depth estimation which use monocular videos and stereo images methods. Let’s get started!
# # Unsupervised Learning of Depth and Ego-Motion from Video
#
# 
# *[source](https://arxiv.org/pdf/1704.07813.pdf)*
#
# The human brain is very good at detecting ego-motion and 3D structures in scenes. Observing thousands of scenes and forming consistent models of what we see in the past has provided us with a deep, structural understanding of the world. We can apply this knowledge when perceiving a new scene, even from a single monocular image, since we have accumulated millions of observations about the world’s regularities — roads are flat, buildings are straight, cars are supported by roads, etc.
#
# The self-supervised methods based on the monocular image sequences and the geometric constraints are built on the projection between neighboring frames:
#
# 
# *[source]()*
#
# where $p_n$ stands for the pixel on image $I_n$, and $p_{n−1}$ refers to the corresponding pixel of $p_n$ on the image $I_{n−1}$. K is the camera intrinsics matrix, which is known. $D_n(p_n)$ denotes the depth value at pixel $p_n$, and $T_{n→n−1}$ represents the spatial transformation between $I_n$ and $I_{n−1}$. Hence, if $D_n(p_n)$ and $T_{n→n−1}$ are known, the correspondence between the pixels on different images ($I_n$ and $I_{n−1}$) are established by the projection function.
#
# This work tries to estimate the $D$ and $T$ in the above equation by training an end-to-end model in an unsupervised manner to observe sequences of images and to explain its observations by predicting the ego-motion (parameterized as 6-DoF transformation matrices) and the underlying scene structure (parameterized as per-pixel depth maps under a reference view).
#
# They propose two CNNs in this work that are trained jointly: a single-view depth estimation and a camera pose estimation from unlabeled video sequences.
#
# 
# *[source](https://arxiv.org/pdf/1704.07813.pdf)*
# They use the view synthesis idea as the supervision signal for depth and pose prediction CNNs: given one input view of a scene, synthesize a new image of the scene seen from a different camera pose. This synthesis process can be implemented in a fully differentiable manner with CNNs as the geometry and pose estimation modules.
#
# Let’s consider $<I_1, …, I_N>$ as a training image sequence with one of the frames I_t as the target view and the rest as the source views $I_s(1 ≤ s ≤ N, s≠t)$. The view synthesis objective can be formulated as:
#
# 
# *[source](https://arxiv.org/pdf/1704.07813.pdf)*
#
# where p indexes over pixel coordinates and $Iˆ_s$ is the source view $I_s$ warped to the target coordinate frame based on a depth image-based rendering module (described in the following), taking the predicted depth $Dˆ_t$, the predicted 4×4 camera transformation matrix $Tˆ_{t→s}$ and the source view I_s as input.
#
# The differentiable depth image-based renderer reconstructs the target view $I_t$ by sampling pixels from a source view $I_s$ based on the predicted depth map $Dˆ_t$ and the relative pose $Tˆ_{t→s}$.
#
# 
# *[source](https://arxiv.org/pdf/1704.07813.pdf)*
#
# Let $p_t$ denote the homogeneous coordinates of a pixel in the target view, and $K$ denote the camera intrinsics matrix. The $p_t$’s projected coordinates onto the source view $p_s$ (which is a continuous value) can be obtained by:
#
# 
# *[source](https://arxiv.org/pdf/1704.07813.pdf)*
#
# Then a differentiable bilinear sampling mechanism is used to linearly interpolate the values of the 4-pixel neighbors (top-left, top-right, bottom-left, and bottom-right) of $p_s$ to approximate $I_s(p_s)$.
#
# 
# *[source](https://arxiv.org/pdf/1704.07813.pdf)*
#
# where $w^{ij}$ is linearly proportional to the spatial proximity between $p_s$ and $p^{ij}_s$, and $sum(w^{ij})=1$.
#
# The above view synthesis formulation implicitly assumes 1) the scene is static without moving objects; 2) there is no occlusion/disocclusion between the target view and the source views; 3) the surface is Lambertian so that the photo-consistency error is meaningful. To improve the robustness of the learning pipeline to these factors, an explainability prediction network (jointly and simultaneously with the depth and pose networks) is trained that outputs a per-pixel soft mask $Eˆ_s$ for each target-source pair, indicating the network’s belief in where direct view synthesis will be successfully modeled for each target pixel.
#
# 
# *[source](https://arxiv.org/pdf/1704.07813.pdf)*
#
#
# Since there is no direct supervision for $Eˆ_s$, training with the above loss would result in predicting $Eˆ_s$ to be zero. To prevent this, a regularization term $L_{reg}(Eˆ_s)$ is considered that encourages nonzero predictions.
#
# The other issue is that the gradients are mainly derived from the pixel intensity difference between $I(p_t)$ and the four neighbors of $I(p_s)$, which would inhibit training if the correct $p_s$ (projected using the ground-truth depth and pose) is located in a low-texture region or far from the current estimation. To alleviate this problem, they use an explicit multi-scale and smoothness loss (the $L1$ norm of the second-order gradients for the predicted depth maps) that allows gradients to be derived from larger spatial regions directly.
#
# The final loss function they used for training is as follows:
#
# 
# *[source](https://arxiv.org/pdf/1704.07813.pdf)*
#
# And finally, the results are as follows:
#
# 
# *[source](https://arxiv.org/pdf/1704.07813.pdf)*
#
# 
# *[source](https://arxiv.org/pdf/1704.07813.pdf)*
#
# 
# *[source](https://github.com/tinghuiz/SfMLearner)*
#
# paper: https://arxiv.org/pdf/1704.07813.pdf
#
# code: https://github.com/tinghuiz/SfMLearner
#
# Presentation at NeurIPS 2017:
#
# > youtube: https://youtu.be/HWu39YkGKvI
# # Digging Into Self-Supervised Monocular Depth Estimation
#
# This paper uses both monocular videos and stereo pairs for depth estimation.
#
# 
# *[source](https://arxiv.org/pdf/1806.01260.pdf)*
#
# The problem is formulated as the minimization of a photometric reprojection error at training time. The relative pose for each source view $I_t'$, with respect to the target image $I_t$’s pose, is shown as $T_{t→t’}$. Then a dense depth map $D_t$ is predicted in which it minimizes the photometric reprojection error $L_p$:
#
# 
# *[source](https://arxiv.org/pdf/1806.01260.pdf)*
#
# Here $p_e$ is a photometric reconstruction error, e.g. the $L1$ distance in pixel space; $proj()$ is the resulting 2D coordinates of the projected depths $D_t$ in $I_t’$ and $<>$ is the sampling operator. Bilinear sampling is used to sample the source images, which is locally sub-differentiable, and $L1$ and SSIM are used to make the photometric error function $p_e$:
#
# 
# *[source](https://arxiv.org/pdf/1806.01260.pdf)*
#
# where α = 0.85. They also used edge-aware smoothness:
#
# 
# *[source](https://arxiv.org/pdf/1806.01260.pdf)*
#
# where $d*_t$ is the mean-normalized inverse depth to discourage shrinking of the estimated depth.
#
# In stereo training, the source image $I_t’$ is the second view in the stereo pair to $I_t$, which has a known relative pose. But for monocular sequences, this pose is not known and a neural network is trained jointly with the depth estimation network to minimize $L_p$ and to do pose estimation, $T_{t→t’}$.
#
# For monocular training, two frames temporally adjacent to $I_t$ are used as source frames, i.e. $I_t’ ∈ {I_{t−1}, I_{t+1}}$. In mixed training (MS), $I_t’$ includes the temporally adjacent frames and the opposite stereo view.
#
# The image below shows an overview of the approach proposed in this paper.
#
# 
# *[source](https://arxiv.org/pdf/1806.01260.pdf)*
# They also propose three architectural and loss innovations which, when combined, lead to large improvements in monocular depth estimation when training with monocular video, stereo pairs, or both:
#
# - A novel appearance matching loss to address the problem of occluded pixels that occur when using monocular supervision.
#
# - A novel and simple auto-masking approach to ignore pixels where no relative camera motion is observed in monocular training.
#
# - A multi-scale appearance matching loss that performs all image sampling at the input resolution, leading to a reduction in depth artifacts
#
# We just explained the main part of the method. To read more about the details of the above innovations, please read the paper. The final loss function is as follows:
#
# 
# *[source](https://arxiv.org/pdf/1806.01260.pdf)*
#
# And the results are as follows:
#
# 
# *[source](https://arxiv.org/pdf/1806.01260.pdf)*
#
# 
# *[source](https://github.com/nianticlabs/monodepth2)*
#
# paper: https://arxiv.org/pdf/1806.01260.pdf
#
# code: https://github.com/nianticlabs/monodepth2
#
# video:
#
# > youtube: https://youtu.be/sIN1Tp3wIbQ
#
|
_notebooks/2022-01-17-monocular-depth-estimation.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Quantitative Performance Test (Computational Time) of SUMMA Simulation and Reproducibility of Figure 9 of Clark et al., (2015b) in Binder
#
# #### <NAME><sup>a</sup>, <NAME><sup>a</sup>, <NAME><sup>b</sup>, <NAME><sup>c</sup>, <NAME><sup>c</sup>, <NAME><sup>d</sup>, <NAME><sup>e</sup>, <NAME><sup>d</sup>, <NAME><sup>f</sup>
#
# <sup>a</sup>Department of Engineering Systems & Environment, University of Virginia, Charlottesville, VA, USA (<EMAIL>, <EMAIL>, <EMAIL>)
# <sup>b</sup>Department of Computer Science, University of Virginia, Charlottesville, Virginia, USA
# <sup>c</sup>School of Computing, DePaul University, Chicago, IL, USA (<EMAIL>, <EMAIL>)
# <sup>d</sup>Department of Geography & Geographic Information Science, University of Illinois at Urbana-Champaign, IL, USA
# <sup>e</sup>Consortium of Universities for the Advancement of Hydrological Science, Inc, 150 Cambridge Park Drive, Cambridge, MA 02140, USA (<EMAIL>)
# <sup>f</sup>Department of Civil and Environmental Engineering, Utah Water Research Laboratory, Utah State University, Logan, Utah, USA (<EMAIL>)
# # 1. Introduction
# ## 1.1 Research Abstract
# There are a growing number of approaches for overcoming the reproducibility crisis facing computational science fields. The objective of this research is to compare eleven of these approaches and suggest best practices and guidance for which approach is most appropriate to achieve modeling objectives, specifically for simulating hydrologic and environmental systems. We first present the eleven approaches that each use a different combination of software tools for achieving reproducibility. We then measure quantitative performance (complexity, size of reproducible artifacts, and computational time) and qualitative performance using the SUMMA hydrologic model as a use case study for testing each approach. Based on the results, we recommend reproducible approaches best suited for achieving different modeling objectives. These recommendations aim to guide modelers in their efforts to create and share computational artifacts in a reproducible manner, depending on their research needs and purposes.
# ## 1.2 MyBinder
# MyBinder is an online service for building and sharing reproducible and interactive computational environments from online repositories. Under the hood, it is a federation of BinderHub deployments that are maintained by the Binder community. It serves as both a public service and a demonstration of the BinderHub technology, though it is by no means the only BinderHub in existence. If you’re interested in deploying your own BinderHub for your own uses, please see the BinderHub documentation and don’t hesitate to reach out to the Binder community.
# # 2. Application
# ## 2.1 Study Area
# We used a case study of Clark et al. (2015b) which describes a set of modeling experiments exploring various hydrologic modeling scenarios using SUMMA. The study area for these modeling experiments is the Reynolds Mountain East Area (A=32.7km2) in the Reynolds Creek Experimental Watershed in Idaho, USA
#
# Here is the link of Clark et al. (2015b)(SUMMA 2nd Paper): https://agupubs.onlinelibrary.wiley.com/doi/full/10.1002/2015WR017200
# <img src="study_area.jpg" width="1000">
# Figure 1. Reynolds Mountain East Area in the Reynolds Creek Experimental Watershed
# ## 2.2 Example Application: A Hydrologic Model Software (SUMMA:The Structure for Unifying Multiple Modeling Alternative) and Python model API (pySUMMA)
# The Structure for Unifying Multiple Modeling Alternative (SUMMA) was developed to enable the controlled and systematic evaluation of multiple model representations of hydrologic processes and scaling behavior (Clark et al., 2015a). SUMMA has several beneficial capabilities that assist with a unifying framework. These include 1) the theoretical range of the SUMMA model, 2) the flexible hierarchical spatial structure, and 3) the application of different physical processes.
# <img src="summa.png" width="600">
# Figure 2. Conceptual diagram to illustrate the SUMMA framework to describe the application of multiple process parameterizations with conservation equations and a numerical solver
# ## 2.3 Setting of Simulation Scenarios and Performance Test
# ### 2.3.1 SUMMA Case Studies for Performance Test
# In this study, we simulate 2 case studies for the performance test on a local computer and reproduce Figure 9 from those case studies.
# ### (Scenario - 3) A Single Simulation
# - An applied Parameterization: 1d Richard
#
# - Simulation Periods: 2002-07-01~2008-9-30 (75 months)
# ### (Scenario - 4) Ensemble Simulations (3 simulations)
#
# - Applied Three Parameterization: 1d Richards, Lumped Topmodel, and Distributed Topmodel
#
# - Simulation Periods: 2002-07-01~2008-9-30 (75 months)
# ### 2.3.2 Quantitative Measurement: Computational Time
# **- Measurement: Wall time**
# - (Wall time): The actual time spent in running the process from start to finish
# # 3. Software and Data Availability
# * Software
# - SUMMA 3.0.3 (https://github.com/NCAR/summa/releases/tag/v3.0.3)
# - pySUMMA 3.0.3 (https://github.com/UW-Hydro/pysumma/releases/tag/v3.0.3)
#
# * Dataset in HydroShare
# - (Model input) https://www.hydroshare.org/resource/eefc8724d589425a84ba206f55928735/
# # 4. Download SUMMA Model Instance from HydroShare
# * First, you need to check and set the Jupyter kernel as <font color='red'>"Python 3"</font> for this notebook. Then for downloading the SUMMA model instance, you have to fill in <font color='red'>HydroShare ID and P/W </font>at the cell below. If you don't have HydroShare account, you can sign up at HydroShare (https://www.hydroshare.org/)
from pysumma import hydroshare_utils
import os
# NOTE: duplicate import block kept as-is (re-imports are harmless no-ops).
from pysumma import hydroshare_utils
import os
# HydroShare resource id of the SUMMA model-input dataset.
resource_id = 'eefc8724d589425a84ba206f55928735'
# Download the resource into the current working directory; prompts for
# HydroShare credentials when not already authenticated.
instance = hydroshare_utils.get_hs_resource(resource_id, os.getcwd())
# * Set user-defined directories in the file-manager file, which controls the
#   location of every SUMMA configuration file.
# !cd {instance}/; chmod +x ./installTestCases_local.sh
# !cd {instance}/; ./installTestCases_local.sh
# # 5. A Single SUMMA Run
# ## 5.1 Configuration Setting
# ### 5.1.1 Review a File Manager file
import pysumma as ps
import os
# Name of the downloaded model-instance directory.
instance = "SummaModel_Reynolds_Runoff"
# Path to the compiled SUMMA executable.
executable = os.getcwd()+"/summa/bin/summa.exe"
# Path to the SUMMA file-manager file for the 1-D Richards configuration.
file_manager = os.path.join(os.getcwd(), instance, 'settings/summa_fileManager_1dRichards.txt')
# Create a pySUMMA simulation object using the SUMMA 'file manager' input file.
S_1dRichards = ps.Simulation(executable, file_manager)
# Print the file-manager configuration.
print(S_1dRichards.manager)
# Print the model decision files.
print(S_1dRichards.decisions)
# ## 5.2 Check the Current Status of a Local Computer
# ### 5.2.1 Check CPU information
# !cat /proc/cpuinfo
# ### 5.2.2 Check the number of CPUs
# !nproc --all
# ## 5.3 Performance Test
# ### 5.3.1 Computational Time using Wall Time
# %%time
# Run SUMMA locally with the 1-D Richards configuration; the '_single_time'
# suffix tags this run's output files. %%time reports the wall time.
S_1dRichards.run('local', run_suffix='_single_time')
# Collect the paths of the NetCDF output files produced by the run.
results_1dRichards_ncfile = S_1dRichards.get_output_files()
# ## 5.4 Visualization of a Single SUMMA output
# ### 5.4.1 Plot Runoff from SUMMA Output and Observation
# +
from pysumma.plotting.plotting import Plotting
import matplotlib.pyplot as plt
import pandas as pd
import xarray as xr
def calc_total_runoff(runoff_output_df):
    """Reduce SUMMA 'averageInstantRunoff' output to daily-mean runoff (mm/day).

    Expects an xarray-like dataset whose 'averageInstantRunoff' variable
    carries a 'time' coordinate. Returns a pandas DataFrame of daily means
    restricted to 2002-10-01 through 2008-09-30.
    """
    runoff = runoff_output_df['averageInstantRunoff']
    # Unit conversion factor kept from the original code (86,400 s/day x
    # 1,000 mm/m). NOTE(review): assumes the native unit is m/s — confirm.
    frame = pd.DataFrame(runoff.data * 86400000, index=runoff.coords['time'].data)
    # Snap timestamps to the nearest day (e.g. ...T00:59:59.99 -> next/prev day).
    frame.index = frame.index.round("D")
    # Keep the study period, then average per day.
    window = frame.loc["2002-10-01":"2008-09-30"]
    return window.resample("D").mean()
# Get daily runoff output from the single 1-D Richards run.
results_1dRichards = xr.open_dataset(results_1dRichards_ncfile[0])
daily_1dRichards = calc_total_runoff(results_1dRichards)
# Combine the different lateral flux parameterizations on simulations of runoff
# (only one member here; kept as concat for parity with the ensemble section).
Runoff_Combine = pd.concat([daily_1dRichards], axis=1)
# Add a plot label.
Runoff_Combine.columns = ["Baseflow = 1D Richards'"]
# Create a pySUMMA Plotting object over the validation NetCDF file.
Val_Streamflow = Plotting(os.getcwd() +'/' + instance + '/data/validationData/ReynoldsCreek_valData.nc')
# Read observed runoff (Q) from the validation NetCDF file.
obs_streamflow = Val_Streamflow.ds['Q']
# Dates (x-axis) from the validation NetCDF file.
dates = obs_streamflow.coords['time'].data
# Change unit from cm/hr to mm/day (x24 hr/day, x10 mm/cm).
data_values = obs_streamflow.data*24*10
# Two-dimensional tabular data structure of observations.
df = pd.DataFrame(data_values, index=dates)
# Restrict to the study period.
df_filt = df.loc["2002-10-01":"2008-09-30"]
# Label the observation column.
df_filt.columns = ['Observations']
# Resample hourly observations to daily means.
obs_streamflow_daily = df_filt.resample("D").mean()
# X index following the change of time step.
obs_date = obs_streamflow_daily.index
# Bug fix: pass `axis` as a keyword — the positional axis argument to
# pd.concat was deprecated in pandas 1.1 and removed in pandas 2.0.
graph_runoff = pd.concat([Runoff_Combine, obs_streamflow_daily], axis=1)
fig = plt.figure(figsize=(20,5))
graph_runoff["Baseflow = 1D Richards'"].plot(color='r')
graph_runoff["Observations"].plot.area(color='grey', label="Observations")
plt.tick_params(labelsize = 15)
plt.xlabel("Time (day)", fontsize=18)
plt.ylabel("Runoff (mm/day)", fontsize=18)
plt.ylim(0,35)
plt.legend(fontsize=20, loc=2)
# -
# # 6. Ensemble SUMMA Runs
# ## 6.1 Configuration Setting
# ### 6.1.1 Set different lateral flow parameterizations
import pysumma as ps
# path to the SUMMA executable shared by every ensemble member
executable = os.getcwd()+"/summa/bin/summa.exe"
# one ensemble member per lateral-flow parameterization; each entry points
# at the file manager for that configuration
config = {'++file_manager_1dRichards++': {'file_manager': '/home/jovyan/SummaModel_Reynolds_Runoff/settings/summa_fileManager_1dRichards.txt'},
          '++file_manager_lumpedTopmodel++': {'file_manager': '/home/jovyan/SummaModel_Reynolds_Runoff/settings/summa_fileManager_lumpedTopmodel.txt'},
          '++file_manager_distributedTopmodel++': {'file_manager': '/home/jovyan/SummaModel_Reynolds_Runoff/settings/summa_fileManager_distributedTopmodel.txt'}}
# three workers so the three members can run concurrently
total_ens = ps.Ensemble(executable, config, num_workers=3)
# ## 6.2 Performance Test
# ### 6.2.1 Computational Time: Wall Time
# %%time
total_ens.run('local')
# ## 6.3 Visualization of Ensemble SUMMA outputs
# ### 6.3.1 Check the list of Ensemble SUMMA outputs
out_file_paths = [s.get_output_files() for s in total_ens.simulations.values()]
out_file_paths
# ## 4. Results
# ### Recreate the Figure 9 plot from Clark et al., 2015: The Basin-Wide Runoff for the model representation of the lateral flux of liquid water
from pysumma.plotting.plotting import Plotting
import matplotlib.pyplot as plt
import pandas as pd
import xarray as xr
# ### 1) Create function to calculate daily runoff from SUMMA output for the period 1 oct 2002 to 1 oct 2008
def calc_total_runoff(runoff_output_df, start="2002-10-01", end="2008-09-30"):
    """Convert a SUMMA output dataset to a daily-mean runoff DataFrame.

    Parameters
    ----------
    runoff_output_df : xarray.Dataset (or any mapping with the same interface)
        Must provide an 'averageInstantRunoff' variable with a 'time'
        coordinate.
    start, end : str, optional
        Inclusive date window to keep.  The defaults reproduce the original
        hard-coded 2002-10-01 .. 2008-09-30 display period.

    Returns
    -------
    pandas.DataFrame
        Daily-mean runoff, indexed by day.
    """
    # averageInstantRunoff is the instantaneous runoff rate
    runoff = runoff_output_df['averageInstantRunoff']
    dates = runoff.coords['time'].data
    # scale by 86400000 (86400 s/day * 1000 mm/m) — presumably m/s -> mm/day,
    # consistent with the "Runoff (mm/day)" plot labels used below
    data_values = runoff.data*86400000
    # two dimensional tabular data structure, indexed by timestamp
    total_runoff_df = pd.DataFrame(data_values, index=dates)
    # round timestamps to the nearest DAY so the resample bins line up
    # (the original comment incorrectly said "nearest hour")
    total_runoff_df.index = total_runoff_df.index.round("D")
    # keep only the requested display window
    total_runoff_df = total_runoff_df.loc[start:end]
    # daily mean runoff
    total_runoff_by_daily = total_runoff_df.resample("D").mean()
    return total_runoff_by_daily
# ### 2) Get daily runoff
# get daily runoff output using 1d Richards method (applied to 1 hru)
results_1dRichards = xr.open_dataset(out_file_paths[0][0])
daily_1dRichards = calc_total_runoff(results_1dRichards)
# get daily runoff output using lumped Topmodel method (applied to 1 hru)
results_lumpedTopmodel = xr.open_dataset(out_file_paths[1][0])
daily_lumpedTopmodel = calc_total_runoff(results_lumpedTopmodel)
# get daily runoff output using distributed Topmodel method (applied to 1 hru)
results_distributedTopmodel = xr.open_dataset(out_file_paths[2][0])
daily_distributedTopmodel = calc_total_runoff(results_distributedTopmodel)
# ### 3) Combine the different lateral flux parameterizations on simulations of runoff into a single Pandas Dataframe
# Combine the different lateral flux parameterizations on simulations of runoff
Runoff_Combine = pd.concat([daily_1dRichards, daily_lumpedTopmodel, daily_distributedTopmodel], axis=1)
# add one column label per parameterization
Runoff_Combine.columns = ["Baseflow = 1D Richards'", 'Baseflow = Topmodel(lumped)', 'Baseflow = Topmodel(distributed)']
Runoff_Combine.head()
# ### 4) Add observation data from streamflow station at Reynolds Mountain East to the plot
# create pySUMMA Plotting Object
import os
instance = "SummaModel_Reynolds_Runoff"
Val_Streamflow = Plotting(os.getcwd() +'/' + instance + '/data/validationData/ReynoldsCreek_valData.nc')
# read Runoff data(Q) from validation netcdf file
obs_streamflow = Val_Streamflow.ds['Q']
# create dates(X-axis) attribute from validation netcdf file
dates = obs_streamflow.coords['time'].data
# Change unit from cm/hr to mm/day (24 hr/day * 10 mm/cm)
data_values = obs_streamflow.data*24*10
# create two dimensional tabular data structure
df = pd.DataFrame(data_values, index=dates)
# set the time period to display plot
df_filt = df.loc["2002-10-01":"2008-09-30"]
# select label
df_filt.columns = ['Observations']
# resample data to daily averages from hourly
obs_streamflow_daily = df_filt.resample("D").mean()
# set x index according to the change of time step
obs_date = obs_streamflow_daily.index
# ### 5) Plotting output of the Parameterization of the Lateral Flux of Liquid Water and observation data
# NOTE(review): the positional `1` below is the deprecated positional form
# of `axis=1`; newer pandas requires the keyword.
graph_runoff = pd.concat([Runoff_Combine, obs_streamflow_daily], 1)
graph_runoff.head()
# +
# 1D Richards' simulation vs. observed daily runoff
fig = plt.figure(figsize=(20,5))
graph_runoff["Baseflow = 1D Richards'"].plot(color='r')
graph_runoff["Observations"].plot.area(color='grey', label="Observations")
plt.tick_params(labelsize = 15)
plt.xlabel("Time (day)", fontsize=18)
plt.ylabel("Runoff (mm/day)", fontsize=18)
plt.ylim(0,35)
plt.legend(fontsize=20, loc=2)
# +
# Distributed Topmodel simulation vs. observed daily runoff
fig = plt.figure(figsize=(20,5))
graph_runoff["Baseflow = Topmodel(distributed)"].plot(color='r')
graph_runoff["Observations"].plot.area(color='grey', label="Observations")
plt.tick_params(labelsize = 15)
plt.xlabel("Time (day)", fontsize=18)
plt.ylabel("Runoff (mm/day)", fontsize=18)
plt.ylim(0,35)
plt.legend(fontsize=20, loc=2)
# +
# Lumped Topmodel simulation vs. observed daily runoff
fig = plt.figure(figsize=(20,5))
graph_runoff["Baseflow = Topmodel(lumped)"].plot(color='r')
graph_runoff["Observations"].plot.area(color='grey', label="Observations")
plt.tick_params(labelsize = 15)
plt.xlabel("Time (day)", fontsize=18)
plt.ylabel("Runoff (mm/day)", fontsize=18)
plt.ylim(0,35)
plt.legend(fontsize=20, loc=2)
# -
|
Quantitative Performance Test (Computational Time) of SUMMA Simulation and Reproducibility of Figure 9 of Clark et al., (2015b) in Binder .ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pymongo
from pymongo import MongoClient
import pandas as pd
from datetime import datetime
import pprint
import time
# +
# datetime object containing current date and time
now = datetime.now()
# Operator enters today's USD rate; float() below raises ValueError if the
# entry is not numeric.
current_USDprice = input("USD Price Now is : ")
# Establish connection to the local MongoDB instance
connection = MongoClient('localhost', 27017)
db = connection.EmployeeDB
data = db.Kafka_Mongo
# Document persisted for this rate sample
inserted_row = {
    "Date": now,
    "Daily_Rate": float(current_USDprice)
}
last_collection = data.insert_one(inserted_row)
# BUG FIX: the original printed msg["Date"] / msg["Daily_Rate"] inside a
# loop whose variable is `doc`; `msg` is undefined at this point, so every
# run raised a NameError.  Print the loop variable instead.
currencies = data.find()
for doc in currencies:
    print(doc["Date"], "Daily_Rate : ", doc["Daily_Rate"])
# -
# NOTE(review): KafkaProducer is never imported in this notebook; it needs
# `from kafka import KafkaProducer` (kafka-python) before this line can run.
Producer = KafkaProducer(bootstrap_servers='localhost:9092',value_serializer=lambda x: x.encode('utf-8'))
print("Started Streaming")
# Load every stored rate document into a DataFrame and drop Mongo's _id column.
df = pd.DataFrame(list(data.find()))
df = df.drop("_id" , axis =1)
# Newest samples first, purely for the display on the next line.
df=df.sort_values('Date', ascending=False)
df
# +
# Re-sort oldest-first so messages are streamed in chronological order.
df = df.sort_values('Date', ascending=True)
records_list = df.to_dict(orient="records")
# Stream each record to the Kafka topic as a "Date,Daily_Rate" CSV string.
# Cleanup: the original kept an unused `message_list`, a dead `message = None`,
# and reused the loop variable `message` for both the record dict and the
# serialized string; those have been removed/renamed.
for record in records_list:
    message = ','.join(str(value) for value in (record["Date"], record["Daily_Rate"]))
    print("Message Type: ", type(message))
    print("Message: ", message)
    Producer.send("Kafka-To-Talend", message)
    # throttle: one message per second
    time.sleep(1)
print("Kafka Producer Application Completed. ")
# -
|
Mongo_Producer.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: deeplearning
# language: python
# name: deeplearning
# ---
# # Mask Detection using MobilenetV2
# Here is a step by step guide to make a classifier that can classify faces as wearing a mask, not wearing a mask, or not wearing a mask properly.
# The biggest challenge here is to convert the available data to the form we can use. It is going to take most of our time. So let us explore the data. Our dataset contains two folders, one having images and the other having annotations. Since each of these images contains multiple persons which are either wearing a mask, not wearing it, or not wearing it properly, we need to get these faces out of the images.
# The annotations file of each image helps with that, as it contains information about where each face is and which category it falls under.
# So here is a brief summary of what we are going to do here
# 1. First we are going to go through all the annotation files and parse the information using BeautifulSoup. Later we are going to make a list which contains a dictionary holding information about the coordinates of faces and the labels of each face.
# 2. Next we will use OpenCV to read the image, extract the face areas from it, preprocess them and save them to a list. We are going to save the labels simultaneously in another list.
# 3. Now we will finetune the MobilenetV2 model and get the results.
#
# Also to know how to use this model to detect the masks in real time on a webcam feed, follow the article [Mask detection using openCV](https://www.mygreatlearning.com/blog/real-time-face-detection).
#
#
#
# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5"
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
# You can write up to 5GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# -
#import the libraries
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup
import matplotlib.pyplot as plt
import os
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications import MobileNetV2
from tensorflow.keras.layers import AveragePooling2D
from tensorflow.keras.layers import Dropout,BatchNormalization
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.utils import to_categorical
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
import matplotlib.pyplot as plt
import cv2
import random as rand
# +
# Defiine functions that performs task 1 mentioned above
#This function will get the coordinates of face given in the annotations file
# the coordinates of lower left corner and upper right corner
def generate_box(obj):
    # Bounding box of one annotated face: lower-left (xmin, ymin) and
    # upper-right (xmax, ymax) corners parsed from the XML annotation object.
    return [int(obj.find(corner).text) for corner in ('xmin', 'ymin', 'xmax', 'ymax')]
#This function will give label assciated with each label and convert them to numbers
def generate_label(obj):
    # Map the annotation's class name to an integer label:
    # 1 = wearing a mask, 2 = mask worn incorrectly, 0 = no mask (default).
    class_to_label = {"with_mask": 1, "mask_weared_incorrect": 2}
    return class_to_label.get(obj.find('name').text, 0)
#Using in this main function we parse the annotations file and get the objects out from them
# Also we use the above two functions here
def generate_target(image_id, file):
    """Parse one annotation XML file.

    Parameters
    ----------
    image_id : int
        Index of the image (kept for interface compatibility; it is not
        used — the original only wrapped it in an unused np.array).
    file : str
        Path to the XML annotation file.

    Returns
    -------
    (dict, int)
        A dict with 'boxes' (N x 4 array of [xmin, ymin, xmax, ymax]) and
        'labels' (length-N array of class ids), plus the face count N.
    """
    with open(file) as f:
        data = f.read()
    soup = BeautifulSoup(data, 'xml')
    objects = soup.find_all('object')
    num_objs = len(objects)
    # One bounding box and one label per annotated face.
    boxes = np.array([generate_box(obj) for obj in objects])
    labels = np.array([generate_label(obj) for obj in objects])
    # Annotation is in dictionary format
    target = {"boxes": boxes, "labels": labels}
    return (target, num_objs)
# -
imgs = list(sorted(os.listdir("/kaggle/input/face-mask-detection/images/")))
len(imgs)
labels = list(sorted(os.listdir("/kaggle/input/face-mask-detection/annotations/")))
# Here we use the above functions and save results in lists
targets=[]#store coordinates
numobjs=[]#stores number of faces in each image
#run the loop for the number of images we have (853 image/annotation pairs)
for i in range(853):
    file_image = 'maksssksksss'+ str(i) + '.png'
    file_label = 'maksssksksss'+ str(i) + '.xml'
    img_path = os.path.join("/kaggle/input/face-mask-detection/images/", file_image)
    label_path = os.path.join("/kaggle/input/face-mask-detection/annotations/", file_label)
    # Parse this image's annotation file: face boxes, labels, face count.
    # NOTE(review): img_path is built here but never used in this loop.
    target,numobj = generate_target(i, label_path)
    targets.append(target)
    numobjs.append(numobj)
# +
# In this step we carry forward the second step mentioned above:
# crop every annotated face out of every image and build the training arrays.
import cv2
import matplotlib.pyplot as plt
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.preprocessing.image import img_to_array
face_images=[]
face_labels=[]
for i in range(853):
    img_path = r"../input/face-mask-detection/images/maksssksksss{}.png".format(i)
    #read image
    img = cv2.imread(img_path)
    for j in range(numobjs[i]):
        # get coordinates of the j-th face box of image i
        locs=(targets[i]['boxes'][j])
        # Crop the face from the image using the coordinates:
        # rows are ymin:ymax (locs[1]:locs[3]), columns xmin:xmax (locs[0]:locs[2])
        img1=img[locs[1]:locs[3],locs[0]:locs[2]]
        # resize to the 224x224 input expected by MobileNetV2 and apply
        # MobileNetV2-specific preprocessing
        img1 = cv2.resize(img1, (224, 224))
        img1 = img_to_array(img1)
        img1 = preprocess_input(img1)
        face_images.append(img1)
        face_labels.append(targets[i]['labels'][j])
face_images= np.array(face_images, dtype="float32")
face_labels = np.array(face_labels)
# -
# Next we check the total number of faces detected and how many belong to each class. We see that the images in which the mask is worn incorrectly are comparatively few, so we will increase their weight when training the model.
len(face_labels)
unique, counts = np.unique(face_labels, return_counts=True)
dict(zip(unique, counts))
# Encode the labels in one-hot form
# NOTE(review): this rebinds `labels`, shadowing the earlier list of
# annotation filenames of the same name.
lb = LabelEncoder()
labels = lb.fit_transform(face_labels)
labels = to_categorical(labels)
labels
# Perform data augmentation (random zoom/rotation/shift/shear/flip).
aug = ImageDataGenerator(
    zoom_range=0.1,
    rotation_range=25,
    width_shift_range=0.1,
    height_shift_range=0.1,
    shear_range=0.15,
    horizontal_flip=True,
    fill_mode="nearest"
)
# +
# Define the model: a MobileNetV2 convolutional base pretrained on ImageNet,
# with its classification head removed (include_top=False).
baseModel = MobileNetV2(weights="imagenet", include_top=False,
    input_shape=(224, 224, 3))
# construct the head of the model that will be placed on top of
# the base model
headModel = baseModel.output
headModel = AveragePooling2D(pool_size=(7, 7))(headModel)
headModel = Flatten(name="flatten")(headModel)
headModel = Dense(256, activation="relu")(headModel)
headModel = Dropout(0.25)(headModel)
# 3-way softmax: no mask / mask / mask worn incorrectly
headModel = Dense(3, activation="softmax")(headModel)
# place the head FC model on top of the base model (this will become
# the actual model we will train)
model = Model(inputs=baseModel.input, outputs=headModel)
# loop over all layers in the base model and freeze them so they will
# *not* be updated during the first training process
for layer in baseModel.layers:
    layer.trainable = False
# -
# define the hyperparameters for training the neural network
INIT_LR = 1e-4
EPOCHS = 20
BS = 32
# divide data into training and testing sets (stratified to keep the
# class imbalance consistent between splits)
(trainX, testX, trainY, testY) = train_test_split(face_images, labels,
    test_size=0.2, stratify=labels, random_state=42)
# Free some space. I did this step as the notebook was running out of memory while training.
del targets,face_images,face_labels
# +
# Compile the model and train it
# NOTE(review): `lr`/`decay` are legacy Keras argument names; newer Keras
# spells this `learning_rate` (and handles decay via schedules).
opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
model.compile(loss="categorical_crossentropy", optimizer=opt,
    metrics=["accuracy"])
# train the head of the network
H = model.fit(
    aug.flow(trainX, trainY, batch_size=BS),
    steps_per_epoch=len(trainX) // BS,
    validation_data=(testX, testY),
    validation_steps=len(testX) // BS,
    epochs=EPOCHS,
    class_weight = {0:5 , 1:1, 2:10}) # Here we set a weight per class.
# A class with a higher weight is considered more important while training
# (compensates for the small "mask worn incorrectly" class).
# -
# The next step is to evaluate the model and plot graphs showing the learning process. In the plot we can see that when training stopped the model was still improving, so we save the model and train it again from the point we left off.
# +
print("[INFO] evaluating network...")
predIdxs = model.predict(testX, batch_size=32)
# for each image in the testing set we need to find the index of the
# label with corresponding largest predicted probability
predIdxs = np.argmax(predIdxs, axis=1)
# show a nicely formatted classification report
print(classification_report(testY.argmax(axis=1), predIdxs
    ))
# # serialize the model to disk
# print("[INFO] saving mask detector model...")
# plot the training loss and accuracy
N = EPOCHS
plt.style.use("ggplot")
plt.figure()
plt.plot(np.arange(0, N), H.history["loss"], label="train_loss")
plt.plot(np.arange(0, N), H.history["val_loss"], label="val_loss")
plt.plot(np.arange(0, N), H.history["accuracy"], label="train_acc")
plt.plot(np.arange(0, N), H.history["val_accuracy"], label="val_acc")
plt.title("Training Loss and Accuracy")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend(loc="lower left")
plt.show()
# -
# save the model and name it as you wish
model.save('kaggle1.h5')
# load the saved model (so training can resume from these weights)
import keras
model = keras.models.load_model('kaggle1.h5')
# +
# train the saved model again, resuming from the reloaded weights
print("[INFO] compiling model...")
# NOTE(review): `lr`/`decay` are legacy Keras argument names; newer Keras
# spells this `learning_rate`.
opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
model.compile(loss="categorical_crossentropy", optimizer=opt,
    metrics=["accuracy"])
# train the head of the network
print("[INFO] training head...")
H = model.fit(
    aug.flow(trainX, trainY, batch_size=BS),
    steps_per_epoch=len(trainX) // BS,
    validation_data=(testX, testY),
    validation_steps=len(testX) // BS,
    epochs=EPOCHS,
    class_weight = {0:5 , 1:1, 2:10})
# +
# Evaluate the model again after the second round of training
print("[INFO] evaluating network...")
predIdxs = model.predict(testX, batch_size=32)
# for each image in the testing set we need to find the index of the
# label with corresponding largest predicted probability
predIdxs = np.argmax(predIdxs, axis=1)
# show a nicely formatted classification report
print(classification_report(testY.argmax(axis=1), predIdxs
    ))
# # serialize the model to disk
# print("[INFO] saving mask detector model...")
# plot the training loss and accuracy
N = EPOCHS
plt.style.use("ggplot")
plt.figure()
plt.plot(np.arange(0, N), H.history["loss"], label="train_loss")
plt.plot(np.arange(0, N), H.history["val_loss"], label="val_loss")
plt.plot(np.arange(0, N), H.history["accuracy"], label="train_acc")
plt.plot(np.arange(0, N), H.history["val_accuracy"], label="val_acc")
plt.title("Training Loss and Accuracy")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend(loc="lower left")
plt.show()
# -
# As you can see, the performance of the model increased after training again for about 20 epochs (I ran the training cell twice by mistake, so in the graph you cannot see that the model resumed training from where it left off).
# When testing this model on my webcam, I found it was not very reliable: if you cover your face with a hand, it will say you are wearing a mask. I have made another model, but that one can only detect whether the person is wearing a mask or not. It works exceptionally well and better than this model, as it cannot be fooled by a hand over the mouth. Here is the link to the article [Real time mask detection using OpenCV](https://www.mygreatlearning.com/blog/real-time-face-detection)
# [Here is a link to the youtube video showing results of this model on webcam stream](https://youtu.be/HFNxRrirj2s)
#
|
WithandWithoutMaskDetection.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/"} id="e9bef506" outputId="23e61fa9-803c-416c-c2ea-49fda74021d7" tags=["active-ipynb", "remove-input", "remove-output"]
# # This cell is mandatory in all Dymos documentation notebooks.
# missing_packages = []
# try:
# import openmdao.api as om
# except ImportError:
# if 'google.colab' in str(get_ipython()):
# !python -m pip install openmdao[notebooks]
# else:
# missing_packages.append('openmdao')
# try:
# import dymos as dm
# except ImportError:
# if 'google.colab' in str(get_ipython()):
# !python -m pip install dymos
# else:
# missing_packages.append('dymos')
# try:
# import pyoptsparse
# except ImportError:
# if 'google.colab' in str(get_ipython()):
# !pip install -q condacolab
# import condacolab
# condacolab.install_miniconda()
# !conda install -c conda-forge pyoptsparse
# else:
# missing_packages.append('pyoptsparse')
# if missing_packages:
# raise EnvironmentError('This notebook requires the following packages; '
#                        f'please install them and restart this notebook\'s runtime: {",".join(missing_packages)}')
# + [markdown] id="b0a95015"
# (examples:the_mountain_car_problem)=
# # The Mountain Car Problem
#
# The mountain car problem proposes a vehicle stuck in a "well." It lacks the power to directly climb out of the well, but instead must accelerate repeatedly forwards and backwards until it has achieved the energy necessary to exit the well.
#
# The problem is a popular machine learning test case, though the methods in Dymos are capable of solving it.
# It first appeared in the PhD thesis of <NAME> in 1990. {cite}`moore1990efficient`.
# The implementation here is based on that given by Melnikov, Makmal, and Briegel {cite}`melnikov2014projective`.
# + [markdown] id="977155e8"
# ## State and control variables
#
# This system has two state variables, the position ($x$) and velocity ($v$) of the car.
#
# This system has a single control variable ($u$), the effort put into moving. This control is constrained to the range $[-1, 1]$.
#
# The dynamics of the system are governed by
#
# \begin{align}
# \dot{x} &= v \\
# \dot{v} &= 0.001 * u - 0.0025 * \cos(3 x)
# \end{align}
#
#
# + [markdown] id="6e236c20"
# ## Problem Definition
#
# We seek to minimize the time required to exit the well in the positive direction.
#
# \begin{align}
# \mathrm{Minimize} \, J &= t_f
# \end{align}
#
# Subject to the initial conditions
#
# \begin{align}
# x_0 &= -0.5 \\
# v_0 &= 0.0
# \end{align}
#
# the control constraints
#
# \begin{align}
# |u| \le 1
# \end{align}
#
# and the terminal constraints
#
# \begin{align}
# x_f &= 0.5 \\
# v_f &\ge 0.0
# \end{align}
# + [markdown] id="03ef4a09"
# ## Defining the ODE
#
# The following code implements the equations of motion for the mountain car problem.
#
# A few things to note:
#
# 1. By providing the tag `dymos.state_rate_source:{name}`, we're letting Dymos know what states need to be integrated, there's no need to specify a rate source when using this ODE in our Phase.
# 2. Pairing the above tag with `dymos.state_units:{units}` means we don't have to specify units when setting properties for the state in our run script.
# 3. We only use compute_partials to override the values of $\frac{\partial \dot{v}}{\partial x}$ because $\frac{\partial \dot{v}}{\partial u}$ and $\frac{\partial \dot{x}}{\partial v}$ are constant and their value is specified during `setup`.
# + colab={"base_uri": "https://localhost:8080/"} id="ec2b8a83" outputId="668bf949-c304-4e34-9fca-27697a497d4b"
import numpy as np
import openmdao.api as om
class MountainCarODE(om.ExplicitComponent):
    """ODE for the mountain car problem.

    Dynamics (see the markdown above):
        x_dot = v
        v_dot = 0.001 * u - 0.0025 * cos(3 * x)
    """

    def initialize(self):
        # Number of nodes at which the ODE is evaluated simultaneously.
        self.options.declare('num_nodes', types=int)

    def setup(self):
        num_nodes = self.options['num_nodes']

        # Inputs: position, velocity, and the control effort.
        for var, var_units in (('x', 'm'), ('v', 'm/s'), ('u', 'unitless')):
            self.add_input(var, shape=(num_nodes,), units=var_units)

        # Outputs: state rates; the dymos tags let the phase discover the
        # rate sources and state units automatically.
        self.add_output('x_dot', shape=(num_nodes,), units='m/s',
                        tags=['dymos.state_rate_source:x', 'dymos.state_units:m'])
        self.add_output('v_dot', shape=(num_nodes,), units='m/s**2',
                        tags=['dymos.state_rate_source:v', 'dymos.state_units:m/s'])

        # All partials are diagonal (node i only affects node i); the
        # constant ones get their values here, v_dot wrt x is filled in
        # compute_partials.
        diag = np.arange(num_nodes, dtype=int)
        self.declare_partials(of='x_dot', wrt='v', rows=diag, cols=diag, val=1.0)
        self.declare_partials(of='v_dot', wrt='u', rows=diag, cols=diag, val=0.001)
        self.declare_partials(of='v_dot', wrt='x', rows=diag, cols=diag)

    def compute(self, inputs, outputs):
        outputs['x_dot'] = inputs['v']
        outputs['v_dot'] = 0.001 * inputs['u'] - 0.0025 * np.cos(3 * inputs['x'])

    def compute_partials(self, inputs, partials):
        # d(v_dot)/dx = 0.0075 * sin(3x)
        partials['v_dot', 'x'] = 3 * 0.0025 * np.sin(3 * inputs['x'])
# + [markdown] id="hVK50KxH6YJ4"
# ## Solving the minimum-time mountain car problem with Dymos
#
# The following script solves the minimum-time mountain car problem with Dymos.
# Note that this example requires the IPOPT optimizer via the `pyoptsparse` package.
# Scipy's SLSQP optimizer is generally not capable of solving this problem.
#
# To begin, import the packages we require:
# + id="ii-ApZna669K"
import dymos as dm
import matplotlib.pyplot as plt
from matplotlib import animation
# + [markdown] id="FBKAO6H368u5"
# Next, we set two constants.
# `U_MAX` is the maximum allowable magnitude of the acceleration.
# The references show this problem being solved with $-1 \le u \le 1$.
#
# Variable `NUM_SEG` is the number of equally spaced polynomial segments into which time is being divided.
# Within each of these segments, the time-history of each state and control is being treated as a polynomial (we're using the default order of 3).
# + id="NPwe1sZj7lLT"
# The maximum absolute value of the acceleration authority of the car
# (the control u is bounded to [-U_MAX, U_MAX] below)
U_MAX = 1.0
# The number of (equally spaced) segments into which the problem is discretized
NUM_SEG = 30
# + [markdown] id="OI5v_AkL7qYm"
# We then instantiate an OpenMDAO problem and set the optimizer and its options.
#
# For IPOPT, setting option `nlp_scaling_method` to `'gradient-based'` can substantially improve the convergence of the optimizer without the need for us to set all of the scaling manually.
#
# The call to `declare_coloring` tells the optimizer to attempt to find a sparsity pattern that minimizes the work required to compute the derivatives across the model.
# + id="3yXzgcIw8Tjq"
#
# Initialize the Problem and the optimization driver
#
p = om.Problem()
# IPOPT via pyoptsparse; SLSQP is generally not capable of this problem.
p.driver = om.pyOptSparseDriver(optimizer='IPOPT')
p.driver.opt_settings['print_level'] = 5
p.driver.opt_settings['max_iter'] = 500
p.driver.opt_settings['mu_strategy'] = 'adaptive'
p.driver.opt_settings['bound_mult_init_method'] = 'mu-based'
p.driver.opt_settings['mu_init'] = 0.01
p.driver.opt_settings['nlp_scaling_method'] = 'gradient-based' # for faster convergence
# Let the driver exploit sparsity in the total derivatives.
p.driver.declare_coloring()
# + [markdown] id="OcugnHOL8fIF"
# Next, we add a Dymos Trajectory group to the problem's model and add a phase to it.
#
# In this case we're using the Radau pseudospectral transcription to solve the problem.
# + id="sGYfpaiI8eH-"
#
# Create a trajectory and add a phase to it
#
traj = p.model.add_subsystem('traj', dm.Trajectory())
# Radau pseudospectral transcription with NUM_SEG equal segments.
# FIX: the original line read `tx = transcription=dm.Radau(...)`, a chained
# assignment that also bound a stray module-level name `transcription`;
# only `tx` is needed or used.
tx = dm.Radau(num_segments=NUM_SEG)
phase = traj.add_phase('phase0', dm.Phase(ode_class=MountainCarODE, transcription=tx))
# + [markdown] id="IjCxNJQV82u8"
# At this point, we set the options on the main variables used in a Dymos phase.
#
# In addition to `time`, we have two states (`x` and `v`) and a single control (`u`).
#
# There are no parameters and no polynomial controls.
# We could have tried to use a polynomial control here, but as we will see the solution contains large discontinuities in the control value, which make it ill-suited for a polynomial control. Polynomial controls are modeled as a single (typically low-order) polynomial across the entire phase.
#
# We're fixing the initial time and states to whatever values we provide before executing the problem. We will constrain the final values with nonlinear constraints in the next step.
#
# The scaler values (`ref`) are all set to 1 here. We're using IPOPT's `gradient-based` scaling option and will let it work the scaling out for us.
#
# Bounds on time duration are guesses, and the bounds on the states and controls come from the implementation in the references.
#
# Also, we don't need to specify targets for any of the variables here because their names _are_ the targets in the top-level of the model.
# The rate source and units for the states are obtained from the tags in the ODE component we previously defined.
# + id="IdjlON_895PU"
#
# Set the variables
#
# Initial time is fixed; the duration is the (scaled) design variable.
phase.set_time_options(fix_initial=True, duration_bounds=(.05, 10000), duration_ref=1)
# Initial states are fixed; final values are handled by boundary constraints.
# State bounds follow the referenced implementations.
phase.add_state('x', fix_initial=True, fix_final=False, lower=-1.2, upper=0.5, ref=1, defect_ref=1)
phase.add_state('v', fix_initial=True, fix_final=False, lower=-0.07, upper=0.07, ref=1, defect_ref=1)
# Control continuity at segment boundaries, but its rate may jump
# (the optimal control is bang-bang).
phase.add_control('u', lower=-U_MAX, upper=U_MAX, ref=1, continuity=True, rate_continuity=False)
# + [markdown] id="iCWqHlu_-NZv"
# Next we define the optimal control problem by specifying the objective, boundary constraints, and path constraints.
#
# **Why do we have a path constraint on the control `u` when we've already specified its bounds?**
#
# Excellent question!
# In the `Radau` transcription, the $n^{th}$ order control polynomial is governed by design variables provided at $n$ points in the segment that **do not contain the right-most endpoint**.
# Instead, this value is interpolated based on the values of the first $(n-1)$.
# Since this value is not a design variable, it is necessary to constrain its value separately.
# We could forgo specifying any bounds on `u` since it's completely covered by the path constraint, but specifying the bounds on the design variable values can sometimes help by telling the optimizer, "Don't even bother trying values outside of this range.".
#
# Note that sometimes the opposite is true, and giving the optimizer the freedom to explore a larger design space, only to eventually be "reined-in" by the path constraint can sometimes be valuable.
#
# The purpose of this interactive documentation is to let the user experiment.
# If you remove the path constraint, you might notice some outlying control values in the solution below.
# + colab={"base_uri": "https://localhost:8080/"} id="d498cb7b" outputId="18be63ac-a3cb-4632-eb7d-130eced98d70"
#
# Minimize time at the end of the phase
#
phase.add_objective('time', loc='final', ref=1000)
# Terminal conditions: exit the well at x >= 0.5 moving forward (v >= 0).
phase.add_boundary_constraint('x', loc='final', lower=0.5)
phase.add_boundary_constraint('v', loc='final', lower=0.0)
# Path constraint on u covers the interpolated segment-end values that are
# not design variables (see the markdown above).
phase.add_path_constraint('u', lower=-U_MAX, upper=U_MAX)
#
# Setup the Problem
#
p.setup()
# + [markdown] id="E6q4fW_NAx6B"
# We then set the initial guesses for the variables in the problem and solve it.
#
# Since `fix_initial=True` is set for time and the states, those values are not design variables and will remain at the values given below throughout the solution process.
#
# We're using the phase `interp` method to provide initial guesses for the states and controls.
# In this case, by giving it two values, it is linearly interpolating from the first value to the second value, and then returning the interpolated value at the input nodes for the given variable.
#
# Finally, we use the `dymos.run_problem` method to execute the problem.
# This interface allows us to do some things that the standard OpenMDAO `problem.run_driver` interface does not.
# It will automatically record the final solution achieved by the optimizer in case named `'final'` in a file called `dymos_solution.db`.
# By specifying `simulate=True`, it will automatically follow the solution with an explicit integration using `scipy.solve_ivp`.
# The results of the simulation are stored in a case named `final` in the file `dymos_simulation.db`.
# This explicit simulation demonstrates how the system evolved with the given controls, and serves as a check that we're using a dense enough grid (enough segments and segments of sufficient order) to accurately represent the solution.
#
# If those two solution didn't agree reasonably well, we could rerun the problem with a more dense grid.
# Instead, we're asking Dymos to automatically change the grid if necessary by specifying `refine_method='ph'`.
# This will attempt to repeatedly solve the problem and change the number of segments and segment orders until the solution is in reasonable agreement.
# + colab={"base_uri": "https://localhost:8080/"} id="iRY53Rq0_0c6" outputId="1d987fbe-e703-4e89-cb8c-14af763b8d7f" tags=["hide-output"]
#
# Set the initial values
#
p['traj.phase0.t_initial'] = 0.0
p['traj.phase0.t_duration'] = 500.0
# Linear initial guesses for the states from their initial to target values.
p.set_val('traj.phase0.states:x', phase.interp('x', ys=[-0.5, 0.5]))
p.set_val('traj.phase0.states:v', phase.interp('v', ys=[0, 1.0]))
# NOTE(review): sin() of the linear interpolant gives a smooth, nonzero
# initial control guess — presumably intentional; confirm if modifying.
p.set_val('traj.phase0.controls:u', np.sin(phase.interp('u', ys=[0, 1.0])))
#
# Solve for the optimal trajectory, then verify with an explicit
# integration (simulate=True) and ph-adaptive grid refinement.
#
dm.run_problem(p, run_driver=True, simulate=True, refine_method='ph', refine_iteration_limit=5)
# + [markdown] id="3f9a5d44"
# ## Plotting the solution
#
# The recommended practice is to obtain values from the recorded cases.
# While the problem object can also be queried for values, building plotting scripts that use the case recorder files as the data source means that the problem doesn't need to be solved just to change a plot.
# Here we load values of various variables from the solution and simulation for use in the animation to follow.
# + id="beea4c6a"
# Load the optimizer solution and the explicit-simulation check from the
# recorder files written by dm.run_problem.
sol = om.CaseReader('dymos_solution.db').get_case('final')
sim = om.CaseReader('dymos_simulation.db').get_case('final')
t = sol.get_val('traj.phase0.timeseries.time')
x = sol.get_val('traj.phase0.timeseries.states:x')
v = sol.get_val('traj.phase0.timeseries.states:v')
u = sol.get_val('traj.phase0.timeseries.controls:u')
# Terrain height under the car: h = sin(3x)/3 (see the ODE markdown).
h = np.sin(3 * x) / 3
t_sim = sim.get_val('traj.phase0.timeseries.time')
x_sim = sim.get_val('traj.phase0.timeseries.states:x')
v_sim = sim.get_val('traj.phase0.timeseries.states:v')
u_sim = sim.get_val('traj.phase0.timeseries.controls:u')
h_sim = np.sin(3 * x_sim) / 3
# + tags=["remove-input", "remove-output"]
from openmdao.utils.assert_utils import assert_near_equal
# Regression checks on the final time and final position of the solution.
assert_near_equal(t[-1, 0], 102.479, tolerance=1.0E-3)
assert_near_equal(x[-1, 0], 0.5, tolerance=1.0E-3)
# + [markdown] id="7b0a9304"
# ## Animating the Solution
#
# The collapsed code cell below contains the code used to produce an animation of the mountain car solution using Matplotlib.
#
# The green area represents the hilly terrain the car is traversing. The black circle is the center of the car, and the orange arrow is the applied control.
#
# The applied control _generally_ has the same sign as the velocity and is 'bang-bang', that is, it wants to be at its maximum possible magnitude. Interestingly, the sign of the control flips shortly before the sign of the velocity changes.
# + colab={"base_uri": "https://localhost:8080/", "height": 530} id="da31be29" outputId="f351fca0-6415-4469-8a42-dc035fe2bbc4" tags=["hide-input"]
# Build the animation layout: the left column holds the terrain/car view,
# the right column stacks the x, v, u timeseries plots.
fig = plt.figure(constrained_layout=True, figsize=(12, 6))
gs = fig.add_gridspec(3, 2)
anim_ax = fig.add_subplot(gs[:, 0])
anim_ax.set_aspect('equal')
x_ax = fig.add_subplot(gs[0, 1:])
v_ax = fig.add_subplot(gs[1, 1:])
u_ax = fig.add_subplot(gs[2, 1:])
x_ax.set_ylabel('x')
v_ax.set_ylabel('v')
u_ax.set_ylabel('u')
u_ax.set_xlabel('t')
# set up the subplots as needed
anim_ax.set_xlim((-1.75, 0.75));
anim_ax.set_ylim((-1.25, 1.25));
anim_ax.set_xlabel('x');
anim_ax.set_ylabel('h');
# Static dots: the collocation solution timeseries.
x_sol_line, = x_ax.plot(t, x, 'o', ms=1, label='solution')
v_ax.plot(t, v, 'o', ms=1)
u_ax.plot(t, u, 'o', ms=1)
# Animated lines: the simulation traces, filled in frame-by-frame by drawframe().
x_sim_line, = x_ax.plot([], [], '-', linewidth=3, label='simulation')
v_sim_line, = v_ax.plot([], [], '-', linewidth=3)
u_sim_line, = u_ax.plot([], [], '-', linewidth=3)
plt.figlegend(ncol=2, handles=[x_sol_line, x_sim_line], loc='upper center',
              bbox_to_anchor=(0.78,0.98))
# Live numeric readouts for the current state/control values.
x_ax.grid(alpha=0.2)
txt_x = x_ax.text(0.8, 0.1, f'x = {x_sim[0, 0]:6.3f}', horizontalalignment='left',
                  verticalalignment='center', transform=x_ax.transAxes)
v_ax.grid(alpha=0.2)
txt_v = v_ax.text(0.8, 0.1, f'v = {v_sim[0, 0]:6.3f}', horizontalalignment='left',
                  verticalalignment='center', transform=v_ax.transAxes)
u_ax.grid(alpha=0.2)
txt_u = u_ax.text(0.8, 0.1, f'u = {u_sim[0, 0]:6.3f}', horizontalalignment='left',
                  verticalalignment='center', transform=u_ax.transAxes)
# Hilly terrain h(x) = sin(3x)/3, drawn once and filled to the bottom of the axes.
x_terrain = np.linspace(-1.75, 0.75, 100);
h_terrain = np.sin(3 * x_terrain) / 3;
terrain_line, = anim_ax.plot(x_terrain, h_terrain, '-', color='tab:gray', lw=2);
terrain = anim_ax.fill_between(x_terrain, h_terrain, -1.25*np.ones_like(x_terrain), color='tab:green');
# The car marker and the control-force arrow, both updated each frame.
car, = anim_ax.plot([], [], 'ko', ms=12);
u_vec = anim_ax.quiver(x_sim[0] + 0.005, h_sim[0] + 0.005, u_sim[0], [0], scale=10, angles='xy', color='tab:orange')
# See https://brushingupscience.com/2019/08/01/elaborate-matplotlib-animations/ for quiver animation
# Frame bookkeeping: map animation frame numbers onto simulation sample indices,
# then append extra "pause" frames that hold the final state.
ANIM_DURATION = 5
PAUSE_DURATION = 2
ANIM_FPS = 20
num_points = t_sim.size
num_frames = ANIM_DURATION * ANIM_FPS
pause_frames = PAUSE_DURATION * ANIM_FPS
idx_from_frame_num = np.linspace(0, num_points-1, num_frames, dtype=int)
def drawframe(n):
    """Render animation frame *n*: update the car, control arrow, and timeseries traces.

    Reads the module-level simulation arrays (x_sim, v_sim, u_sim, t_sim) and
    artists (car, u_vec, *_sim_line, txt_*) set up above. Returns the artists
    that changed, as required by blitting.
    """
    # Frames beyond the data range hold the final state (the end-of-animation pause).
    if n >= idx_from_frame_num.size:
        idx = num_points - 1
    else:
        idx = idx_from_frame_num[n]
    x = x_sim[idx]
    v = v_sim[idx]
    u = u_sim[idx]
    # Car sits slightly above the terrain h(x) = sin(3x)/3 so the marker isn't buried.
    h = np.sin(3 * x) / 3 + 0.025
    car.set_data(x, h)
    # Terrain slope at x; the control arrow should be drawn tangent to the terrain.
    dh_dx = np.cos(3 * x)
    u_vec.set_offsets(np.atleast_2d(np.asarray([x + 0.005, h + 0.005]).T))
    # BUG FIX: the slope dh/dx is not an angle, so cos(dh_dx)/sin(dh_dx) pointed the
    # arrow in the wrong direction. Convert the slope to the tangent angle first.
    theta = np.arctan(dh_dx)
    u_vec.set_UVC(u * np.cos(theta), u * np.sin(theta))
    # Grow the simulation traces up to the current sample.
    x_sim_line.set_data(t_sim[:idx], x_sim[:idx])
    v_sim_line.set_data(t_sim[:idx], v_sim[:idx])
    u_sim_line.set_data(t_sim[:idx], u_sim[:idx])
    # x, v, u are length-1 arrays (timeseries columns), hence the [0] indexing.
    txt_x.set_text(f'x = {x[0]:6.3f}')
    txt_v.set_text(f'v = {v[0]:6.3f}')
    txt_u.set_text(f'u = {u[0]:6.3f}')
    return car, u_vec, x_sim_line, v_sim_line, u_sim_line
# # blit=True re-draws only the parts that have changed.
# # repeat_delay has no effect when using to_jshtml, so pad drawframe to show the final frame for PAUSE_FRAMES extra frames.
# # blit=True re-draws only the parts that have changed.
# # repeat_delay has no effect when using to_jshtml, so pad drawframe to show the final frame for PAUSE_FRAMES extra frames.
anim = animation.FuncAnimation(fig, drawframe, frames=num_frames + pause_frames, interval=1000/ANIM_FPS, blit=True);
plt.close() # Don't let jupyter display the un-animated plot
# Export the animation to a standalone HTML/JS player and embed it in the notebook.
from IPython.display import HTML
js = anim.to_jshtml()
with open('mountain_car_anim.html', 'w') as f:
    print(js, file=f)
HTML(filename='mountain_car_anim.html')
# -
# ## References
#
# ```{bibliography}
# :filter: docname in docnames
# ```
|
docs/dymos_book/examples/mountain_car/mountain_car.ipynb
|
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .scala
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: Scala 2.11
// language: scala
// name: scala211
// ---
// Add the "bootstrap" facility to the notebook
interp.load.ivy("com.videoamp" %% "ammonium-util" % "2.0.1")
// Import the ammonite.ops package members. See http://ammonite.io/#Ammonite-Ops for more info
import ammonite.ops._
// Set this to the IP address of your Spark master
// NOTE(review): bootstrap will presumably fail while this is left empty — fill in before running.
val masterIP = ""
// Download the Spark libraries from the master and prepare Spark for configuration
// Execute this once the master node is available
vamp.ammonium.bootstrap(masterIP)
// +
// The Scala kernel can run so-called Scala scripts. See http://ammonite.io/#ScalaScripts
// for more information. We have a repo of helpful scripts at
// https://github.com/VideoAmp/scala-scripts, such as DisplayableDataFrame.sc, which adds
// the display() and display(count: Int) methods to the DataFrame type.
// +
// Spark Configuration
// NOTE(review): `sparkConf` is assumed to be provided by vamp.ammonium.bootstrap above — confirm.
// Set your Spark app's name
val appName = ""
// Set your app's core count. The default value configures Spark to allocate all available cores
// to your app
val appCores = Int.MaxValue
// Set the default level of parallelism for your Spark app. The recommended minimum is 512,
// and can be increased by factors of 2 to help address executor OOM errors in apps with
// large storage memory requirements
val parallelism = 512
// Set your desired executor heap size (in GB). The recommended range is 8 to 16. The JVM can
// struggle collecting garbage efficiently in Spark executors with large heap spaces
val executorHeapGB = 16
// The number of cluster cores to assign to each executor. To run one executor per worker, set
// this to the number of cores per worker node. To run two executors per worker, set it to half
// the number of cores per worker, etc. One executor per worker is recommended unless you need
// more than 16 GB of heap per worker. In this case, two executors with 16 GB of heap each is
// recommended rather than one executor with 32 GB
val coresPerExecutor = 32
// Set this to your worker's maximum allocated memory (in GB). 50 is recommended for c3.8xl
// workers and 220 is recommended for r3.8xl workers
val totalExecutorMemoryGB = 50
// This allocates the remainder of your worker's memory to off-heap memory. Do not change this
// unless you have good reason
// (.toLong before the multiply keeps the byte count in Long range and avoids Int overflow)
val executorOffHeapBytes = (totalExecutorMemoryGB - executorHeapGB) * (1024 * 1024 * 1024).toLong
// These are recommended executor JVM flags
val executorFlags =
  "-XX:+UseParallelGC" ::
  "-XX:+HeapDumpOnOutOfMemoryError" ::
  "-XX:HeapDumpPath=/scratch1/heapdumps" ::
  "-XX:+PrintClassHistogram" ::
  Nil
// These tune some advanced settings to recommended values
sparkConf
  .set("spark.driver.maxResultSize", "2048")
  .set("spark.kryoserializer.buffer.max", "1g")
  .set("spark.rdd.compress", "true")
// You can set Hadoop configuration properties by prefixing a Hadoop configuration key with
// "spark.hadoop". For example, to set the default HDFS replication level to 2:
// sparkConf
//   .set("spark.hadoop.dfs.replication", "2")
// Uncomment to set additional configuration here
// sparkConf
//   .set("spark.foo", "bar")
//   .set("spark.biz", "baz")
// These settings simply reflect the values set above. Do not modify this
sparkConf
  .setAppName(appName)
  .set("spark.cores.max", appCores.toString)
  .set("spark.executor.cores", coresPerExecutor.toString)
  .set("spark.default.parallelism", parallelism.toString)
  .set("spark.sql.shuffle.partitions", parallelism.toString)
  .set("spark.executor.memory", executorHeapGB + "g")
  .set("spark.memory.offHeap.enabled", "true")
  .set("spark.memory.offHeap.size", executorOffHeapBytes.toString)
  .set("spark.executor.extraJavaOptions", executorFlags.mkString(" "))
// +
// At this point you should have access to a `SparkSession` from the `spark` val, e.g.
// spark.table("mydumbdatabase.mydumbtable")
// +
// Helpful imports
import org.apache.spark.rdd.RDD
import org.apache.spark.storage.StorageLevel.OFF_HEAP
import org.apache.spark.sql.{ Column, DataFrame, Dataset }
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types._
import spark.implicits._
import spark.table
|
notebooks/flint_starter_notebook.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
# Load the hours-studied vs. exam-score dataset.
df = pd.read_csv("C:/Users/User/Downloads/student_scores.csv")
df
df.describe()
# +
# Features = all columns except the last; target = second column (Scores).
X=df.iloc[:, :-1].values
Y=df.iloc[:,1].values
# 80/20 train/test split with a fixed seed for reproducibility.
X_train,X_test,Y_train,Y_test=train_test_split(X,Y,test_size=0.2,random_state=0)
from sklearn.linear_model import LinearRegression
reg=LinearRegression()
reg.fit(X_train,Y_train)
print(reg.coef_)
print(reg.intercept_)
# -
# Compare held-out predictions against the true scores.
y_pred=reg.predict(X_test)
ff=pd.DataFrame({'Actual':Y_test,'Predicted':y_pred})
ff
df.plot(x='Hours',y='Scores',style=".")
plt.show()
# +
# This step is done only to interpret the problem in a mathematical manner:
# reconstruct the fitted line y = coef * Hours + intercept by hand.
guess =df.Hours*reg.coef_+reg.intercept_
df['Guess']=guess
df
# -
df.plot(x='Hours',y='Guess')
df.plot(x='Hours',y='Scores',style=".")
# +
# Overlay the fitted line and the raw scores on one set of axes.
x = df.Hours
fig,ax= plt.subplots()
y1=df.Guess
y=df.Scores
ax.plot(x,y1,'-',label='predicted')
ax.plot(x,y,'o',label='scores')
plt.show()
|
Linear Regression.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="LvLRoEFSIwsx"
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# + id="VE1jorLNJ0vp"
# Load the dataset: features = all columns but the last, target = last column.
dataset = pd.read_csv('DataSetKopi.csv')
x = dataset.iloc[:, :-1].values
y = dataset.iloc[:, -1].values
# + colab={"base_uri": "https://localhost:8080/"} id="IFcKD41vKQAD" outputId="2ae65e70-af6b-4b9d-f8b1-4f1a65a90464"
print(x)
# + colab={"base_uri": "https://localhost:8080/"} id="vyydUY7RKUUr" outputId="e55d9308-2e1c-4f32-88c2-714f9b6f1a91"
print(y)
# + id="38CizorGKeZT"
# NOTE(review): this first split is never used — it is overwritten by the split
# below (random_state=1) after imputation/encoding. Consider removing it.
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.2, random_state = 0)
# + id="dEXhVNDhLLST"
# Replace missing numeric values (columns 1-2) with the column mean.
# NOTE(review): the imputer is fit on the full dataset before the final split,
# which leaks test statistics into training — verify this is acceptable here.
from sklearn.impute import SimpleImputer
imputer = SimpleImputer(missing_values=np.nan, strategy='mean')
imputer.fit(x[:, 1:3])
x[:, 1:3] = imputer.transform(x[:, 1:3])
# + colab={"base_uri": "https://localhost:8080/"} id="3lNSFzF-L73E" outputId="31472275-1a5a-4686-fcfa-be8c08f852a9"
print(x)
# + id="KfUliSIPL_Vv"
# One-hot encode the categorical first column; keep remaining columns as-is.
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder
ct = ColumnTransformer(transformers=[('encoder', OneHotEncoder(), [0])], remainder='passthrough')
x = np.array(ct.fit_transform(x))
# + id="ryfS15ScNQuc"
# Encode the target labels as integers.
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
y = le.fit_transform(y)
# + colab={"base_uri": "https://localhost:8080/"} id="FlB5WZ0gNjWU" outputId="c033ea29-443b-4273-ce27-ddbf37d1ba93"
print(y)
# + id="03VDRKpZNmM8"
# The split actually used downstream (overwrites the earlier one).
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.2, random_state = 1)
# + colab={"base_uri": "https://localhost:8080/"} id="pDM6ax6XOAYV" outputId="605af0bc-ad53-4887-a300-0c3191a6e6dd"
print(x_train)
# + colab={"base_uri": "https://localhost:8080/"} id="Uvxbh0YBOESI" outputId="5485a4b9-3be1-4bac-933b-e9b10188e852"
print(x_test)
# + colab={"base_uri": "https://localhost:8080/"} id="7tcSqWPGOQgE" outputId="efa5827c-90b0-448c-8481-70237c2631fe"
print(y_train)
# + colab={"base_uri": "https://localhost:8080/"} id="I5PDYtVzOWUo" outputId="50a36f5f-b283-4a7d-ca1e-410dff91e55f"
print(y_test)
# + id="2sycB5SqOZjt"
# Standardize the numeric columns (the first 3 are one-hot dummies, left untouched).
# Scaler is fit on the training set only, then applied to the test set.
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
x_train[:, 3:] = sc.fit_transform(x_train[:, 3:])
x_test[:, 3:] = sc.transform(x_test[:, 3:])
# + colab={"base_uri": "https://localhost:8080/"} id="M6c40IyePAfR" outputId="8ffee074-2177-4b01-a870-7c371844b1fb"
print(x_train)
# + colab={"base_uri": "https://localhost:8080/"} id="jNjE3N1nPEP9" outputId="05779d1a-bea8-4969-b8b2-73e2a8dcbe74"
print(x_test)
|
Tugas(Kuis)Pertemuan3DataMining.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_python3
# language: python
# name: conda_python3
# ---
# # Visual perception for autonomous vehicles using active learning
#
# Training computer vision models for autonomous driving to achieve high-end performance requires large labeled datasets, which can be prohibitively expensive. This notebook shows an end-to-end pipeline that streamlines the process of labelling driving scene datasets for a task, using SageMaker Groundtruth autolabelling and active learning. The task that we focus on is pedestrian detection in camera images. This is framed as an object detection task where the goal of the final trained model is to predict 2D bounding boxes around pedestrians in an image.
#
# The active learning pipeline ensures that, starting with just a handful of a labels we can train a model, predict bounding boxes, compare the models predictions to a real labels and only add more labels if we need to increase the performance of the model. Using Step Functions, we can create a workflow that automates this process and iteratively performs the active learning loop.
# +
# !bash setup.sh
# Project-local config (IAM role, bucket names, etc.) and manifest helpers.
import sagemaker
from package import config, manifest
role = config.role
sess = sagemaker.Session()
# -
# ## Dataset
#
# The image dataset we are using for this demonstration is a subset of the [A2D2 dataset](https://www.a2d2.audi/a2d2/en/dataset.html). We select a subset of camera frames from the original dataset and have generated 2D bounding box labels around objects of interests from the segmentation maps in the original dataset.
#
# For flexibility and ease of use, we will be working with an S3 manifest file of the dataset. You can replace the `data_manifest_file` with an s3 or local path to your custom manifest file in the same format to use your own dataset.
# S3 path of the pre-built A2D2 bounding-box manifest shipped with the solution.
data_manifest_file = 's3://{}/{}/{}'.format(config.solution_upstream_bucket,
                                            config.solution_name,
                                            'data/manifests/a2d2_visual_perception_bbox.manifest')
# Let's take a peek at a single entry in the manifest file
manifest_rows = manifest.get_manifest_rows_from_path(data_manifest_file)
manifest_rows[0]
# Let's try to visualize some images that have annotations
manifest.visualize_manifest_images(data_manifest_file)
# ## SageMaker Groundtruth Labeling Workteam
#
# Now that we have seen what data we will be working with. Let's prepare a SageMaker Groundtruth private workteam that we will use as the workforce for data labeling. For the purposes of this demonstration, you will add yourself to a SageMaker private workteam that's managed by a Cognito User Pool. Then when we create labeling jobs during the active learning pipeline, the jobs will be sent to this workteam and you'll be able to go and manually label images using the SageMaker Groundtruth labeling UI
# ### Create a work team
#
# The first step is to create a workteam. By launching this solution, the necessary Cognito resources for creating a new private workteam should have already been created. If you have already used GroundTruth before and set up a Private Workforce, the new workteam will be created against the existing Groundtruth Cognito configuration. However, if there are no existing Cognito configurations for labeling jobs, we will use the Cognito resources created when launching the solution.
# +
from package import workteam

# Any Cognito configuration already registered with GroundTruth for labeling jobs.
cognito_config = workteam.get_cognito_configs()
workteam_name = f"{config.solution_prefix}-workteam"
# Reuse the account's existing GroundTruth Cognito configuration when one exists;
# otherwise fall back to the Cognito resources created when this solution launched.
if cognito_config is None:
    team_config, userpool = config, config.cognito_user_pool
else:
    team_config, userpool = cognito_config, cognito_config['UserPool']
workteam_arn = workteam.create_groundtruth_workteam(workteam_name, team_config)
# -
# ### Update your user invitation email with the new workteam
#
# Now your private groundtruth workteam is ready to go. Now we'll setup the domain for the Groundtruth labeling jobs that and also update the backing Cognito user pool with this domain. You'll be able to use this to invite people to help you label your dataset.
# +
# Point the Cognito user pool's invitation email at this workteam's signup URL.
signup_domain = workteam.get_signup_domain(workteam_name=workteam_name)
workteam.update_user_pool_with_invite(userpool, signup_domain)
# -
# ### Invite yourself and others to label your dataset
#
# Invite people to help you label your dataset. Use the following link to enter your email
# address to get an invite.
#
"https://{}.console.aws.amazon.com/sagemaker/groundtruth?region={}#/labeling-workforces/add-workers".format(config.region, config.region)
# Then open the following link in a new tab to add your email address to the workteam.
#
# Click on the **Workers** tab, then click add worker to add your email address.
"https://{}.console.aws.amazon.com/sagemaker/groundtruth?region={}#/labeling-workforces/private-details/{}".format(config.region, config.region, workteam_name)
# ## Prepare Dataset for Active Learning
#
# Now, we will prepare the dataset for the active learning loop. For this, we start with a manifest file that is partially labeled. Starting with this allows us to have the first stage of the Active Learning loop be a model training job instead of a processing job. We could also start with a fully unlabeled dataset but this would mean the first stage of our active learning loop would be a labeling job.
# ### Upload partially labeled input to S3
#
# The utility function below will copy the `data_manifest_file` into an input manifest for this experiment. In the input manifest, we will only keep bounding box labels for a subset of the data, and exclude other class labels besides Pedestrians before uploading the input manifest to the s3 path in `input_data`.
#
# To see the input manifest, you can open the file `./manifests/partially_labeled_input.manifest`
# +
from package.active_learning import prepare
# S3 prefix under which all active-learning inputs/outputs for this run live.
input_data = 's3://{}/{}/{}'.format(config.solution_bucket, 'active-learning', config.s3_data_prefix)
# Build and upload the partially-labeled input manifest (Pedestrian labels only,
# for a subset of frames) so the loop can start with a training stage.
s3_input_manifest_path = prepare.partially_labeled_input(input_data, manifest_rows)
# -
# Next, we will prepare the job template and class_labels that will be used by all GroundTruth labeling jobs in the Active Learning loop. You can see the contents of the template at `./artifacts/instructions.template` and the simple class labels for Pedestrian bounding box at `./artifacts/class_labels.json`
# +
from package.active_learning import prepare
# Upload the labeling UI template and class-label definitions alongside the manifest.
s3_class_labels_path, s3_template_path = prepare.labels_config_and_template(input_data)
# Now that we have our input data artifacts in the s3 input location, it is time to create the input request to the active learning loop
# ### Create Groundtruth request for active learning
#
# The input request to the active learning loop will be in the form of the [GroundTruth Labeling Job request syntax](https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_CreateLabelingJob.html) but this will be passed as in input to the Step Functions StateMachine for the active learning loop. The stateMachine will be able to parse the request and select the appropriate state of the active learning loop to begin execution.
#
# You can see the input request at the path `./requests/ground_truth.request`
# +
from package.active_learning import request
# Assemble a CreateLabelingJob-style request; this is the input payload for the
# Step Functions active-learning state machine rather than a direct GroundTruth call.
gt_request = request.create_ground_truth_request(s3_input_manifest_path,
                                                 s3_class_labels_path,
                                                 s3_template_path,
                                                 config.region,
                                                 role,
                                                 config.solution_prefix,
                                                 's3://{}/{}/output/'.format(config.solution_bucket, config.s3_data_prefix),
                                                 workteam_arn=workteam_arn)
# -
# ## Start Active Learning Loop
#
# All the input configurations are complete so we can create a new instance of the active learning pipeline and start execution. As mentioned already, we are using a Step Functions workflow to orchestrate the active learning loop.
# +
from package.active_learning.step_functions import ActiveLearningPipeline
# Wrap the pre-deployed Step Functions state machine identified in the solution config.
step_functions_pipeline = ActiveLearningPipeline(config.step_functions_active_learning)
# -
# Kick off the loop asynchronously; the returned ARN identifies this execution.
execution_arn = step_functions_pipeline.start_execution(gt_request)
|
source/sagemaker/active-learning-visual-perception.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Load the store-to-weather-station mapping.
key = pd.read_csv("data/key.csv")
key.tail()
# Number of stores served by each weather station.
k = key.groupby("station_nbr").size().reset_index()
# BUG FIX: the bar-plot keyword is `color`; `colors` is not a valid plot property
# and is rejected when forwarded to matplotlib.
k.plot.bar(x='station_nbr', y=0, label="store count", color='lightgreen')
plt.show()
# Fully imputed weather table, one row per station per date.
weather = pd.read_csv("weather_imputation.csv")
weather['date'] = pd.to_datetime(weather['date'])
weather.tail()
# Stack the per-station month-mean files into one frame. Station 5 has no
# month-mean file, so its rows are taken from the imputed table instead
# (TODO confirm that is the intended source for station 5).
wthr = pd.read_csv("data_monthmean/station1_monthmean.csv")
for i in range(2,21):
    if i == 5:
        station = weather[weather['station_nbr']==5]
    else:
        station = pd.read_csv("data_monthmean/station" + str(i) + "_monthmean.csv")
    wthr = pd.concat([wthr,station], axis=0)
    wthr.reset_index(drop=True, inplace=True)
wthr.tail()
# +
import datetime as dt
# Compute relative humidity from dewpoint and mean temperature
# (both converted from Fahrenheit to Celsius via (T-32)/1.8 inside the formula).
wthr["relative_humility"] = 100*(np.exp((17.625*((wthr['dewpoint']-32)/1.8))/(243.04+((wthr['dewpoint']-32)/1.8)))/np.exp((17.625*((wthr['tavg']-32)/1.8))/(243.04+((wthr['tavg']-32)/1.8))))
# Compute wind chill ("feels-like" temperature) from tavg and average wind speed.
wthr["windchill"] = 35.74 + 0.6215*wthr["tavg"] - 35.75*(wthr["avgspeed"]**0.16) + 0.4275*wthr["tavg"]*(wthr["avgspeed"]**0.16)
# Daytime length in hours from sunrise/sunset, which are encoded as HHMM integers.
wthr['daytime'] = (((wthr['sunset'] // 100 * 60) + (wthr['sunset'] % 100))- ((wthr['sunrise'] // 100 * 60) + (wthr['sunrise'] % 100))) / 60
# Convert the date column to datetime.
wthr["date"] = pd.to_datetime(wthr["date"])
# # (disabled) add separate year / month / day columns
# wthr["date_y"] = wthr["date"].dt.year
# wthr["date_m"] = wthr["date"].dt.month
# wthr["date_d"] = wthr["date"].dt.day
# Add day-of-week (Monday = 0, Sunday = 6) and weekend flag columns.
# Weekday: 0, Saturday/Sunday: 1.
wthr["week7"] = wthr["date"].dt.dayofweek
wthr['weekend'] = 0
wthr.loc[wthr['week7'] == 5, 'weekend'] = 1
wthr.loc[wthr['week7'] == 6, 'weekend'] = 1
wthr.station_nbr.unique()
wthr.corr()
# Correlation heatmap of all numeric weather features.
plt.figure(figsize=(10,10))
cmap = sns.light_palette("darkgray", as_cmap=True)
sns.heatmap(wthr.corr(), annot=False, cmap=cmap)
plt.show()
# Persist the engineered features before adding the codesum-derived flag below.
wthr.to_csv("weather2_180701.csv", index=False)
wthr['code_change'] = 0
wthr.tail()
# Counts of rows whose codesum is EXACTLY 'MD' / 'RA' (exact match, not substring).
len(wthr[wthr['codesum'].values == 'MD'].index), len(wthr[wthr['codesum'].values == 'RA'].index)
# Encode the weather-condition string into an ordinal flag:
#   2 = rain ('RA' appears anywhere in codesum),
#   0 = moderate ('MD' appears, and no 'RA'),
#   1 = everything else.
# Vectorized with str.contains instead of a per-row .loc loop; 'RA' is applied
# last so it keeps priority over 'MD', matching the original if/elif order.
# na=False also makes missing codesum values land in the default bucket instead
# of raising, which the row-wise `in` test would have done.
wthr['code_change'] = 1
wthr.loc[wthr['codesum'].str.contains('MD', regex=False, na=False), 'code_change'] = 0
wthr.loc[wthr['codesum'].str.contains('RA', regex=False, na=False), 'code_change'] = 2
wthr.code_change.unique()
wthr.head()
wthr.codesum.values
# NOTE(review): reloading the CSV saved above discards the in-memory code_change
# column (the file was written before it was computed) — confirm this is intended.
wthr = pd.read_csv("weather2_180701.csv")
wthr['date'] = pd.to_datetime(wthr['date'])
wthr.tail()
|
weather2.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Navigating a human assembly with gfabase
#
# This notebook demonstrates using gfabase to import (i) a **GFA** human *de novo* assembly graph, and (ii) **PAF** mappings of its segments to the reference genome. With these indexed together in a .gfab file, gfabase can then rapidly query for subgraphs associated with reference genome coordinates, reading only the necessary parts of the file. The subgraphs can be visualized with [Bandage](https://rrwick.github.io/Bandage/), or fed into any other downstream analysis.
#
# Prerequisites: `aria2c` (fast download tool), available in the `aria2` package for most Linux distributions, or on macOS `brew install aria2`.
#
# **First, fetching `gfabase` executable:**
# + language="bash"
# # substitute "linux" for "macOS" if appropriate:
# aria2c -c -q -o gfabase https://github.com/mlin/gfabase/releases/download/v0.5.0/gfabase-macOS-x86-64
# chmod +x gfabase
# ./gfabase version | grep gfabase
# -
# **Downloading a [Shasta](https://github.com/chanzuckerberg/shasta) GFA assembled from HG002 ONT reads, and the minimap2 PAF of its segments & GRCh38.**
# + language="bash"
# aria2c -c -q -o Assembly.gfa -s 16 -x 16 --retry-wait 2 \
# https://s3-us-west-2.amazonaws.com/czi.paolo-public/HG002-Guppy-3.6.0-run4-UL/Assembly.gfa
# aria2c -c -q -o Alignments-NoCigar.paf -s 16 -x 16 --retry-wait 2 \
# https://s3-us-west-2.amazonaws.com/czi.paolo-public/HG002-Guppy-3.6.0-run4-UL/mapToHg38/Alignments-NoCigar.tsv
# -
# Grab a coffee for this ~5GB download; subsequently, aria2c will be smart enough not to repeat it.
#
# ## .gfa to .gfab
#
# Use `gfabase load` to generate `Assembly.gfab` from `Assembly.gfa`:
# + language="bash"
# ./gfabase --verbose load -o Assembly.gfab Assembly.gfa
# ls -lh Assembly.gfa*
# -
# The .gfab file is nicely compressed in under a minute (YMMV), and we get some interesting details about the encoded graph. `gfabase view Assembly.gfab` dumps it back to plain GFA.
#
# Note: `gfabase load` expects uncompressed GFA input, but we could stream a `.gfa.gz` e.g.
#
# ```bash
# gunzip -c Assembly.gfa.gz | ./gfabase load -o Assembly.gfab
# ```
#
# ## .gfab += .paf
#
# Add the GRCh38 .paf mappings to the .gfab; include only the mappings of 1Kbp+, since we're just looking for a skeleton for navigation.
# + language="bash"
# ./gfabase add-mappings Assembly.gfab Alignments-NoCigar.paf --length 1000 --replace
# -
# * Don't have .paf mappings handy for your assembly? see [our example WDL pipeline](https://github.com/mlin/gfabase/blob/main/workflows/gfab_winnowmap.wdl) to generate some with [Winnowmap](https://github.com/marbl/Winnowmap).
# * `--replace` makes the command idempotent by deleting any existing mappings stored in the .gfab
# * the .gfab is updated in-place, so make a backup copy first if needed
#
# ## Extracting a subgraph
#
# Now we can use the key command `gfabase sub` to have a look at the assembly graph topology around the HLA locus on chromosome 6.
# + language="bash"
# time ./gfabase sub Assembly.gfab chr6:29,700,000-29,950,000 --range --view --cutpoints 2 --no-sequences --guess-ranges
# -
# * `--range`: interpret each positional argument as a reference genomic range to search in the segment mappings, instead of a GFA segment name
# * `--view`: output plain .gfa instead of a sub-gfab
# * `--cutpoints 2`: extract the graph neighborhood of segments mapped to the specified genomic range
# * specifically: starting from the segments with directly-overlapping mappings, crawl all segments reachable by crossing fewer than two *cutpoints* ("articulation points") of the undirected segment graph
# * lets us see any interesting graph topology associated with the reference range, without loading the whole connected component (chromosome)
# * `--no-sequences`: exclude the DNA sequences, for succinct display here
# * `--guess-ranges`: add `gr:Z` tag to each mapped segment with a genomic range "guessed" by aggregating its PAF mappings. (The tilde signifies that this is imprecise; meant for rough navigation only.)
#
# Notice the operation was nearly instantaneous, which makes it easy to go exploring wherever you like.
#
# ## Visualizing subgraph with Bandage
#
# `gfabase sub` can send the selected subgraph directly to [Bandage](https://rrwick.github.io/Bandage/) for visualization, so we can quickly inspect regions of interest without loading the whole graph. (`gfabase view` can send everything, if desired.)
#
# To proceed, make sure the `Bandage` executable is available in the effective `$PATH`.
# * macOS: `export PATH=/path/to/Bandage.app/Contents/MacOS:$PATH`
# + language="bash"
# ./gfabase sub Assembly.gfab -o HG002-HLA.gfa chr6:29,700,000-29,950,000 --range --cutpoints 2 --guess-ranges --bandage
# -
# With `--guess-ranges --bandage`, this also generates a CSV file for Bandage to load (*File > Load CSV data*) annotating each segment's guessed range.
#
# Here's what we should see, clearly showing where Shasta was and wasn't able to resolve HG002's two haplotypes. In this case, the non-resolved segments serve as the "cutpoints" delimiting the extracted subgraph, including one that seems to best match an ALT contig of GRCh38.
# 
# Much more can be done to integrate Bandage and gfabase in the future. We are just starting out here.
# ## Opening .gfab programmatically
#
# A .gfab file is actually a SQLite (+ [Genomics Extension](https://github.com/mlin/GenomicSQLite)) database; we can open and query its [schema](https://github.com/mlin/gfabase/tree/main/src/schema) directly. With `pip3 install genomicsqlite`:
# +
import genomicsqlite
# A .gfab file is a SQLite database; open it read-only and count the segments.
db = genomicsqlite.connect("Assembly.gfab", read_only=True)
next(db.execute("SELECT count(*) FROM gfa1_segment_meta"))[0]
# -
list(db.execute("""
select substr(twobit_dna(sequence_twobit),0,100) || '...' from gfa1_segment_sequence where segment_id in
(select distinct segment_id from gfa1_segment_mapping where _rowid_ in
genomic_range_rowids('gfa1_segment_mapping', 'chr6', 29700000, 29950000)
order by segment_id);
"""))
# In exchange for the steeper learning curve, this approach can be still much more efficient, as data indices are cached in memory to serve successive queries.
|
notebooks/gfabaseAssemblyNavigation.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import scipy.stats as st
import glob
import matplotlib.pyplot as pl
import warnings
import h5py
from tqdm import tqdm_notebook
from scipy.interpolate import interp1d
warnings.simplefilter('ignore')
# Global plot styling for all figures in this notebook.
pl.rcParams['figure.figsize'] = (16/2.54, 16/2.54)
pl.rcParams['font.size'] = 11
pl.rcParams['font.family'] = 'Arial'
pl.rcParams['xtick.direction'] = 'out'
pl.rcParams['xtick.minor.visible'] = True
pl.rcParams['ytick.minor.visible'] = True
pl.rcParams['ytick.right'] = True
pl.rcParams['xtick.top'] = True
pl.rcParams['figure.dpi'] = 96
# One model per subdirectory of the CMIP6 input tree.
models = [model.split('/')[-1] for model in glob.glob('../data_input/cmip6/*')]
# +
# models = [
# 'ACCESS-CM2', 'CanESM5-CanOE', 'CNRM-CM6-1', 'EC-Earth3-LR', 'GISS-E2-1-G', 'INM-CM5-0', 'NESM3',
# 'ACCESS-ESM1-5', 'CAS-ESM2-0', 'CNRM-CM6-1-HR', 'EC-Earth3-Veg', 'GISS-E2-1-G-CC', 'IPSL-CM6A-LR', 'NorCPM1',
# 'AWI-CM-1-1-MR', 'CESM2', 'CNRM-ESM2-1', 'EC-Earth3-Veg-LR', 'GISS-E2-1-H', 'MIROC6', 'NorESM1-F',
# 'AWI-ESM-1-1-LR', 'CESM2-FV2', 'FGOALS-f3-L', 'GISS-E2-2-G', 'MIROC-ES2L', 'NorESM2-LM',
# 'BCC-CSM2-MR', 'CESM2-WACCM', 'E3SM-1-0', 'FGOALS-g3', 'HadGEM3-GC31-LL', 'MPI-ESM-1-2-HAM', 'NorESM2-MM',
# 'BCC-ESM1', 'CESM2-WACCM-FV2', 'E3SM-1-1', 'FIO-ESM-2-0', 'HadGEM3-GC31-MM', 'MPI-ESM1-2-HR', 'SAM0-UNICON',
# 'CAMS-CSM1-0', 'CIESM', 'E3SM-1-1-ECA', 'GFDL-CM4', 'IITM-ESM', 'MPI-ESM1-2-LR', 'TaiESM1',
# 'CanESM5', 'CMCC-CM2-SR5', 'EC-Earth3', 'GFDL-ESM4', 'INM-CM4-8', 'MRI-ESM2-0', 'UKESM1-0-LL'
# ]
# +
# Build per-model ensemble-mean historical surface temperature (tas) series.
# Models with no tas files, or no runs of at least 165 years, are skipped.
historical = {}
accepted_models = []
nyears = {}
for model in models:
    historical[model] = {}
    path_hist_tas = glob.glob('../data_input/cmip6/%s/historical/*/tas.txt' % model)
# (disabled) ensemble filtering for specific models:
#     if model=='CanESM5' or model=='GISS-E2-1-G':
#         dirhist = [x for x in dirhist if 'r1i1p1f1' in x]
    # experiment missing? skip model
    if len(path_hist_tas)==0:
        print(model + ' not provided historical tas')
        continue
    historical[model]['tas'] = np.zeros((165))
    nens = 0
    # Average over ensemble members, truncating each to the first 165 years
    # (presumably 1850-2014 — confirm against the file convention).
    for ens in path_hist_tas:
        print(ens)
        tas = np.loadtxt(ens)
        if tas.size >= 165:
            historical[model]['tas'] = historical[model]['tas'] + tas[:165]
            nens = nens + 1
    if nens == 0:
        continue
    historical[model]['tas'] = historical[model]['tas'] / nens
    nyears[model] = 165
    # Period-mean anomalies relative to the first 51 years (1850-1900 baseline).
    historical[model]['1951-1980'] = np.mean(historical[model]['tas'][101:131]) - np.mean(historical[model]['tas'][0:51])
    historical[model]['1961-1990'] = np.mean(historical[model]['tas'][111:141]) - np.mean(historical[model]['tas'][0:51])
    historical[model]['1995-2014'] = np.mean(historical[model]['tas'][145:165]) - np.mean(historical[model]['tas'][0:51])
    # if we get this far, things have worked out well
    accepted_models.append(model)
# -
len(accepted_models)
#nyears
# Observed temperatures (Cowtan & Way) and the CMIP5-derived blending ratio used
# to convert the blended observational record toward GSAT.
cw_temp = np.loadtxt('../data_input/CW.txt')
blratio = np.loadtxt('../data_input/cmip5_data_2019.txt')[5,:]
# Observed anomaly relative to the first 51 years (1850-1900 baseline).
cowtan = cw_temp[:,1] - np.mean(cw_temp[:51,1])
# Pad the ratio with ones for the first 11 years so it aligns with the record.
blratio = np.concatenate((np.ones(11), blratio))
Tobs = blratio * cowtan
#Tobs=cowtan
print(np.mean(Tobs[111:141]))
print(np.mean(Tobs[101:131]))
# Collect per-model period anomalies and the full anomaly series for plotting.
sixtyoneninety=np.ones(len(accepted_models))*np.nan
fiftyoneeighty=np.ones(len(accepted_models))*np.nan
ninetyfivefourteen = np.ones(len(accepted_models))*np.nan
full=np.ones((165, len(accepted_models)))
for i, model in enumerate(accepted_models):
    full[:,i] = historical[model]['tas'][:165] - np.mean(historical[model]['tas'][0:51])
    pl.plot(np.arange(1850, 1850+nyears[model]), historical[model]['tas'] - np.mean(historical[model]['tas'][0:51]))
    sixtyoneninety[i] = historical[model]['1961-1990']
    fiftyoneeighty[i] = historical[model]['1951-1980']
    ninetyfivefourteen[i] = historical[model]['1995-2014']
# Overlay the observed record in black.
pl.plot(np.arange(1850, 2020), Tobs, color='k', lw=2)
fig, ax=pl.subplots()#figsize=(9.5/2.54,9.5/2.54))
ax.fill_between(np.arange(1850.5,2015), np.mean(full,axis=1)-np.std(full, axis=1), np.mean(full,axis=1)+np.std(full,axis=1), color='green', alpha=0.5)
ax.plot(np.arange(1850.5,2015), np.mean(full, axis=1), color='green', label='CMIP6 historical')
ax.fill_between(np.arange(1850.5,2015), Tobs[:-5]-cw_temp[:-5,2], Tobs[:-5]+cw_temp[:-5,2], color='k', alpha=0.5)
ax.plot(np.arange(1850.5,2015), Tobs[:-5], color='k', label='Reconstructed GSAT')
ax.set_xlim(1850,2015)
ax.set_ylim(-0.4, 1.35)
ax.legend(loc='upper left')
ax.set_ylabel('Temperature anomaly with respect to 1850-1900, $^{\circ}$C')
ax.set_title('CMIP6 simulated and observed warming')
pl.tight_layout()
pl.savefig('../figures/figureS7.png', dpi=300)
pl.savefig('../figures/figureS7.pdf')
print(np.mean(sixtyoneninety))
print(np.mean(fiftyoneeighty))
print(np.std(sixtyoneninety))
print(np.std(fiftyoneeighty))
# cowtan and way uncertainty from 1850-1900 to 1961-90 (one sigma)
np.sqrt(np.sqrt(np.sum(cw_temp[:51,2]**2)/51)**2 + np.sqrt(np.sum(cw_temp[111:141,2]**2)/30)**2)
for model in ['CanESM5','E3SM-1-0','GFDL-CM4','GFDL-ESM4','GISS-E2-1-G','HadGEM3-GC31-LL','IPSL-CM6A-LR',
'MIROC6','MRI-ESM2-0','NorESM2-LM','UKESM1-0-LL']:
pl.plot(historical[model]['tas'][95:121]-historical[model]['tas'][95])
for model in ['CanESM5','E3SM-1-0','GFDL-CM4','GFDL-ESM4','GISS-E2-1-G','HadGEM3-GC31-LL','IPSL-CM6A-LR',
'MIROC6','MRI-ESM2-0','NorESM2-LM','UKESM1-0-LL']:
print(model, historical[model]['1995-2014']-historical[model]['1951-1980'])
st.linregress(np.arange(11), Tobs[159:])
|
notebooks/60_historical_warming.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# ### 
#
# # Unit 2 Lab: Control Flow
#
# ## Overview
# Welcome to the second unit lab!
#
# ### Objectives:
#
# - Create a function to validate user input so we can use responses later in our weather analysis.
# - Create a function that generates dummy weather data for testing purposes.
# <hr>
# ### User Input Validation
# One of the most common ways we use conditionals in real-life programming projects is to validate user-entered information. Let's apply what we've learned to create a user validation function.
#
# - Define a function, `user_input_validation()`.
# - Create two variables:
# - `valid_month`
# - `valid_day`
# - Set both variables to `False`.
# - Have `user_input_validation()` return the value `True`.
# Enter your code below:
def user_input_validation():
    """Placeholder validator: always reports the input as valid."""
    # Flags start pessimistic; later lab steps wire them to real checks.
    valid_month, valid_day = False, False
    return True
# Run this cell to verify that the function works.
# Conditional expression chooses which message to print.
print("OK!") if user_input_validation() else print("Not quite!")
# Next, add two arguments to the `user_input_validation()` function:
# - `user_input_month`
# - `user_input_day`
#
# Write a series of conditional statements that meet the following requirements:
#
# 1. If the value for `user_input_month` is less than 1 or greater than 12, set `valid_month` to `False` and print out `"Invalid month. Please enter a number between 1-12."` Otherwise, set `valid_month` to `True`.
#
# 2. If the value of `user_input_day` is less than 1 or greater than 31, set `valid_day` to `False` and print out `"Invalid day. Please enter a number between 1-31."` Otherwise, set `valid_day` to `True`.
#
# 3. Add a final `if` statement that returns `True` if — and only if — the values of `user_input_month` and `user_input_day` are `True`. Otherwise, return `False`.
#
#
# *Note: It may seem redundant to be setting `valid_month` and `valid_day` to `False` if they aren't valid when we already set them to `False` when we declared them as variables. However, later we'll be looping through these `if...else` statements multiple times, which is why this step is necessary.*
# Edit the function below:
def user_input_validation(user_input_month, user_input_day):
    """Validate a user-entered month (1-12) and day (1-31).

    Prints a message for each out-of-range field and returns True only
    when both fields are in range.
    """
    valid_month = 1 <= user_input_month <= 12
    if not valid_month:
        print("Invalid month. Please enter a number between 1-12.")
    valid_day = 1 <= user_input_day <= 31
    if not valid_day:
        print("Invalid day. Please enter a number between 1-31.")
    return valid_month and valid_day
# Run the following function with inputs of 13/25, 11/25, -1/5, 5/-1, and 7/19 to test your function.
# The function should return False for 13/25, -1/5, 5/-1 and True for 11/25 and 7/19.
# Expected here: prints the invalid-month message and returns False.
user_input_validation(13,25)
# We also need to protect against the user entering dates that don't exist, such as February 31.
#
# 1. Create a third variable, `valid_date`, within the `user_input_validation()` function.
#
# 2. Create an `if` statement to check if the value of the `month` is 2, 4, 6, 9, or 11 _and_ the `day` entered is greater than 30.
# 3. Add an `or` condition to this `if` statement that also checks if the `month` is February while the `day` entered is greater than 28.
# - If the date received falls within these criteria:
# - Set `valid_date` to `False`.
# - Print `"Invalid date. Please enter a valid date."` Otherwise, set `valid_date` to `True`.
#
# 3. Create a final `if` statement that checks that all three of the following variables are `True`:
# - `valid_month`
# - `valid_day`
# - `valid_date`
# - If they are, return `True`. Otherwise, return `False`.
#
# *Note: To create this `if` statement properly, you'll need to use the `in` keyword on the list of months. This concept was not taught in class. You use `in` to check if an item is in a list, like so:
#
# ```python
# # This prints '5 is in list!'
# if(5 in [1,3,4,5,9]):
# print("5 is in list!")
#
# # This does not print '7 is in list!'
# if(7 in [1,3,4,5,9]):
# print("7 is in list!")
# ```
# Edit the function below:
def user_input_validation(user_input_month, user_input_day):
    """Validate that a month/day pair forms a real calendar date.

    Checks three things, printing a message for each failure: the month
    is 1-12, the day is 1-31, and the day exists in that month (per the
    lab: 30-day limit for months 2/4/6/9/11 and 28 for February; leap
    years are ignored).  Returns True only when all three checks pass.
    """
    valid_month = 1 <= user_input_month <= 12
    if not valid_month:
        print("Invalid month. Please enter a number between 1-12.")
    valid_day = 1 <= user_input_day <= 31
    if not valid_day:
        print("Invalid day. Please enter a number between 1-31.")
    # Reject day 31 in the listed short months and any February day > 28.
    short_month_overrun = user_input_month in [2, 4, 6, 9, 11] and user_input_day > 30
    february_overrun = user_input_month == 2 and user_input_day > 28
    valid_date = not (short_month_overrun or february_overrun)
    if not valid_date:
        print("Invalid date. Please enter a valid date.")
    return valid_month and valid_day and valid_date
# Run the following function with inputs of 9/31, 11/31, 2/29, 2/30, 2/31, 11/25, and 7/19 to test your function.
# The function should return False for the first five dates and True for 11/25 and 7/19.
# Expected here: September has only 30 days, so this prints the invalid-date message.
user_input_validation(9,31)
# <hr>
# ### Weather Data
#
# The `recent_temperatures` list holds the hourly recorded temperatures for each hour of day. Print out the high and low temperatures for the day.
# +
# Hourly recorded temperatures for one day (index = hour, 0-23).
recent_temperatures = [67,67,68,69,71,73,75,76,79,81,81,80,82,81,81,80,78,75,72,70,67,65,66,66]
# Enter your code below:
# Daily high and low.
print(max(recent_temperatures))
print(min(recent_temperatures))
# -
# Create a function, `avg_temp()`, that reads in `recent_temperatures`, calculates the average temperature, and prints the result.
# +
# Enter your code below:
def avg_temp(list_of_temps):
    """Compute and print the average of a list of temperatures.

    Uses the built-in sum() instead of a manual accumulator; the original
    loop shadowed the `sum` builtin with a local variable.

    Parameters:
        list_of_temps: non-empty list of numeric temperatures.

    Raises:
        ZeroDivisionError: if the list is empty (same as the original).
    """
    avg = sum(list_of_temps) / len(list_of_temps)
    print(avg)
avg_temp(recent_temperatures)
# -
# Oftentimes, when working with large sets of data, you'll encounter a list of lists or a list of tuples. In our case, we'll be working with data where each item in a list contains a time and the corresponding temperature that was recorded at that time.
#
# Let's create a list of tuples, `weather_data`, where the first digit represents the hour (0–23) and the second value is the temperature from the `recent_temperatures` list. (Example of list format: [(0,67),(2,67),(3,69),etc.])
#
# Then, print out the list to verify.
# +
# Enter your code below:
# enumerate() already yields (index, value) pairs, i.e. (hour, temperature),
# so the list of tuples can be built in one step.
weather_data = list(enumerate(recent_temperatures))
print(weather_data)
|
unit-2-control-flow/instructor-resources/09-unit-lab-2/pyth621-day2-lab-control-flow-solutions.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# +
# Request counts (x-axis) and measured durations in seconds for each platform.
# NOTE(review): the 50/300 suffixes presumably denote two load levels --
# confirm against the benchmark scripts.
labels = np.array([50, 100, 500, 1000, 2000])
openwhisk50 = np.array([4.49886679649, 4.13753795624, 27.4155349731, 57.0974588394, 91.8039851189])
k8s50 = np.array([0.653932094574, 0.487859964371, 0.854521036148, 3.60483503342, 15.9304699898])
openwhisk300 = np.array([8.24269580841, 9.32439804077, 43.89032197, 76.5126729012, 123.971750975])
k8s300 = np.array([2.52131199837, 4.8513469696, 29.1869990826, 130.197901011, 188.247896194])
# -
# Plot only the "50" series: OpenWhisk vs Spring Boot (on Kubernetes).
plt.figure(figsize=[25, 12])
#plt.xticks(labels)
plt.plot(labels, openwhisk50, label='Openwhisk')
plt.plot(labels, k8s50, label='Spring Boot')
plt.xlabel('Requests', fontsize = 24)
plt.ylabel('Duration, seconds', fontsize = 24)
plt.rc('xtick', labelsize=24)
plt.rc('ytick', labelsize=24)
plt.legend(prop={'size': 24})
plt.show()
|
scripts/results_final/compare_openwhisk_k8s_matrix/plot_notebook.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# CellStrat - Intro to Artificial Neural Networks
# Here we look at ANNs using TensorFlow
# We start with Perceptrons first, which are the basic building block of a neural network.
# Ref : “Hands-on Machine Learning with Scikit-Learn and TensorFlow” by Aurélien Géron
# -
# # Setup
# First, let's make sure this notebook works well in both python 2 and 3, import a few common modules, ensure MatplotLib plots figures inline and prepare a function to save the figures:
# +
# To support both python 2 and python 3
from __future__ import division, print_function, unicode_literals
# Common imports
import numpy as np
import os
# to make this notebook's output stable across runs
def reset_graph(seed=42):
    """Reset the TF default graph and reseed the TF and NumPy RNGs.

    NOTE(review): relies on `tf` being imported by a later cell; calling
    this before the TensorFlow import raises NameError.
    """
    tf.reset_default_graph()
    tf.set_random_seed(seed)
    np.random.seed(seed)
# To plot pretty figures
# %matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
# Consistent label/tick sizes for every figure in this notebook.
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
# Where to save the figures
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "ann"
def save_fig(fig_id, tight_layout=True):
    """Save the current matplotlib figure as a PNG under images/ann/.

    Parameters:
        fig_id: base file name (without extension) for the saved figure.
        tight_layout: if True (default), call plt.tight_layout() first.
    """
    path = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID, fig_id + ".png")
    # Create the target directory up front so savefig does not fail on a
    # fresh checkout where images/ann does not exist yet.
    os.makedirs(os.path.dirname(path), exist_ok=True)
    print("Saving figure", fig_id)
    if tight_layout:
        plt.tight_layout()
    plt.savefig(path, format='png', dpi=300)
# -
# # Perceptrons
# ### CellStrat - Let's build a classification model to classify the IRIS flower dataset using a Perceptron.
# This flower can belong to one of the three species Setosa, Versicolor and Virginica.
# The IRIS flower dataset has 150 samples of this flower with their features provided as sepal length, sepal width, petal length
# and petal width. Along with features per sample, the ground truth labels i.e. the species label is also provided in the dataset.
# We will first train a Perceptron with this dataset and then use the trained model to predict the class of sample instance
# having petal length 2 and petal width 0.5
#
# We will use Perceptron API from the SCIKIT-LEARN library
# +
import numpy as np
from sklearn.datasets import load_iris
from sklearn.linear_model import Perceptron

# Binary task on two features (petal length/width): is the sample Iris-Setosa?
iris = load_iris()
X = iris.data[:, (2, 3)]  # petal length, petal width
#X = iris.data[:, (0,1)]  # sepal length, sepal width
# NOTE(review): np.int is removed in NumPy >= 1.24; plain int would be safer.
y = (iris.target == 0).astype(np.int)

per_clf = Perceptron(penalty='l2', alpha=0.0001, fit_intercept=True, max_iter=5,
                     eta0=0.1, shuffle=True, random_state=42)
per_clf.fit(X, y)
# -
# Predict the class for a flower with petal length 2 and petal width 0.5.
y_pred = per_clf.predict([[2, 0.5]])
y_pred
# +
#CellStrat - the predicted class for petal length = 2 and petal width = 0.5 is the second class i.e. Versicolor. Note that :-
#array([0]) indicates setosa
#array([1]) indicates versicolor
#array([2]) indicates virginica
# +
#CellStrat - Next lets plot the classifier boundary between Setosa and non-Setosa data instances
# let's use numpy linspace to produce some numbers.
# https://docs.scipy.org/doc/numpy/reference/generated/numpy.linspace.html
# numpy.linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None)[source]¶
# Return evenly spaced numbers over a specified interval.
# Returns num evenly spaced samples, calculated over the interval [start, stop].
# the ravel command returns contiguous flattened array(1D array with all the input-array elements and with the same type as it).
# +
# Decision boundary as a line (slope a, intercept b) in feature space,
# derived from the learned weights (coef_) and bias (intercept_).
a = -per_clf.coef_[0][0] / per_clf.coef_[0][1]
b = -per_clf.intercept_ / per_clf.coef_[0][1]

axes = [0, 5, 0, 2]
# Dense grid over the plot area; predict every grid point to colour regions.
x0, x1 = np.meshgrid(
    np.linspace(axes[0], axes[1], 500).reshape(-1, 1),
    np.linspace(axes[2], axes[3], 200).reshape(-1, 1),
)
X_new = np.c_[x0.ravel(), x1.ravel()]
y_predict = per_clf.predict(X_new)
zz = y_predict.reshape(x0.shape)

plt.figure(figsize=(10, 4))
plt.plot(X[y==0, 0], X[y==0, 1], "bs", label="Not Iris-Setosa")
plt.plot(X[y==1, 0], X[y==1, 1], "yo", label="Iris-Setosa")
plt.plot([axes[0], axes[1]], [a * axes[0] + b, a * axes[1] + b], "k-", linewidth=3)

from matplotlib.colors import ListedColormap
custom_cmap = ListedColormap(['#9898ff', '#fafab0'])

plt.contourf(x0, x1, zz, cmap=custom_cmap, linewidth=5)
plt.xlabel("Petal length", fontsize=14)
plt.ylabel("Petal width", fontsize=14)
plt.legend(loc="lower right", fontsize=14)
plt.axis(axes)
save_fig("perceptron_iris_plot")
plt.show()
# -
# # Activation functions
# +
#CellStrat - Let's review the activation functions Logistic Sigmoid, ReLU, Step and tanh.
#we will also take derivative of the output of each activation function and plot it
#derivative = change in the output of activation function for a minor change in input value, divided by the quantum of change
#in the input value. i.e. (f(z2)-f(z1))/(z2-z1).
# +
def logit(z):
    """Logistic sigmoid, 1 / (1 + e^-z).

    (The notebook's name; the true logit function is this one's inverse.)
    """
    exp_neg = np.exp(-z)
    return 1 / (1 + exp_neg)

def relu(z):
    """Rectified linear unit: element-wise maximum of z and 0."""
    return np.maximum(z, 0)

def derivative(f, z, eps=0.000001):
    """Central-difference estimate of f'(z) with half-width eps."""
    step = 2 * eps
    return (f(z + eps) - f(z - eps)) / step
# +
z = np.linspace(-5, 5, 200)

plt.figure(figsize=(11,4))

# Left panel: the four activation functions themselves.
plt.subplot(121)
plt.plot(z, np.sign(z), "r-", linewidth=2, label="Step")
plt.plot(z, logit(z), "g--", linewidth=2, label="Logit")
plt.plot(z, np.tanh(z), "b-", linewidth=2, label="Tanh")
plt.plot(z, relu(z), "m-.", linewidth=2, label="ReLU")
plt.grid(True)
plt.legend(loc="center right", fontsize=14)
plt.title("Activation functions", fontsize=14)
plt.axis([-5, 5, -1.2, 1.2])

# Right panel: numerical derivatives of the same functions.
plt.subplot(122)
plt.plot(z, derivative(np.sign, z), "r-", linewidth=2, label="Step")
# Mark the step function's discontinuity at the origin.
plt.plot(0, 0, "ro", markersize=5)
plt.plot(0, 0, "rx", markersize=10)
plt.plot(z, derivative(logit, z), "g--", linewidth=2, label="Logit")
plt.plot(z, derivative(np.tanh, z), "b-", linewidth=2, label="Tanh")
plt.plot(z, derivative(relu, z), "m-.", linewidth=2, label="ReLU")
plt.grid(True)
#plt.legend(loc="center right", fontsize=14)
plt.title("Derivatives", fontsize=14)
plt.axis([-5, 5, -0.2, 1.2])

save_fig("activation_functions_plot")
plt.show()
# +
# Heaviside step function : https://en.wikipedia.org/wiki/Heaviside_step_function
# The Heaviside step function, or the unit step function, usually denoted by H or θ (but sometimes u, 1 or 𝟙),
# is a discontinuous function named after Oliver Heaviside (1850–1925), whose value is zero for negative argument
# and one for positive argument. It is an example of the general class of step functions, all of which can be
# represented as linear combinations of translations of this one.
#CellStrat - heaviside is a basic step function returning 0 or 1.
# mlp_xor below returns the output of a three layer MLP or multi-layer perceptron
# (Note that XOR requires three layer network, including one hidden layer. XOR cannot be implemented with just with single
# perceptron)
# +
def heaviside(z):
    """Unit step: 1 where z >= 0 and 0 elsewhere, in z's own dtype."""
    mask = z >= 0
    return mask.astype(z.dtype)

def sigmoid(z):
    """Logistic function mapping z into (0, 1)."""
    exp_neg = np.exp(-z)
    return 1 / (1 + exp_neg)

def mlp_xor(x1, x2, activation=heaviside):
    """Two-input XOR built from a three-layer perceptron network.

    Both hidden units see x1 + x2; the output unit fires when the OR
    unit is on but the AND unit is not -- i.e. exactly one input is on.
    """
    hidden_and = activation(x1 + x2 - 1.5)  # on only when both inputs are on
    hidden_or = activation(x1 + x2 - 0.5)   # on when at least one input is on
    return activation(hidden_or - hidden_and - 0.5)
# +
# Evaluate the XOR network on a grid with both activations.
x1s = np.linspace(-0.2, 1.2, 100)
x2s = np.linspace(-0.2, 1.2, 100)
x1, x2 = np.meshgrid(x1s, x2s)
z1 = mlp_xor(x1, x2, activation=heaviside)
z2 = mlp_xor(x1, x2, activation=sigmoid)

plt.figure(figsize=(10,4))

# Green squares mark inputs where XOR = 0; yellow triangles where XOR = 1.
plt.subplot(121)
plt.contourf(x1, x2, z1)
plt.plot([0, 1], [0, 1], "gs", markersize=20)
plt.plot([0, 1], [1, 0], "y^", markersize=20)
plt.title("Activation function: heaviside", fontsize=14)
plt.grid(True)

plt.subplot(122)
plt.contourf(x1, x2, z2)
plt.plot([0, 1], [0, 1], "gs", markersize=20)
plt.plot([0, 1], [1, 0], "y^", markersize=20)
plt.title("Activation function: sigmoid", fontsize=14)
plt.grid(True)
# +
#CellStrat - The outputs show that heaviside step function has a clear classifier boundary for XOR data (where only 1 of the 2
# features can be positive at a time). Whereas the sigmoid has a gradual transitional boundary as it follows a continuous S-curve
# transformation.
# +
#CellStrat - END OF PERCEPTRON and ACTIVATION FUNCTIONS chapter
# -
# # FNN for MNIST
# ## using tf.learn
# +
## from tensorflow.examples.tutorials.mnist import input_data
# mnist = input_data.read_data_sets("/tmp/data/")
# commented out above two lines
# added the following
import tensorflow as tf

# Suppress TF log output while downloading/loading MNIST.
old_v = tf.logging.get_verbosity()
tf.logging.set_verbosity(tf.logging.ERROR)

from tensorflow.examples.tutorials.mnist import input_data
# NOTE(review): one_hot=True makes the labels 2-D; the DNNClassifier cell
# below expects integer class ids (the commented-out loader above used the
# default one_hot=False) -- confirm this combination actually runs.
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
# -
X_train = mnist.train.images
X_test = mnist.test.images
y_train = mnist.train.labels.astype("int")
y_test = mnist.test.labels.astype("int")
# +
# updated code - latest | use if required or above didn't work!!
# train_data = mnist.train.images # Returns np.array
# train_labels = np.asarray(mnist.train.labels, dtype=np.int32)
# eval_data = mnist.test.images # Returns np.array
# eval_labels = np.asarray(mnist.test.labels, dtype=np.int32)
# tf.logging.set_verbosity(old_v)
# +
# SKCompat : https://www.tensorflow.org/api_docs/python/tf/contrib/learn/SKCompat
# Defined in tensorflow/contrib/learn/python/learn/estimators/estimator.py.
# Scikit learn wrapper for TensorFlow Learn Estimator.
# the following code trains a DNN for classification with two hidden layers (one with 300
# neurons, and the other with 100 neurons) and a softmax output layer with 10 neurons.
# Under the hood, the DNNClassifier class creates all the neuron layers, based on the ReLU activation
# function (we can change this by setting the activation_fn hyperparameter). The output layer relies on
# the softmax function, and the cost function is cross entropy.
# +
import tensorflow as tf

config = tf.contrib.learn.RunConfig(tf_random_seed=42)  # not shown in the config

# Two hidden ReLU layers (300, 100) + softmax output over the 10 digits.
feature_cols = tf.contrib.learn.infer_real_valued_columns_from_input(X_train)
dnn_clf = tf.contrib.learn.DNNClassifier(hidden_units=[300,100], n_classes=10,
                                         feature_columns=feature_cols, config=config)
dnn_clf = tf.contrib.learn.SKCompat(dnn_clf)  # if TensorFlow >= 1.1
dnn_clf.fit(X_train, y_train, batch_size=50, steps=40000)
# +
from sklearn.metrics import accuracy_score

# SKCompat's predict() returns a dict with 'classes' and 'probabilities'.
y_pred = dnn_clf.predict(X_test)
accuracy_score(y_test, y_pred['classes'])
# +
from sklearn.metrics import log_loss

y_pred_proba = y_pred['probabilities']
log_loss(y_test, y_pred_proba)
# -
# ## Using plain TensorFlow
# +
import tensorflow as tf

# Network dimensions for the hand-rolled MNIST classifier.
n_inputs = 28*28  # MNIST
n_hidden1 = 300
n_hidden2 = 100
n_outputs = 10
# +
reset_graph()

# Placeholders: X holds a batch of flattened images, y the integer labels.
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.int64, shape=(None), name="y")
# -
def neuron_layer(X, n_neurons, name, activation=None):
    """Build one fully connected layer of n_neurons units on top of X.

    Weights are drawn from a truncated normal whose stddev shrinks with
    the fan-in (keeps activation variance stable); biases start at zero.
    Returns activation(X @ W + b), or the raw affine output when no
    activation is given.
    """
    with tf.name_scope(name):
        fan_in = int(X.get_shape()[1])
        stddev = 2 / np.sqrt(fan_in)
        init = tf.truncated_normal((fan_in, n_neurons), stddev=stddev)
        W = tf.Variable(init, name="kernel")
        b = tf.Variable(tf.zeros([n_neurons]), name="bias")
        Z = tf.matmul(X, W) + b
        return Z if activation is None else activation(Z)
# Wire the network: two ReLU hidden layers feeding a linear output layer
# (softmax is folded into the loss below, so `logits` stays linear).
with tf.name_scope("dnn"):
    hidden1 = neuron_layer(X, n_hidden1, name="hidden1",
                           activation=tf.nn.relu)
    hidden2 = neuron_layer(hidden1, n_hidden2, name="hidden2",
                           activation=tf.nn.relu)
    logits = neuron_layer(hidden2, n_outputs, name="outputs")

# Cross-entropy computed straight from logits.
with tf.name_scope("loss"):
    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y,
                                                              logits=logits)
    loss = tf.reduce_mean(xentropy, name="loss")
# +
learning_rate = 0.01

with tf.name_scope("train"):
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    training_op = optimizer.minimize(loss)
# -
# Accuracy: fraction of samples whose true label is the top-1 logit.
with tf.name_scope("eval"):
    correct = tf.nn.in_top_k(logits, y, 1)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))

init = tf.global_variables_initializer()
saver = tf.train.Saver()

n_epochs = 40
batch_size = 50
# Train, reporting train/test accuracy each epoch, then checkpoint the model.
with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        for iteration in range(mnist.train.num_examples // batch_size):
            X_batch, y_batch = mnist.train.next_batch(batch_size)
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        # Train accuracy is measured on the last mini-batch only.
        acc_train = accuracy.eval(feed_dict={X: X_batch, y: y_batch})
        acc_test = accuracy.eval(feed_dict={X: mnist.test.images,
                                            y: mnist.test.labels})
        print(epoch, "Train accuracy:", acc_train, "Test accuracy:", acc_test)
    save_path = saver.save(sess, "./my_model_final.ckpt")

# Reload the checkpoint and classify the first 20 test images.
with tf.Session() as sess:
    saver.restore(sess, "./my_model_final.ckpt")  # or better, use save_path
    X_new_scaled = mnist.test.images[:20]
    Z = logits.eval(feed_dict={X: X_new_scaled})
    y_pred = np.argmax(Z, axis=1)

print("Predicted classes:", y_pred)
print("Actual classes: ", mnist.test.labels[:20])
# +
from IPython.display import clear_output, Image, display, HTML

def strip_consts(graph_def, max_const_size=32):
    """Strip large constant values from graph_def."""
    strip_def = tf.GraphDef()
    for n0 in graph_def.node:
        n = strip_def.node.add()
        n.MergeFrom(n0)
        if n.op == 'Const':
            tensor = n.attr['value'].tensor
            size = len(tensor.tensor_content)
            if size > max_const_size:
                # Replace the raw bytes with a short placeholder so the
                # serialized graph stays small enough to render inline.
                tensor.tensor_content = b"<stripped %d bytes>"%size
    return strip_def
def show_graph(graph_def, max_const_size=32):
    """Visualize TensorFlow graph.

    Renders the (constant-stripped) graph in an iframe via the hosted
    tf-graph-basic TensorBoard component.
    """
    if hasattr(graph_def, 'as_graph_def'):
        # Accept either a tf.Graph or an already-extracted GraphDef.
        graph_def = graph_def.as_graph_def()
    strip_def = strip_consts(graph_def, max_const_size=max_const_size)
    code = """
        <script>
          function load() {{
            document.getElementById("{id}").pbtxt = {data};
          }}
        </script>
        <link rel="import" href="https://tensorboard.appspot.com/tf-graph-basic.build.html" onload=load()>
        <div style="height:600px">
          <tf-graph-basic id="{id}"></tf-graph-basic>
        </div>
    """.format(data=repr(str(strip_def)), id='graph'+str(np.random.rand()))

    # Bug fix: the inner document's double quotes must be entity-escaped
    # before being embedded in the srcdoc attribute.  The previous code
    # replaced '"' with '"' -- a no-op (the original &quot; replacement
    # had been mangled by HTML-entity decoding) that produced broken HTML.
    iframe = """
        <iframe seamless style="width:1200px;height:620px;border:0" srcdoc="{}"></iframe>
    """.format(code.replace('"', '&quot;'))
    display(HTML(iframe))
# -
show_graph(tf.get_default_graph())
# ## Using `dense()` instead of `neuron_layer()`
# Note: the book uses `tensorflow.contrib.layers.fully_connected()` rather than `tf.layers.dense()` (which did not exist when this chapter was written). It is now preferable to use `tf.layers.dense()`, because anything in the contrib module may change or be deleted without notice. The `dense()` function is almost identical to the `fully_connected()` function, except for a few minor differences:
# * several parameters are renamed: `scope` becomes `name`, `activation_fn` becomes `activation` (and similarly the `_fn` suffix is removed from other parameters such as `normalizer_fn`), `weights_initializer` becomes `kernel_initializer`, etc.
# * the default `activation` is now `None` rather than `tf.nn.relu`.
# * a few more differences are presented in chapter 11.
# Same dimensions as before, rebuilt for the tf.layers.dense() version.
n_inputs = 28*28  # MNIST
n_hidden1 = 300
n_hidden2 = 100
n_outputs = 10
# +
reset_graph()

X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.int64, shape=(None), name="y")
# -
# Same architecture as before, but tf.layers.dense() replaces the
# hand-written neuron_layer (dense() creates and initialises the variables).
with tf.name_scope("dnn"):
    hidden1 = tf.layers.dense(X, n_hidden1, name="hidden1",
                              activation=tf.nn.relu)
    hidden2 = tf.layers.dense(hidden1, n_hidden2, name="hidden2",
                              activation=tf.nn.relu)
    logits = tf.layers.dense(hidden2, n_outputs, name="outputs")

with tf.name_scope("loss"):
    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
    loss = tf.reduce_mean(xentropy, name="loss")
# +
learning_rate = 0.01

with tf.name_scope("train"):
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    training_op = optimizer.minimize(loss)
# -
with tf.name_scope("eval"):
    correct = tf.nn.in_top_k(logits, y, 1)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))

init = tf.global_variables_initializer()
saver = tf.train.Saver()
# +
n_epochs = 20
# Renamed from `n_batches`: the loop below reads `batch_size`, so the old
# name was never used and the loop silently picked up the stale
# `batch_size = 50` from the earlier section of the notebook.
batch_size = 50

# Train for 20 epochs, logging accuracy per epoch, then save a checkpoint.
with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        for iteration in range(mnist.train.num_examples // batch_size):
            X_batch, y_batch = mnist.train.next_batch(batch_size)
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        # Train accuracy is measured on the last mini-batch only.
        acc_train = accuracy.eval(feed_dict={X: X_batch, y: y_batch})
        acc_test = accuracy.eval(feed_dict={X: mnist.test.images, y: mnist.test.labels})
        print(epoch, "Train accuracy:", acc_train, "Test accuracy:", acc_test)

    save_path = saver.save(sess, "./my_model_final.ckpt")
# -
show_graph(tf.get_default_graph())
|
deeplearning/perceptron/dl_python_perceptron_3.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# http://scikit-learn.org/stable/modules/preprocessing.html
# +
from sklearn import preprocessing
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline

# Toy 3x3 feature matrix used to demonstrate standardisation.
X = np.array([[ 1., -1., 2.], [ 2., 0., 0.],[ 0., 1., -1.]])
# Statistics over ALL entries (scale() below works per-column instead).
print('The mean is : %f' % X.mean())
print('The var is : %f' % X.var())
print('The sd is : %f' % X.std())
X
# -
# In practice we often ignore the shape of the distribution and just transform the data to center it by removing the mean value of each feature, then scale it by dividing non-constant features by their standard deviation.
#
# The function scale provides a quick and easy way to perform this operation on a single array-like dataset:
plt.imshow(X)
# scale() standardises each column to zero mean and unit variance.
X_scaled = preprocessing.scale(X)
print('The mean is : %f' % X_scaled.mean())
print('The var is : %f' % X_scaled.var())
print('The sd is : %f' % X_scaled.std())
X_scaled
plt.imshow(X_scaled)
|
Scikit-learn/Preprocessing - standardisation, mean removal and variance scaling.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="yHG6wUbvifuX"
# # Forecasting with machine learning
# + [markdown] id="vidayERjaO5q"
# ## Setup
# + id="gqWabzlJ63nL"
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
keras = tf.keras
# + id="cg1hfKCPldZG"
def plot_series(time, series, format="-", start=0, end=None, label=None):
    """Plot series against time on the current axes, optionally slicing
    the [start:end] range and adding a legend entry when label is given.
    """
    plt.plot(time[start:end], series[start:end], format, label=label)
    plt.xlabel("Time")
    plt.ylabel("Value")
    if label:
        plt.legend(fontsize=14)
    plt.grid(True)
def trend(time, slope=0):
    """Linear trend: the time axis scaled by slope (0 by default = flat)."""
    return time * slope
def seasonal_pattern(season_time):
    """Just an arbitrary pattern, you can change it if you wish"""
    cosine_part = np.cos(season_time * 2 * np.pi)  # used while season_time < 0.4
    decay_part = 1 / np.exp(3 * season_time)       # exponential decay afterwards
    return np.where(season_time < 0.4, cosine_part, decay_part)

def seasonality(time, period, amplitude=1, phase=0):
    """Repeats the same pattern at each period"""
    position_in_cycle = ((time + phase) % period) / period
    return amplitude * seasonal_pattern(position_in_cycle)
def white_noise(time, noise_level=1, seed=None):
    """Gaussian noise, one sample per time step, scaled by noise_level.

    A seeded RandomState makes the draw reproducible when seed is given.
    """
    generator = np.random.RandomState(seed)
    return generator.randn(len(time)) * noise_level
# + id="iL2DDjV3lel6"
# Synthetic daily series over 4 years: baseline + trend + yearly seasonality.
time = np.arange(4 * 365 + 1)
slope = 0.05
baseline = 10
amplitude = 40
series = baseline + trend(time, slope) + seasonality(time, period=365, amplitude=amplitude)

# Seeded noise keeps the series reproducible across runs.
noise_level = 5
noise = white_noise(time, noise_level, seed=42)

series += noise

plt.figure(figsize=(10, 6))
plot_series(time, series)
plt.show()
# + [markdown] id="ViWVB9qd8OIR"
# ## Forecasting with Machine Learning
#
# First, we will train a model to forecast the next step given the previous 30 steps, therefore, we need to create a dataset of 30-step windows for training.
# + id="1tl-0BOKkEtk"
def window_dataset(series, window_size, batch_size=32,
                   shuffle_buffer=1000):
    """Build a shuffled tf.data pipeline of (window, next_value) pairs.

    Each example is `window_size` consecutive values as features with the
    value immediately after the window as the target.
    """
    dataset = tf.data.Dataset.from_tensor_slices(series)
    # window_size + 1 so the last element of each window becomes the label.
    dataset = dataset.window(window_size + 1, shift=1, drop_remainder=True)
    dataset = dataset.flat_map(lambda window: window.batch(window_size + 1))
    dataset = dataset.shuffle(shuffle_buffer)
    dataset = dataset.map(lambda window: (window[:-1], window[-1]))
    dataset = dataset.batch(batch_size).prefetch(1)
    return dataset
# + id="Zmp1JXKxk9Vb"
# Train/validation split at day 1000.
split_time = 1000
time_train = time[:split_time]
x_train = series[:split_time]
time_valid = time[split_time:]
x_valid = series[split_time:]
# + [markdown] id="T1IvwAFn8OIc"
# ### Linear Model
# + id="ieOKdcEQ0A6k"
keras.backend.clear_session()
tf.random.set_seed(42)  # do we need to set a random no for new session?
np.random.seed(42)

window_size = 30
train_set = window_dataset(x_train, window_size)  # here we just use the function blindly :)
valid_set = window_dataset(x_valid, window_size)

# A single Dense(1) layer = a linear model over the last 30 values.
model = keras.models.Sequential([
    keras.layers.Dense(1, input_shape=[window_size])
])
optimizer = keras.optimizers.SGD(lr=1e-5, momentum=0.9)
model.compile(loss=keras.losses.Huber(),  # what is huber? quadratic for small errors but linear for large errors (MAE) absolute error are large
              optimizer=optimizer,  # try other like Adam rmsprop
              metrics=["mae"])
model.fit(train_set, epochs=100, validation_data=valid_set)
# + id="N3N8AGRM8OIc"
keras.backend.clear_session()  # do we need to set a random no for new session?
tf.random.set_seed(42)
np.random.seed(42)

window_size = 30
train_set = window_dataset(x_train, window_size)

model = keras.models.Sequential([
    keras.layers.Dense(1, input_shape=[window_size])
])
# Learning-rate finder: grow the lr exponentially each epoch (x10 every
# 30 epochs from 1e-6) and pick a stable value from the loss curve below.
lr_schedule = keras.callbacks.LearningRateScheduler(
    lambda epoch: 1e-6 * 10**(epoch / 30))  # what is the logic here?
optimizer = keras.optimizers.SGD(lr=1e-6, momentum=0.9)
model.compile(loss=keras.losses.Huber(),
              optimizer=optimizer,
              metrics=["mae"])
history = model.fit(train_set, epochs=100, callbacks=[lr_schedule])  # call back here but why validation is not here?
# +
# Loss as a function of learning rate, log-scaled x-axis.
plt.semilogx(history.history["lr"], history.history["loss"])
plt.axis([1e-6, 1e-3, 0, 20])
# + id="uMNwyIFE8OIf"
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)

window_size = 30
train_set = window_dataset(x_train, window_size)
valid_set = window_dataset(x_valid, window_size)

model = keras.models.Sequential([
    keras.layers.Dense(1, input_shape=[window_size])
])
optimizer = keras.optimizers.SGD(lr=1e-5, momentum=0.9)
model.compile(loss=keras.losses.Huber(),
              optimizer=optimizer,
              metrics=["mae"])
# Stop once validation loss has not improved for 10 consecutive epochs.
early_stopping = keras.callbacks.EarlyStopping(patience=10)
model.fit(train_set, epochs=500,
          validation_data=valid_set,
          callbacks=[early_stopping])
# + id="_eaAX9g_jS5W"
def model_forecast(model, series, window_size):
    """Run `model` over every length-`window_size` sliding window of `series`.

    Builds an inference-only tf.data pipeline (no shuffling — prediction
    order must match the series order) and returns the stacked predictions.
    """
    windows = (
        tf.data.Dataset.from_tensor_slices(series)
        .window(window_size, shift=1, drop_remainder=True)
        .flat_map(lambda window: window.batch(window_size))
        .batch(32)
        .prefetch(1)
    )
    return model.predict(windows)
# + id="FnIWROQ08OIj"
# Forecast the validation period. The input starts `window_size` steps
# *before* split_time so the first window ends exactly at split_time and the
# first prediction aligns with x_valid[0]; the slice stops at -1 so the last
# window predicts the final validation point rather than one step beyond it.
# model.predict returns shape (n_windows, 1); [:, 0] flattens to a vector.
lin_forecast = model_forecast(model, series[split_time - window_size:-1], window_size)[:, 0]
# + id="xd7Tj_fA8OIk"
lin_forecast.shape  # 1-D: one prediction per validation time step
# -
type(lin_forecast)
x_valid.shape
# + id="F-nftslfgQJs"
plt.figure(figsize=(10, 6))
plot_series(time_valid, x_valid)
plot_series(time_valid, lin_forecast)
# + id="W4E_jXktf7iv"
keras.metrics.mean_absolute_error(x_valid, lin_forecast).numpy()
# + [markdown] id="9nEM33dZ8OIp"
# ### Dense Model Forecasting
# Lets see if we can we do better with additional layers?
# + id="RhGTv4G_8OIp"
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
window_size = 30
train_set = window_dataset(x_train, window_size)
# Two hidden ReLU layers on top of the same windowed inputs.
model = keras.models.Sequential([
    keras.layers.Dense(10, activation="relu", input_shape=[window_size]),
    keras.layers.Dense(10, activation="relu"),
    keras.layers.Dense(1)
])
# LR finder again: lr = 1e-7 * 10**(epoch/20), i.e. x10 every 20 epochs.
lr_schedule = keras.callbacks.LearningRateScheduler(
    lambda epoch: 1e-7 * 10**(epoch / 20))
optimizer = keras.optimizers.SGD(lr=1e-7, momentum=0.9)
model.compile(loss=keras.losses.Huber(),
              optimizer=optimizer,
              metrics=["mae"])
# No validation data: the LR sweep only inspects the training loss.
history = model.fit(train_set, epochs=100, callbacks=[lr_schedule])
# + id="5g-nC_em8OIq"
plt.semilogx(history.history["lr"], history.history["loss"])
# Axis bounds simply frame the swept LR range and clip the loss for readability;
# the chosen rate comes from where the curve drops fastest, not from intuition.
plt.axis([1e-7, 5e-3, 0, 30])
# -
# Optional code: inspect the recorded training metrics as a DataFrame.
import pandas as pd
history.history.keys()
losses = pd.DataFrame(history.history)
losses['mae'].plot()
# model.history holds the same History object as the `history` variable above.
model.history.history.keys()
losses = pd.DataFrame(model.history.history)
losses['mae'].plot()
# Optional code
# Optional code
# + id="B7t0VrCH8OIr"
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
window_size = 30
train_set = window_dataset(x_train, window_size)
valid_set = window_dataset(x_valid, window_size)
model = keras.models.Sequential([
    keras.layers.Dense(10, activation="relu", input_shape=[window_size]),
    keras.layers.Dense(10, activation="relu"),
    keras.layers.Dense(1)
])
optimizer = keras.optimizers.SGD(lr=1e-5, momentum=0.9)  # rate picked from the LR plot
model.compile(loss=keras.losses.Huber(),
              optimizer=optimizer,
              metrics=["mae"])
# Stop when validation loss stops improving for 10 consecutive epochs.
early_stopping = keras.callbacks.EarlyStopping(patience=10)
model.fit(train_set, epochs=500,
          validation_data=valid_set,  # validation set drives early stopping here
          callbacks=[early_stopping])
# + id="RqQbX6DZ8OIu"
# Start window_size steps before split_time so predictions align with x_valid
# (the first window must *end* at split_time). The overlap with training data
# is only the input context, not leaked targets, so it is harmless.
dense_forecast = model_forecast(
    model,
    series[split_time - window_size:-1],
    window_size)[:, 0]
# + id="98zwAuIo8OIv"
plt.figure(figsize=(10, 6))
plot_series(time_valid, x_valid)
plot_series(time_valid, dense_forecast)
# + id="EgkELN-58OIw"
# MAE improves slightly over the linear model; inspect the plot too, since a
# lower average error can still hide systematic lag or bias.
keras.metrics.mean_absolute_error(x_valid, dense_forecast).numpy()
# -
|
4. Time Series/Udacity/04_forecasting_with_machine_learning.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# <img src="Logo.png" width="100" align="left"/>
#
# # <center> Unit 3 Project </center>
# # <center> Third section : supervised task </center>
#
# In this notebook you will be building and training a supervised learning model to classify your data.
# For this task we will be using another classification model "The random forests" model.
#
# Steps for this task:
# 1. Load the already clustered dataset
# 2. Take into consideration that in this task we will not be using the already added column "Cluster"
# 3. Split your data.
# 3. Build your model using the SKlearn RandomForestClassifier class
# 4. classify your data and test the performance of your model
# 5. Evaluate the model ( accepted models should have at least an accuracy of 86%). Play with hyper parameters and provide a report about that.
# 6. Provide evidence on the quality of your model (not overfitted good metrics)
# 7. Create a new test dataset that contains the testset + an additional column called "predicted_class" stating the class predicted by your random forest classifier for each data point of the test set.
# ## 1. Load the data and split the data:
from sklearn.ensemble import RandomForestClassifier
import pandas as pd
from sklearn.model_selection import train_test_split
# To-Do: load the data
df = pd.read_csv('HepatitisCdata.csv')
df.head()
# +
# To-Do : keep only the columns to be used : all features except ID, cluster
# The target here is the Category column
# Do not forget to split your data (this is a classification task)
# test set size should be 20% of the data
# NOTE(review): in the public HepatitisC dataset 'Sex' is usually a string
# column and several lab columns contain NaNs; RandomForestClassifier needs
# numeric, NaN-free input — confirm this CSV was encoded/imputed upstream.
df = df[['Category','Age', 'Sex', 'ALB', 'ALP', 'ALT', 'AST', 'BIL', 'CHE', 'CHOL', 'CREA', 'GGT', 'PROT']]
df.info()
# +
# Features: every column after Category. Target: Category itself.
x = df.iloc[:, 1:]
y = df.iloc[:, 0]
# 80/20 split with a fixed random_state for reproducibility.
X_train, X_test, Y_train, Y_test = train_test_split(x, y, test_size = 0.2, random_state = 0)
# -
# ## 2. Building the model and training and evaluate the performance:
# To-do build the model and train it
# note that you will be providing explanation about the hyper parameter tuning
# So you will be iterating a number of times before getting the desired performance
# `clf` is the fitted model object — the name `y_hat` conventionally means
# *predictions*, so it was renamed to avoid confusion. n_estimators/criterion
# are the hyper-parameters to iterate on for the tuning report.
clf = RandomForestClassifier(n_estimators=3, criterion='entropy', random_state=0)
clf.fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
# To-do : evaluate the model in terms of accuracy and precision
# Provide evidence that your model is not overfitting
from sklearn.metrics import precision_score, accuracy_score
# Train-vs-test accuracy gap is the overfitting evidence requested above;
# weighted precision handles the multi-class Category target.
print("train accuracy:", accuracy_score(Y_train, clf.predict(X_train)))
print("test precision (weighted):",
      precision_score(Y_test, Y_pred, average='weighted', zero_division=0))
accuracy_score(Y_test, Y_pred)
# > Hint : Perfect accuracy on the train set suggests an overfitted model. You should provide a detailed table of your hyper-parameter tuning runs, with a conclusion showing the model reaches at least 86% accuracy on the test set without signs of overfitting.
# ## 3. Create the summary test set with the additional predicted class column:
# In this part you need to add the predicted class as a column to your test dataframe and save this one
# +
# To-Do : create the complete test dataframe : it should contain all the feature column + the actual target and the ID as well
# .copy() is essential: `test_df = X_test` aliases the same object, so adding
# columns would silently mutate X_test too (and trigger SettingWithCopyWarning).
test_df = X_test.copy()
test_df.head()
# Y_test is already index-aligned with X_test — use it directly instead of
# re-selecting 'Category' from the full dataframe.
test_df["Category"] = Y_test
# -
test_df.head()
# To-Do : Add the predicted_class column (Y_pred is in test-set row order)
test_df["Predicted_class"] = Y_pred
# Cluster labels joined by index from the clustered file — assumes that file
# preserves the original row order/index; TODO confirm against its generator.
data = pd.read_csv('clustered_HepatitisC.csv')
test_df['Cluster'] = data['cluster']
test_df.head()
# > Make sure you have 16 columns in this test set
# Save the test set
test_df.to_csv("test_summary.csv")
|
3. Supervised task.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import imp, glob
import pandas as pd
import makesyllabus
from IPython.display import HTML
import requests
from pptx import Presentation
from pptx.util import Inches
imp.reload(makesyllabus)
# -
def scientistparts(fname):
    """Iterate over the ``# Heading`` sections of a scientist file.

    The file format looks like::

        # Name
        <NAME>
        # Description
        Words words words
        words words words
        # Sources
        sourcy source
        ...

    Each ``#`` line starts a new section; the lines under it are that
    section's content. Blank/whitespace-only lines are dropped, but content
    lines are NOT joined — callers like the Textbook handling treat each line
    as a separate entry.

    Yields:
        ``(item_name, [stripped non-blank content lines])`` in file order.
    """
    item_name, content = None, []
    # `with` guarantees the handle is closed; iterating over a bare open()
    # (as before) leaked it until garbage collection.
    with open(fname) as fh:
        for line in fh:
            if line.startswith('#'):
                if item_name is not None:
                    yield (item_name, [i for i in content if i.strip()])
                item_name = line.replace('#', '').strip()
                content = []
            else:
                content.append(line.strip())
    # Flush the final section (the loop only yields when it sees the *next* heading).
    if item_name is not None:
        yield (item_name, [i for i in content if i.strip()])
# +
# Exact title prefixes a scientist's "Textbook" entry must start with to match.
known_textbooks = ['Knight, Physics for Scientists and Engineers: A Strategic Approach with Modern Physics, 3rd Edition',
                   'Knight, College Physics: A Strategic Approach, 2nd Edition']

class Scientist:
    """One scientist parsed from a ``Scientists/*.txt`` file.

    Sections are kept as lists-of-lists (see ``__init__``) so both the old
    format (repeated ``# Textbook`` headings) and the new one (multiple lines
    under a single heading) are handled.
    """
    def __init__(self,fname):
        #self._scientistdict = dict(scientistparts(fname))
        # Can't quite do that because older format had you
        # putting in two #Textbook parts instead of just two
        # lines under #textbook, for instance.
        # So, everything in here is assumed to be a list. It's a little
        # inconvenient, but I think it's worth it in case people edit the
        # markdown/txt files by hand. OTOH, just saying "put in multiple lines"
        # would work and simplify this a bit.
        self._scientistdict = {}
        self._fname = fname
        # Accumulate repeated headings: each value is a list of content-line lists.
        for (k,v) in scientistparts(fname):
            if k in self._scientistdict:
                self._scientistdict[k].append(v)
            else:
                self._scientistdict[k] = [v]
        # First line of the first 'Name' section; raises KeyError if absent.
        self.name = self._scientistdict['Name'][0][0].strip()
    def matchestextbook(self,textbookname,verbose=False):
        """Return ``{'Chapter': int|None, 'Section': int|None}`` if any Textbook
        entry starts with ``textbookname``, else ``False``.

        NOTE(review): raises KeyError when the file has no Textbook section —
        callers are assumed to only pass files that have one; confirm.
        """
        # Should match title. For now, just match edition
        # If matches, return chapter and section
        # This is why I need a dropdown for "known" textbooks. And I need
        # to fix the others via a script.
        # Right now, we only know Knight 3rd edition (that's calc)
        # and Knight 2nd edition (that's alg.)
        for entry in self._scientistdict['Textbook']:
            for txt in entry:
                if txt.startswith(textbookname):
                    # Remainder after the title is ", Chapter N, Section M" style.
                    parts = txt.replace(textbookname,'').split(',')
                    chapter,section = None,None
                    for part in [p for p in parts if p]:
                        _part = part.lower().strip()
                        if _part.startswith('chapter'):
                            chapter = int(_part.replace('chapter','').strip())
                        elif _part.startswith('section'):
                            section = int(_part.replace('section','').strip())
                        else:
                            print('Unknown textbook part',part)
                    return({'Chapter':chapter,
                            'Section':section})
        return False
    def todf(self,chapter,section):
        """Render this scientist as a one-row DataFrame of HTML snippets.

        Photos become linked ``<img>`` tags, Sources get their URLs wrapped in
        ``<a>`` links, and every other section's lines are joined with spaces.
        """
        # See comments in __init__ but things may be nested one
        # level deeper than you expect.
        parts = {'Chapter':chapter,
                 'Section':section}
        for (k,v) in self._scientistdict.items():
            if k == 'Textbook':
                continue
            elif k == 'Photo':
                # Can be multiple photos.
                # Each gets a link to the original source of the photo, and we follow
                # it all up with a URL to download the slide.
                for photos in v:
                    for photo in photos:
                        this_entry = f'<a href="{photo}"><img src="{photo}" width="300"/></a>'
                        if k not in parts:
                            parts[k] = this_entry
                        else:
                            parts[k] = ' '.join([parts[k],this_entry])
            elif k == 'Sources':
                # Wrap it up in a link. Tokens starting with http become
                # "Link" anchors (a trailing period is trimmed off the URL).
                for sources in v:
                    for source in sources:
                        formatted = ''
                        for sourcepart in source.split():
                            if sourcepart.startswith('http'):
                                linkpart = sourcepart.strip()
                                if linkpart.endswith('.'):
                                    linkpart = linkpart[:-1]
                                formatted += f' <a href="{linkpart}">Link</a> '
                            else:
                                formatted += ' ' + sourcepart
                        if 'Sources' in parts:
                            parts['Sources'] += formatted
                        else:
                            parts['Sources'] = formatted
            else:
                # v looks like [['Myname']] or [['Description','Description','more description']]
                for entry in v:
                    this_entry = ' '.join(entry)
                    if k not in parts:
                        parts[k] = this_entry
                    else:
                        parts[k] = ' '.join([parts[k],this_entry])
        # Append a download link for the per-scientist slide deck that
        # maketextbook() saves under Textbooks/<name>.pptx.
        if 'Photo' in parts:
            slide_link = '<a href="Textbooks/{n}.pptx">Download Slide</a>'.format(
                n=parts['Name'],)
            parts['Photo'] = parts['Photo'] + ' ' + slide_link
        #for (k,v) in parts.items():
        #    print(f'{k}: {v}')
        df = pd.DataFrame(data=[parts])
        return df
# Parse every scientist file in the Scientists/ directory.
scientists = [Scientist(fname) for fname in glob.glob('Scientists/*.txt')]

def add_scientist_slide(prs,scientist,verbose=False):
    """Append one slide per photo of `scientist` to presentation `prs`.

    Side effects: downloads each photo over HTTP and overwrites the scratch
    file 'test.jpg' each time (the image is re-read into the slide before the
    next overwrite, so reuse is safe but not parallelizable).
    """
    for photos in scientist._scientistdict['Photo']:
        for photo in photos:
            img_path = 'test.jpg'
            if verbose:
                print(f'Grabbing image: {photo}')
            img_data = requests.get(photo).content
            with open(img_path, 'wb') as handler:
                handler.write(img_data)
            blank_slide_layout = prs.slide_layouts[5]  # title-only layout
            slide = prs.slides.add_slide(blank_slide_layout)
            shapes = slide.shapes
            title_shape = shapes.title
            title_shape.text = scientist.name
            top = Inches(1.75)
            left = Inches(0.5)
            height = Inches(5)
            pic = slide.shapes.add_picture(img_path, left, top, height=height)
            # Put the raw source file into the speaker notes for provenance.
            # NOTE(review): this open() is never closed — leak until GC.
            notes_slide = slide.notes_slide
            text_frame = notes_slide.notes_text_frame
            text_frame.text = open(scientist._fname).read()
def maketextbook(fname,*,textbookname,scientists,verbose=False):
    """Build the textbook DataFrame and the associated PPTX slide decks.

    Side effects: saves one deck per matching scientist with a photo
    (``Textbooks/<name>.pptx``) plus a combined deck for the whole textbook
    (``Textbooks/<textbookname>.pptx``).

    Args:
        fname: CSV with the textbook's Chapter/Section/Topics skeleton.
        textbookname: exact title prefix matched against Textbook entries.
        scientists: iterable of Scientist objects to consider.
        verbose: print progress while matching/downloading.

    Returns:
        DataFrame indexed by (Chapter, Section, Topics), HTML-ready.
    """
    textbook = pd.read_csv(fname)
    prs0 = Presentation() # The whole textbook. Individual slides made separately below.
    rows = [textbook]  # collect per-scientist rows; concatenated once at the end
    for scientist in scientists:
        match = scientist.matchestextbook(textbookname)
        if match:
            chapter, section = match['Chapter'], match['Section']
            if verbose:
                print('Adding', scientist._scientistdict['Name'])
            rows.append(scientist.todf(chapter=chapter, section=section))
            if 'Photo' in scientist._scientistdict:
                prs = Presentation()
                add_scientist_slide(prs0, scientist, verbose=verbose)
                add_scientist_slide(prs, scientist, verbose=verbose)
                prs.save('Textbooks/{n}.pptx'.format(n=scientist.name))
    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0; a
    # single concat is the supported replacement (and O(n) rather than O(n^2)).
    textbook = pd.concat(rows, ignore_index=True)
    # Fixed leading column order, then any extra columns the files introduced.
    cols = ['Chapter','Section','Topics','Photo','Name','Description','Sources']
    cols = cols + [i for i in textbook.columns.tolist() if i not in cols]
    textbook = textbook[cols]
    textbook = textbook.sort_values(by=['Chapter','Section','Topics','Description','Name'],
                                    ascending=[True, True, True, False, False])
    textbook = textbook.fillna('').set_index(['Chapter', 'Section','Topics'])
    # Pad header names with spaces so the rendered HTML columns come out wide.
    spaces = ' '*50
    textbook.rename(columns={'Photo':'Photo'+spaces,'Description':'Description'+spaces+spaces}, inplace=True)
    prs0.save(f'Textbooks/{textbookname}.pptx')
    return textbook
# Build the Knight 3rd-edition table and render it to HTML.
textbook = maketextbook('Textbooks/Knight3rdEdition.csv',
                        textbookname=known_textbooks[0],
                        scientists=scientists)
# -1 as a max_colwidth was deprecated in pandas 1.0 and later removed;
# None is the supported "no truncation" value.
pd.set_option('display.max_colwidth', None)
# `with` closes the file deterministically instead of relying on GC.
with open('knight.html', 'w') as f:
    f.write(textbook.to_html(escape=False))
HTML(textbook.to_html(escape=False))
# -
|
Refactoring.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## 第十章 LaTeX进阶
#
# ### 导言
#
# LaTeX作为一款文档排版系统,拥有众多文档类型,可用于制作科技论文、技术报告以及幻灯片等,除此之外,LaTeX还拥有包括编辑数学公式、制作图形与表格等功能,LaTeX用户可根据自身需要解锁LaTeX的各种用途。
#
# 添加程序源代码和算法伪代码对于科研报告往往是必要且有效的,因为代码可以展现计算机编程的思路和算法,可以供读者学习和使用。所以,能够学会添加简洁优美、整齐大方的源代码和伪代码是科研工作者的一项重要技能。对于科研工作者,在有些学术交流中,有一种非常重要的展现成果方式就是海报。LaTeX可以制作出优美简洁的海报,有很多模版可以方便制作者使用。另外,简历制作也是科研工作者经常需要用到的,同样LaTeX提供了很多好用的模版,可以方便其使用。
#
# 本章结合大多数科研工作者的总体需要,将主要介绍添加程序源代码、算法伪代码、海报制作和简历制作等内容。
# 【继续】[**10.1 添加程序源代码**](https://nbviewer.jupyter.org/github/xinychen/latex-cookbook/blob/main/chapter-10/section1.ipynb)
# ### License
#
# <div class="alert alert-block alert-danger">
# <b>This work is released under the MIT license.</b>
# </div>
|
chapter-10/section0.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] id="X4cRE8IbIrIV"
# If you're opening this Notebook on colab, you will probably need to install 🤗 Transformers and 🤗 Datasets. Uncomment the following cell and run it.
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="MOsHUjgdIrIW" outputId="f84a093e-147f-470e-aad9-80fb51193c8e"
# #! pip install git+https://github.com/huggingface/transformers.git
# #! pip install git+https://github.com/huggingface/datasets.git
# -
# If you're opening this notebook locally, make sure your environment has an install from the last version of those libraries.
#
# To be able to share your model with the community and generate results like the one shown in the picture below via the inference API, there are a few more steps to follow.
#
# First you have to store your authentication token from the Hugging Face website (sign up [here](https://huggingface.co/join) if you haven't already!) then uncomment the following cell and input your username and password (this only works on Colab, in a regular notebook, you need to do this in a terminal):
# +
from huggingface_hub import notebook_login
notebook_login()
# -
# Then you need to install Git-LFS and setup Git if you haven't already. Uncomment the following instructions and adapt with your name and email:
# +
# # !apt install git-lfs
# # !git config --global user.email "<EMAIL>"
# # !git config --global user.name "<NAME>"
# -
# Make sure your version of Transformers is at least 4.8.1 since the functionality was introduced in that version:
# +
import transformers
print(transformers.__version__)
# + [markdown] id="HFASsisvIrIb"
# You can find a script version of this notebook to fine-tune your model in a distributed fashion using multiple GPUs or TPUs [here](https://github.com/huggingface/transformers/tree/master/examples/question-answering).
# + [markdown] id="rEJBSTyZIrIb"
# # Fine-tuning a model on a question-answering task
# -
# In this notebook, we will see how to fine-tune one of the [🤗 Transformers](https://github.com/huggingface/transformers) model to a question answering task, which is the task of extracting the answer to a question from a given context. We will see how to easily load a dataset for these kinds of tasks and use the `Trainer` API to fine-tune a model on it.
#
# 
#
# **Note:** This notebook fine-tunes models that answer questions by selecting a substring of a context, not by generating new text.
# + [markdown] id="4RRkXuteIrIh"
# This notebook is built to run on any question answering task with the same format as SQUAD (version 1 or 2), with any model checkpoint from the [Model Hub](https://huggingface.co/models) as long as that model has a version with a token classification head and a fast tokenizer (check on [this table](https://huggingface.co/transformers/index.html#bigtable) if this is the case). It might just need some small adjustments if you decide to use a different dataset than the one used here. Depending on you model and the GPU you are using, you might need to adjust the batch size to avoid out-of-memory errors. Set those three parameters, then the rest of the notebook should run smoothly:
# + id="zVvslsfMIrIh"
# This flag is the difference between SQUAD v1 or 2 (if you're using another dataset, it indicates if impossible
# answers are allowed or not).
squad_v2 = False
model_checkpoint = "distilbert-base-uncased"
batch_size = 16
# + [markdown] id="whPRbBNbIrIl"
# ## Loading the dataset
# + [markdown] id="W7QYTpxXIrIl"
# We will use the [🤗 Datasets](https://github.com/huggingface/datasets) library to download the data and get the metric we need to use for evaluation (to compare our model to the benchmark). This can be easily done with the functions `load_dataset` and `load_metric`.
# + id="IreSlFmlIrIm"
from datasets import load_dataset, load_metric
# + [markdown] id="CKx2zKs5IrIq"
# For our example here, we'll use the [SQUAD dataset](https://rajpurkar.github.io/SQuAD-explorer/). The notebook should work with any question answering dataset provided by the 🤗 Datasets library. If you're using your own dataset defined from a JSON or csv file (see the [Datasets documentation](https://huggingface.co/docs/datasets/loading_datasets.html#from-local-files) on how to load them), it might need some adjustments in the names of the columns used.
# + colab={"base_uri": "https://localhost:8080/", "height": 270, "referenced_widgets": ["69caab03d6264fef9fc5649bffff5e20", "3f74532faa86412293d90d3952f38c4a", "50615aa59c7247c4804ca5cbc7945bd7", "fe962391292a413ca55dc932c4279fa7", "299f4b4c07654e53a25f8192bd1d7bbd", "ad04ed1038154081bbb0c1444784dcc2", "7c667ad22b5740d5a6319f1b1e3a8097", "<KEY>", "80e2943be35f46eeb24c8ab13faa6578", "de5956b5008d4fdba807bae57509c393", "<KEY>", "6c1db72efff5476e842c1386fadbbdba", "<KEY>", "d30a66df5c0145e79693e09789d96b81", "5fa26fc336274073abbd1d550542ee33", "2b34de08115d49d285def9269a53f484", "d426be871b424affb455aeb7db5e822e", "<KEY>", "<KEY>", "<KEY>", "d298eb19eeff453cba51c2804629d3f4", "a7204ade36314c86907c562e0a2158b8", "e35d42b2d352498ca3fc8530393786b2", "75103f83538d44abada79b51a1cec09e", "<KEY>", "051aa783ff9e47e28d1f9584043815f5", "<KEY>", "8ab9dfce29854049912178941ef1b289", "c9de740e007141958545e269372780a4", "<KEY>", "<KEY>", "<KEY>", "a14c3e40e5254d61ba146f6ec88eae25", "c4ffe6f624ce4e978a0d9b864544941a", "1aca01c1d8c940dfadd3e7144bb35718", "<KEY>", "<KEY>", "940d00556cb849b3a689d56e274041c2", "<KEY>", "<KEY>", "9a55087c85b74ea08b3e952ac1d73cbe", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "2ace4dc78e2f4f1492a181bcd63304e7", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "a71908883b064e1fbdddb547a8c41743", "2f5223f26c8541fc87e91d2205c39995"]} id="s_AY1ATSIrIq" outputId="fd0578d1-8895-443d-b56f-5908de9f1b6b"
datasets = load_dataset("squad_v2" if squad_v2 else "squad")
# + [markdown] id="RzfPtOMoIrIu"
# The `datasets` object itself is [`DatasetDict`](https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasetdict), which contains one key for the training, validation and test set.
# + id="GWiVUF0jIrIv" outputId="35e3ea43-f397-4a54-c90c-f2cf8d36873e"
datasets
# -
# We can see the training, validation and test sets all have a column for the context, the question and the answers to those questions.
# + [markdown] id="u3EtYfeHIrIz"
# To access an actual element, you need to select a split first, then give an index:
# + id="X6HrpprwIrIz" outputId="d7670bc0-42e4-4c09-8a6a-5c018ded7d95"
datasets["train"][0]
# -
# We can see the answers are indicated by their start position in the text (here at character 515) and their full text, which is a substring of the context as we mentioned above.
# + [markdown] id="WHUmphG3IrI3"
# To get a sense of what the data looks like, the following function will show some examples picked randomly in the dataset (automatically decoding the labels in passing).
# + id="i3j8APAoIrI3"
from datasets import ClassLabel, Sequence
import random
import pandas as pd
from IPython.display import display, HTML
def show_random_elements(dataset, num_examples=10):
    """Display `num_examples` distinct random rows of `dataset` as an HTML table.

    ClassLabel columns (and sequences of ClassLabel) are decoded from integer
    ids back to their human-readable names before display.
    """
    assert num_examples <= len(
        dataset
    ), "Can't pick more elements than there are in the dataset."
    # random.sample draws without replacement in one call, replacing the
    # original retry-until-unique loop (which had no bounded running time).
    picks = random.sample(range(len(dataset)), num_examples)
    df = pd.DataFrame(dataset[picks])
    for column, typ in dataset.features.items():
        if isinstance(typ, ClassLabel):
            df[column] = df[column].transform(lambda i: typ.names[i])
        elif isinstance(typ, Sequence) and isinstance(typ.feature, ClassLabel):
            df[column] = df[column].transform(
                lambda x: [typ.feature.names[i] for i in x]
            )
    display(HTML(df.to_html()))
# + id="SZy5tRB_IrI7" outputId="ba8f2124-e485-488f-8c0c-254f34f24f13"
show_random_elements(datasets["train"])
# + [markdown] id="n9qywopnIrJH"
# ## Preprocessing the training data
# + [markdown] id="YVx71GdAIrJH"
# Before we can feed those texts to our model, we need to preprocess them. This is done by a 🤗 Transformers `Tokenizer` which will (as the name indicates) tokenize the inputs (including converting the tokens to their corresponding IDs in the pretrained vocabulary) and put it in a format the model expects, as well as generate the other inputs that model requires.
#
# To do all of this, we instantiate our tokenizer with the `AutoTokenizer.from_pretrained` method, which will ensure:
#
# - we get a tokenizer that corresponds to the model architecture we want to use,
# - we download the vocabulary used when pretraining this specific checkpoint.
#
# That vocabulary will be cached, so it's not downloaded again the next time we run the cell.
# + id="eXNLu_-nIrJI"
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
# + [markdown] id="Vl6IidfdIrJK"
# The following assertion ensures that our tokenizer is a fast tokenizers (backed by Rust) from the 🤗 Tokenizers library. Those fast tokenizers are available for almost all models, and we will need some of the special features they have for our preprocessing.
# +
import transformers
assert isinstance(tokenizer, transformers.PreTrainedTokenizerFast)
# -
# You can check which type of models have a fast tokenizer available and which don't on the [big table of models](https://huggingface.co/transformers/index.html#bigtable).
# + [markdown] id="rowT4iCLIrJK"
# You can directly call this tokenizer on two sentences (one for the answer, one for the context):
# + id="a5hBlsrHIrJL" outputId="acdaa98a-a8cd-4a20-89b8-cc26437bbe90"
tokenizer("What is your name?", "My name is Sylvain.")
# -
# Depending on the model you selected, you will see different keys in the dictionary returned by the cell above. They don't matter much for what we're doing here (just know they are required by the model we will instantiate later), you can learn more about them in [this tutorial](https://huggingface.co/transformers/preprocessing.html) if you're interested.
#
# Now one specific thing for the preprocessing in question answering is how to deal with very long documents. We usually truncate them in other tasks, when they are longer than the model maximum sentence length, but here, removing part of the the context might result in losing the answer we are looking for. To deal with this, we will allow one (long) example in our dataset to give several input features, each of length shorter than the maximum length of the model (or the one we set as a hyper-parameter). Also, just in case the answer lies at the point we split a long context, we allow some overlap between the features we generate controlled by the hyper-parameter `doc_stride`:
max_length = 384  # The maximum length of a feature (question and context), in tokens
doc_stride = 128  # The authorized overlap between two parts of the context when splitting is needed.
# Let's find one long example in our dataset: the first whose tokenized
# question+context exceeds max_length (384), so truncation/splitting matters.
for i, example in enumerate(datasets["train"]):
    if len(tokenizer(example["question"], example["context"])["input_ids"]) > 384:
        break
example = datasets["train"][i]
# Without any truncation, we get the following length for the input IDs:
len(tokenizer(example["question"], example["context"])["input_ids"])
# Now, if we just truncate, we will lose information (and possibly the answer to our question):
len(
tokenizer(
example["question"],
example["context"],
max_length=max_length,
truncation="only_second",
)["input_ids"]
)
# Note that we never want to truncate the question, only the context — hence the `only_second` truncation option picked above. Now, our tokenizer can automatically return us a list of features capped by a certain maximum length, with the overlap we talked above, we just have to tell it with `return_overflowing_tokens=True` and by passing the stride:
tokenized_example = tokenizer(
example["question"],
example["context"],
max_length=max_length,
truncation="only_second",
return_overflowing_tokens=True,
stride=doc_stride,
)
# Now we don't have one list of `input_ids`, but several:
[len(x) for x in tokenized_example["input_ids"]]
# And if we decode them, we can see the overlap:
for x in tokenized_example["input_ids"][:2]:
print(tokenizer.decode(x))
# Now this will give us some work to properly treat the answers: we need to find in which of those features the answer actually is, and where exactly in that feature. The models we will use require the start and end positions of these answers in the tokens, so we will also need to to map parts of the original context to some tokens. Thankfully, the tokenizer we're using can help us with that by returning an `offset_mapping`:
tokenized_example = tokenizer(
example["question"],
example["context"],
max_length=max_length,
truncation="only_second",
return_overflowing_tokens=True,
return_offsets_mapping=True,
stride=doc_stride,
)
print(tokenized_example["offset_mapping"][0][:100])
# This gives, for each index of our input IDS, the corresponding start and end character in the original text that gave our token. The very first token (`[CLS]`) has (0, 0) because it doesn't correspond to any part of the question/answer, then the second token is the same as the characters 0 to 3 of the question:
first_token_id = tokenized_example["input_ids"][0][1]
offsets = tokenized_example["offset_mapping"][0][1]
print(
tokenizer.convert_ids_to_tokens([first_token_id])[0],
example["question"][offsets[0] : offsets[1]],
)
# So we can use this mapping to find the position of the start and end tokens of our answer in a given feature. We just have to distinguish which parts of the offsets correspond to the question and which part correspond to the context, this is where the `sequence_ids` method of our `tokenized_example` can be useful:
sequence_ids = tokenized_example.sequence_ids()
print(sequence_ids)
# It returns `None` for the special tokens, then 0 or 1 depending on whether the corresponding token comes from the first sequence passed (the question) or the second (the context). Now with all of this, we can find the first and last token of the answer in one of our input features (or determine that the answer is not in this feature):
# +
# Map the character-level answer span onto token positions for this feature.
answers = example["answers"]
start_char = answers["answer_start"][0]
end_char = start_char + len(answers["text"][0])
# Start token index of the current span in the text: skip [CLS] and the
# question tokens (sequence_ids == 1 marks context tokens).
token_start_index = 0
while sequence_ids[token_start_index] != 1:
    token_start_index += 1
# End token index of the current span in the text: walk back from the end
# past any padding/special tokens.
token_end_index = len(tokenized_example["input_ids"][0]) - 1
while sequence_ids[token_end_index] != 1:
    token_end_index -= 1
# Detect if the answer is out of the span (in which case this feature is labeled with the CLS index).
offsets = tokenized_example["offset_mapping"][0]
if (
    offsets[token_start_index][0] <= start_char
    and offsets[token_end_index][1] >= end_char
):
    # Move the token_start_index and token_end_index to the two ends of the answer:
    # advance until we step *past* the answer's first character, then back up one.
    # Note: we could go after the last offset if the answer is the last word (edge case).
    while (
        token_start_index < len(offsets) and offsets[token_start_index][0] <= start_char
    ):
        token_start_index += 1
    start_position = token_start_index - 1
    # Symmetrically, retreat until just before the answer's last character.
    while offsets[token_end_index][1] >= end_char:
        token_end_index -= 1
    end_position = token_end_index + 1
    print(start_position, end_position)
else:
    print("The answer is not in this feature.")
# -
# Decode the located token span and compare with the reference answer text.
print(
    tokenizer.decode(
        tokenized_example["input_ids"][0][start_position : end_position + 1]
    )
)
print(answers["text"][0])
# For this notebook to work with any kind of models, we need to account for the special case where the model expects padding on the left (in which case we switch the order of the question and the context):
pad_on_right = tokenizer.padding_side == "right"
# Now let's put everything together in one function we will apply to our training set. In the case of impossible answers (the answer is in another feature given by an example with a long context), we set the cls index for both the start and end position. We could also simply discard those examples from the training set if the flag `allow_impossible_answers` is `False`. Since the preprocessing is already complex enough as it is, we've kept is simple for this part.
def prepare_train_features(examples):
    """Tokenize a batch of QA examples into labeled training features.

    Long contexts are split into several overlapping features (controlled by
    ``doc_stride``); each feature receives token-level ``start_positions`` /
    ``end_positions`` for the answer, or the CLS index when the answer does
    not lie inside that feature's span. Relies on the module-level
    ``tokenizer``, ``pad_on_right``, ``max_length`` and ``doc_stride``.
    """
    # Tokenize our examples with truncation and padding, but keep the overflows using a stride. This results
    # in one example possible giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples["question" if pad_on_right else "context"],
        examples["context" if pad_on_right else "question"],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_length,
        stride=doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )
    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")
    # The offset mappings will give us a map from token to character position in the original context. This will
    # help us compute the start_positions and end_positions.
    offset_mapping = tokenized_examples.pop("offset_mapping")
    # Let's label those examples!
    tokenized_examples["start_positions"] = []
    tokenized_examples["end_positions"] = []
    for i, offsets in enumerate(offset_mapping):
        # We will label impossible answers with the index of the CLS token.
        input_ids = tokenized_examples["input_ids"][i]
        cls_index = input_ids.index(tokenizer.cls_token_id)
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        answers = examples["answers"][sample_index]
        # If no answers are given, set the cls_index as answer.
        if len(answers["answer_start"]) == 0:
            tokenized_examples["start_positions"].append(cls_index)
            tokenized_examples["end_positions"].append(cls_index)
        else:
            # Start/end character index of the answer in the text.
            start_char = answers["answer_start"][0]
            end_char = start_char + len(answers["text"][0])
            # Start token index of the current span in the text.
            # (The context lives in segment 1 when the question comes first,
            # else in segment 0.)
            token_start_index = 0
            while sequence_ids[token_start_index] != (1 if pad_on_right else 0):
                token_start_index += 1
            # End token index of the current span in the text.
            token_end_index = len(input_ids) - 1
            while sequence_ids[token_end_index] != (1 if pad_on_right else 0):
                token_end_index -= 1
            # Detect if the answer is out of the span (in which case this feature is labeled with the CLS index).
            if not (
                offsets[token_start_index][0] <= start_char
                and offsets[token_end_index][1] >= end_char
            ):
                tokenized_examples["start_positions"].append(cls_index)
                tokenized_examples["end_positions"].append(cls_index)
            else:
                # Otherwise move the token_start_index and token_end_index to the two ends of the answer.
                # Note: we could go after the last offset if the answer is the last word (edge case).
                while (
                    token_start_index < len(offsets)
                    and offsets[token_start_index][0] <= start_char
                ):
                    token_start_index += 1
                tokenized_examples["start_positions"].append(token_start_index - 1)
                while offsets[token_end_index][1] >= end_char:
                    token_end_index -= 1
                tokenized_examples["end_positions"].append(token_end_index + 1)
    return tokenized_examples
# + [markdown] id="0lm8ozrJIrJR"
# This function works with one or several examples. In the case of several examples, the tokenizer will return a list of lists for each key:
# + id="-b70jh26IrJS" outputId="acd3a42d-985b-44ee-9daa-af5d944ce1d9"
# Quick sanity check of the preprocessing on the first five training examples.
features = prepare_train_features(datasets["train"][:5])
# + [markdown] id="zS-6iXTkIrJT"
# To apply this function on all the sentences (or pairs of sentences) in our dataset, we just use the `map` method of our `dataset` object we created earlier. This will apply the function on all the elements of all the splits in `dataset`, so our training, validation and testing data will be preprocessed in one single command. Since our preprocessing changes the number of samples, we need to remove the old columns when applying it.
# + id="DDtsaJeVIrJT" outputId="aa4734bf-4ef5-4437-9948-2c16363da719"
tokenized_datasets = datasets.map(
    prepare_train_features, batched=True, remove_columns=datasets["train"].column_names
)
# + [markdown] id="voWiw8C7IrJV"
# Even better, the results are automatically cached by the 🤗 Datasets library to avoid spending time on this step the next time you run your notebook. The 🤗 Datasets library is normally smart enough to detect when the function you pass to map has changed (and thus requires to not use the cache data). For instance, it will properly detect if you change the task in the first cell and rerun the notebook. 🤗 Datasets warns you when it uses cached files, you can pass `load_from_cache_file=False` in the call to `map` to not use the cached files and force the preprocessing to be applied again.
#
# Note that we passed `batched=True` to encode the texts by batches together. This is to leverage the full benefit of the fast tokenizer we loaded earlier, which will use multi-threading to treat the texts in a batch concurrently.
# + [markdown] id="545PP3o8IrJV"
# ## Fine-tuning the model
# + [markdown] id="FBiW8UpKIrJW"
# Now that our data is ready for training, we can download the pretrained model and fine-tune it. Since our task is question answering, we use the `TFAutoModelForQuestionAnswering` class. Like with the tokenizer, the `from_pretrained` method will download and cache the model for us:
# + id="TlqNaB8jIrJW" outputId="84916cf3-6e6c-47f3-d081-032ec30a4132"
from transformers import TFAutoModelForQuestionAnswering
model = TFAutoModelForQuestionAnswering.from_pretrained(model_checkpoint)
# + [markdown] id="CczA5lJlIrJX"
# The warning is telling us we are throwing away some weights (the `vocab_transform` and `vocab_layer_norm` layers) and randomly initializing some other (the `pre_classifier` and `classifier` layers). This is absolutely normal in this case, because we are removing the head used to pretrain the model on a masked language modeling objective and replacing it with a new head for which we don't have pretrained weights, so the library warns us we should fine-tune this model before using it for inference, which is exactly what we are going to do.
# + [markdown] id="_N8urzhyIrJY"
# To train our model, we will need to define a few more things. The first two arguments are to setup everything so we can push the model to the [Hub](https://huggingface.co/models) at the end of training. Remove the two of them if you didn't follow the installation steps at the top of the notebook, otherwise you can change the value of `push_to_hub_model_id` to something you would prefer.
#
# We also tweak the learning rate, use the `batch_size` defined at the top of the notebook and customize the number of epochs for training, as well as the weight decay.
# + id="Bliy8zgjIrJY"
# Hub repo name is derived from the checkpoint name, e.g. "distilbert-base-uncased-finetuned-squad".
model_name = model_checkpoint.split("/")[-1]
push_to_hub_model_id = f"{model_name}-finetuned-squad"
# Training hyper-parameters (batch_size is defined earlier in the notebook).
learning_rate = 2e-5
num_train_epochs = 2
weight_decay = 0.01
# -
# Then we will need a data collator that will batch our processed examples together, here the default one will work.
# +
from transformers import DefaultDataCollator
data_collator = DefaultDataCollator(return_tensors="tf")
# -
# Now we can use this data collator to turn our data into a `tf.data.Dataset`, ready for training.
train_set = tokenized_datasets["train"].to_tf_dataset(
    columns=["attention_mask", "input_ids", "start_positions", "end_positions"],
    shuffle=True,
    batch_size=batch_size,
    collate_fn=data_collator,
)
validation_set = tokenized_datasets["validation"].to_tf_dataset(
    columns=["attention_mask", "input_ids", "start_positions", "end_positions"],
    shuffle=False,
    batch_size=batch_size,
    collate_fn=data_collator,
)
# Next, we can create an optimizer and specify a loss function. The `create_optimizer` function gives us a very solid optimizer with weight decay and a learning rate schedule, but it needs us to compute the number of training steps to build that schedule.
# +
from transformers import create_optimizer
# Steps per epoch times epochs; drops the final partial batch of each epoch.
total_train_steps = (len(tokenized_datasets["train"]) // batch_size) * num_train_epochs
optimizer, schedule = create_optimizer(
    init_lr=learning_rate, num_warmup_steps=0, num_train_steps=total_train_steps
)
# -
# As for the loss, all Transformers models compute loss internally, so we can simple leave the loss argument empty to train on this internal loss.
# +
import tensorflow as tf
model.compile(optimizer=optimizer)
# + [markdown] id="rXuFTAzDIrJe"
# We will evaluate our model and compute metrics in the next section (this is a very long operation, so we will only compute the evaluation loss during training). For now, let's just train our model. We can also add a callback to sync up our model with the Hub - this allows us to resume training from other machines and even test the model's inference quality midway through training! Make sure to change the `username` if you do. If you don't want to do this, simply remove the callbacks argument in the call to `fit()`.
# + id="imY1oC3SIrJf"
from transformers.keras_callbacks import PushToHubCallback
username = "Rocketknight1"
callback = PushToHubCallback(
    output_dir="./qa_model_save",
    tokenizer=tokenizer,
    hub_model_id=f"{username}/{push_to_hub_model_id}",
)
model.fit(train_set, validation_data=validation_set, epochs=num_train_epochs, callbacks=[callback])
# -
# ## Evaluation
# Evaluating our model will require a bit more work, as we will need to map the predictions of our model back to parts of the context. The model itself predicts logits for the start and en position of our answers: if we take a batch from our validation dataset, here is the output our model gives us:
batch = next(iter(validation_set))
output = model.predict_on_batch(batch)
output.keys()
# The output of the model is a dict-like object that contains the loss (since we provided labels), the start and end logits. We won't need the loss for our predictions, let's have a look a the logits:
output.start_logits.shape, output.end_logits.shape
# We have one logit for each feature and each token. The most obvious thing to predict an answer for each feature is to take the index for the maximum of the start logits as a start position and the index of the maximum of the end logits as an end position.
# +
import numpy as np
# Naive decoding: argmax over the token axis for each feature in the batch.
np.argmax(output.start_logits, -1), np.argmax(output.end_logits, -1)
# -
# This will work great in a lot of cases, but what if this prediction gives us something impossible: the start position could be greater than the end position, or point to a span of text in the question instead of the answer. In that case, we might want to look at the second best prediction to see if it gives a possible answer and select that instead.
#
# However, picking the second best answer is not as easy as picking the best one: is it the second best index in the start logits with the best index in the end logits? Or the best index in the start logits with the second best index in the end logits? And if that second best answer is not possible either, it gets even trickier for the third best answer.
#
#
# To classify our answers, we will use the score obtained by adding the start and end logits. We won't try to order all the possible answers and limit ourselves to with a hyper-parameter we call `n_best_size`. We'll pick the best indices in the start and end logits and gather all the answers this predicts. After checking if each one is valid, we will sort them by their score and keep the best one. Here is how we would do this on the first feature in the batch:
n_best_size = 20
# +
import numpy as np
# Logits for the first feature only (demo).
start_logits = output.start_logits[0]
end_logits = output.end_logits[0]
# Gather the indices the best start/end logits:
# (reversed argsort slice -> the n_best_size highest-scoring token indices)
start_indexes = np.argsort(start_logits)[-1 : -n_best_size - 1 : -1].tolist()
end_indexes = np.argsort(end_logits)[-1 : -n_best_size - 1 : -1].tolist()
valid_answers = []
for start_index in start_indexes:
    for end_index in end_indexes:
        if (
            start_index <= end_index
        ):  # We need to refine that test to check the answer is inside the context
            valid_answers.append(
                {
                    "score": start_logits[start_index] + end_logits[end_index],
                    "text": "",  # We need to find a way to get back the original substring corresponding to the answer in the context
                }
            )
# -
# And then we can sort the `valid_answers` according to their `score` and only keep the best one. The only point left is how to check a given span is inside the context (and not the question) and how to get back the text inside. To do this, we need to add two things to our validation features:
# - the ID of the example that generated the feature (since each example can generate several features, as seen before);
# - the offset mapping that will give us a map from token indices to character positions in the context.
#
# That's why we will re-process the validation set with the following function, slightly different from `prepare_train_features`:
def prepare_validation_features(examples):
    """Tokenize validation examples into overlapping features with the
    bookkeeping needed for post-processing.

    For every feature this keeps (a) the id of the example it came from in
    ``example_id`` and (b) an ``offset_mapping`` in which entries outside the
    context segment are replaced by ``None``, so later code can tell context
    tokens apart from question/special tokens. Relies on the module-level
    ``tokenizer``, ``pad_on_right``, ``max_length`` and ``doc_stride``.
    """
    # Tokenize with truncation and a stride so a long context is split into
    # several overlapping features instead of being cut off.
    tokenized = tokenizer(
        examples["question" if pad_on_right else "context"],
        examples["context" if pad_on_right else "question"],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_length,
        stride=doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )
    # Map each overflow feature back to the example it was generated from.
    feature_to_example = tokenized.pop("overflow_to_sample_mapping")
    # The context lives in segment 1 when the question comes first, else 0.
    context_segment = 1 if pad_on_right else 0
    tokenized["example_id"] = []
    for feature_index in range(len(tokenized["input_ids"])):
        segment_ids = tokenized.sequence_ids(feature_index)
        sample_index = feature_to_example[feature_index]
        tokenized["example_id"].append(examples["id"][sample_index])
        # Blank out offsets that do not belong to the context.
        tokenized["offset_mapping"][feature_index] = [
            pos if segment_ids[token_index] == context_segment else None
            for token_index, pos in enumerate(tokenized["offset_mapping"][feature_index])
        ]
    return tokenized
# And like before, we can apply that function to our validation set easily:
validation_features = datasets["validation"].map(
    prepare_validation_features,
    batched=True,
    remove_columns=datasets["validation"].column_names,
)
# And turn the dataset into a `tf.data.Dataset` as before. Note that we only need to retain the columns being passed to the model - and for prediction, that means no label columns are necessary.
validation_dataset = validation_features.to_tf_dataset(
    columns=["attention_mask", "input_ids"],
    shuffle=False,
    batch_size=batch_size,
    collate_fn=data_collator,
)
# Now we can grab the predictions for all features by using the `model.predict` method:
raw_predictions = model.predict(validation_dataset)
# We can now refine the test we had before: since we set `None` in the offset mappings when it corresponds to a part of the question, it's easy to check if an answer is fully inside the context. We also eliminate very long answers from our considerations (with an hyper-parameter we can tune)
max_answer_length = 30
# +
# Demo: decode the candidate answers for the first validation feature only.
start_logits = output.start_logits[0]
end_logits = output.end_logits[0]
offset_mapping = validation_features[0]["offset_mapping"]
# The first feature comes from the first example. For the more general case, we will need to be match the example_id to
# an example index
context = datasets["validation"][0]["context"]
# Gather the indices the best start/end logits:
start_indexes = np.argsort(start_logits)[-1 : -n_best_size - 1 : -1].tolist()
end_indexes = np.argsort(end_logits)[-1 : -n_best_size - 1 : -1].tolist()
valid_answers = []
for start_index in start_indexes:
    for end_index in end_indexes:
        # Don't consider out-of-scope answers, either because the indices are out of bounds or correspond
        # to part of the input_ids that are not in the context.
        if (
            start_index >= len(offset_mapping)
            or end_index >= len(offset_mapping)
            or offset_mapping[start_index] is None
            or offset_mapping[end_index] is None
        ):
            continue
        # Don't consider answers with a length that is either < 0 or > max_answer_length.
        if end_index < start_index or end_index - start_index + 1 > max_answer_length:
            continue
        # NOTE(review): this check is redundant — the `continue` above already
        # guarantees start_index <= end_index.
        if (
            start_index <= end_index
        ):  # We need to refine that test to check the answer is inside the context
            start_char = offset_mapping[start_index][0]
            end_char = offset_mapping[end_index][1]
            valid_answers.append(
                {
                    "score": start_logits[start_index] + end_logits[end_index],
                    "text": context[start_char:end_char],
                }
            )
valid_answers = sorted(valid_answers, key=lambda x: x["score"], reverse=True)[
    :n_best_size
]
valid_answers
# -
# We can compare to the actual ground-truth answer:
datasets["validation"][0]["answers"]
# Our model's most likely answer is correct!
#
# As we mentioned in the code above, this was easy on the first feature because we knew it comes from the first example. For the other features, we will need a map between examples and their corresponding features. Also, since one example can give several features, we will need to gather together all the answers in all the features generated by a given example, then pick the best one. The following code builds a map from example index to its corresponding features indices:
# +
import collections
examples = datasets["validation"]
features = validation_features
# example id -> example row index, then example row index -> feature indices.
example_id_to_index = {k: i for i, k in enumerate(examples["id"])}
features_per_example = collections.defaultdict(list)
for i, feature in enumerate(features):
    features_per_example[example_id_to_index[feature["example_id"]]].append(i)
# -
# We're almost ready for our post-processing function. The last bit to deal with is the impossible answer (when `squad_v2 = True`). The code above only keeps answers that are inside the context, we need to also grab the score for the impossible answer (which has start and end indices corresponding to the index of the CLS token). When one example gives several features, we have to predict the impossible answer when all the features give a high score to the impossible answer (since one feature could predict the impossible answer just because the answer isn't in the part of the context it has access too), which is why the score of the impossible answer for one example is the *minimum* of the scores for the impossible answer in each feature generated by the example.
#
# We then predict the impossible answer when that score is greater than the score of the best non-impossible answer. All combined together, this gives us this post-processing function:
# +
from tqdm.auto import tqdm
def postprocess_qa_predictions(
    examples,
    features,
    all_start_logits,
    all_end_logits,
    n_best_size=20,
    max_answer_length=30,
):
    """Convert raw start/end logits into one text prediction per example.

    Each example may have produced several overlapping features; candidate
    spans are scored with ``start_logit + end_logit``, restricted to spans
    that lie fully inside the context and are at most ``max_answer_length``
    tokens long, and the best-scoring span wins. When the module-level
    ``squad_v2`` flag is set, the null (no-answer) score for an example is
    the *minimum* CLS score over its features, and the empty string is
    predicted when it beats the best span. Also relies on the module-level
    ``tokenizer``.

    Returns an OrderedDict mapping example id -> predicted answer text.
    """
    # Build a map example to its corresponding features.
    example_id_to_index = {k: i for i, k in enumerate(examples["id"])}
    features_per_example = collections.defaultdict(list)
    for i, feature in enumerate(features):
        features_per_example[example_id_to_index[feature["example_id"]]].append(i)
    # The dictionaries we have to fill.
    predictions = collections.OrderedDict()
    # Logging.
    print(
        f"Post-processing {len(examples)} example predictions split into {len(features)} features."
    )
    # Let's loop over all the examples!
    for example_index, example in enumerate(tqdm(examples)):
        # Those are the indices of the features associated to the current example.
        feature_indices = features_per_example[example_index]
        min_null_score = None  # Only used if squad_v2 is True.
        valid_answers = []
        context = example["context"]
        # Looping through all the features associated to the current example.
        for feature_index in feature_indices:
            # We grab the predictions of the model for this feature.
            start_logits = all_start_logits[feature_index]
            end_logits = all_end_logits[feature_index]
            # This is what will allow us to map some the positions in our logits to span of texts in the original
            # context.
            offset_mapping = features[feature_index]["offset_mapping"]
            # Update minimum null prediction.
            cls_index = features[feature_index]["input_ids"].index(
                tokenizer.cls_token_id
            )
            feature_null_score = start_logits[cls_index] + end_logits[cls_index]
            # BUG FIX: keep the *minimum* null score across this example's
            # features. The original comparison (`min_null_score <
            # feature_null_score`) kept the maximum, contradicting the stated
            # intent and over-predicting "no answer" for squad_v2.
            if min_null_score is None or feature_null_score < min_null_score:
                min_null_score = feature_null_score
            # Go through all possibilities for the `n_best_size` greater start and end logits.
            start_indexes = np.argsort(start_logits)[
                -1 : -n_best_size - 1 : -1
            ].tolist()
            end_indexes = np.argsort(end_logits)[-1 : -n_best_size - 1 : -1].tolist()
            for start_index in start_indexes:
                for end_index in end_indexes:
                    # Don't consider out-of-scope answers, either because the indices are out of bounds or correspond
                    # to part of the input_ids that are not in the context.
                    if (
                        start_index >= len(offset_mapping)
                        or end_index >= len(offset_mapping)
                        or offset_mapping[start_index] is None
                        or offset_mapping[end_index] is None
                    ):
                        continue
                    # Don't consider answers with a length that is either < 0 or > max_answer_length.
                    if (
                        end_index < start_index
                        or end_index - start_index + 1 > max_answer_length
                    ):
                        continue
                    start_char = offset_mapping[start_index][0]
                    end_char = offset_mapping[end_index][1]
                    valid_answers.append(
                        {
                            "score": start_logits[start_index] + end_logits[end_index],
                            "text": context[start_char:end_char],
                        }
                    )
        if len(valid_answers) > 0:
            best_answer = sorted(valid_answers, key=lambda x: x["score"], reverse=True)[
                0
            ]
        else:
            # In the very rare edge case we have not a single non-null prediction, we create a fake prediction to avoid
            # failure.
            best_answer = {"text": "", "score": 0.0}
        # Let's pick our final answer: the best one or the null answer (only for squad_v2)
        if not squad_v2:
            predictions[example["id"]] = best_answer["text"]
        else:
            answer = (
                best_answer["text"] if best_answer["score"] > min_null_score else ""
            )
            predictions[example["id"]] = answer
    return predictions
# -
# And we can apply our post-processing function to our raw predictions:
final_predictions = postprocess_qa_predictions(
    datasets["validation"],
    validation_features,
    raw_predictions["start_logits"],
    raw_predictions["end_logits"],
)
# Then we can load the metric from the datasets library.
metric = load_metric("squad_v2" if squad_v2 else "squad")
# Then we can call compute on it. We just need to format predictions and labels a bit as it expects a list of dictionaries and not one big dictionary. In the case of squad_v2, we also have to set a `no_answer_probability` argument (which we set to 0.0 here as we have already set the answer to empty if we picked it).
if squad_v2:
    formatted_predictions = [
        {"id": k, "prediction_text": v, "no_answer_probability": 0.0}
        for k, v in final_predictions.items()
    ]
else:
    formatted_predictions = [
        {"id": k, "prediction_text": v} for k, v in final_predictions.items()
    ]
references = [
    {"id": ex["id"], "answers": ex["answers"]} for ex in datasets["validation"]
]
metric.compute(predictions=formatted_predictions, references=references)
# If you ran the callback above, you can now share this model with all your friends, family, favorite pets: they can all load it with the identifier `"your-username/the-name-you-picked"` so for instance:
#
# ```python
# from transformers import TFAutoModelForQuestionAnswering
#
# model = TFAutoModelForQuestionAnswering.from_pretrained("your-username/my-awesome-model")
# ```
|
examples/question_answering-tf.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: atari1.0_kernel
# language: python
# name: atari1.0
# ---
# + [markdown] colab_type="text" id="LN0nZwyMGadB"
# # Sonic The Hedgehog 1 with dqn
#
# ## Step 1: Import the libraries
# +
import time
import retro
import random
import torch
import numpy as np
from collections import deque
import matplotlib.pyplot as plt
from IPython.display import clear_output
import math
# %matplotlib inline
# -
import sys
# Make the repository root importable so the project-local `algos` package resolves.
sys.path.append('../../')
from algos.agents.dqn_agent import DQNAgent
from algos.models.dqn_cnn import DQNCnn
from algos.preprocessing.stack_frame import preprocess_frame, stack_frame
# + [markdown] colab_type="text" id="tfo8jleHGadK"
# ## Step 2: Create our environment
#
# Initialize the environment in the code cell below.
#
# -
# Green Hill Zone Act 1 with the "contest" reward scenario from gym-retro.
env = retro.make(game='SonicTheHedgehog-Genesis', state='GreenHillZone.Act1', scenario='contest')
env.seed(0)
# if gpu is to be used
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("Device: ", device)
# + [markdown] colab_type="text" id="nS221MgXGadP"
# ## Step 3: Viewing our Environment
# -
print("The size of frame is: ", env.observation_space.shape)
print("No. of Actions: ", env.action_space.n)
env.reset()
plt.figure()
plt.imshow(env.reset())
plt.title('Original Frame')
plt.show()
# Discrete action set: each entry is a 12-element multi-binary combo for the
# Genesis controller expected by env.step().
# NOTE(review): the per-entry button comments below are the author's; confirm
# the index->button mapping against env.buttons before changing.
possible_actions = {
    # No Operation
    0: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
    # Left
    1: [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
    # Right
    2: [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
    # Left, Down
    3: [0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
    # Right, Down
    4: [0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0],
    # Down
    5: [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
    # Down, B
    6: [1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
    # B
    7: [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
}
# ### Execute the code cell below to play Sonic with a random policy.
def random_play():
    """Roll out up to 200 random actions, rendering each step, and report
    the accumulated reward if the episode terminates early."""
    total_reward = 0
    env.reset()
    for _ in range(200):
        env.render()
        button_combo = possible_actions[np.random.randint(len(possible_actions))]
        _, reward, done, _ = env.step(button_combo)
        total_reward += reward
        if done:
            print("Your Score at end of game is: ", total_reward)
            break
    env.reset()
    env.render(close=True)
random_play()
# + [markdown] colab_type="text" id="Sr52nmcpGada"
# ## Step 4: Preprocessing Frame
# -
plt.figure()
# Show one frame after the project's crop/resize-to-84x84 grayscale preprocessing.
plt.imshow(preprocess_frame(env.reset(), (1, -1, -1, 1), 84), cmap="gray")
plt.title('Pre Processed image')
plt.show()
# + [markdown] colab_type="text" id="mJMc3HA8Gade"
# ## Step 5: Stacking Frame
# -
def stack_frames(frames, state, is_new=False):
    """Preprocess a raw frame and push it onto the rolling frame stack.

    ``is_new`` signals the start of an episode (the stack is re-seeded).
    """
    processed = preprocess_frame(state, (1, -1, -1, 1), 84)
    return stack_frame(frames, processed, is_new)
# ## Step 6: Creating our Agent
# +
INPUT_SHAPE = (4, 84, 84)  # 4 stacked 84x84 frames
ACTION_SIZE = len(possible_actions)
SEED = 0
GAMMA = 0.99           # discount factor
BUFFER_SIZE = 100000   # replay buffer size
BATCH_SIZE = 32        # Update batch size
LR = 0.0001            # learning rate
TAU = 1e-3             # for soft update of target parameters
UPDATE_EVERY = 100     # how often to update the network
UPDATE_TARGET = 10000  # After which threshold replay to be started
EPS_START = 0.99       # starting value of epsilon
EPS_END = 0.01         # Ending value of epsilon
EPS_DECAY = 100        # Rate by which epsilon to be decayed
agent = DQNAgent(INPUT_SHAPE, ACTION_SIZE, SEED, device, BUFFER_SIZE, BATCH_SIZE, GAMMA, LR, TAU, UPDATE_EVERY, UPDATE_TARGET, DQNCnn)
# -
# ## Step 7: Watching untrained agent play
env.viewer = None
# watch an untrained agent (mostly-greedy: eps=0.01)
state = stack_frames(None, env.reset(), True)
for j in range(200):
    env.render(close=False)
    action = agent.act(state, eps=0.01)
    next_state, reward, done, _ = env.step(possible_actions[action])
    state = stack_frames(state, next_state, False)
    if done:
        env.reset()
        break
env.render(close=True)
# ## Step 8: Loading Agent
# Uncomment line to load a pretrained agent
start_epoch = 0
scores = []
scores_window = deque(maxlen=20)  # rolling window of the last 20 episode scores
# ## Step 9: Train the Agent with DQN
# +
# Exponentially decaying exploration rate, EPS_START -> EPS_END with time
# constant EPS_DECAY. (Identifier keeps the original "epsiode" spelling.)
epsilon_by_epsiode = lambda frame_idx: EPS_END + (EPS_START - EPS_END) * math.exp(-1. * frame_idx /EPS_DECAY)
plt.plot([epsilon_by_epsiode(i) for i in range(1000)])
# -
def train(n_episodes=1000):
    """Train the DQN agent, re-plotting the learning curve every episode.

    Uses the module-level ``env``, ``agent``, ``scores`` and
    ``scores_window`` (episode rewards are appended to both).

    Params
    ======
        n_episodes (int): maximum number of training episodes
    """
    for i_episode in range(start_epoch + 1, n_episodes+1):
        state = stack_frames(None, env.reset(), True)
        score = 0
        eps = epsilon_by_epsiode(i_episode)
        # Punish the agent for not moving forward
        prev_state = {}
        steps_stuck = 0
        timestamp = 0
        while timestamp < 10000:
            action = agent.act(state, eps)
            next_state, reward, done, info = env.step(possible_actions[action])
            score += reward
            timestamp += 1
            # Punish the agent for standing still for too long.
            # ("stuck" = the retro `info` game-variable dict is unchanged)
            if (prev_state == info):
                steps_stuck += 1
            else:
                steps_stuck = 0
            prev_state = info
            if (steps_stuck > 20):
                # NOTE(review): the penalty is applied after `reward` was
                # already added to `score`, so only the replay buffer sees it.
                reward -= 1
            next_state = stack_frames(state, next_state, False)
            agent.step(state, action, reward, next_state, done)
            state = next_state
            if done:
                break
        scores_window.append(score)       # save most recent score
        scores.append(score)              # save most recent score
        clear_output(True)
        fig = plt.figure()
        ax = fig.add_subplot(111)
        plt.plot(np.arange(len(scores)), scores)
        plt.ylabel('Score')
        plt.xlabel('Episode #')
        plt.show()
        print('\rEpisode {}\tAverage Score: {:.2f}\tEpsilon: {:.2f}'.format(i_episode, np.mean(scores_window), eps), end="")
    return scores
scores = train(1000)
# ## Step 10: Watch a Smart Agent!
env.viewer = None
# watch the agent after training
state = stack_frames(None, env.reset(), True)
for j in range(10000):
    env.render(close=False)
    # NOTE(review): eps=0.91 takes a random action 91% of the time; a demo of
    # the learned policy would normally use a near-greedy value such as 0.01
    # — confirm whether this was intentional.
    action = agent.act(state, eps=0.91)
    next_state, reward, done, _ = env.step(possible_actions[action])
    state = stack_frames(state, next_state, False)
    if done:
        env.reset()
        break
env.render(close=True)
|
cgames/05_sonic/sonic_dqn.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np # to handle matrix and data operation
import pandas as pd # to read csv and handle dataframe
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
from torch.autograd import Variable
from sklearn.metrics import roc_auc_score, f1_score
from sklearn.model_selection import train_test_split
from torchnlp.word_to_vector import FastText
from torch.nn.utils.rnn import pad_sequence
# -
# Checkpoint filename used when saving the fitted model.
model_save_location = "TCM_7.pt"
# +
# Pretrained FastText word vectors (downloads on first use).
vectors = FastText()
# NOTE(review): device is hard-coded to the first GPU; this fails on
# CPU-only machines — consider a cuda-availability check.
device = torch.device("cuda:0")
BATCH_SIZE = 96
# +
def _batchify(X, y):
    """Split the parallel arrays *X* and *y* into consecutive BATCH_SIZE
    chunks; the final chunk holds the remainder and may be smaller."""
    batched_X, batched_y = [], []
    i = 0
    while (i + 1) * BATCH_SIZE < len(X):
        batched_X.append(X[i * BATCH_SIZE:(i + 1) * BATCH_SIZE])
        batched_y.append(y[i * BATCH_SIZE:(i + 1) * BATCH_SIZE])
        i += 1
    batched_X.append(X[i * BATCH_SIZE:])
    batched_y.append(y[i * BATCH_SIZE:])
    return batched_X, batched_y


def load_data():
    '''Load dataset, data cleaned using Kaggle method.

    Reads the pre-cleaned CSV from the working directory, keeps comments of
    1..560 tokens, does an 85/15 train/test split with a fixed seed, and
    returns (batched_X_train, batched_y_train, batched_X_test,
    batched_y_test) as lists of BATCH_SIZE-sized batches.
    '''
    df = pd.read_csv("toxic-train-kaggle-clean.csv")
    # "word_splits" is stored as a stringified Python list in the CSV.
    # NOTE(review): eval on file contents is unsafe for untrusted CSVs;
    # ast.literal_eval would be the safer equivalent.
    df["word_splits"] = df["word_splits"].apply(eval)
    # Drop empty comments and extremely long ones (> 560 tokens).
    df = df[(df["word_splits"].apply(len) > 0) & (df["word_splits"].apply(len) <= 560)]
    X_train, X_test, y_train, y_test = train_test_split(
        df["word_splits"], df.drop("word_splits", axis=1), random_state=99, test_size=0.15
    )
    # The two hand-rolled, duplicated batching loops of the original are now
    # a single shared helper (same chunking behavior).
    batched_X_train, batched_y_train = _batchify(X_train.values, y_train.values)
    batched_X_test, batched_y_test = _batchify(X_test.values, y_test.values)
    return batched_X_train, batched_y_train, batched_X_test, batched_y_test
# Load and batch everything once at module level (reads the cleaned CSV from the working directory).
batched_X_train, batched_y_train, batched_X_test, batched_y_test = load_data()
# +
class ToxicClassifierModel(nn.Module):
    """Recurrent multi-label classifier with 6 sigmoid outputs.

    Pipeline: channel dropout on the word embeddings -> bidirectional GRU
    -> bidirectional RNN -> concatenation of max- and mean-pooling over
    dim 1 -> two residual dense layers -> sigmoid over 6 outputs.
    """
    def __init__(self,
                 LSTM_UNITS = 128,
                 dropout_rate = 0.2,
                 hidden1Activation = F.relu,
                 hidden2Activation = F.relu,
                 #hidden1Size = 512,
                 #hidden2Size = 512
                 ):
        super(ToxicClassifierModel, self).__init__()
        self.dropout_rate = dropout_rate
        # 300 matches the FastText embedding dimension used by the caller.
        self.BiGRU = nn.GRU(300, hidden_size = LSTM_UNITS, bidirectional=True, num_layers=1)
        self.BiRNN = nn.RNN(input_size = 2 * LSTM_UNITS, hidden_size = LSTM_UNITS, bidirectional=True)
        self.hidden1 = nn.Linear(4 * LSTM_UNITS, 4 * LSTM_UNITS)
        self.hidden1Activation = hidden1Activation
        self.hidden2 = nn.Linear(4 * LSTM_UNITS, 4 * LSTM_UNITS)
        self.hidden2Activation = hidden2Activation
        # 6 output units, one per label.
        self.hidden3 = nn.Linear(4 * LSTM_UNITS, 6)

    def forward(self, X):
        # Channel-wise ("spatial") dropout: move the embedding dim into the
        # channel position for dropout2d, then restore the layout.
        X = X.permute(0, 2, 1)
        X = F.dropout2d(X, self.dropout_rate, training=self.training)
        X = X.permute(0, 2, 1)
        # NOTE(review): nn.GRU/nn.RNN default to (seq, batch, feature) input,
        # while the caller passes (batch, seq, feature) — confirm the intended
        # layout (batch_first=True may have been meant).
        X = self.BiGRU(X)
        X = self.BiRNN(X[0])
        X = X[0]
        # Concatenate max- and mean-pooling over dim 1 -> (..., 4*LSTM_UNITS).
        X = torch.cat((torch.max(X, 1).values, torch.mean(X, 1)), 1)
        # Two residual dense blocks.
        X = X.add(self.hidden1Activation(self.hidden1(X)))
        X = X.add(self.hidden2Activation(self.hidden2(X)))
        # Independent per-label probabilities (multi-label output).
        X = torch.sigmoid(self.hidden3(X))
        return X
class ToxicClassifierFitter():
    """Train/evaluate a ToxicClassifierModel on pre-batched token lists.

    Batches are embedded on the fly via `vectors` (token list -> (len, 300)
    tensor), padded per batch, and moved to `device`. With `save_checkpoint`
    set, the best validation score seen so far is tracked in `self.acc` and
    the model state_dict is written to `model_save_location` on improvement.
    """
    def __init__(self,
                 optimizer,
                 error,
                 model,
                 vectors,
                 device,
                 EPOCHS = 2,
                 seed_acc = 0.5,
                 save_checkpoint = True,
                 model_save_location = "TCM_7.pt"  # same value as the module-level default
                 ):
        self.optimizer = optimizer
        self.error = error              # loss function (nn.BCELoss)
        self.model = model
        self.EPOCHS = EPOCHS
        self.acc = seed_acc             # best validation metric observed so far
        self.vectors = vectors          # token -> 300-d embedding lookup (FastText)
        self.device = device
        self.model_save_location = model_save_location
        self.save_checkpoint = save_checkpoint
    def _embed(self, X_batch):
        """Embed a batch of token lists and pad to shape (batch, max_len, 300)."""
        padded = torch.nn.utils.rnn.pad_sequence([self.vectors[X] for X in X_batch])
        return Variable(padded.permute(1, 0, 2)).float().to(self.device)
    def accuracy(self, batched_X_test, batched_y_test):
        """Element-wise accuracy (%) of rounded predictions over all 6 labels.

        Bug fixes vs. the original:
        - divide by the actual number of label elements instead of
          6 * BATCH_SIZE * n_batches, which undercounted whenever the final
          batch was short;
        - do NOT overwrite self.acc here: doing so made the `acc2 > self.acc`
          checkpoint test in fit() always false, so the best model was never
          saved.
        """
        correct = 0
        total = 0
        for batch_idx, (X_batch, y_batch) in enumerate(zip(batched_X_test, batched_y_test)):
            var_X_batch = self._embed(X_batch)
            var_y_batch = Variable(torch.from_numpy(y_batch)).float().to(self.device)
            output = self.model(var_X_batch)
            # Total correct predictions
            predicted = output.data.round()
            correct += (predicted == var_y_batch).sum()
            total += var_y_batch.numel()
            del var_X_batch
            del var_y_batch
            del output
            del predicted
            torch.cuda.empty_cache()
        return float(correct * 100) / float(total)
    def F1Score(self, batched_X_test, batched_y_test):
        """Micro F1 over every (sample, label) element of the test batches.

        Consistency fix: uses self.vectors / self.device like every other
        method (the original reached for the module-level globals).
        """
        preds = []
        truePreds = []
        for batch_idx, (X_batch, y_batch) in enumerate(zip(batched_X_test, batched_y_test)):
            var_X_batch = self._embed(X_batch)
            var_y_batch = Variable(torch.from_numpy(y_batch)).float().to(self.device)
            output = self.model(var_X_batch)
            preds = preds + [ round(float(x)) for X in output.data for x in X ]
            truePreds = truePreds + [ round(float(x)) for X in var_y_batch for x in X ]
            del var_X_batch
            del var_y_batch
            del output
            torch.cuda.empty_cache()
        return f1_score(truePreds, preds)
    def predict(self, batched_X_test, batched_y_test):
        """Return rounded (0/1) predictions for all batches, concatenated on dim 0.

        Bug fix: the original del'd `predicted` inside the loop and then
        returned it after the loop, raising UnboundLocalError (and it would
        only ever have held the final batch anyway).
        """
        preds = []
        for batch_idx, (X_batch, y_batch) in enumerate(zip(batched_X_test, batched_y_test)):
            var_X_batch = self._embed(X_batch)
            output = self.model(var_X_batch)
            preds.append(output.data.round().cpu())
            del var_X_batch
            del output
            torch.cuda.empty_cache()
        return torch.cat(preds, 0)
    def fit(self, batched_X_train, batched_y_train, batched_X_test = None, batched_y_test = None):
        """Train with BCE loss; after each epoch optionally checkpoint on accuracy."""
        for epoch in range(self.EPOCHS):
            correct = 0
            seen = 0  # labels seen so far; exact even when the last batch is short
            for batch_idx, (X_batch, y_batch) in enumerate(zip(batched_X_train, batched_y_train)):
                var_X_batch = self._embed(X_batch)
                var_y_batch = Variable(torch.from_numpy(y_batch)).float().to(self.device)
                self.optimizer.zero_grad()
                output = self.model(var_X_batch)
                loss = self.error(output, var_y_batch)
                loss.backward()
                self.optimizer.step()
                # Total correct predictions
                predicted = output.data.round()
                correct += (predicted == var_y_batch).sum()
                seen += var_y_batch.numel()
                if batch_idx % 50 == 0:
                    print('Epoch : {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\t Accuracy:{:.3f}%'.format(
                        epoch, batch_idx*len(X_batch), len(batched_X_train), 100.*batch_idx / len(batched_X_train), loss.data, float(correct*100) / float(seen)))
                del var_X_batch
                del var_y_batch
                del loss
                del output
                del predicted
                torch.cuda.empty_cache()
            torch.cuda.empty_cache()
            if self.save_checkpoint:
                acc2 = self.accuracy(batched_X_test, batched_y_test)
                print("Validation accuracy Score:", acc2)
                if acc2 > self.acc:
                    print("Saving best model...")
                    torch.save(self.model.state_dict(), self.model_save_location)
                    self.acc = acc2
    def fitF1(self, batched_X_train, batched_y_train, batched_X_test = None, batched_y_test = None):
        """Train with BCE loss; after each epoch optionally checkpoint on micro F1."""
        for epoch in range(self.EPOCHS):
            preds = []
            truePreds = []
            for batch_idx, (X_batch, y_batch) in enumerate(zip(batched_X_train, batched_y_train)):
                var_X_batch = self._embed(X_batch)
                var_y_batch = Variable(torch.from_numpy(y_batch)).float().to(self.device)
                self.optimizer.zero_grad()
                output = self.model(var_X_batch)
                loss = self.error(output, var_y_batch)
                loss.backward()
                self.optimizer.step()
                preds = preds + [ round(float(x)) for X in output.data for x in X ]
                truePreds = truePreds + [ round(float(x)) for X in var_y_batch for x in X ]
                if batch_idx % 50 == 0:
                    print('Epoch : {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\t F1:{:.3f}%'.format(
                        epoch, batch_idx*len(X_batch), len(batched_X_train), 100.*batch_idx / len(batched_X_train), loss.data, f1_score(truePreds, preds)))
                del var_X_batch
                del var_y_batch
                del loss
                del output
                torch.cuda.empty_cache()
            torch.cuda.empty_cache()
            if self.save_checkpoint:
                acc2 = self.F1Score(batched_X_test, batched_y_test)
                print("Validation F1 Score:", acc2)
                if acc2 > self.acc:
                    print("Saving best model...")
                    torch.save(self.model.state_dict(), self.model_save_location)
                    self.acc = acc2
def createFitter(LSTM_UNITS = 128,
                 #hidden1Size = 512,
                 #hidden2Size = 512,
                 dropout_rate = 0.2,
                 hidden1Activation = F.relu,
                 hidden2Activation = F.relu,
                 learning_rate=0.001,
                 beta_1 = 0.9,
                 beta_2 = 0.999,
                 amsgrad=False,
                 weight_decay=0,
                 epochs = 1,
                 model_save_location=model_save_location,
                 vectors=vectors
                 ):
    """Assemble a ToxicClassifierFitter: fresh model on `device`, Adam, BCE loss.

    Defaults for `model_save_location` and `vectors` come from the module
    globals defined near the top of the notebook.
    """
    net = ToxicClassifierModel(LSTM_UNITS = LSTM_UNITS,
                               dropout_rate = dropout_rate,
                               hidden1Activation = hidden1Activation,
                               hidden2Activation = hidden2Activation)
    net.to(device)  # module-level device (cuda:0)
    adam = torch.optim.Adam(net.parameters(), lr=learning_rate, betas=(beta_1, beta_2), amsgrad=amsgrad, weight_decay=weight_decay)
    bce = nn.BCELoss()
    return ToxicClassifierFitter(adam, bce, net, vectors, device,
                                 EPOCHS = epochs,
                                 model_save_location = model_save_location,
                                 save_checkpoint = True)
# +
# Name -> activation lookup so hyperparameter configs can pick by string.
# NOTE(review): F.tanh is a deprecated alias (torch.tanh preferred) -- left
# unchanged to preserve behavior.
activation_dict = {'relu':F.relu
                  ,'leaky_relu':F.leaky_relu
                  ,'softmax':F.softmax
                  ,'selu':F.selu
                  ,'tanh':F.tanh
                  ,'sigmoid':torch.sigmoid
                  ,'elu':F.elu
                  }
# Build the fitter with the tuned hyperparameters (commented value was a
# learning rate from an earlier tuning run).
TCMFitter = createFitter(
    LSTM_UNITS = 128,
    dropout_rate = 0.2,
    hidden1Activation = activation_dict["relu"],
    hidden2Activation = activation_dict["sigmoid"],
    learning_rate = 1e-3, #0.002683035186257151,
    amsgrad = False,
    weight_decay = 0,
    epochs = 20
)
# -
# Warm-start from the TCM_2 checkpoint, train on F1, then snapshot the final
# weights to TCM_4. NOTE(review): per-epoch best checkpoints still go to
# model_save_location ("TCM_7.pt") -- confirm the file naming is intended.
TCMFitter.model.load_state_dict(torch.load("TCM_2.pt"))
TCMFitter.fitF1(batched_X_train, batched_y_train, batched_X_test, batched_y_test)
torch.save(TCMFitter.model.state_dict(), "TCM_4.pt")
torch.cuda.empty_cache()
|
model files/nni/RNN Hyperparameter Tuning/TunedKaggleModel.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from lib.utilities import *
# %matplotlib inline
# +
############### User Configuration ###############
##################################################
datadir = "." # All ZDA files in this directory + subdirectories will be loaded
selected_filename = "07-09-21_04_01" # focus for subsequent analysis
force_reload = True # Set to True if data in zda_targets dir has changed
# +
############## Driver script: begin ##############
##################################################
# Load data
# NOTE(review): on a fresh kernel `data_loader` is undefined, so this guard
# raises NameError before the `is None` check runs -- it relies on a prior
# cell execution having created the name. Confirm intended workflow.
if force_reload or (data_loader is None) or data_loader.get_n_files_loaded() < 1:
    data_loader = DataLoader()
    data_loader.load_all_zda(data_dir=datadir + "/zda_targets")
print('Number of files loaded:', data_loader.get_n_files_loaded())
# Select data of interest
selected_data = data_loader.select_data_by_keyword(selected_filename)
# Clip to the time range 20 - 90 ms (get rid of the camera "foot", look at prestim only)
if selected_data is not None:
    selected_data.clip_data(t_range=[120,-1])
    raw_data, meta, rli = selected_data.get_data(trial=0), selected_data.get_meta(), selected_data.get_rli()
    # Need to subtract off the low-frequency voltage drift. First-order correction
    tr = Tracer()
    tr.correct_background(meta, raw_data, trial_dim=False)
    #full trace -- plot the drift-corrected trace at pixel (40, 40)
    tr.plot_trace(raw_data[:,:,:],
                  40,
                  40,
                  meta['interval_between_samples'])
else:
    print("Filename containing '" + selected_filename + "' not found. \n\tCheck zda_targets folder and/or set force_reload=True")
# +
# Frequency Decompositions
# Average the FFT magnitude over every pixel's trace, accumulating the mean
# incrementally (y_fft / n per pixel) to avoid holding all spectra at once.
freq_analyzer = FreqAnalyzer()
x_fft = freq_analyzer.compute_fft_binning(meta)
y_fft_avg = None
n = raw_data.shape[0] * raw_data.shape[1]
for x in range(raw_data.shape[0]):
    for y in range(raw_data.shape[1]):
        y_fft = freq_analyzer.decompose_trace_frequencies(meta,
                                                          raw_data[x,y,:],
                                                          x_fft=x_fft,
                                                          lower_freq=0,
                                                          upper_freq=50,
                                                          y_max=2000,
                                                          plot=False)
        if y_fft_avg is None:
            y_fft_avg = y_fft / n
        else:
            y_fft_avg += y_fft / n
# -
# Mean frequency decomposition (averaged by frequency over all pixels)
plt.plot(x_fft,
         np.abs(y_fft_avg))
plt.xlim([1, 400])
plt.ylim([1150,2000])
plt.title("Average Frequency Decomposition: All Pixels")
# Bug fix: save BEFORE show() -- show() finalizes/clears the current figure,
# so the original savefig-after-show order wrote out a blank image.
plt.savefig(selected_filename + "_avg_freq.jpg")
plt.show()
# +
# Average only for high-SNR and/or ROIs
trial = selected_data.get_data(trial=0)  # first trial of the selected recording
asnr = AnalyzerSNR(trial)
snr = asnr.get_snr(plot=True)  # per-pixel signal-to-noise map
print("max SNR:", np.max(snr),
      "\nmin SNR:", np.min(snr))
# -
# cluster by SNR and display
asnr.cluster_on_snr(plot=True)
# k=0 is highest SNR cluster (indices returned)
avg_snr_by_cluster = asnr.get_average_snr_by_cluster()
highest_snr_cluster = asnr.get_kth_cluster(0, plot=True)
# Frequency Decompositions for 3 highest-SNR clusters
for k in range(3):
    snr_cluster = asnr.get_kth_cluster(k, plot=False)
    if(snr_cluster.shape[1] < 1): # cluster is empty
        continue
    freq_analyzer = FreqAnalyzer()
    x_fft = freq_analyzer.compute_fft_binning(meta)
    y_fft_avg = None
    # NOTE(review): this divides by the TOTAL pixel count, not the cluster
    # size, so each curve is scaled by its cluster's share of pixels --
    # confirm that weighting is intended.
    n = raw_data.shape[0] * raw_data.shape[1]
    for i in range(snr_cluster.shape[1]):
        x = snr_cluster[0][i]
        y = snr_cluster[1][i]
        y_fft = freq_analyzer.decompose_trace_frequencies(meta,
                                                          raw_data[x,y,:],
                                                          x_fft=x_fft,
                                                          lower_freq=0,
                                                          upper_freq=50,
                                                          y_max=2000,
                                                          plot=False)
        if y_fft_avg is None:
            y_fft_avg = y_fft / n
        else:
            y_fft_avg += y_fft / n
    # Mean frequency decomposition (averaged by frequency over all pixels)
    plt.plot(x_fft,
             np.abs(y_fft_avg),
             label=str(k+1) +"th-highest SNR cluster")
plt.xlim([10, 400])
plt.ylim([10,400])
plt.xlabel("Frequency (Hz)")
plt.ylabel("Power")
plt.legend()
plt.title("Average Frequency Decomposition by SNR Cluster")
# Bug fix: savefig must come before show(); show() clears the figure, so the
# original call order saved a blank image.
plt.savefig(selected_filename + "_all_freq.jpg")
plt.show()
|
sample_analysis_freq.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Supplementary Figure - Marginal Emissions
# <NAME>, 2021.
import os
# Notebook lives in SupplementPlotting/; hop up to the repo root so the
# relative IntermediateOutputs/ and Results/ paths below resolve.
os.chdir('../')
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.patches import Patch
from matplotlib.lines import Line2D
from matplotlib.gridspec import GridSpec
import pickle
from simple_dispatch import bidStack
import matplotlib
import scipy
from future_grid import FutureDemand
# Scheduled plant retirements and planned additions, keyed by orispl_unit.
unit_drops = pd.read_csv('IntermediateOutputs/scheduled_retirements_2019.csv', index_col=0)
additions_df = pd.read_csv('IntermediateOutputs/generator_additions.csv', index_col=0)
import copy
def drop_add_generators(future_year, gd_short_init, unit_drops=None, additions_df=None, drop=True, add=True):
    """Return a deep copy of gd_short_init with its generator table (.df)
    updated for `future_year`.

    add:  each unit in additions_df with Year < future_year is appended as a
          clone of its existing row and renamed 'added_<i>'.
    drop: each unit in unit_drops with retirement_year < future_year is
          removed, and the index is reset.
    """
    updated = copy.deepcopy(gd_short_init)
    if add:
        to_add = additions_df.loc[additions_df['Year'] < future_year, 'orispl_unit'].values
        for i, unit in enumerate(to_add):
            new_row_pos = len(updated.df)
            template_idx = updated.df.index[updated.df['orispl_unit'] == unit]
            updated.df = pd.concat((updated.df, updated.df.loc[template_idx]), ignore_index=True)
            # Only the first appended clone gets the new name (matches the
            # original behavior when a unit id appears more than once).
            updated.df.loc[new_row_pos, 'orispl_unit'] = 'added_' + str(i)
    if drop:
        retired = unit_drops.loc[unit_drops['retirement_year'] < future_year, 'orispl_unit'].values
        keep_mask = ~updated.df['orispl_unit'].isin(retired)
        updated.df = updated.df[keep_mask].copy(deep=True).reset_index(drop=True)
    return updated
# Scenario knobs: target year and renewable multipliers; run_year selects the
# 2019 base generator dataset.
year = 2035; solar = 3.5; wind = 3; run_year=2019
# Fix: use a context manager so the pickle file handle is closed (the
# original called pickle.load on a bare open() and leaked the handle).
with open('IntermediateOutputs/generator_data_short_%s_%s.obj'%('WECC', str(run_year)), 'rb') as _f:
    gd_short = pickle.load(_f)
gd_short_copy = drop_add_generators(year, gd_short, drop=True, unit_drops=unit_drops, add=True, additions_df=additions_df)
# # Plotting
# +
def set_up_generator(year=2035, fuel=1.0, gd_short_copy=None):
    """Return the generator data to use for `year`.

    A pre-built copy passed via `gd_short_copy` is returned untouched.
    Otherwise the module-level 2019 base data (`gd_short`) is deep-copied,
    applying scheduled retirements/additions for any year other than 2019.
    NOTE(review): `fuel` is currently unused here -- confirm whether a
    fuel-price adjustment was intended.
    """
    if gd_short_copy is not None:
        return gd_short_copy
    if year == 2019:
        return copy.deepcopy(gd_short)
    return drop_add_generators(year, gd_short, drop=True, unit_drops=unit_drops, add=True, additions_df=additions_df)
def set_up_scenario(year=2035, solar=3.5, wind=3, fuel=1.0, ev_pen=1.0,
                    ev_scenario='HighHome', ev_timers='', ev_workplace_control='',
                    ev_workplace_bool=False, evs_bool=True, ev_scenario_date='20220408', gd_short_copy=None):
    """Build (generator_data, FutureDemand) for one grid scenario.

    Order matters: electrification scaling and the solar/wind multipliers
    must be set before future.solar()/future.wind() are applied, and EVs are
    layered on before update_total() recomputes the demand profile.
    NOTE(review): `fuel` is forwarded to set_up_generator, which currently
    ignores it -- confirm.
    """
    gd_short_copy = set_up_generator(year=year, fuel=fuel, gd_short_copy=gd_short_copy)
    future = FutureDemand(gd_short_copy, year=year)
    if year != 2019:
        # Non-base years: scale demand for electrification, then set the
        # per-year renewable multipliers before applying them below.
        future.electrification(scale_vs_given=True)
        future.solar_multiplier[year] = solar
        future.wind_multiplier[year] = wind
    future.solar()
    future.wind()
    if evs_bool:
        # Workplace-controlled charging passes the extra wp_control argument.
        if ev_workplace_bool:
            future.evs(pen_level=ev_pen, scenario_name=ev_scenario, timers_extra_info=ev_timers, wp_control=ev_workplace_control, scenario_date=ev_scenario_date)
        else:
            future.evs(pen_level=ev_pen, scenario_name=ev_scenario, timers_extra_info=ev_timers, scenario_date=ev_scenario_date)
    future.update_total()
    return gd_short_copy, future
# -
# Scenario under study: 2035 grid, business-as-usual EV adoption at 50%
# penetration, workplace charging controlled to minimize peak.
solar=3.5
wind=3
fuel=1
ev_scenario='BusinessAsUsual'
ev_timers=''
penlevel=0.5
ev_scenario_date='20220313'
gd_short_copy, future = set_up_scenario(year=2035, solar=solar, wind=wind, fuel=fuel, ev_scenario=ev_scenario,
                                        ev_timers=ev_timers, ev_pen=penlevel, ev_workplace_control='minpeak', ev_workplace_bool=True, evs_bool=True,
                                        gd_short_copy=None, ev_scenario_date=ev_scenario_date)
# Clamp negative demand (can occur after renewable subtraction) to zero.
future.demand['demand'] = future.demand['demand'].clip(0, 1e10)
# timer_names = {'':'Timers9pm', '_midnighttimers':'Timers12am', '_NoTimers':'TimersNone'}
# Precomputed dispatch results for this scenario (written by the main run).
save_str = 'Results/Fuel1_Solar35_Wind3/fuel'+str(fuel)+'_solar'+str(solar)+'_wind'+str(wind)+'_'+ev_scenario+'_'+'TimersMixed'+'_WPcontrol_minpeak'+'_penlevel'+str(penlevel)+'_storagebefore'
storage_before = pd.read_csv(save_str+'_storagebeforedf_'+'20220408'+'.csv', index_col=0)
dpdf = pd.read_csv(save_str+'_withstorage_dpdf_'+'20220408'+'.csv')
test_dpdf = copy.deepcopy(dpdf)
# +
# Plot hourly average emissions (left axis) and marginal emissions (right
# axis), split weekday vs. weekend, with 25-75th percentile bands for
# weekdays only (i == 0).
fig, ax = plt.subplots(1, 1, figsize=(8, 5))
ax2 = ax.twinx()
labels1 = ['Weekday', 'Weekend']
lines = ['-', '--', ':', '-.']
for i, weekdays in enumerate([[0, 1, 2, 3, 4], [5, 6]]):
    subset = test_dpdf[pd.to_datetime(test_dpdf['datetime']).dt.weekday.isin(weekdays)]
    # Reshape the hourly series to (days, 24) and average across days.
    ax.plot(np.reshape(subset['co2_tot'].values / subset['total_incl_noncombustion'].values, (-1, 24)).mean(axis=0), linestyle=lines[i], color='#a50026', label=labels1[i])
    ax2.plot(np.reshape(subset['co2_marg'].values, (-1, 24)).mean(axis=0), linestyle=lines[i], color='#313695', label=labels1[i])
    if i == 0:
        ax.fill_between(np.arange(0, 24), np.percentile(np.reshape(subset['co2_tot'].values / subset['total_incl_noncombustion'].values, (-1, 24)), 25, axis=0), np.percentile(np.reshape(subset['co2_tot'].values / subset['total_incl_noncombustion'].values, (-1, 24)), 75, axis=0), alpha=0.2, color='#a50026')
        ax2.fill_between(np.arange(0, 24), np.percentile(np.reshape(subset['co2_marg'].values, (-1, 24)), 25, axis=0), np.percentile(np.reshape(subset['co2_marg'].values, (-1, 24)), 75, axis=0), alpha=0.2, color='#313695')
# Manual legends: color encodes the metric, line style encodes day type.
legend_elements1 = [Line2D([0], [0], color='#a50026', lw=4, label='Average Emissions'),
                    Line2D([0], [0], color='#313695', lw=4, label='Marginal Emissions'),
                    Patch(facecolor='grey', edgecolor='grey', alpha=0.2, label='25-75th percentile range')]
legend_elements2 = [Line2D([0], [0], color='k', linestyle=lines[0], lw=2, label='Weekday'),
                    Line2D([0], [0], color='k', linestyle=lines[1], lw=2, label='Weekend')]
ax2.legend(handles=legend_elements1, loc='lower left', fontsize=12, framealpha=1.0, fancybox=True)
ax.legend(handles=legend_elements2, loc='upper right', fontsize=12, framealpha=1.0, fancybox=True)
ax.set_ylabel('Average Emissions [kg CO2 / MWh]', fontsize=14)
ax2.set_ylabel('Marginal Emissions [kg CO2 / MWh]', fontsize=14)
ax.set_yticks([100, 150, 200, 250, 300, 350])
ax.set_yticklabels([100, 150, 200, 250, 300, 350], fontsize=12)
ax2.set_yticks([350, 400, 450, 500, 550, 600, 650, 700, 750])
ax2.set_yticklabels([350, 400, 450, 500, 550, 600, 650, 700, 750], fontsize=12)
ax.set_xlim([0, 24]); ax.set_xticks(np.arange(0, 24, 1)); ax.set_xticklabels(np.arange(0, 24, 1), fontsize=12)
ax2.set_xlim([0, 24]); ax2.set_xticks(np.arange(0, 24, 1))
ax.set_xlabel('Hour of day', fontsize=14)
plt.tight_layout()
# Save before show() so the rendered figure reaches the PDF.
plt.savefig('SupplementPlotting/Plots/supfig10_2035.pdf', bbox_inches='tight')
plt.show()
|
GridModel_GridImpact/SupplementPlotting/supfig10_mefs.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Load the weather data and keep only station 1; merge in the sales data via
# the store->station key. NOTE(review): pd/np are not imported in this
# notebook chunk -- presumably imported in an earlier cell; verify.
weather = pd.read_csv("../weather3_180703.csv")
weather['date'] = pd.to_datetime(weather["date"])
station = weather[weather['station_nbr'] == 1]
station.tail()
train = pd.read_csv("../data/train.csv")
train.date = pd.to_datetime(train.date)
train.tail()
key = pd.read_csv("../data/key.csv")
station = station.merge(key)
station = station.merge(train)
station.tail()
# Log-transform the sales target (log1p handles zero units).
station['log1p_units'] = np.log1p(station.units)
# Keep only items whose units are not all zero (translated from Korean).
station["item_nbr"] = station["item_nbr"].astype("str")
item_number = station.groupby("item_nbr")["units"].agg(sum)
stn_item = item_number[item_number != 0].index
# Bug fix: stn_real was initialized twice in the original; the first
# assignment was dead code and has been removed.
stn_real = pd.DataFrame(columns = station.columns)
for i in range(len(stn_item)):
    stn_real = pd.concat([stn_real, station[station["item_nbr"] == stn_item[i]]], ignore_index=True)
# ## 전체 데이터로 다시 OLS
# +
target1 = station['units']
target2 = station['log1p_units']
station.drop(columns=['units','log1p_units'],inplace=True)
station.tail()
# -
df1 = pd.concat([station,target1], axis=1)
df2 = pd.concat([station,target2], axis=1)
# ##### 1. OLS : df1 (units)
model1 = sm.OLS.from_formula('units ~ tmax + tmin + tavg + dewpoint + wetbulb + heat + cool + preciptotal + stnpressure + sealevel \
+ resultspeed + C(resultdir) + avgspeed + C(year) + C(month) + relative_humility + windchill + weekend \
+ C(rainY) + C(store_nbr) + C(item_nbr)', data = df1)
result1 = model1.fit()
print(result1.summary())
# ##### 2. OLS : df1 (units) - 스케일링
# - conditional number가 너무 높음.
model1_1 = sm.OLS.from_formula('units ~ scale(tmax) + scale(tmin) + scale(tavg) + scale(dewpoint) + scale(wetbulb) + scale(heat) + scale(cool)\
+ scale(preciptotal) + scale(stnpressure) + scale(sealevel) + scale(resultspeed) \
+ C(resultdir) + scale(avgspeed) + C(year) + C(month) + scale(relative_humility) + scale(windchill) + C(weekend) \
+ C(rainY) + C(store_nbr) + C(item_nbr)', data = df1)
result1_1 = model1_1.fit()
print(result1_1.summary())
# 스케일링을 했으나 conditional number가 크게 떨어지진 않았다.
# ##### 3. OLS : df1 (units) - 아웃라이어 제거
# 아웃라이어 제거
# Cook's distance > 2 인 값 제거
influence = result1.get_influence()
cooks_d2, pvals = influence.cooks_distance
fox_cr = 4 / (len(df1) - 2)
idx_outlier = np.where(cooks_d2 > fox_cr)[0]
len(idx_outlier)
idx = list(set(range(len(df1))).difference(idx_outlier))
df1_1 = df1.iloc[idx, :].reset_index(drop=True)
df1_1
# OLS - df1_1
model1_1_1 = sm.OLS.from_formula('units ~ scale(tmax) + scale(tmin) + scale(tavg) + scale(dewpoint) + scale(wetbulb) + scale(heat) + scale(cool)\
+ scale(preciptotal) + scale(stnpressure) + scale(sealevel) + scale(resultspeed) \
+ C(resultdir) + scale(avgspeed) + C(year) + C(month) + scale(relative_humility) + scale(windchill) + C(weekend) \
+ C(rainY) + C(store_nbr) + C(item_nbr)', data = df1_1)
result1_1_1 = model1_1_1.fit()
print(result1_1_1.summary())
# conditional number에 전혀 변화가 없다...
# ##### 4. 변수변환 : df2 (log1p_units)
model2 = sm.OLS.from_formula('log1p_units ~ scale(tmax) + scale(tmin) + scale(tavg) + scale(dewpoint) + scale(wetbulb) + scale(heat) + scale(cool)\
+ scale(preciptotal) + scale(stnpressure) + scale(sealevel) + scale(resultspeed) \
+ C(resultdir) + scale(avgspeed) + C(year) + C(month) + scale(relative_humility) + scale(windchill) + C(weekend) \
+ C(rainY) + C(store_nbr) + C(item_nbr)', data = df2)
result2 = model2.fit()
print(result2.summary())
# units에 log를 취하여 R square값은 올랐지만, 여전히 conditional number는 그대로. 상관관계가 높은 변수 제거해야할 거 같다.
# ##### 5. 변수변환 : df2 (log1p_units) + 아웃라이어 제거
# 아웃라이어 제거
# Cook's distance > 2 인 값 제거
influence = result2.get_influence()
cooks_d2, pvals = influence.cooks_distance
fox_cr = 4 / (len(df2) - 2)
idx_outlier = np.where(cooks_d2 > fox_cr)[0]
len(idx_outlier)
idx = list(set(range(len(df2))).difference(idx_outlier))
df2_1 = df2.iloc[idx, :].reset_index(drop=True)
df2_1
# OLS - df2_1
model2_1 = sm.OLS.from_formula('log1p_units ~ scale(tmax) + scale(tmin) + scale(tavg) + scale(dewpoint) + scale(wetbulb) + scale(heat) + scale(cool)\
+ scale(preciptotal) + scale(stnpressure) + scale(sealevel) + scale(resultspeed) \
+ C(resultdir) + scale(avgspeed) + C(year) + C(month) + scale(relative_humility) + scale(windchill) + C(weekend) \
+ C(rainY) + C(store_nbr) + C(item_nbr)', data = df2_1)
result2_1 = model2_1.fit()
print(result2_1.summary())
# Explanatory power improved (0.870); the condition number is unchanged
# (translated from Korean).
# ##### 6. Variable transform: df2 (log1p_units) + outlier removal +
# preciptotal transform (translated)
# +
# OLS - df2_1_1: same model as section 5 but with log1p(preciptotal).
model2_1_1 = sm.OLS.from_formula('log1p_units ~ scale(tmax) + scale(tmin) + scale(tavg) + scale(dewpoint) + scale(wetbulb) + scale(heat) + scale(cool)\
            + scale(np.log1p(preciptotal)) + scale(stnpressure) + scale(sealevel) + scale(resultspeed) \
            + C(resultdir) + scale(avgspeed) + C(year) + C(month) + scale(relative_humility) + scale(windchill) + C(weekend) \
            + C(rainY) + C(store_nbr) + C(item_nbr)', data = df2_1)
# Fix: the model was fit twice in the original; an OLS fit is deterministic,
# so fit once and reuse the result (both names are used downstream).
result = model2_1_1.fit()
result2_1_1 = result
print(result2_1_1.summary())
# -
# No change (same as result 5) (translated from Korean).
# #### 7. Normality test on result2's residuals: not normal (translated)
sp.stats.probplot(result2_1_1.resid, plot=plt)
plt.show()
# ##### 8. Reducing multicollinearity: VIF (translated)
# NOTE(review): df_vif is built here but the VIF computation further below
# uses dfX instead -- confirm this selection is still needed.
df_vif = station[['station_nbr','tmax', 'tmin', 'tavg', 'depart', 'dewpoint',
       'wetbulb', 'heat', 'cool', 'sunrise', 'sunset', 'snowfall',
       'preciptotal', 'stnpressure', 'sealevel', 'resultspeed', 'resultdir',
       'avgspeed', 'year', 'month', 'day', 'relative_humility', 'windchill',
       'daytime', 'week7', 'weekend', 'code_change', 'rainY', 'otherY',
       'nothing', 'store_nbr', 'item_nbr']]
# ##### 9. 다중공선성 감소시키기 : PCA
from patsy import dmatrix
formula = "scale(tavg) + scale(dewpoint) + scale(wetbulb) + scale(heat) + scale(cool) \
+ scale(preciptotal) + scale(stnpressure) + scale(sealevel) + scale(resultspeed) \
+ scale(avgspeed) + scale(relative_humility) + scale(windchill) + C(item_nbr) \
+ C(store_nbr) + C(weekend) + C(rainY) + C(otherY) + C(nothing) + 0"
dfX = dmatrix(formula, station, return_type="dataframe")
dfy = pd.DataFrame(target1, columns=["units"])
idx = list(set(range(len(df2))).difference(idx_outlier))
dfX = dfX.iloc[idx, :].reset_index(drop=True)
dfy = dfy.iloc[idx, :].reset_index(drop=True)
# ##### ***tmax, tmin빠져서 conditional number 감소
model_units = sm.OLS(np.log1p(dfy), dfX)
result_units = model_units.fit()
print(result_units.summary())
from sklearn.decomposition import PCA
dfX2 = sm.add_constant(pd.DataFrame(PCA(n_components=25).fit_transform(dfX)))
model_units2 = sm.OLS(np.log1p(dfy), dfX2)
result_units2 = model_units2.fit()
print(result_units2.summary())
# R-squared came out at 0.00... (translated from Korean)
# ##### 9. Reducing multicollinearity: regularization (translated)
# Use the model from section 6: pure ridge is L1_wt=0, pure lasso is L1_wt=1
# (translated). NOTE(review): this rebinds `result2`, shadowing the section-4
# OLS fit of the same name -- confirm the earlier result is no longer needed.
result2 = model2_1_1.fit_regularized(alpha=0.001, L1_wt=0)
result2.params[result2.params>0]
# Keep only items whose units are not all zero (translated from Korean).
# NOTE(review): this cell duplicates the filtering cell near the top of the
# notebook, and `units` was dropped from `station` in the OLS section above,
# so re-running top-to-bottom would raise a KeyError here -- confirm the
# intended cell execution order.
stn_real = pd.DataFrame(columns = station.columns)
station["item_nbr"] = station["item_nbr"].astype("str")
item_number = station.groupby("item_nbr")["units"].agg(sum)
stn_item = item_number[item_number != 0].index
stn_real = pd.DataFrame(columns = station.columns)
for i in range(len(stn_item)):
    stn_real = pd.concat([stn_real, station[station["item_nbr"] == stn_item[i]]], ignore_index=True)
# +
# station = station[station["units"] > 0].reset_index(drop=True)
# +
# set(range(len(station)))
# len(stn_real)
# Leverage (hat matrix diagonal) of the section-6 fit (`result`).
influence = result.get_influence()
hat = influence.hat_matrix_diag
plt.figure(figsize=(10, 2))
plt.stem(hat)
plt.show()
# -
# Drop the section-5 outliers from the station table itself.
idx = list(set(range(len(station))).difference(idx_outlier))
df = station.iloc[idx, :].reset_index(drop=True)
df
# Final no-intercept model on the reduced feature set (tmax/tmin dropped).
model = sm.OLS.from_formula('log1p_units ~ scale(tavg) + scale(dewpoint) + scale(wetbulb) + scale(heat) + scale(cool) \
+ scale(np.log1p(preciptotal)) + scale(stnpressure) + scale(sealevel) + scale(resultspeed) \
+ scale(avgspeed) + scale(relative_humility) + scale(windchill)  + C(item_nbr) \
+ C(store_nbr) + C(weekend) + C(rainY) + C(otherY) + C(nothing) - 1', data=df)
result = model.fit()
print(result.summary())
# Check for heteroscedasticity: residuals vs. a regressor (translated).
plt.scatter(df['heat'], result.resid)
plt.show()
sns.distplot((np.log(np.sqrt(station['tavg']))))
plt.show()
df.corr()
# +
from statsmodels.stats.outliers_influence import variance_inflation_factor
# Variance inflation factor for every column of the design matrix dfX.
vif = pd.DataFrame()
vif["VIF Factor"] = [variance_inflation_factor(dfX.values, i) for i in range(dfX.shape[1])]
vif["features"] = dfX.columns
vif
# -
# Bug fix: df's columns are string labels, so the label-based slice
# df.loc[:, 1:] raises TypeError; .iloc selects "all columns after the
# first" by position, which is the evident intent.
df.iloc[:, 1:]
df1.columns
df1_1.describe()
|
weather2_code/station1-dropZeroUnits.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <table width="100%"> <tr>
# <td style="background-color:#ffffff;">
# <a href="http://qworld.lu.lv" target="_blank"><img src="../images/qworld.jpg" width="35%" align="left"> </a></td>
# <td style="background-color:#ffffff;vertical-align:bottom;text-align:right;">
# prepared by <NAME> (<a href="http://qworld.lu.lv/index.php/qlatvia/" target="_blank">QLatvia</a>)
# <br>
# <NAME> | September 16, 2019 (updated) <br>
# <NAME> | September 18, 2020 (updated)
# </td>
# </tr></table>
# <table width="100%"><tr><td style="color:#bbbbbb;background-color:#ffffff;font-size:11px;font-style:italic;text-align:right;">This cell contains some macros. If there is a problem with displaying mathematical formulas, please run this cell to load these macros. </td></tr></table>
# $ \newcommand{\bra}[1]{\langle #1|} $
# $ \newcommand{\ket}[1]{|#1\rangle} $
# $ \newcommand{\braket}[2]{\langle #1|#2\rangle} $
# $ \newcommand{\dot}[2]{ #1 \cdot #2} $
# $ \newcommand{\biginner}[2]{\left\langle #1,#2\right\rangle} $
# $ \newcommand{\mymatrix}[2]{\left( \begin{array}{#1} #2\end{array} \right)} $
# $ \newcommand{\myvector}[1]{\mymatrix{c}{#1}} $
# $ \newcommand{\myrvector}[1]{\mymatrix{r}{#1}} $
# $ \newcommand{\mypar}[1]{\left( #1 \right)} $
# $ \newcommand{\mybigpar}[1]{ \Big( #1 \Big)} $
# $ \newcommand{\sqrttwo}{\frac{1}{\sqrt{2}}} $
# $ \newcommand{\dsqrttwo}{\dfrac{1}{\sqrt{2}}} $
# $ \newcommand{\onehalf}{\frac{1}{2}} $
# $ \newcommand{\donehalf}{\dfrac{1}{2}} $
# $ \newcommand{\hadamard}{ \mymatrix{rr}{ \sqrttwo & \sqrttwo \\ \sqrttwo & -\sqrttwo }} $
# $ \newcommand{\vzero}{\myvector{1\\0}} $
# $ \newcommand{\vone}{\myvector{0\\1}} $
# $ \newcommand{\vhadamardzero}{\myvector{ \sqrttwo \\ \sqrttwo } } $
# $ \newcommand{\vhadamardone}{ \myrvector{ \sqrttwo \\ -\sqrttwo } } $
# $ \newcommand{\myarray}[2]{ \begin{array}{#1}#2\end{array}} $
# $ \newcommand{\X}{ \mymatrix{cc}{0 & 1 \\ 1 & 0} } $
# $ \newcommand{\Z}{ \mymatrix{rr}{1 & 0 \\ 0 & -1} } $
# $ \newcommand{\Htwo}{ \mymatrix{rrrr}{ \frac{1}{2} & \frac{1}{2} & \frac{1}{2} & \frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & \frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} & \frac{1}{2} } } $
# $ \newcommand{\CNOT}{ \mymatrix{cccc}{1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \\ 0 & 0 & 1 & 0} } $
# $ \newcommand{\norm}[1]{ \left\lVert #1 \right\rVert } $
# <h2>Rotations</h2>
# <table align="left"><tr><td><i>
# We use certain tools from python library "<b>matplotlib.pyplot</b>" for drawing.
#
# Check the notebook "<a href="../python/Python06_Drawing.ipynb" target="_blank">Python: Drawing</a>" for the list of these tools.
# </i></td></tr></table>
# We start with drawing the quantum states $ \ket{0} $ and $ \ket{+} = \myvector{ \frac{1}{\sqrt{2}} \\ \frac{1}{\sqrt{2}} } $.
# <i style="font-size:10pt;">
# Our predefined function "draw_qubit()" draws a figure, the origin, the axes, the unit circle, and base quantum states.
# <br>
# Our predefined function "draw_quantum_state(x,y,name)" draws an arrow from (0,0) to (x,y) and associates it with <u>name</u>.
# <br>
# We include our predefined functions with the following line of code:
#
# # %run qlatvia.py
# </i>
# +
# %run qlatvia.py
# Draw the unit circle with |0> and the |+> state, then annotate the pi/4
# angle between them.
draw_qubit()
sqrttwo=2**0.5
draw_quantum_state(1,0,"")  # |0> along the x-axis (unlabeled arrow)
draw_quantum_state(1/sqrttwo,1/sqrttwo,"|+>")
# drawing the angle with |0>-axis
from matplotlib.pyplot import gca, text
from matplotlib.patches import Arc
# Arc from 0 to 45 degrees marks the angle; the text places a dot and the
# pi/4 label (\u03C0 is the Greek letter pi).
gca().add_patch( Arc((0,0),0.4,0.4,angle=0,theta1=0,theta2=45) )
text(0.08,0.05,'.',fontsize=30)
text(0.21,0.09,'\u03C0/4')
# -
# <h3> Task 1 (Discussion) </h3>
#
# Suppose that we start in state $ \ket{0} $, and then obtain state $\ket{+}$ by applying a rotation operator.
#
# If we apply the same operator consecutively, what can be the new quantum state?
#
# Here are eight candidates:
#
# $$
# \myarray{|c|c|c|c|}{
# \hline
# A & \ket{0} = \vzero & E & \ket{+} = \myrvector{\sqrttwo \\ \sqrttwo}
# \\ \hline
# B & \ket{1} = \vone & F & \ket{-} = \myrvector{\sqrttwo \\ -\sqrttwo}
# \\ \hline
# C & -\ket{0} = \myrvector{-1 \\ 0} & G & -\ket{+} = \myrvector{-\sqrttwo \\ -\sqrttwo}
# \\ \hline
# D & -\ket{1} = \myrvector{0 \\ -1} & H & -\ket{-} = \myrvector{-\sqrttwo \\ \sqrttwo}
# \\ \hline
# }
# $$
# <h3> Rotation with degree $\pi/4$ </h3>
# Its matrix form is similar to Hadamard operator:
#
# $$ R(\pi/4) = \mymatrix{rr}{\cos(\pi/4) & -\sin(\pi/4) \\ \sin(\pi/4) & \cos(\pi/4) }
# = \mymatrix{rr}{ \sqrttwo & -\sqrttwo \\ \sqrttwo & \sqrttwo} $$.
# Let us start with state $ \ket{0} $ and apply $ R(\pi/4) $ 7 times, and draw each state on the unit circle.
# +
# %run qlatvia.py
# Start from |0> = (1, 0) on the unit circle and label it v0.
draw_qubit()
[x,y]=[1,0]
draw_quantum_state(x,y,"v0")
sqrttwo = 2**0.5
oversqrttwo = 1/sqrttwo
R = [ [oversqrttwo, -1*oversqrttwo], [oversqrttwo,oversqrttwo] ]
# function for rotation R
def rotate(px,py,matrix=None):
    """Return [newx, newy], the image of the point (px, py) under a 2x2 matrix.

    By default the global matrix R (the pi/4 rotation defined above) is applied,
    which preserves the original call style rotate(x, y). Pass `matrix` to apply
    a different 2x2 operator without touching the global.
    """
    m = R if matrix is None else matrix
    newx = m[0][0]*px + m[0][1]*py
    newy = m[1][0]*px + m[1][1]*py
    return [newx,newy]
# apply rotation R 7 times
# (8 rotations of pi/4 would return to |0>, so 7 steps visit every remaining octant)
for i in range(1,8):
    [x,y] = rotate(x,y)
    draw_quantum_state(x,y,"|v"+str(i)+">")
# -
# <h3> Rotation with degree $\theta$ </h3>
# Recall that the matrix form of rotation is as follows:
#
# $$ R(\theta) = \mymatrix{rr}{\cos(\theta) & -\sin(\theta) \\ \sin(\theta) & \cos(\theta) }, $$
#
# where $ \theta $ is the angle of rotation (in counter-clockwise direction).
# <h3> Rotations with ry-gate </h3>
# **Technical Remark**
#
# Even though, we focus on only real-valued quantum systems in this tutorial, the quantum state of a qubit is represented by 2-dimensional complex-valued vector in general. To visually represent a complex number, we use two dimensions. So, to visually represent the state of a qubit, we use four dimensions.
#
# On the other hand, we can still visualize any state of a qubit. Recall that this representation is called as <i>Bloch sphere</i>.
#
# The rotation operators over a single (complex-valued) qubit are defined on Bloch sphere. The names of gates "x", "y", or "z" refer to the axes on Bloch sphere. When we focus on real-valued qubit, then we should be careful about the parameter(s) that a gate takes.
#
# In qiskit, ry-gate makes a rotation around $y$-axis with the given angle, say $\theta$, on Bloch sphere.
#
# $ Ry(\theta)=\mymatrix{rr}{\cos(\theta/2) & - \sin(\theta/2) \\ \sin(\theta/2) & \cos(\theta/2) }$
#
#
# This refers to a rotation in our real-valued $\ket{0}$-$\ket{1}$ plane with angle $ \frac{\theta}{2} $. Therefore, <b>we should provide twice the desired angle in this tutorial.</b>
#
# If our angle is $ \theta $ radians, then we pass $ 2 \theta $ radians as the parameter to ry-gate.
#
# Then ry-gate implements the rotation with angle $\theta$.
#
# The default direction of a rotation by ry-gate is counterclockwise.
#
# ``` python
# mycircuit.ry(angle_of_rotation,quantum_register)
# ```
# <h3> Example </h3>
#
# We implement the rotation with angle $ \pi/4 $ in qiskit.
#
# We start with state $\ket{0}$, and apply ry-gate 8 times.
#
# After each iteration, <b>we read the quantum state from the local simulator</b>, and then draw it.
#
# +
# %run qlatvia.py
draw_qubit()
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer
from math import pi
# we define a quantum circuit with one qubit and one bit
qreg1 = QuantumRegister(1) # quantum register with a single qubit
creg1 = ClassicalRegister(1) # classical register with a single bit
mycircuit1 = QuantumCircuit(qreg1,creg1) # quantum circuit with quantum and classical registers
rotation_angle = pi/4
# apply the rotation 8 times, reading and drawing the state after each step
for i in range(1,9):
    # ry takes twice the desired rotation angle (see the technical remark above)
    mycircuit1.ry(2*rotation_angle,qreg1[0])
    # the following code is used to get the quantum state of the quantum register
    job = execute(mycircuit1,Aer.get_backend('statevector_simulator'),optimization_level=0)
    current_quantum_state=job.result().get_statevector(mycircuit1)
    print("iteration",i,": the quantum state is",current_quantum_state)
    x_value = current_quantum_state[0].real # get the amplitude of |0>
    y_value = current_quantum_state[1].real # get the amplitude of |1>
    draw_quantum_state(x_value,y_value,"|v"+str(i)+">")
# -
# <h3> Task 2 </h3>
#
# Repeat the same example 12 times for angle $ \pi/6 $.
#
# Repeat the same example 16 times for angle $ 3\pi/8 $.
#
# Repeat the same example 20 times for angle $ \sqrt{2}\pi $.
#
# your code is here
#
# <h3> Task 3 (discussion) </h3>
#
# Let define a rotation angle as $ \theta_\alpha = \alpha \cdot (2\pi) $.
#
# When starting in state $ \ket{0} $ and applying the rotation with angle $ \theta_\alpha $ arbitrarily many times, the state $ \ket{0} $ can be visited again or not?
# - For which values of $\alpha$ will the state $\ket{0}$ be visited?
# - For which values of $\alpha$ will the state $\ket{0}$ never be visited?
# <hr>
# Remember the unitary backend.
#
# Unitary_simulator gives a unitary representation of all gates in the circuit until this point.
#
# ``` python
# job = execute(circuit, Aer.get_backend('unitary_simulator'))
# current_unitary = job.result().get_unitary(circuit, decimals=3)
# print(current_unitary)
# ```
#
#
# <h3> Task 4 </h3>
#
# Create a quantum circuit with one qubit and one bit.
#
# Rotate the qubit with angle $ \pi/4 $ eight times.
#
# After each rotation, print the unitary operator representing the whole rotations until this point.
#
# Verify your results by printing the rotation matrix in python.
# +
#
# your code is here
#
# -
# <a href="B64_Rotations_Solutions.ipynb#task4">click for our solution</a>
# <h3>Task 5 [Extra]</h3>
#
# In this task, you are going to prove that two consecutive reflections is a rotation.
#
# Multiply the matrices for the two operators $\mathrm{Ref}(\theta_{1})$ and $\mathrm{Ref}(\theta_{2})$ and prove that the effect of two consecutive reflections (first over $\theta_1$ and then over $\theta_2$) is a rotation with angle $2(\theta_2 - \theta_{1})$.
#
# Hint: The following formula will be useful.
#
# $
# \begin{align*}
# \sin(a+b)&=\sin(a)\cos(b)+\cos(a)\sin(b)\\
# \sin(a-b)&=\sin(a)\cos(b)-\cos(a)\sin(b)\\
# \cos(a+b)&=\cos(a)\cos(b)-\sin(a)\sin(b)\\
# \cos(a-b)&=\cos(a)\cos(b)+\sin(a)\sin(b)
# \end{align*}
# $
# <a href="B64_Rotations_Solutions.ipynb#task5">click for our solution</a>
# <h3> Task 6 [Extra] </h3>
#
# We randomly pick an angle $ \theta \in [0,2\pi) $.
#
# We have two separate qubits initially set to state $ \ket{0} $.
#
# The first qubit is rotated by the angle $ \theta $ and the second qubit is rotated by the angle $ \theta + \pi/2 $.
#
# Implement each qubit and its rotation as a separate quantum circuit.
#
# Then, read both quantum states and calculate their dot product.
#
# Check the result of the dot product for different random angles.
#
# your code is here
#
# <a href="B64_Rotations_Solutions.ipynb#task6">click for our solution</a>
# <h3> Task 7 [Extra]</h3>
#
# We randomly pick an angle $ \theta \in [0,2\pi) $.
#
# We have a single qubit initially set to state $ \ket{0} $.
#
# The qubit is rotated by the angle either $ \theta_1 = \theta $ or $ \theta_2 = \theta-\pi/2 $.
#
# You are allowed to do one more rotation $ \theta' $ and then make a measurement.
#
# Can you determine the angle of the first rotation angle by looking/using the measurement result? Is it $ \theta_1 $ or $ \theta_2 $?
#
# Check your solution for different random angles.
# +
from random import randrange
from math import pi
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer
# pick a random angle in [0, 360) degrees (0.1-degree resolution) and convert to radians
random_angle = randrange(3600)/10
rotation_angle1 = random_angle/360*2*pi
rotation_angle2 = rotation_angle1 - pi/2
# we define a quantum circuit with one qubit and one bit
q = QuantumRegister(1) # quantum register with a single qubit
c = ClassicalRegister(1) # classical register with a single bit
qc = QuantumCircuit(q,c) # quantum circuit with quantum and classical registers
# secretly rotate by either theta1 or theta2 (remember: ry takes twice the angle)
if randrange(2)==0:
    qc.ry(2 * rotation_angle1,q[0])
    picked_angle = "theta1"
else:
    qc.ry(2 * rotation_angle2,q[0])
    picked_angle = "theta2"
#
# your code is here
#
your_guess = ""
######################
# compare your guess against the secretly picked angle
print("your guess is",your_guess)
print("picked_angle is",picked_angle)
# -
# <a href="B64_Rotations_Solutions.ipynb#task7">click for our solution</a>
# <hr>
#
# ### Initializing a qubit with ry-gate
#
# Rotations can be used to initialize qubits too. Default initialization state of qubits in qiskit is $\ket{0}$. Rotations can be applied to such qubits to have a desired arbitrary initialization. Let us create a circuit with a single qubit and set its state to $ \ket{v} = \myvector{\cos \theta \\ \sin \theta} $ where $\theta= \dfrac{2 \pi}{3}$.
# +
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer
from math import pi
# we define a quantum circuit with one qubit and one bit
qreg1 = QuantumRegister(1) # quantum register with a single qubit
creg1 = ClassicalRegister(1) # classical register with a single bit
mycircuit1 = QuantumCircuit(qreg1,creg1) # quantum circuit with quantum and classical registers
# angle of rotation in radian
rotation_angle = 2*pi/3
# rotate the qubit with rotation_angle
# (ry takes twice the desired angle -- this sets the state to (cos(2pi/3), sin(2pi/3)))
mycircuit1.ry(2*rotation_angle,qreg1[0])
# measure the qubit
mycircuit1.measure(qreg1,creg1)
# draw the circuit
mycircuit1.draw(output='mpl')
# +
# execute the program 1000 times
job = execute(mycircuit1,Aer.get_backend('qasm_simulator'),shots=1000)
# print the results
counts = job.result().get_counts(mycircuit1)
print(counts) # counts is a dictionary
# +
from math import sin,cos
# the quantum state
quantum_state = [ cos(rotation_angle) , sin (rotation_angle) ]  # NOTE(review): unused below; kept for illustration
# probabilities of '0'/'1' are the squared amplitudes cos^2 / sin^2 of the angle
the_expected_number_of_zeros = 1000*cos(rotation_angle)**2
the_expected_number_of_ones = 1000*sin(rotation_angle)**2
# expected results
print("The expected value of observing '0' is",round(the_expected_number_of_zeros,4))
print("The expected value of observing '1' is",round(the_expected_number_of_ones,4))
|
bronze/B64_Rotations.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] heading_collapsed=true
# # 0.0. IMPORTS
# + hidden=true
import math
import numpy as np
import pandas as pd
import inflection
import datetime
import seaborn as sns
from scipy import stats as ss
from tabulate import tabulate
from matplotlib import pyplot as plt
from IPython.display import Image
from IPython.core.display import HTML
from sklearn.preprocessing import RobustScaler, MinMaxScaler, LabelEncoder
# + [markdown] heading_collapsed=true hidden=true
# ## 0.1. Helper Functions
# + hidden=true
def cramer_v( x, y ):
    """Bias-corrected Cramér's V association strength between two categorical series.

    Builds the contingency table of x vs y, computes the chi-squared statistic,
    and applies the Bergsma-Wicher bias correction before normalizing to [0, 1].
    """
    table = pd.crosstab( x, y ).values
    n = table.sum()
    r, k = table.shape
    chi2 = ss.chi2_contingency( table )[0]
    # bias-corrected phi^2 and corrected table dimensions
    phi2_corr = max( 0, chi2 - (k-1)*(r-1)/(n-1) )
    r_corr = r - ((r-1)**2)/(n-1)
    k_corr = k - ((k-1)**2)/(n-1)
    return np.sqrt( ( phi2_corr/n ) / ( min( k_corr-1, r_corr-1 ) ) )
def jupyter_settings():
    """Configure notebook-wide display defaults: large figures, full-width cells,
    untruncated dataframes, and seaborn styling."""
    # %matplotlib inline
    # %pylab inline
    plt.style.use( 'bmh' )
    plt.rcParams['figure.figsize'] = [25, 12]
    plt.rcParams['font.size'] = 24
    # widen the notebook cells to the full browser width
    display( HTML( '<style>.container { width:100% !important; }</style>') )
    # never truncate rows/columns when displaying dataframes
    pd.options.display.max_columns = None
    pd.options.display.max_rows = None
    pd.set_option( 'display.expand_frame_repr', False )
    sns.set()
# + hidden=true
jupyter_settings()
# + [markdown] heading_collapsed=true hidden=true
# ## 0.2. Loading Data
# + hidden=true
# raw Rossmann data: daily sales rows plus static per-store metadata
df_sales_raw = pd.read_csv( 'data/train.csv', low_memory=False )
df_store_raw = pd.read_csv( 'data/store.csv', low_memory=False )
# merge: left join keeps every sales row, enriched with its store's attributes
df_raw = pd.merge( df_sales_raw, df_store_raw, how='left', on='Store' )
# + [markdown] heading_collapsed=true
# # 1.0. DESCRICAO DOS DADOS
# + hidden=true
df1 = df_raw.copy()
# + [markdown] heading_collapsed=true hidden=true
# ## 1.1. Rename Columns
# + hidden=true
# original (CamelCase) column names coming from the raw CSVs
cols_old = ['Store', 'DayOfWeek', 'Date', 'Sales', 'Customers', 'Open', 'Promo', 'StateHoliday', 'SchoolHoliday',
'StoreType', 'Assortment', 'CompetitionDistance', 'CompetitionOpenSinceMonth', 'CompetitionOpenSinceYear',
'Promo2', 'Promo2SinceWeek', 'Promo2SinceYear', 'PromoInterval']
# convert every name to snake_case and rename the dataframe columns
cols_new = [ inflection.underscore( name ) for name in cols_old ]
df1.columns = cols_new
# + [markdown] heading_collapsed=true hidden=true
# ## 1.2. Data Dimensions
# + hidden=true
print( 'Number of Rows: {}'.format( df1.shape[0] ) )
print( 'Number of Cols: {}'.format( df1.shape[1] ) )
# + [markdown] heading_collapsed=true hidden=true
# ## 1.3. Data Types
# + hidden=true
df1['date'] = pd.to_datetime( df1['date'] )
df1.dtypes
# + [markdown] heading_collapsed=true hidden=true
# ## 1.4. Check NA
# + hidden=true
df1.isna().sum()
# + [markdown] heading_collapsed=true hidden=true
# ## 1.5. Fillout NA
# + code_folding=[] hidden=true
#competition_distance -- NaN presumably means no competitor nearby (TODO confirm);
# impute a very large distance so the "far competitor" signal is preserved numerically
df1['competition_distance'] = df1['competition_distance'].apply( lambda x: 200000.0 if math.isnan( x ) else x )
#competition_open_since_month -- when unknown, fall back to the sale date's own month
df1['competition_open_since_month'] = df1.apply( lambda x:
x['date'].month if math.isnan( x['competition_open_since_month'] ) else
x['competition_open_since_month'], axis=1 )
#competition_open_since_year -- same fallback, using the sale date's year
df1['competition_open_since_year'] = df1.apply( lambda x:
x['date'].year if math.isnan( x['competition_open_since_year'] ) else
x['competition_open_since_year'], axis=1 )
#promo2_since_week -- fall back to the sale date's week
df1['promo2_since_week'] = df1.apply( lambda x: x['date'].week if math.isnan( x['promo2_since_week'] ) else
x['promo2_since_week'], axis=1 )
#promo2_since_year -- fall back to the sale date's year
df1['promo2_since_year'] = df1.apply( lambda x: x['date'].year if math.isnan( x['promo2_since_year'] ) else
x['promo2_since_year'], axis=1 )
#promo_interval -- month abbreviation lookup used to test promo membership below
# NOTE(review): keys 2 -> 'Fev' and 9 -> 'Sep' look suspicious; confirm they match the
# spellings inside the raw PromoInterval strings (commonly 'Feb' and 'Sept'),
# otherwise is_promo will never match those months.
month_map = {1: 'Jan', 2: 'Fev', 3: 'Mar', 4: 'Apr', 5: 'May', 6: 'Jun', 7: 'Jul', 8: 'Aug', 9: 'Sep', 10: 'Oct', 11: 'Nov', 12: 'Dec'}
df1['promo_interval'].fillna( 0, inplace=True )
df1['month_map'] = df1['date'].dt.month.map( month_map )
# is_promo = 1 when the row's month abbreviation appears in the store's promo_interval list
df1['is_promo'] = df1[['promo_interval', 'month_map']].apply( lambda x: 0 if x['promo_interval'] == 0 else
1 if x['month_map'] in x['promo_interval'].split(',') else 0, axis=1 )
# + hidden=true
df1.sample(5)
# + hidden=true
df1.isna().sum()
# + [markdown] heading_collapsed=true hidden=true
# ## 1.6. Change Data Types
# + hidden=true
# competition
df1['competition_open_since_month'] = df1['competition_open_since_month'].astype( int )
df1['competition_open_since_year'] = df1['competition_open_since_year'].astype( int )
# promo2
df1['promo2_since_week'] = df1['promo2_since_week'].astype( int )
df1['promo2_since_year'] = df1['promo2_since_year'].astype( int )
# + [markdown] heading_collapsed=true hidden=true
# ## 1.7. Descriptive Statistics
# + hidden=true
num_attributes = df1.select_dtypes( include=['int64', 'float64'] )
cat_attributes = df1.select_dtypes( exclude=['int64', 'float64', 'datetime64[ns]'] )
# + [markdown] heading_collapsed=true hidden=true
# ### 1.7.1. Numerical Attributes
# + hidden=true
# Central Tendency - mean, median
ct1 = pd.DataFrame( num_attributes.apply( np.mean ) ).T
ct2 = pd.DataFrame( num_attributes.apply( np.median ) ).T
# Dispersion - std, max, min, range, skew, kurtosis
d1 = pd.DataFrame( num_attributes.apply( np.std ) ).T
d2 = pd.DataFrame( num_attributes.apply( min ) ).T
d3 = pd.DataFrame( num_attributes.apply( max ) ).T
d4 = pd.DataFrame( num_attributes.apply( lambda x: x.max() - x.min() ) ).T
d5 = pd.DataFrame( num_attributes.apply( lambda x: x.skew() ) ).T
d6 = pd.DataFrame( num_attributes.apply( lambda x: x.kurtosis() ) ).T
m =pd.concat( [d2, d3, d4, ct1, ct2, d1, d5, d6] ).T.reset_index()
m.columns = ['attributes', 'min', 'max', 'range', 'mean', 'median', 'std', 'skew', 'kurtosis']
m
# + hidden=true
sns.distplot( df1['competition_distance'], kde=False )
# + [markdown] heading_collapsed=true hidden=true
# ### 1.7.2. Categorical Attributes
# + hidden=true
cat_attributes.apply( lambda x: x.unique().shape[0] )
# + hidden=true
aux = df1[( df1['state_holiday'] != '0' ) & ( df1['sales'] > 0 )]
plt.subplot( 1, 3, 1 )
sns.boxplot( x='state_holiday' ,y='sales', data=aux )
plt.subplot( 1, 3, 2 )
sns.boxplot( x='store_type' ,y='sales', data=aux )
plt.subplot( 1, 3, 3 )
sns.boxplot( x='assortment' ,y='sales', data=aux )
# + [markdown] heading_collapsed=true
# # 2.0. FEATURE ENGINEERING
# + hidden=true
df2 = df1.copy()
# + [markdown] heading_collapsed=true hidden=true
# ## 2.1. Mapa Mental de Hipóteses
# + hidden=true hide_input=false
Image('img/mind_map_hyphotesis.png')
# + [markdown] heading_collapsed=true hidden=true
# ## 2.2. Criação das Hipóteses
# + [markdown] heading_collapsed=true hidden=true
# ### 2.2.1. Hipóteses Loja
# + [markdown] hidden=true
# **1.** Lojas com número maior de funcionários deveriam vender mais.
#
# **2.** Lojas com maior capacidade de estoque deveriam vender mais.
#
# **3.** Lojas com maior porte deveriam vender mais.
#
# **4.** Lojas com maior sortimentos deveriam vender mais.
#
# **5.** Lojas com competidores mais próximos deveriam vender menos.
#
# **6.** Lojas com competidores à mais tempo deveriam vendem mais.
# + [markdown] heading_collapsed=true hidden=true
# ### 2.2.2. Hipóteses Produto
# + [markdown] hidden=true
# **1.** Lojas que investem mais em Marketing deveriam vender mais.
#
# **2.** Lojas com maior exposição de produto deveriam vender mais.
#
# **3.** Lojas com produtos com preço menor deveriam vender mais.
#
# **4.** Lojas com promoções mais agressivas ( descontos maiores ), deveriam vender mais.
#
# **5.** Lojas com promoções ativas por mais tempo deveriam vender mais.
#
# **6.** Lojas com mais dias de promoção deveriam vender mais.
#
# **7.** Lojas com mais promoções consecutivas deveriam vender mais.
# + [markdown] heading_collapsed=true hidden=true
# ### 2.2.3. Hipóteses Tempo
# + [markdown] hidden=true
# **1.** Lojas abertas durante o feriado de Natal deveriam vender mais.
#
# **2.** Lojas deveriam vender mais ao longo dos anos.
#
# **3.** Lojas deveriam vender mais no segundo semestre do ano.
#
# **4.** Lojas deveriam vender mais depois do dia 10 de cada mês.
#
# **5.** Lojas deveriam vender menos aos finais de semana.
#
# **6.** Lojas deveriam vender menos durante os feriados escolares.
# + [markdown] heading_collapsed=true hidden=true
# ## 2.3. Lista Final de Hipóteses
# + [markdown] hidden=true
# **1.** Lojas com maior sortimentos deveriam vender mais.
#
# **2.** Lojas com competidores mais próximos deveriam vender menos.
#
# **3.** Lojas com competidores à mais tempo deveriam vender mais.
#
# **4.** Lojas com promoções ativas por mais tempo deveriam vender mais.
#
# **5.** Lojas com mais dias de promoção deveriam vender mais.
#
# **6.** Lojas com mais promoções consecutivas deveriam vender mais.
#
# **7.** Lojas abertas durante o feriado de Natal deveriam vender mais.
#
# **8.** Lojas deveriam vender mais ao longo dos anos.
#
# **9.** Lojas deveriam vender mais no segundo semestre do ano.
#
# **10.** Lojas deveriam vender mais depois do dia 10 de cada mês.
#
# **11.** Lojas deveriam vender menos aos finais de semana.
#
# **12.** Lojas deveriam vender menos durante os feriados escolares.
# + [markdown] heading_collapsed=true hidden=true
# ## 2.4. Feature Engineering
# + hidden=true
# year
df2['year'] = df2['date'].dt.year
# month
df2['month'] = df2['date'].dt.month
# day
df2['day'] = df2['date'].dt.day
# week of year
# NOTE(review): Series.dt.weekofyear is deprecated/removed in recent pandas --
# confirm the pinned version, or switch to dt.isocalendar().week when upgrading.
df2['week_of_year'] = df2['date'].dt.weekofyear
# year week
df2['year_week'] = df2['date'].dt.strftime( '%Y-%W' )
# competition since: reconstruct the competitor opening date (day fixed to the 1st)
df2['competition_since'] = df2.apply( lambda x: datetime.datetime( year=x['competition_open_since_year'],
month=x['competition_open_since_month'], day=1 ), axis=1 )
# months (30-day blocks) elapsed since the competitor opened
df2['competition_time_month'] = ( ( df2['date'] - df2['competition_since'] )/30 ).apply( lambda x: x.days ).astype( int )
# promo since: reconstruct the promo2 start date from its year/week pair
df2['promo_since'] = df2['promo2_since_year'].astype( str ) + '-' + df2['promo2_since_week'].astype( str )
df2['promo_since'] = df2['promo_since'].apply( lambda x: datetime.datetime.strptime( x + '-1', '%Y-%W-%w' ) - datetime.timedelta( days=7 ) )
df2['promo_time_week'] = ( ( df2['date'] - df2['promo_since'] )/7 ).apply( lambda x: x.days ).astype( int )
# assortment: decode single letters into readable labels (note: label is 'extend', not 'extended')
df2['assortment'] = df2['assortment'].apply( lambda x: 'basic' if x == 'a' else 'extra' if x=='b' else 'extend' )
# state holiday: decode single letters into readable labels
df2['state_holiday'] = df2['state_holiday'].apply( lambda x: 'public_holiday' if x == 'a' else
'easter_holiday' if x == 'b' else
'christmas' if x == 'c' else 'regular_day' )
# + [markdown] heading_collapsed=true
# # 3.0. FILTRAGEM DE VARIÁVEIS
# + hidden=true
df3 = df2.copy()
# + hidden=true
df3.head()
# + [markdown] heading_collapsed=true hidden=true
# ## 3.1. Filtragem das Linhas
# + hidden=true
df3 = df3[( df3['open'] != '0' ) & ( df3['sales'] > 0 )]
# + hidden=true
df3.head()
# + [markdown] heading_collapsed=true hidden=true
# ## 3.2. Seleção das Colunas
# + hidden=true
cols_drop = ['customers', 'open', 'promo_interval', 'month_map']
df3 = df3.drop( cols_drop, axis=1 )
# + hidden=true
df3.head()
# + [markdown] heading_collapsed=true
# # 4.0. ANALISE EXPLORATÓRIA DOS DADOS
# + hidden=true
df4 = df3.copy()
# + hidden=true
df4.head()
# + [markdown] heading_collapsed=true hidden=true
# ## 4.1. Análise Univariada
# + [markdown] heading_collapsed=true hidden=true
# ### 4.1.1. Response Variable
# + hidden=true
sns.distplot( df4['sales'], kde=False )
# + [markdown] heading_collapsed=true hidden=true
# ### 4.1.2. Numerical Variable
# + hidden=true
num_attributes = df4.select_dtypes( include= ['int64', 'float64'] )
cat_attributes = df4.select_dtypes( exclude=['int64', 'float64', 'datetime64[ns]'] )
# + hidden=true
num_attributes.hist( bins=25 );
# + [markdown] heading_collapsed=true hidden=true
# ### 4.1.3. Categorical Variable
# + hidden=true
df4['assortment'].drop_duplicates()
# + hidden=true
# state_holiday
plt.subplot( 3, 2, 1 )
a = df4[df4['state_holiday'] != 'regular_day']
sns.countplot( a['state_holiday'] )
plt.subplot( 3, 2, 2 )
sns.kdeplot( df4[df4['state_holiday'] == 'public_holiday']['sales'], label='public_holiday', shade=True )
sns.kdeplot( df4[df4['state_holiday'] == 'easter_holiday']['sales'], label='easter_holiday', shade=True )
sns.kdeplot( df4[df4['state_holiday'] == 'christmas']['sales'], label='christmas', shade=True )
# store_type
plt.subplot( 3, 2, 3 )
sns.countplot( df4['store_type'] )
plt.subplot( 3, 2, 4 )
sns.kdeplot( df4[df4['store_type'] == 'a']['sales'], label='a', shade=True )
sns.kdeplot( df4[df4['store_type'] == 'b']['sales'], label='b', shade=True )
sns.kdeplot( df4[df4['store_type'] == 'c']['sales'], label='c', shade=True )
sns.kdeplot( df4[df4['store_type'] == 'd']['sales'], label='d', shade=True )
# assortment
plt.subplot( 3, 2, 5 )
sns.countplot( df4['store_type'] )
plt.subplot( 3, 2, 6 )
sns.kdeplot( df4[df4['assortment'] == 'basic']['sales'], label='basic', shade=True )
sns.kdeplot( df4[df4['assortment'] == 'extra']['sales'], label='extra', shade=True )
sns.kdeplot( df4[df4['assortment'] == 'extend']['sales'], label='extend', shade=True )
# + [markdown] heading_collapsed=true hidden=true
# ## 4.2. Análise Bivariada
# + hidden=true
df4.head()
# + [markdown] heading_collapsed=true hidden=true
# ### **H1.** Lojas com maior sortimentos deveriam vender mais.
# **FALSA** Lojas com MAIOR SORTIMENTO vendem MENOS
# + hidden=true hide_input=false
aux1 = df4[['assortment', 'sales']].groupby( 'assortment' ).sum().reset_index()
sns.barplot( x='assortment', y='sales', data=aux1 )
aux2 = df4[['year_week', 'assortment', 'sales']].groupby( ['year_week', 'assortment'] ).sum().reset_index()
aux2.pivot( index='year_week', columns='assortment', values='sales' ).plot()
aux3 = aux2[aux2['assortment'] == 'extra']
aux3.pivot( index='year_week', columns='assortment', values='sales' ).plot()
# + [markdown] heading_collapsed=true hidden=true
# ### **H2.** Lojas com competidores mais próximos deveriam vender menos.
# **FALSA** Lojas com COMPETIDORES MAIS PROXIMOS vendem MAIS.
# + hidden=true
aux1 = df4[['competition_distance', 'sales']].groupby( 'competition_distance' ).sum().reset_index()
plt.subplot( 1, 3, 1 )
sns.scatterplot( x='competition_distance', y='sales', data=aux1 );
plt.subplot( 1, 3, 2 )
bins = list( np.arange(0, 20000, 1000) )
aux1['competition_distance_binned'] = pd.cut( aux1['competition_distance'], bins=bins )
aux2 = aux1[['competition_distance_binned', 'sales']].groupby( 'competition_distance_binned' ).sum().reset_index()
sns.barplot( x='competition_distance_binned', y='sales', data=aux2 );
plt.subplot( 1, 3, 3 )
sns.heatmap( aux1.corr( method='pearson' ), annot=True )
# + [markdown] heading_collapsed=true hidden=true
# ### **H3.** Lojas com competidores à mais tempo deveriam vender mais.
# **FALSA** Lojas com COMPETIDORES À MAIS TEMPO vendem MENOS.
# + hidden=true
plt.subplot( 1, 3, 1 )
aux1 = df4[['competition_time_month', 'sales']].groupby( 'competition_time_month' ).sum().reset_index()
aux2 = aux1[( aux1['competition_time_month'] < 120 ) & ( aux1['competition_time_month'] != 0 )]
sns.barplot( x='competition_time_month', y='sales', data=aux2 );
plt.xticks( rotation=90 );
plt.subplot( 1, 3, 2 )
sns.regplot( x='competition_time_month', y='sales', data=aux2 );
plt.subplot( 1, 3, 3 )
sns.heatmap( aux1.corr( method='pearson' ), annot=True )
# + [markdown] heading_collapsed=true hidden=true
# ### **H4.** Lojas com promoções ativas por mais tempo deveriam vender mais.
# **FALSA** Loja com PROMOÇÔES ATIVAS POR MAIS TEMPO vendem MENOS, depois de um certo tempo de promoção.
# + hidden=true
# fix: GridSpec was never imported in this notebook, so this cell raised NameError
from matplotlib.gridspec import GridSpec
# total sales per "weeks relative to promo2 start" (negative = promo2 not yet active)
aux1 = df4[['promo_time_week', 'sales']].groupby( 'promo_time_week' ).sum().reset_index()
# 2x3 layout: row 0 = extended promo (>0), row 1 = regular promo (<0), col 2 = correlation
grid = GridSpec( 2,3 )
plt.subplot( grid[0,0] )
aux2 = aux1[aux1['promo_time_week'] > 0] # promo extendido
sns.barplot( x='promo_time_week', y='sales', data=aux2 );
plt.xticks( rotation=90 );
plt.subplot( grid[0,1] )
sns.regplot( x='promo_time_week', y='sales', data=aux2 );
plt.subplot( grid[1,0] )
aux3 = aux1[aux1['promo_time_week'] < 0] # promo regular
sns.barplot( x='promo_time_week', y='sales', data=aux3 );
plt.xticks( rotation=90 );
plt.subplot( grid[1,1] )
sns.regplot( x='promo_time_week', y='sales', data=aux3 );
plt.subplot( grid[:,2] )
sns.heatmap( aux1.corr( method='pearson' ), annot=True );
# + [markdown] heading_collapsed=true hidden=true
# ### **H5.** <s> Lojas com mais dias de promoção deveriam vender mais. </s>
# + [markdown] heading_collapsed=true hidden=true
# ### **H6.** Lojas com mais promoções consecutivas deveriam vender mais.
# **FALSA** Lojas com mais promoções consecutivas vendem menos
# + hidden=true
df4.columns
# + hidden=true
df4[['promo', 'promo2', 'sales']].groupby( ['promo', 'promo2'] ).sum().reset_index()
# + hidden=true
aux1 = df4[( df4['promo'] == 1 ) & ( df4['promo2'] == 1 )][['year_week','sales']].groupby( 'year_week' ).sum().reset_index()
ax = aux1.plot()
aux2 = df4[( df4['promo'] == 1 ) & ( df4['promo2'] == 0 )][['year_week','sales']].groupby( 'year_week' ).sum().reset_index()
aux2.plot( ax=ax )
ax.legend( labels=['Tradicional & Extendida', 'Tradicional']);
# + [markdown] heading_collapsed=true hidden=true
# ### **H7.** Lojas abertas durante o feriado de Natal deveriam vender mais.
# **FALSA** Lojas abertas durante o feriado de Natal vendem menos.
# + hidden=true
aux = df4[df4['state_holiday'] != 'regular_day']
plt.subplot( 1, 2, 1 )
aux1 = aux[['state_holiday', 'sales']].groupby( 'state_holiday' ).sum().reset_index()
sns.barplot( x='state_holiday', y='sales', data=aux1 );
plt.subplot( 1, 2, 2 )
aux2 = aux[['year', 'state_holiday', 'sales']].groupby( ['year', 'state_holiday'] ).sum().reset_index()
sns.barplot( x='year', y='sales', hue='state_holiday', data=aux2 );
# + [markdown] heading_collapsed=true hidden=true
# ### **8.** Lojas deveriam vender mais ao longo dos anos.
# **FALSA** Lojas vendem menos ao longo dos anos.
# + hidden=true
aux1 = df4[['year', 'sales']].groupby( 'year' ).sum().reset_index()
plt.subplot( 1, 3, 1 )
sns.barplot( x='year', y='sales', data=aux1 );
plt.subplot( 1, 3, 2 )
sns.regplot( x='year', y='sales', data=aux1 );
plt.subplot( 1, 3, 3 )
sns.heatmap( aux1.corr( method='pearson' ), annot=True )
# + [markdown] heading_collapsed=true hidden=true
# ### **9.** Lojas deveriam vender mais no segundo semestre do ano.
# **FALSA** Lojas vendem menos no segundo semestre
# + hidden=true
aux1 = df4[['month', 'sales']].groupby( 'month' ).sum().reset_index()
plt.subplot( 1, 3, 1 )
sns.barplot( x='month', y='sales', data=aux1 );
plt.subplot( 1, 3, 2 )
sns.regplot( x='month', y='sales', data=aux1 );
plt.subplot( 1, 3, 3 )
sns.heatmap( aux1.corr( method='pearson' ), annot=True )
# + [markdown] heading_collapsed=true hidden=true
# ### **10.** Lojas deveriam vender mais depois do dia 10 de cada mês.
# **VERDADEIRA** Lojas vendem mais depois do dia 10 de cada mês
# + hidden=true
aux1 = df4[['day', 'sales']].groupby( 'day' ).sum().reset_index()
plt.subplot( 2, 2, 1 )
sns.barplot( x='day', y='sales', data=aux1 );
plt.subplot( 2, 2, 2 )
sns.regplot( x='day', y='sales', data=aux1 );
plt.subplot( 2, 2, 3 )
sns.heatmap( aux1.corr( method='pearson' ), annot=True );
aux1['before_after'] = aux1['day'].apply( lambda x: 'before_10_days' if x <=10 else 'after_10_days' )
aux2 = aux1[['before_after', 'sales']].groupby( 'before_after' ).sum().reset_index()
plt.subplot( 2, 2, 4 )
sns.barplot( x='before_after', y='sales', data=aux2 );
# + [markdown] heading_collapsed=true hidden=true
# ### **11.** Lojas deveriam vender menos aos finais de semana.
# **VERDADEIRA** Lojas vendem menos aos finais de semana
# + hidden=true
aux1 = df4[['day_of_week', 'sales']].groupby( 'day_of_week' ).sum().reset_index()
plt.subplot( 1, 3, 1 )
sns.barplot( x='day_of_week', y='sales', data=aux1 );
plt.subplot( 1, 3, 2 )
sns.regplot( x='day_of_week', y='sales', data=aux1 );
plt.subplot( 1, 3, 3 )
sns.heatmap( aux1.corr( method='pearson'), annot=True );
# + [markdown] heading_collapsed=true hidden=true
# ### **12.** Lojas deveriam vender menos durante os feriados escolares.
# **VERDADEIRA** Lojas vendem menos durante os feriados escolares, exceto julho e agosto
# + hidden=true
aux1 = df4[['school_holiday', 'sales']].groupby( 'school_holiday' ).sum().reset_index()
plt.subplot( 2, 1, 1 )
sns.barplot( x='school_holiday', y='sales', data=aux1 );
plt.subplot( 2, 1, 2 )
aux2 = df4[['month', 'school_holiday', 'sales']].groupby( ['month', 'school_holiday'] ).sum().reset_index()
sns.barplot( x='month', y='sales', hue='school_holiday', data=aux2 );
# + [markdown] hidden=true
# ### 4.2.1. Resumo das Hipóteses
# + hidden=true
from tabulate import tabulate
# + hidden=true
tab = [['Hipoteses', 'Conclusao', 'Relevancia'],
['H1', 'Falsa', 'Baixa'],
['H2', 'Falsa', 'Media'],
['H3', 'Falsa', 'Media'],
['H4', 'Falsa', 'Baixa'],
['H5', '-', '-'],
['H6', 'Falsa', 'Baixa'],
['H7', 'Falsa', 'Media'],
['H8', 'Falsa', 'Alta'],
['H9', 'Falsa', 'Alta'],
['H10', 'Verdadeira', 'Alta'],
['H11', 'Verdadeira', 'Alta'],
['H12', 'Verdadeira', 'Baixa']]
print( tabulate( tab, headers='firstrow') )
# + hidden=true
## 4.3. Análise Multivariada
# + hidden=true
### 4.3.1. Numerical Attributes
# + hidden=true
correlation = num_attributes.corr( method='pearson' )
sns.heatmap( correlation, annot=True );
# + [markdown] hidden=true
# ### 4.3.2. Categorical Attributes
# + hidden=true
# only categorical data
a = df4.select_dtypes( include='object' )
# calculate Cramer V for every ordered pair of categorical attributes
a1 = cramer_v( a['state_holiday'], a['state_holiday'] )
a2 = cramer_v( a['state_holiday'], a['store_type'] )
a3 = cramer_v( a['state_holiday'], a['assortment'] )
a4 = cramer_v( a['store_type'], a['state_holiday'] )
a5 = cramer_v( a['store_type'], a['store_type'] )
a6 = cramer_v( a['store_type'], a['assortment'] )
a7 = cramer_v( a['assortment'], a['state_holiday'] )
a8 = cramer_v( a['assortment'], a['store_type'] )
a9 = cramer_v( a['assortment'], a['assortment'] )
# final dataset -- each column holds that attribute's association with the three attributes
# fix: the 'store_type' column previously reused [a2, a3, a4]; it must be [a4, a5, a6]
d = pd.DataFrame( { 'state_holiday': [a1, a2, a3],
                    'store_type': [a4, a5, a6],
                    'assortment': [a7, a8, a9] } )
d = d.set_index( d.columns )
sns.heatmap( d, annot=True )
# + [markdown] heading_collapsed=true
# # 5.0. DATA PREPARATION
# + hidden=true
df5 = df4.copy()
# + [markdown] heading_collapsed=true hidden=true
# ## 5.1. Normalization
# + hidden=true
# + [markdown] heading_collapsed=true hidden=true
# ## 5.2. Rescaling
# + hidden=true
rs = RobustScaler()
mms = MinMaxScaler()
# competition distance
df5['competition_distance'] = rs.fit_transform( df5[['competition_distance']].values )
# competition time month
df5['competition_time_month'] = rs.fit_transform( df5[['competition_time_month']].values )
# promo time week
df5['promo_time_week'] = mms.fit_transform( df5[['promo_time_week']].values)
# year
df5['year'] = mms.fit_transform( df5[['year']].values)
# + [markdown] heading_collapsed=true hidden=true
# ## 5.3. Transformation
# + [markdown] hidden=true
# ### 5.3.1. Encoding
# + hidden=true
# state_holiday - One Hot Encoding
df5 = pd.get_dummies( df5, prefix=['state_holiday'], columns=['state_holiday'])
# store_type - Label Encoding
le = LabelEncoder()
df5['assortment'] = le.fit_transform( df5['assortment'] )
# assortment - Ordinal Encoding
assortment_dict = {'basic': 1, 'extra': 2, 'extended': 3}
df5['assortment'] = df5['assortment'].map( assortment_dict )
# + [markdown] hidden=true
# ### 5.3.2. Response Variable Transformation
# + hidden=true
df5['sales'] = np.log1p( df5['sales'] )
# + [markdown] hidden=true
# ### 5.3.3. Nature Transformation
# + hidden=true
# day of week
df5['day_of_week_sin'] = df5['day_of_week'].apply( lambda x: np.sin( x * ( 2. * np.pi/7 ) ) )
df5['day_of_week_cos'] = df5['day_of_week'].apply( lambda x: np.cos( x * ( 2. * np.pi/7 ) ) )
# month
df5['month_sin'] = df5['month'].apply( lambda x: np.sin( x * ( 2. * np.pi/12 ) ) )
df5['month_cos'] = df5['month'].apply( lambda x: np.cos( x * ( 2. * np.pi/12 ) ) )
# day
df5['day_sin'] = df5['day'].apply( lambda x: np.sin( x * ( 2. * np.pi/30 ) ) )
df5['day_cos'] = df5['day'].apply( lambda x: np.cos( x * ( 2. * np.pi/30 ) ) )
# week of year
df5['week_of_year_sin'] = df5['week_of_year'].apply( lambda x: np.sin( x * ( 2. * np.pi/52 ) ) )
df5['week_of_year_cos'] = df5['week_of_year'].apply( lambda x: np.cos( x * ( 2. * np.pi/52 ) ) )
# + hidden=true
df5.head()
# + hidden=true
|
m04_v01_store_sales_prediction.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="Ar6WxXxBzAuk"
# This notebook is inspired from a very good [HuggingFace Tutorial](https://colab.research.google.com/github/patrickvonplaten/notebooks/blob/master/Fine_Tune_XLSR_Wav2Vec2_on_Turkish_ASR_with_%F0%9F%A4%97_Transformers.ipynb#scrollTo=bTjNp2KUYAl8)
# + [markdown] id="2xpl09xvO7hm"
# # pip install
# + id="-3tuYZdqPADr" executionInfo={"status": "ok", "timestamp": 1616536679430, "user_tz": -60, "elapsed": 34422, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh0ByKhwMRvQ74HNe4dIy3OSY2gBlHXyhZEpIVX=s64", "userId": "04658984386292213374"}}
# %%capture
# !pip install phonemizer
# !apt-get install espeak
# !pip install datasets==1.5.0
# !pip install transformers==4.4.0
# !pip install soundfile
# !pip install jiwer
# + id="dQjxIIBHdgZY" executionInfo={"status": "ok", "timestamp": 1616536686954, "user_tz": -60, "elapsed": 41938, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh0ByKhwMRvQ74HNe4dIy3OSY2gBlHXyhZEpIVX=s64", "userId": "04658984386292213374"}}
# %%capture
# # !pip install git+https://github.com/huggingface/datasets.git
# # !pip install git+https://github.com/huggingface/transformers.git
# !pip install torchaudio
# !pip install librosa
# !pip install jiwer
# + [markdown] id="zudrwLDGO9rY"
# # notebook
# + colab={"base_uri": "https://localhost:8080/"} id="qQBWJA2jMNaD" executionInfo={"status": "ok", "timestamp": 1616536686957, "user_tz": -60, "elapsed": 35388, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh0ByKhwMRvQ74HNe4dIy3OSY2gBlHXyhZEpIVX=s64", "userId": "04658984386292213374"}} outputId="4294cdbf-2357-499d-eda2-399a461ec3df"
# !nvidia-smi -L
# + id="hlaKDvP4mpq6" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1616536737287, "user_tz": -60, "elapsed": 84122, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh0ByKhwMRvQ74HNe4dIy3OSY2gBlHXyhZEpIVX=s64", "userId": "04658984386292213374"}} outputId="ade6e0e1-2b89-4033-b607-4bf584af0df6"
from google.colab import drive
# Mount Google Drive so the vocabulary JSON and model checkpoints written
# later (under parent_dir[i]) persist across Colab sessions.
drive.mount('/content/drive')
# + id="RK0f5qB2WylF" executionInfo={"status": "ok", "timestamp": 1616536748023, "user_tz": -60, "elapsed": 9018, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh0ByKhwMRvQ74HNe4dIy3OSY2gBlHXyhZEpIVX=s64", "userId": "04658984386292213374"}}
# Import libraries
from datasets import load_dataset, load_metric, ClassLabel, load_from_disk
import datasets
datasets.set_caching_enabled(False)
import torchaudio
import librosa
import torch
import soundfile as sf
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
from transformers import Wav2Vec2CTCTokenizer, Wav2Vec2FeatureExtractor, Wav2Vec2Processor, Wav2Vec2ForCTC
from transformers import TrainingArguments, Trainer, AdamW, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup
from transformers import set_seed
from transformers import trainer_pt_utils
from transformers.trainer_pt_utils import DistributedTensorGatherer
from transformers.trainer_utils import EvalPrediction, denumpify_detensorize, PredictionOutput
from torch.utils.data.dataloader import DataLoader
from torch.optim.lr_scheduler import ReduceLROnPlateau
import random
import math
import pandas as pd
import numpy as np
from IPython.display import display, HTML
import re
import json
import os
from tqdm.notebook import tqdm
# phonemizer
from phonemizer import phonemize
# + [markdown] id="5ipHCrkV2LHc"
# # Utils
# + id="gMRMKmC02Kib" executionInfo={"status": "ok", "timestamp": 1616537170271, "user_tz": -60, "elapsed": 1033, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh0ByKhwMRvQ74HNe4dIy3OSY2gBlHXyhZEpIVX=s64", "userId": "04658984386292213374"}}
# text to phoneme
def text2phoneme(batch):
    """Replace the raw sentence with its espeak US-English phoneme string (in place)."""
    sentence = batch["sentence"]
    batch["sentence"] = phonemize(sentence, language='en-us', backend="espeak")
    return batch
# Visualisation
def show_random_elements(dataset, num_examples=10):
    """Render `num_examples` distinct random rows of `dataset` as an HTML table."""
    assert num_examples <= len(dataset), "Can't pick more elements than there are in the dataset."
    picks = []
    # Rejection-sample indices until we have `num_examples` distinct ones.
    while len(picks) < num_examples:
        candidate = random.randint(0, len(dataset) - 1)
        if candidate not in picks:
            picks.append(candidate)
    frame = pd.DataFrame(dataset[picks])
    display(HTML(frame.to_html()))
# Metrics PER
def NeedlemanWunschAlignScore(seq1, seq2, d, m, r, normalize=True):
    """Needleman-Wunsch alignment cost between two sequences.

    Args:
        seq1: reference sequence (error rate is normalized by its length).
        seq2: hypothesis sequence.
        d: gap (insertion/deletion) score, typically negative.
        m: mismatch score, typically negative.
        r: match score, typically 0.
        normalize: divide the cost by len(seq1) when True.

    Returns:
        The negated optimal alignment score (i.e. an error count), optionally
        normalized by the reference length.
    """
    N1, N2 = len(seq1), len(seq2)

    # DP table: tmpRes_[i][j] = best alignment score of seq1[:i] vs seq2[:j].
    tmpRes_ = [[None for x in range(N2 + 1)] for y in range(N1 + 1)]
    # First row/column: aligning against an empty prefix costs only gaps.
    for i in range(N1 + 1):
        tmpRes_[i][0] = i * d
    for j in range(N2 + 1):
        tmpRes_[0][j] = j * d

    for i in range(N1):
        for j in range(N2):
            match = r if seq1[i] == seq2[j] else m
            v1 = tmpRes_[i][j] + match       # align seq1[i] with seq2[j]
            v2 = tmpRes_[i + 1][j] + d       # gap in seq1
            v3 = tmpRes_[i][j + 1] + d       # gap in seq2
            tmpRes_[i + 1][j + 1] = max(v1, max(v2, v3))

    res = -tmpRes_[N1][N2]
    if normalize:
        # BUG FIX: an empty reference previously raised ZeroDivisionError
        # (reachable since generate_per_score strips '[UNK]' tokens first).
        res /= float(max(N1, 1))
    return res
def get_seq_PER(seqLabels, detectedLabels):
    """Normalized phone error rate between a reference and a detected sequence."""
    return NeedlemanWunschAlignScore(
        seqLabels, detectedLabels, -1, -1, 0, normalize=True)
def generate_per_score(refs, hyps):
    """Average per-sequence PER over paired reference/hypothesis strings.

    '[UNK]' markers are stripped from both sides before scoring.

    Args:
        refs: list of reference phoneme strings.
        hyps: list of decoded hypothesis strings (same length as refs).

    Returns:
        Mean PER, or 0.0 for empty input (BUG FIX: previously raised
        ZeroDivisionError when refs was empty).
    """
    if not refs:
        return 0.0
    score = 0.0
    for ref, hyp in zip(refs, hyps):
        score += get_seq_PER(ref.replace('[UNK]', ''), hyp.replace('[UNK]', ''))
    return score / len(refs)
# Preprocessing
chars_to_ignore_regex = '[\,\?\.\!\-\;\:\"\“\%\‘\”\�\—\…\–\«\»]'

def remove_special_characters(batch):
    """Strip punctuation, lowercase, normalize backtick to apostrophe.

    Writes the cleaned string (with a trailing space, as CTC word separator)
    into batch["text"] and returns the mutated batch.
    """
    cleaned = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower() + " "
    batch["text"] = cleaned.replace('`', '’')
    return batch
# Vocabulary
def extract_all_chars(batch):
    """Collect the distinct characters of a (batched) text column.

    Returns single-element lists so datasets.map(batched=True, batch_size=-1)
    reduces the whole split to one row.
    """
    joined = " ".join(batch["text"])
    return {"vocab": [list(set(joined))], "all_text": [joined]}
# Audio file
def speech_file_to_array_fn(batch):
    """Load the audio at batch["path"] into a numpy array (in place).

    Adds "speech", "sampling_rate" and copies "text" to "target_text".
    """
    audio, rate = sf.read(batch["path"])
    batch["speech"] = audio
    batch["sampling_rate"] = rate
    batch["target_text"] = batch["text"]
    return batch
def resample(batch):
    """Downsample the waveform from 48 kHz to the 16 kHz wav2vec2 expects (in place)."""
    waveform = np.asarray(batch["speech"])
    batch["speech"] = librosa.resample(waveform, 48_000, 16_000)
    batch["sampling_rate"] = 16_000
    return batch
# Preparing dataset for training
def prepare_dataset(batch):
    """Featurize a batched dataset slice: waveforms -> input_values, text -> label ids.

    Relies on the module-level `processor` (Wav2Vec2Processor).
    """
    # All waveforms in the batch must share one sampling rate.
    rates = set(batch["sampling_rate"])
    assert len(rates) == 1, f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."
    batch["input_values"] = processor(batch["speech"], sampling_rate=batch["sampling_rate"][0]).input_values
    # Tokenize targets with the CTC tokenizer side of the processor.
    with processor.as_target_processor():
        batch["labels"] = processor(batch["target_text"]).input_ids
    return batch
# Special Data Collator
@dataclass
class DataCollatorCTCWithPadding:
    """
    Data collator that will dynamically pad the inputs received.
    Args:
        processor (:class:`~transformers.Wav2Vec2Processor`)
            The processor used for processing the data.
        padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`):
            Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
            among:
            * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
              sequence if provided).
            * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
              maximum acceptable input length for the model if that argument is not provided.
            * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
              different lengths).
        max_length (:obj:`int`, `optional`):
            Maximum length of the ``input_values`` of the returned list and optionally padding length (see above).
        max_length_labels (:obj:`int`, `optional`):
            Maximum length of the ``labels`` returned list and optionally padding length (see above).
        pad_to_multiple_of (:obj:`int`, `optional`):
            If set will pad the sequence to a multiple of the provided value.
            This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
            7.5 (Volta).
    """

    processor: Wav2Vec2Processor
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        """Pad a list of feature dicts into one model-ready batch dict."""
        # Split inputs and labels since they have different lengths and need
        # different padding methods (feature extractor vs. tokenizer).
        input_features = [{"input_values": feature["input_values"]} for feature in features]
        label_features = [{"input_ids": feature["labels"]} for feature in features]

        batch = self.processor.pad(
            input_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        # Pad labels with the tokenizer side of the processor.
        with self.processor.as_target_processor():
            labels_batch = self.processor.pad(
                label_features,
                padding=self.padding,
                max_length=self.max_length_labels,
                pad_to_multiple_of=self.pad_to_multiple_of_labels,
                return_tensors="pt",
            )

        # Replace padding with -100 so CTC loss ignores padded positions.
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)

        batch["labels"] = labels
        return batch
# Metric
def compute_metrics(pred):
    """Greedy-decode logits and compute WER against the reference labels.

    Uses the module-level `processor` and `wer_metric`.
    """
    predicted_ids = np.argmax(pred.predictions, axis=-1)
    # Restore the pad token id where labels were masked with -100 for the loss.
    pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id
    hypotheses = processor.batch_decode(predicted_ids)
    # we do not want to group tokens when computing the metrics
    references = processor.batch_decode(pred.label_ids, group_tokens=False)
    return {"wer": wer_metric.compute(predictions=hypotheses, references=references)}
# Input preparation
def prepare_inputs(inputs):
    """Move every tensor value in `inputs` onto the GPU, mutating the dict in place."""
    for key in list(inputs):
        value = inputs[key]
        if isinstance(value, torch.Tensor):
            inputs[key] = value.cuda()
    return inputs
# Loss computation
def compute_loss(model, inputs, return_outputs=False):
    """Forward `inputs` through `model` and extract the scalar loss.

    Handles both dict-style outputs (key "loss") and tuple-style outputs
    (loss first). Optionally returns the full outputs as well.
    """
    outputs = model(**inputs)
    if isinstance(outputs, dict):
        loss = outputs["loss"]
    else:
        loss = outputs[0]
    if return_outputs:
        return (loss, outputs)
    return loss
# Prediction Loop
def prediction_loop(data_loader, model, world_size):
    """Evaluate `model` over `data_loader` and return the corpus PER score.

    Gathers per-batch losses/logits/labels with DistributedTensorGatherer,
    greedily decodes the logits and scores them with generate_per_score.
    Uses the module-level `processor`.
    """
    num_examples = len(data_loader.dataset)
    batch_size = data_loader.batch_size
    eval_losses_gatherer = DistributedTensorGatherer(world_size, num_examples,
                                                     make_multiple_of=batch_size)
    preds_gatherer = DistributedTensorGatherer(world_size, num_examples)
    labels_gatherer = DistributedTensorGatherer(world_size, num_examples)
    losses_host, preds_host, labels_host = None, None, None

    model.eval()
    for step, inputs in enumerate(data_loader):
        loss, logits, labels = prediction_step(model, inputs)
        losses = loss.repeat(batch_size)
        losses_host = losses if losses_host is None else torch.cat((losses_host, losses), dim=0)
        # BUG FIX: `nested_concat` was referenced unqualified but never
        # imported, so these branches raised NameError whenever a host tensor
        # survived across steps; qualify through trainer_pt_utils.
        preds_host = logits if preds_host is None else trainer_pt_utils.nested_concat(preds_host, logits, padding_index=-100)
        labels_host = labels if labels_host is None else trainer_pt_utils.nested_concat(labels_host, labels, padding_index=-100)
        eval_losses_gatherer.add_arrays(trainer_pt_utils.nested_numpify(losses_host))
        preds_gatherer.add_arrays(trainer_pt_utils.nested_numpify(preds_host))
        labels_gatherer.add_arrays(trainer_pt_utils.nested_numpify(labels_host))
        # Flush the hosts every step (memory-friendly; gatherers keep the data).
        losses_host, preds_host, labels_host = None, None, None

    eval_loss = eval_losses_gatherer.finalize()  # finalize for completeness; only PER is returned
    preds = preds_gatherer.finalize()
    labels_ids = labels_gatherer.finalize()

    # Greedy CTC decoding: argmax over the vocabulary dimension.
    preds_ids = np.argmax(preds, axis=-1)
    predicted_phonemes = processor.batch_decode(torch.from_numpy(preds_ids))
    true_phonemes = processor.batch_decode(torch.from_numpy(labels_ids))
    return generate_per_score(true_phonemes, predicted_phonemes)
# Prediction Single Batch
def prediction_step(model, inputs, label_names=["labels"]):
    """Run one no-grad forward pass on a single batch.

    Returns a (loss, logits, labels) triple; loss and labels are None when
    the batch carries no label keys. Mirrors the structure of HF Trainer's
    prediction_step.

    NOTE(review): mutable default argument for label_names — harmless here
    because it is never mutated, but a tuple default would be safer.
    """
    # A batch "has labels" only if every requested label key is present.
    has_labels = all(inputs.get(k) is not None for k in label_names)
    inputs = prepare_inputs(inputs)  # move tensors to GPU
    # Model-specific output keys to drop from the logits tuple.
    if hasattr(model, "config"):
        ignore_keys = getattr(model.config, "keys_to_ignore_at_inference", [])
    else:
        ignore_keys = []
    if has_labels:
        labels = trainer_pt_utils.nested_detach(tuple(inputs.get(name) for name in label_names))
        if len(labels) == 1:
            labels = labels[0]
    else:
        labels = None
    with torch.no_grad():
        if has_labels:
            loss, outputs = compute_loss(model, inputs, True)
            loss = loss.mean().detach()
            # Keep every output except the loss (and ignored keys) as logits.
            if isinstance(outputs, dict):
                logits = tuple(v for k, v in outputs.items() if k not in ignore_keys + ["loss"])
            else:
                logits = outputs[1:]
        else:
            loss, outputs = None, model(**inputs)
            if isinstance(outputs, dict):
                logits = tuple(v for k, v in outputs.items() if k not in ignore_keys + ["loss"])
            else:
                logits = outputs
    logits = trainer_pt_utils.nested_detach(logits)
    # Unwrap a single-element tuple so callers get the tensor directly.
    if len(logits) == 1:
        logits = logits[0]
    return (loss, logits, labels)
# + [markdown] id="ami3ewqkUaD_"
# # TIMIT Dataset
# IF YOU DON'T HAVE ALREADY THE DATASET PREPROCESSED CONTINUE, OTHERWISE SKIP THIS SECTION
# + [markdown] id="prxikwRLNuGb"
# We are going to download the ukrainian dataset \
# **Note**: Most likely, the common voice link has expired. In this case, just go to [Common Voice's dataset website](https://commonvoice.mozilla.org/en/datasets), select your language, *e.g.* `Ukrainian`, enter your email address to get the "*Download*" button, click right, and click `Copy link address` to fill it in the cell below.
# + id="w_Aa_J-RHqjF" colab={"base_uri": "https://localhost:8080/", "height": 224, "referenced_widgets": ["ecac1a21d0b54286a7e21bd04b78479c", "e22aa650b2bb45858a1b1aaecd5e295e", "e7d2efcbd1fe44b59521d838fa88821e", "747df345229b425fa05dfda1f0d41d16", "2e29fb60359141a695ba72381c1e152d", "9e3c93d1e13d429d81c109af667254ba", "<KEY>", "66fe8cc094f94eee975fa7a28149136c", "<KEY>", "<KEY>", "02136abf295a4ce7818ff12ff181420a", "f933dfaf4b2948ea8db3a365049d48ab", "e59fff4e0485444ca3cb4f0c462b7390", "2f336458d3dc46159315c6dc6b86b917", "<KEY>", "d6a61dee64494500a5d8a7cc8bce4048", "8656fd4f48ed4d9b897392e6441ee552", "0783ce418f324d008f90de85b9e15f7c", "720fb7e9ea1949369fde0809f7729b9f", "16cfe07a749249d8becadf0d5c73d149", "12a4cd04c9734dee8547616be5125f33", "90066f71be214be18a56687e700919ca", "e2c4da72e36a47c6974f61f5671eac8c", "<KEY>", "e9489eb1c4b04087af7fdd10d45b19ef", "<KEY>", "<KEY>", "7b35b56d78f5449d94ed703d818c4a65", "<KEY>", "8f7502216ed3492a944b421432e8ec63", "5e9192d5813242aa865343ded231f154", "e656367bb2c84f3fa9d65fbeffa553c8", "2bae4e8bc87f43bc9e78d44fe865443c", "786f64f737bf4f4daa4c0628124bb0ed", "4b9fd8e846704985a3c62532dd932dc2", "f8b5ca8a90be4700b98a331e6a3131a7", "9cfc7bbfe2f8446c828c2f9891f7cd88", "8f20e072d5d74f65871647184d390088", "aaafdb031106424a8b3fe677c9af2303", "04716cb4059243c9aad473d0a36037a2"]} executionInfo={"status": "ok", "timestamp": 1616536818779, "user_tz": -60, "elapsed": 70718, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh0ByKhwMRvQ74HNe4dIy3OSY2gBlHXyhZEpIVX=s64", "userId": "04658984386292213374"}} outputId="e3d36570-8e3e-4fec-9cba-74fcb2e3679d"
timit = load_dataset("timit_asr")
# + id="vmtlKD_HHthD" executionInfo={"status": "ok", "timestamp": 1616536819539, "user_tz": -60, "elapsed": 751, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh0ByKhwMRvQ74HNe4dIy3OSY2gBlHXyhZEpIVX=s64", "userId": "04658984386292213374"}}
timit = timit.remove_columns(["phonetic_detail", "word_detail", "dialect_region", "id", "sentence_type", "speaker_id"])
# + id="y_Da8kuXGQpL" executionInfo={"status": "ok", "timestamp": 1616536819550, "user_tz": -60, "elapsed": 754, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh0ByKhwMRvQ74HNe4dIy3OSY2gBlHXyhZEpIVX=s64", "userId": "04658984386292213374"}}
timit['train'] = timit['train'].rename_column("file", "path")
timit['train'] = timit['train'].rename_column("text", "sentence")
timit['test'] = timit['test'].rename_column("file", "path")
timit['test'] = timit['test'].rename_column("text", "sentence")
# + id="sLDJz1qeH3_V" executionInfo={"status": "ok", "timestamp": 1616536819551, "user_tz": -60, "elapsed": 751, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh0ByKhwMRvQ74HNe4dIy3OSY2gBlHXyhZEpIVX=s64", "userId": "04658984386292213374"}}
common_voice = timit['train']
common_voice_test = timit['test']
# + [markdown] id="GHIkAICG14lg"
# ## Preprocess
# + id="qnYPgxtU14GO" colab={"base_uri": "https://localhost:8080/", "height": 990} executionInfo={"status": "ok", "timestamp": 1616536819552, "user_tz": -60, "elapsed": 748, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh0ByKhwMRvQ74HNe4dIy3OSY2gBlHXyhZEpIVX=s64", "userId": "04658984386292213374"}} outputId="07389544-eebc-458c-f2a9-8d0459f632c5"
show_random_elements(common_voice.remove_columns(['path']), num_examples=30)
# + [markdown] id="VFj3gHW33RGD"
# We are going to preprocess the text and remove some special symbol `,.?!;` as we don't have any language model at the output
# + id="tUFCciNKR8YG" colab={"base_uri": "https://localhost:8080/", "height": 455, "referenced_widgets": ["b68c5dfd055b469d9842975f5a827349", "f8613f2f4af24828b45a15e6696b6499", "851ad5727c394c03a808dab78604b1f7", "9662a44e23da420fa1f84a7a5c6aeb72", "e5d3fdd105234dc59af016820dc066a1", "<KEY>", "<KEY>", "<KEY>", "8d1039af1f974fcd85a2701ca6ab94ac", "<KEY>", "25dc6676369e45b395e400ac85a3e8dd", "27c6744dc9ae4151a00c6d08400b9e23", "e8503a0b49a3402aa92a5c0ca9fe9342", "d5e82b41ac3c4ac1a5effe3222b468b4", "<KEY>", "5c8847cdfc9c443686a095915f26efec", "0f46e30c55724a179b16126b6c10773e", "<KEY>", "<KEY>", "<KEY>", "ebfd14d68dc743279abf92bd197a276a", "9813b9f6cfd141f59f43ac527adff557", "34e5d1e6e75a499daaf8c150a9b26114", "<KEY>", "d55873b50952473cad050313dec18944", "<KEY>", "<KEY>", "22c1cc9df33e47caa92df7f7d1cabbb9", "84fa4228d7b8497aba8b539ee4d228b1", "<KEY>", "<KEY>", "4b31864a0c194a85b2c333a045279d57", "44e3d94f6462415bac32a6231ac0943d", "ccaae47b42de4ebb8e07ba716c6e79bb", "<KEY>", "c63686466d0b4ea0a0d488e408e3c222", "<KEY>", "8fa86883768c4d3797b56c7fe645b53e", "<KEY>", "<KEY>", "b8d01caf4f0f4a0b8a3f6c13d49fe57f", "<KEY>", "bbe93b5288bd444f93e8ebbe5fb9a1b6", "6e3cbf46efa34241a3c63877abeb166e", "177330506d3042e3acc76e3840d66df7", "<KEY>", "<KEY>", "<KEY>", "99d4864056274735a40f0d4117b30dad", "<KEY>", "65de263c596244e0a12977bdcafc1d92", "<KEY>", "<KEY>", "b8549a3c422a46c09d459d878520f2ba", "d26fd0e427cc4e3ea9d63656fe3a6bda", "9967b1e3aa4b454bb5955e79f215af94", "78674fb485b64da0bd4ce3ec7e83edfc", "3fe6e8ffd80244d4aaf9b8db6e279c96", "b9acaa5389fb473ba955660044f37016", "<KEY>", "5f87641cdc1e42a3b832af1683af5046", "dfb4afbdf7fe4e34bb3eb0a98c3942da", "c2ff150cdf6e4f1c9ec0384199e6ffc8", "e9a3d9d100df443fa2afcbc4f078cf0c"]} executionInfo={"status": "ok", "timestamp": 1616537001258, "user_tz": -60, "elapsed": 182450, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh0ByKhwMRvQ74HNe4dIy3OSY2gBlHXyhZEpIVX=s64", "userId": "04658984386292213374"}} 
outputId="57f9b422-168c-48ef-fd3b-085320aca728"
common_voice = common_voice.map(text2phoneme, num_proc=4)
common_voice_test = common_voice_test.map(text2phoneme, num_proc=4)
# + id="UrjmzIbZSdO-" colab={"base_uri": "https://localhost:8080/", "height": 990} executionInfo={"status": "ok", "timestamp": 1616537001266, "user_tz": -60, "elapsed": 182452, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh0ByKhwMRvQ74HNe4dIy3OSY2gBlHXyhZEpIVX=s64", "userId": "04658984386292213374"}} outputId="0a65de32-0803-4902-d10a-ac0e19bf2341"
show_random_elements(common_voice.remove_columns(['path']), num_examples=30)
# + id="qavL52PA4Y21"
# common_voice = common_voice.map(remove_special_characters, remove_columns=["sentence"])
# common_voice_test = common_voice_test.map(remove_special_characters, remove_columns=["sentence"])
# + [markdown] id="fNUbpCJV4zkf"
# Let's now how the sentence looks like
# + id="nq-lzngY421w"
# show_random_elements(common_voice.remove_columns(["path"]))
# + id="LCNAjkDH--y6" executionInfo={"status": "ok", "timestamp": 1616537001268, "user_tz": -60, "elapsed": 182450, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh0ByKhwMRvQ74HNe4dIy3OSY2gBlHXyhZEpIVX=s64", "userId": "04658984386292213374"}}
common_voice = common_voice.rename_column("sentence", "text")
common_voice_test = common_voice_test.rename_column("sentence", "text")
# + [markdown] id="5mxfaO3p4_mp"
# ## Building Vocabulary
# + [markdown] id="d7GXUJ3S5BUT"
# As we are going to use a CTC (as top layer), we are going to classify speech chunks into letters, so now we will extract all distinct letters and build our vocabulary from that.
# + id="R7gawagYWQF_"
common_voice = common_voice.rename_column("sentence", "text")
common_voice_test = common_voice_test.rename_column("sentence", "text")
# + id="2kgxTh1U5VNL"
vocab_train = common_voice.map(extract_all_chars, batched=True, batch_size=-1, keep_in_memory=True, remove_columns=common_voice.column_names)
vocab_test = common_voice_test.map(extract_all_chars, batched=True, batch_size=-1, keep_in_memory=True, remove_columns=common_voice_test.column_names)
# + [markdown] id="5YNn85Yi5i1r"
# Now we will create the union of all distinct letters from both dataset. We will do the same thing as when we are dealing with translation / generation task.
# + id="Z2CwjQQv5uho"
vocab_list = list(set(vocab_train["vocab"][0]) | set(vocab_test["vocab"][0]))
vocab_dict = {v: k for k, v in enumerate(vocab_list)}
vocab_dict
# + id="0FdMWUD4HOvy"
# Adding the blank token, the unknown token and the padding token
vocab_dict["|"] = vocab_dict[" "]
del vocab_dict[" "]
vocab_dict["[UNK]"] = len(vocab_dict)
vocab_dict["[PAD]"] = len(vocab_dict)
print(f"Our final layer will have as output dimension {len(vocab_dict)}")
# + id="8Z0wt0YQHnDC" executionInfo={"status": "ok", "timestamp": 1616537001270, "user_tz": -60, "elapsed": 182448, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh0ByKhwMRvQ74HNe4dIy3OSY2gBlHXyhZEpIVX=s64", "userId": "04658984386292213374"}}
# Now let's save our dictionary
parent_dir = ['/content/drive/MyDrive/speech_w2v', '/content/drive/MyDrive/3A/MVA/Speech & NLP/speech_w2v']
i = 1
# with open(os.path.join(parent_dir[i], 'eng_phone_vocab.json'), 'w') as vocab_file:
# json.dump(vocab_dict, vocab_file)
# + [markdown] id="pXTLSrSXI5oH"
# ## XLSR Wav2Vec 2.0 Features Extractor
# + colab={"base_uri": "https://localhost:8080/", "height": 117, "referenced_widgets": ["3a4fee31f6924125a956b461c412f4a0", "a8da9a9977174468bb5f8014471d2c63", "9dd511836a4048109c028139e3eacf39", "2871737e4aeb4cf38857b632575d3f0e", "84f015738c22433caa778caee5e8a61f", "b65e8d6fa8ef4c5dac0a204be6bab7fb", "950887309005489f82939f04e1cc5c25", "<KEY>", "8d0f1a4ca5e04c4a8518b87a0339d2d5", "45d5ab642a384325b38ddc17f07a1f8e", "<KEY>", "80c1e6af04134690a17d3fa9e207b194", "<KEY>", "<KEY>", "<KEY>", "<KEY>"]} id="PDvLUjQ6JoMw" executionInfo={"status": "ok", "timestamp": 1616537222289, "user_tz": -60, "elapsed": 35412, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh0ByKhwMRvQ74HNe4dIy3OSY2gBlHXyhZEpIVX=s64", "userId": "04658984386292213374"}} outputId="eae27f1c-072f-4f6c-fd26-c1c55c0557be"
# Now we are going to open and store the audio file (represented as a numpy array)
common_voice = common_voice.map(speech_file_to_array_fn, remove_columns=common_voice.column_names)
common_voice_test = common_voice_test.map(speech_file_to_array_fn, remove_columns=common_voice_test.column_names)
# + id="QeJR0xmGI_uo"
# First we have to downsampled the original sample from 48 kHZ to 16kHZ
#common_voice = common_voice.map(resample, num_proc=4)
#common_voice_test = common_voice_test.map(resample, num_proc=4)
# + id="xFnGfLnrQDyN"
# common_voice.save_to_disk('/content/drive/MyDrive/speech_w2v/train_ukrainian_preprocessed.files')
# common_voice_test.save_to_disk('/content/drive/MyDrive/speech_w2v/test_ukrainian_preprocessed.files')
# + [markdown] id="JjTetH_U6xqm"
# # Load locally if already saved the preprocess file
# + [markdown] id="6sYvc6m7gnrr"
# # Split in 10mn, 1h, 8h
# + id="qTNcGxVQ5gEz" executionInfo={"status": "ok", "timestamp": 1616537224750, "user_tz": -60, "elapsed": 2434, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh0ByKhwMRvQ74HNe4dIy3OSY2gBlHXyhZEpIVX=s64", "userId": "04658984386292213374"}}
# Loading tokenizer
tokenizer = Wav2Vec2CTCTokenizer(os.path.join(parent_dir[i], 'eng_phone_vocab.json'), unk_token="[UNK]", pad_token="[PAD]", word_delimiter_token="|")
# Load Feature Extractor
feature_extractor = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0, do_normalize=True, return_attention_mask=True)
# Wrap the feature_extractor and the tokenizer into one class (thanks so much HuggingFace)
processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
# + id="KhqVRMLQFPTl" executionInfo={"status": "ok", "timestamp": 1616537224753, "user_tz": -60, "elapsed": 2430, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh0ByKhwMRvQ74HNe4dIy3OSY2gBlHXyhZEpIVX=s64", "userId": "04658984386292213374"}}
# Split into train/dev
np.random.seed(42)
data = common_voice.train_test_split(test_size=0.2, seed=42)
common_voice_train, common_voice_validation = data['train'], data['test']
# + id="r_s1mKyrRkFd" colab={"base_uri": "https://localhost:8080/", "height": 730, "referenced_widgets": ["18f214ef876049eeb15f6fb90ed9dc72", "a199c5b1b1c941dab5721bc6c5b13fd9", "da396764c34b4a179e5c15fbf8ff2e93", "ad91f9a0b65a40a4b137bd0d0dde1bdf", "7a494df2d1c74578b3f03da2260e5577", "23987250928245bb84cc2fb1c35af37b", "52a0043d93694df7b3b8e8eb18188dd0", "<KEY>", "92976f0853e14e03ae72210088928297", "c52003dab0304795bef30119d98d440f", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "61795a5f5b574d14beeef03ca1354a04", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "87fa4ff2e222448782894c74a30016f3", "5b232869c8444e27b0cec85e743364ef", "02f502a01ea644249156066c8cc57fbe", "9a6f464e3486474d8b9e0d740c0af7c2", "e71b44ae54524c368552c9c6109a59d0", "<KEY>", "<KEY>", "8eb48473458a419e95ac164708e0078e", "<KEY>", "18089b838a31420983ac17ad61e3a05f", "<KEY>", "<KEY>", "e2909d881e754f1fbd748d064348fe43", "<KEY>", "979563aa1a3c4b658ac65cac33a90258", "52607a51dfc7484985ed8fba9e2336db", "0c3eee5d676843bab2bc43ab8ff5f122", "<KEY>", "2a8fbe8dea204fdfabbc09338aede30b", "<KEY>", "134af067fb3c4e92ac38437a5e56794a", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "19e00d454de54a0eba875b07d41a97ee", "<KEY>", "<KEY>", "eeeba7a9fbe7479496639c555cede818", "bcd938743b204dfb9a5ff0851f07f7ac", "6359f5c8b6a343e58ac3a81e591c3a27", "76a986c2e42d4269b18338373d6ee6e5", "<KEY>", "<KEY>", "7e1c2254252c4d1394d0051ff09edf20", "<KEY>", "23c81db175174767accee234431a7206", "6ad54c32afa94237ba945eb0e6e51e93", "ec06c8af690449b7b6de04060f630334", "e9efe308b15e4d0b9fe1db98fb1f181a", "<KEY>", "3fff8dbccd554cc5a77c32e3198b23dd", "75cc519527864355af8a7d8a50505fb8", "<KEY>", "62196f347e5348e1acd0d42d1457ed2d", "d0ff90984aa44df5930e601c3194f438", "<KEY>", "<KEY>", "add521fd08f341eba4725d5f82e64c9c", "<KEY>", "<KEY>", "63ede28f17be46d7897027a7c1e60ebb", "<KEY>", "f32dc41740e847308070fe20645962c5", "4e5d55cd626942de8e6680d462699cb6", "<KEY>", "0e276237ed0a47ff87a0a2a837ac0159", "<KEY>", "<KEY>", "<KEY>", "<KEY>", 
"3d68d0477e7548248736a93d5dfd0c23", "62ab05a82961400ba513ac7e073714c0", "e43f6b9e626448aab698c40e843f13a8", "<KEY>", "29c0f756a9de40eab71ab8954731b888", "33ada1503d2c4e4f9b762dfa89ac5ca0", "<KEY>", "addf0f7b0bda4d6a96894b89162a03b0", "e196d114bd734a7bbe14ae9366999446", "846be113f99c4d5c8765f66012df8ac2", "<KEY>", "8edf04a791ee4aff904ebc7cfe014d85", "8110b027e65e4e3fb5b55aaaa1cd1151", "744ce4e9a63c4a07b505674f6ac6fdd2", "5a23fc26b1ee4bdc99888540cf4b3944"]} executionInfo={"status": "ok", "timestamp": 1616537298410, "user_tz": -60, "elapsed": 76078, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh0ByKhwMRvQ74HNe4dIy3OSY2gBlHXyhZEpIVX=s64", "userId": "04658984386292213374"}} outputId="1cdf284f-f3c5-4836-e6b2-5bcea905d901"
# Featurize every split: raw audio -> input_values, target text -> CTC label ids.
common_voice_train = common_voice_train.map(prepare_dataset, remove_columns=common_voice_train.column_names, batch_size=8, num_proc=4, batched=True)
common_voice_validation = common_voice_validation.map(prepare_dataset, remove_columns=common_voice_validation.column_names, batch_size=8, num_proc=4, batched=True)
common_voice_test = common_voice_test.map(prepare_dataset, remove_columns=common_voice_test.column_names, batch_size=8, num_proc=4, batched=True)
# + [markdown] id="FYc-hol0RiDk"
# # Training
# + id="YcU0OqWiCITf" executionInfo={"status": "ok", "timestamp": 1616537298416, "user_tz": -60, "elapsed": 22894, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/<KEY>4", "userId": "04658984386292213374"}}
parent_dir = ['/content/drive/MyDrive/speech_w2v', '/content/drive/MyDrive/3A/MVA/Speech & NLP/speech_w2v']
i = 1
# Loading tokenizer
tokenizer = Wav2Vec2CTCTokenizer(os.path.join(parent_dir[i], 'eng_phone_vocab.json'), unk_token="[UNK]", pad_token="[PAD]", word_delimiter_token="|")
# Load Feature Extractor
feature_extractor = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0, do_normalize=True, return_attention_mask=True)
# Wrap the feature_extractor and the tokenizer into one class (thanks so much HuggingFace)
processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
# + id="bePRrNGpbxVs" colab={"base_uri": "https://localhost:8080/", "height": 67, "referenced_widgets": ["b3c2dd0a05394766a3817a31f399e0d9", "c657951022e7446baa2f4e91acaddaf6", "a1d905d94d9e40f1a7821e3d361dc641", "c96a5d43707a4f4e886f3764514f81ff", "99aa0fa535224e79a35901330ccfa92b", "238b58be5000435c8712e9efc71553ad", "08ec71b4391043b1820d339e73a973c9", "153bde40b0134a088b517931208502cf"]} executionInfo={"status": "ok", "timestamp": 1616537299501, "user_tz": -60, "elapsed": 23025, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh0ByKhwMRvQ74HNe4dIy3OSY2gBlHXyhZEpIVX=s64", "userId": "04658984386292213374"}} outputId="edc62f82-9db7-4bec-e605-c676ca5fc8c3"
# Prepare our data collator
data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)
# Prepare our metric (wer_metric)
wer_metric = load_metric("wer")
# + [markdown] id="Diou-GNIbsJp"
# # 3h TRAINING
# + [markdown] id="ADs7vPCeV84t"
# The first component of XLSR-Wav2Vec2 consists of a stack of CNN layers that are used to extract acoustically meaningful - but contextually independent - features from the raw speech signal. This part of the model has already been sufficiently trained during pretraining and as stated in the [paper](https://arxiv.org/pdf/2006.13979.pdf) does not need to be fine-tuned anymore.
# Thus, we can set the `requires_grad` to `False` for all parameters of the *feature extraction* part.
# + [markdown] id="Lhsh0PiCr8aH"
# Therefore, I had to play around a bit with different values for dropout, SpecAugment's masking dropout rate, layer dropout, and the learning rate until training seemed to be stable enough.
# + [markdown] id="QV1hzLMcKPTO"
# ```javascript
# function ConnectButton(){
# console.log("Connect pushed");
# document.querySelector("#top-toolbar > colab-connect-button").shadowRoot.querySelector("#connect").click()
# }
# setInterval(ConnectButton,60000);
# ```
# + colab={"base_uri": "https://localhost:8080/"} id="98jur6H-exB1" outputId="d79a823f-72fb-42a5-ebd3-0d7fc4b97db1"
# Cell for training
# Set seed
set_seed(42)
# fname = '/content/wav2vec_small_960h.pt'
# checkpoint = torch.load(fname)
# args = checkpoint["args"]
arch = ["facebook/wav2vec2-large", "facebook/wav2vec2-large-xlsr-53"]
i = 1
architecture = arch[i]
# Load model
model = Wav2Vec2ForCTC.from_pretrained(
architecture,
# "facebook/wav2vec2-base-960h",
attention_dropout=0.1,
hidden_dropout=0.1,
feat_proj_dropout=0.0,
mask_time_prob=0.05,
layerdrop=0.1,
gradient_checkpointing=True,
ctc_loss_reduction="mean",
pad_token_id=processor.tokenizer.pad_token_id,
vocab_size=len(processor.tokenizer)
)
# Freeze the convolutional feature extractor so only the transformer + CTC head train.
model.freeze_feature_extractor()
#for param in model.wav2vec2.feature_projection.parameters():
    #param.requires_grad = False
#for param in model.wav2vec2.encoder.parameters():
    #param.requires_grad = False
# Set to GPU
model.cuda()
# Length-grouped samplers batch utterances of similar duration to reduce padding.
model_input_name = processor.feature_extractor.model_input_names[0]
sampler_train = trainer_pt_utils.LengthGroupedSampler(common_voice_train, batch_size=32, model_input_name=model_input_name)
sampler_val = trainer_pt_utils.LengthGroupedSampler(common_voice_validation, batch_size=32, model_input_name=model_input_name)
# Data loaders using the CTC data collator defined earlier in the notebook.
train_loader = DataLoader(common_voice_train, batch_size=32, sampler=sampler_train, collate_fn=data_collator, num_workers=4)
valid_loader = DataLoader(common_voice_validation, batch_size=32, sampler=sampler_val, collate_fn=data_collator, num_workers=4)
# Training hyper-parameters; validation/printing frequencies equal one epoch's worth of steps.
learning_rate = 4e-4
n_epochs = 250
num_update_steps_per_epoch = len(train_loader)
max_steps = math.ceil(n_epochs * num_update_steps_per_epoch)
validation_freq = int(1*num_update_steps_per_epoch)
print_freq = int(1*num_update_steps_per_epoch)
scheduler_on_plateau_freq = int(num_update_steps_per_epoch)
# Optimizer: split parameters into decay / no-decay groups (bias and LayerNorm excluded).
# NOTE(review): both groups use weight_decay=0.005, so the split currently has no
# effect — confirm whether the second group was meant to use 0.0.
decay_parameters = trainer_pt_utils.get_parameter_names(model, [torch.nn.LayerNorm])
decay_parameters = [name for name in decay_parameters if "bias" not in name]
optimizer_grouped_parameters = [
    {
        "params": [p for n, p in model.named_parameters() if n in decay_parameters],
        "weight_decay": 0.005,
    },
    {
        "params": [p for n, p in model.named_parameters() if n not in decay_parameters],
        "weight_decay": 0.005,
    },
]
optimizer = AdamW(optimizer_grouped_parameters, lr=learning_rate)
# Scheduler: polynomial decay with warmup over the first 12 epochs of steps.
num_warmup_steps = int(12 * num_update_steps_per_epoch) # Neccessary Number of steps to go from 0.0 to lr
#warmup_scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps, max_steps)
warmup_scheduler = get_polynomial_decay_schedule_with_warmup(optimizer, num_warmup_steps, max_steps, lr_end=1e-7)
reduce_lr_plateau = None
## reduce_lr_plateau = ReduceLROnPlateau(optimizer, factor=0.6, patience=7) ## To define when warmup scheduler is finished
model.zero_grad()
current_total_steps = 0
current_best_wer = 2.0
# NOTE(review): indentation was reconstructed — the validation / printing /
# checkpoint section is assumed to live inside the per-step loop (its frequency
# counters are driven by current_total_steps); confirm against the original notebook.
for epoch in range(n_epochs):
    print(f"EPOCH : {epoch}")
    tr_loss = 0.0
    epoch_step = 0
    for step, inputs in enumerate(train_loader):
        model.train()
        inputs = prepare_inputs(inputs)
        loss = compute_loss(model, inputs)
        loss.backward()
        tr_loss += loss.item()
        # Gradient clipping: prefer the optimizer's fused clip if available.
        if hasattr(optimizer, "clip_grad_norm"):
            optimizer.clip_grad_norm(1.0)
        elif hasattr(model, "clip_grad_norm_"):
            model.clip_grad_norm_(1.0)
        optimizer.step()
        current_total_steps += 1
        epoch_step += 1
        #if current_total_steps < num_warmup_steps + 1:
            #warmup_scheduler.step()
        warmup_scheduler.step()
        if current_total_steps % print_freq == 0:
            print(f"Training Loss : {tr_loss/epoch_step}")
        # Initialize the lronplateau as soon as we have finished the warmup
        #if reduce_lr_plateau is None and current_total_steps > num_warmup_steps + 1:
            #reduce_lr_plateau = ReduceLROnPlateau(optimizer, factor=0.7, patience=5, verbose=1)
        model.zero_grad()
        if current_total_steps % validation_freq == 0:
            world_size = 1
            per_score = prediction_loop(valid_loader, model, world_size)
            eval_metric = per_score
            print(f"ACTUAL PER : {eval_metric}")
            # Checkpoint only when the phoneme error rate improves.
            if eval_metric < current_best_wer:
                print("Hooray! New best wer validation. Saving model")
                torch.save(model.state_dict(), os.path.join(parent_dir[i], 'timit_eng_per.pt'))
                current_best_wer = eval_metric
        #if reduce_lr_plateau is not None:
            #reduce_lr_plateau.step(eval_metric)
# + id="spPiMREoDvLU"
|
exp/timit_notebook_training/timit_experiment.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
import numpy as np
import pandas as pd
import faiss
import time
from sklearn.cluster import KMeans
from collections import defaultdict
# These look like raw embeddings taken directly from an untrained RoBERTa.
res = np.load('./res.npy')
s = res.reshape(1786279,1024)
# ## sklearn
# Very slow on this dataset size.
kmeans = KMeans(n_clusters=135,random_state=0).fit(s)
center = kmeans.cluster_centers_
np.save('./data/bert_center.npy',center)
# ## faiss
# Frighteningly fast by comparison.
n_center = 135
niter = 100
d=1024
fkmeans = faiss.Kmeans(d,n_center,niter=niter,verbose = True)
# %%time
fkmeans.train(s)
kaggle = pd.read_json('./kaggle_data/arxiv-metadata-oai-snapshot.json',lines=True)
#kaggle.head()
# One set of category labels per paper.
label = kaggle['categories'].apply(lambda x : set(x.split()))
label = label.to_list()
# Find the nearest centroid for each sample, i.e. its cluster assignment.
D, I = fkmeans.search(s,1)
index = faiss.IndexFlatL2 (1024)
index.add(s)
# For each centroid, fetch the 100 nearest embeddings.
D, I = index.search (fkmeans.centroids, 100)
# Searching for a single neighbour should just return the point itself.
D, I = index.search (fkmeans.centroids, 1)
#np.save('./kaggle_data/tmp.npy',I)
I = np.load('./kaggle_data/tmp.npy')
I = list(I)
I[0]
# NOTE(review): `i` here is leftover state from an earlier notebook cell — confirm intent.
label[int(I[i])]
# Group cluster members by the label set of their nearest centroid.
dic = defaultdict(list)
for i in range(len(I)):
    dic[','.join(list(label[int(I[i])]))].append(int(I[i]))
len(dic)
kaggle.head()
'https://arxiv.org/abs/'+kaggle.loc[1676474,'id']
'cs.AI' in dic
dic
# ## Experimental results
# For the next experiment, should the dataset be split into one sample per record and re-run?
# Count, per cluster, how many of the 100 nearest neighbours share a label with the centroid's paper.
ans = [0] * 135
# NOTE(review): `fcenters` is not defined anywhere in this chunk — presumably fkmeans.centroids.
for i in range(len(fcenters)):
    center = I[i][0]
    for j in range(1,100):
        index = I[i][j]
        if len(label[index] & label[center]) > 0:
            ans[i] += 1
dic = {}
for i in range(len(fcenters)):
    center = I[i][0]
    for j in range(1,100):
        index = I[i][j]
        if len(label[index] & label[center]) > 0:
            dic[','.join(list(label[center]))] =dic.get(','.join(list(label[center])),0)+ 1
# Fewer than 135 keys because some centroids share identical label sets —
# perhaps a sign the clustering is not well separated.
len(dic)
dic
label[1520554]
# Explode multi-label papers into one row per label to count label frequencies.
data = kaggle.join(kaggle['categories'].str.split(' ', expand=True).stack().reset_index(level=1, drop=True).rename('label'))
from collections import Counter
num = Counter(data['label'])
num['cond-mat.dis-nn']
num
|
code/cluster.ipynb
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.5.2
# language: julia
# name: julia-1.5
# ---
# +
using Convex, SCS

# Define the objective (a Rosenbrock-style function), its gradient and Hessian.
f = x -> (1-x[1])^2 + 5*(x[2] - x[1]^2)^2
∇f = x -> [2*(10*x[1]^3-10*x[1]*x[2]+x[1]-1), 10*(x[2]-x[1]^2)]
H = x -> [-20*(x[2] - x[1]^2) + 40*x[1]^2+2 -20*x[1];
          -20*x[1] 10]

##################################################################################
# Solve the local quadratic model of f restricted to a ball of radius δ around x0.
# Returns the minimizer and the model's optimal value.
function solve_trust_region_subproblem(f, ∇f, H, x0, δ)
    x = Variable(length(x0))
    # Second-order Taylor model of f at x0.
    p = minimize(f(x0) + ∇f(x0)'*(x-x0) + quadform(x-x0, H(x0))/2)
    # Trust-region constraint.
    p.constraints += norm(x-x0) <= δ
    solve!(p, () -> SCS.Optimizer(verbose=false))
    return (x.value, p.optval)
end
###################################################################################
using Plots, LaTeXStrings, Printf
pyplot(size=(450, 400))
# add PGFPlotsX (Textlive, MikTeX)
# Rosenbrock function (plotting copy of f)
fp(x1,x2) = (1 - x1)^2 + 5*(x2 - x1^2)^2
x1 = -2:0.05:2
x2 = -2:0.05:2
pl1 = contour(x1, x2, fp, framestyle = :box,
              levels = [1,2,3,5,10,20,50,100],
              c = cgrad(:viridis, rev = true, scale = :exp),
              ylab = L"x_2",
              xlab = L"x_1",
              xlims = (-2,2),
              ylims = (-2,2),
              legend = false,
              grid = false,
              label = false)
# -
# Trust-region update parameters: accept step if η > η1, expand radius if η > η2.
η1=0.25
η2=0.5
γ1=0.5
γ2=2.0
δ=0.5 # trust-region radius
# Iteration = 1
x = [-1.75,-1.75]
y = f(x)
@time x′, y′ = solve_trust_region_subproblem(f, ∇f, H, x, δ)
# η = actual improvement / predicted improvement.
η = (y - f(x′)) / (y - y′)
if η < η1
    δ *= γ1
else
    x, y = x′, y′
    if η > η2
        δ *= γ2
    end
end
x, δ
# +
# Draw the current trust region as a circle around x.
θs = range(0,stop=2π,length=101)
cx1 = x[1] .+ δ*cos.(θs)
cx2 = x[2] .+ δ*sin.(θs)
i = 1
msg = @sprintf("Iter = %i", i)
plot!(cx1, cx2, label = msg)
# +
# Iteration = 2
y = f(x)
x′, y′ = solve_trust_region_subproblem(f, ∇f, H, x, δ)
η = (y - f(x′)) / (y - y′)
println(η)
if η < η1
    δ *= γ1
else
    x, y = x′, y′
    if η > η2
        δ *= γ2
    end
end
println(x, δ)
θs = range(0,stop=2π,length=101)
cx1 = x[1] .+ δ*cos.(θs)
cx2 = x[2] .+ δ*sin.(θs)
i += 1
msg = @sprintf("Iter = %i", i)
plot!(cx1, cx2, label = msg)
# -
x, f(x)
|
Code/Chapter 4 - Local Descent/4.5.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Deep Learning
# The _objectives_ in the following `Notebook` are:
#
# 1. Perform Machine Learning Modelling using <code style="background:yellow;color:black">Random Forest, RNN & LSTM based architecture</code> by proper analysis and description.
# 2. Generating features with the help of feature engineering as in the case of DL the loss is very different.
# 3. Perform Comparision by models and trying to find the best one with the help of our metric function.
#
#
# __Some important points:__
# - For solving this problem we will be following the steps discussed in the NB, and `boxes` will be used for explaining the observations
#
#
# <div class="alert alert-block alert-danger">
#
# <b>Caution:</b>
#
# Due to `complex architecture and low memory in system` we will not train our model extensively.
#
# </div>
#
# > Since we now want to predict the __‘Category_Reporting’__ for rest of the month Of May we have to build a model based on our data so far. We will try to model the distribution so far.
# ## Getting requirements ready
# run the libraries
import pandas as pd
import numpy as np
import warnings
from sklearn import metrics
from termcolor import colored
import tensorflow as tf
from tensorflow.keras import Sequential,Model
from collections import Counter
from tensorflow.keras.layers import Dense, Dropout, LSTM, Activation, TimeDistributed, Flatten,Input,SimpleRNN
from tensorflow.keras.optimizers import Adam
from sklearn.utils import class_weight
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping,TensorBoard
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler, power_transform
from sklearn.metrics import f1_score, precision_score, confusion_matrix, recall_score, accuracy_score,classification_report,roc_auc_score
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestClassifier
warnings.filterwarnings("ignore")
# %matplotlib inline
# %run viz.py
# %load_ext tensorboard
# Read the cleaned dataset and preview it.
df = pd.read_csv('EMP_Cleaned_File.csv')
df.head()
# ## Data Cleaning
# In this we will perform 3 main steps:
#
# 1. Select relevant features.
# 2. Normalise the continuous variables and `transform` the categorical variables.
# 3. Perform standard scaling and prepare the data for modelling.
# ### Feature selection
#
# > We have selected some features from the data based on our understanding of `domain and necessary relations` for classification of models.
#
#
# > We will use these features for our prediction task.
# +
## feature selection (model inputs + the 'Category_Reporting' target)
features = ['Customer_Age','OrderValue','Revenue_Goods','Delivery_Value','Day_Sin','Day_Cos','Month_Sin','Month_Cos','Gender','Category_Reporting','Marketing_Channel','cluster']
## categorical features
cat_features = ['Gender','Marketing_Channel']
# -
# ### Label encoding categorical variables
# +
## make the encoding
# NOTE(review): ord_enc is created but each column below fits its own fresh encoder.
ord_enc = LabelEncoder()
for i in cat_features:
    df[i] = LabelEncoder().fit_transform(df[i])
## keep this encoder instance so predictions can be mapped back to label names later
le = LabelEncoder()
df['Category_Reporting'] = le.fit_transform(df['Category_Reporting'].values)
# -
## correlation heat map (visualisation comes from viz.py, loaded by %run above)
visualisation(df[features]).heat_map()
# ### Normalisation of value
## split out inputs X and target y
X = df[features]
X = X.drop(columns = ['Category_Reporting']).values
y = df['Category_Reporting'].values
## scale all inputs into [0, 1]
scaler = MinMaxScaler(feature_range=(0, 1))
X = scaler.fit_transform(X)
# ```python
# ## apply box cox transformation
# X = power_transform(X, method='yeo-johnson')
# y = power_transform(X, method='yeo-johnson')
# ```
# ### Metrics
#
# Since we are mostly concerned with accuracy we will check four accuracy metrics:
# - Precision
# - Recall
# - Accuracy
# - F1 Score
def calc_metrics_class(pred, y_test):
    """Print weighted precision, recall, accuracy and F1 for a prediction.

    Parameters
    ----------
    pred : array-like
        Predicted class labels.
    y_test : array-like
        Ground-truth class labels.
    """
    print(colored("Generating the results wait for it....",color = 'red', attrs=['bold']))
    # BUG FIX: sklearn metric signatures are (y_true, y_pred) — ground truth
    # must come first, otherwise the reported precision and recall are swapped.
    precision = precision_score(y_test, pred, average='weighted')
    recall = recall_score(y_test, pred, average='weighted')
    f1 = f1_score(y_test, pred, average='weighted')
    accuracy = accuracy_score(y_test, pred)
    print('Precision:', precision)
    print('Recall:', recall)
    print('Accuracy:', accuracy)
    print('F1 Score:', f1)
# ## Class balance
## inspect the class distribution of the target
Counter(y)
# <div class="alert alert-block alert-info">
# <b>Data Balance:</b> We see our data is not at all balanced. For this purpose we specifically assign weights to our corresponding labels. We do so, by giving higher weight to labels having low frequency and lower to labels having more frequency.</div>
## compute class weights for the imbalanced target
# NOTE(review): this rebinds `class_weight`, shadowing the sklearn module of the
# same name imported at the top of the file.
class_weight = class_weight.compute_class_weight('balanced',
                                                 np.unique(y), y)
class_weight = dict(enumerate(class_weight))
# ## ML Model
#
# The machine learning model which we will be using is `Random Forest` for baseline accuracy and we will then go to __Deep Learning__ for more accuracy
## Random Forest with Best Params
def random_forest(X_train,y_train,X_test,y_test):
    """Grid-search a RandomForestClassifier, refit with the best
    hyper-parameters, print test-set metrics and return the fitted model.

    Relies on the module-level ``class_weight`` dict to balance rare labels.
    """
    print(colored("Performing modelling for Random forest",color = 'green', attrs=['bold']))
    # Create Random Forest Model
    rf_model = RandomForestClassifier(random_state=1)
    # Specifying hyperparams for the search (narrowed it down due to computation)
    param_grid = {
        'n_estimators': [10],
        'max_features': [0.3],
        'min_samples_split': [3],
        'class_weight' : [class_weight]
    }
    # Fit the model and find best hyperparams
    grid_model = GridSearchCV(estimator=rf_model, param_grid=param_grid, cv=5, n_jobs=-1)
    grid_model.fit(X_train,y_train)
    # Refit a fresh model with the best params on the full training set
    print("Best parameters =", grid_model.best_params_)
    model_clf = rf_model.set_params(**grid_model.best_params_)
    model_clf.fit(X_train, y_train)
    # Time to test the model for test set
    print(colored("Test results for test set",color = 'yellow', attrs=['bold']))
    # BUG FIX: predict_proba returns an (n_samples, n_classes) probability
    # matrix, which breaks the label-based metrics in calc_metrics_class;
    # use hard class predictions instead.
    pred = model_clf.predict(X_test)
    ## report the metrics
    calc_metrics_class(pred,y_test)
    # Returning model
    return model_clf
## hold out 20% of the data for evaluation
# NOTE(review): random_state expects an integer seed; True is coerced to 1 —
# consider an explicit seed for clarity.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,random_state=True)
# > Skipping due to low __RAM__ issues
# +
## model of random forest
#rf = random_forest(X_train,y_train,X_test,y_test)
# -
# <div class="alert alert-block alert-warning">
#
# We set a __baseline accuracy__ to begin with and hope for much better results as we move towards sequence based models.
#
# </div>
# ## Deep Learning
# ### Data Transform
#
# <div class="alert alert-block alert-info">
# <b>Code:</b> Since the models which we are using use sequence to sequence encoding we need to take care of shape of our data. The transformation of data is important in this case. For this purpose we will use the custom window to transform the data from 2D to 3D.</div>
#
def lstm_data_transform(x_data, y_data, num_steps=1):
    """Reshape 2-D data into the 3-D (samples, timesteps, features) layout
    expected by recurrent models, via a sliding-window pass.

    Each output sample pairs a window of ``num_steps`` consecutive rows of
    ``x_data`` with the single ``y_data`` target that immediately follows
    the window.
    """
    n_rows = x_data.shape[0]
    windows, targets = [], []
    start = 0
    # Slide the window one row at a time; stop once the target index
    # (start + num_steps) would fall outside the data.
    while start + num_steps < n_rows:
        stop = start + num_steps
        windows.append(x_data[start:stop])
        targets.append(y_data[stop])
        start += 1
    return np.array(windows), np.array(targets)
## window the data into (samples, 2 timesteps, features) for the sequence models
X_, y_ = lstm_data_transform(X, y, num_steps=2)
## train and test split
# NOTE(review): random_state expects an integer seed; True is coerced to 1.
X_train, X_test, y_train, y_test = train_test_split(X_, y_, test_size=0.2,random_state=True)
# ## Modelling
#
# Two Types of Deep Learning model used here to train the sequential data are:
#
# 1. `RNN Model:` The reason to choose this model was to check what is the effect of sequence transformation with the introduction of the <code style="background:yellow;color:black">previous cell state.</code> Our hypothesis is it captures the temporal relationship of our data well.
#
#
# 2. `LSTM Model`: This adds to the RNN model with an extra `Forget Gate` which yields better accuracy.
#
# > This model works well when we have more training data. But in our case, we have less data so let's see how well it performs.
#
# ### RNN Model
## define our params: recurrent layer widths, dense layer widths, dropout rate
layers = [150,100]
dense_layer = [500,200]
dropout = 0.4
# +
## declare the input shape (timesteps, features)
inp = Input(shape=X_train.shape[1:])
## first RNN layer
x = SimpleRNN(layers[0], return_sequences=True)(inp) # add first layer
x = Dropout(dropout)(x)
## stack the remaining RNN layers, each followed by dropout
for i in layers[1:] :
    x = SimpleRNN(i, activation='relu', return_sequences=True)(x) # add successive layers
    x = Dropout(dropout)(x) # add dropout for layer
## Activate the neuron
x = Activation("relu")(x)
## flatten and add the dense head
x = Flatten()(x)
for i in dense_layer:
    x = Dense(i,activation='relu')(x)
# add output layer
y = Dense(len(np.unique(y_train).tolist()), activation='softmax')(x) # output layer with softmax activation
## Instantiate the model
rnn_model = Model(inp,y)
## Set ADAM opt and see the model
opt = Adam(lr = 0.001)
rnn_model.compile(loss='sparse_categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
rnn_model.summary()
# +
# Create a checkpoint for models
# NOTE(review): monitor="val_mse" is not among the compiled metrics
# (loss + accuracy) — confirm the intended monitored quantity.
checkpoint = tf.keras.callbacks.ModelCheckpoint("RNN.h5",monitor="val_mse",verbose = 1, # creating a callback for saving the model
                                                save_best_only = False,save_weights_only = False, # save the weights
                                                mode= "auto",save_freq= "epoch",
                                                options=None)
# tensorboard logs
# Define Tensorboard as a Keras callback
tensorboard = TensorBoard(
    log_dir='.\RNN_logs', histogram_freq=0,write_graph=True,
    write_grads=False, write_images=False, embeddings_freq=0, embeddings_layer_names=None,
    embeddings_metadata=None, embeddings_data=None, update_freq='epoch')
keras_callbacks = [tensorboard]
## fit and train (single epoch only, to stay within memory limits)
rnn_model.fit(X_train,y_train, epochs=1, batch_size=512,validation_data= (X_test,y_test),class_weight = class_weight,
              callbacks = [checkpoint,keras_callbacks])
# -
## evaluate on the held-out set
## convert softmax outputs to hard class labels
pred = np.argmax(rnn_model.predict(X_test),axis = 1).flatten().tolist()
## report the metrics
calc_metrics_class(pred,y_test)
# %tensorboard --logdir=RNN_logs
# ## LSTM
## define our params: recurrent layer widths, dense layer widths, dropout rate
layers = [175,50]
dense_layer = [125,75]
dropout = 0.5
# +
## declare the input shape
# NOTE(review): this uses X_ rather than X_train — the trailing dimensions are
# the same, but X_train.shape[1:] would be more consistent with the RNN cell.
inp = Input(shape=X_.shape[1:])
## first LSTM layer
x = LSTM(layers[0], return_sequences=True)(inp) # add first layer
## stack the remaining LSTM layers, each followed by dropout
for i in layers[1:] :
    x = LSTM(i, activation='relu', return_sequences=True)(x) # add successive layers
    x = Dropout(dropout)(x) # add dropout
## adding time distributed layer
x = TimeDistributed(Dense(5))(x) # add dense layer
x = Activation("relu")(x)
## flatten and add the dense head
x = Flatten()(x)
for i in dense_layer:
    x = Dense(i,activation='relu')(x) # add dense layers
# add output layer
y = Dense(len(np.unique(y_train).tolist()), activation='softmax')(x) # output layer with softmax activation
## Instantiate the model
lstm_model = Model(inp,y)
## Set ADAM opt and see the model
opt = Adam(lr = 0.001)
lstm_model.compile(loss='sparse_categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
lstm_model.summary()
# +
# Create a checkpoint for models
# NOTE(review): monitor="val_mse" is not among the compiled metrics — confirm.
checkpoint = tf.keras.callbacks.ModelCheckpoint("LSTM.h5",monitor="val_mse",verbose = 1, # creating a callback for saving the model
                                                save_best_only = False,save_weights_only = False, # save the weights
                                                mode= "auto",save_freq= "epoch",
                                                options=None)
# tensorboard logs
# Define Tensorboard as a Keras callback
tensorboard = TensorBoard(
    log_dir='.\LSTM_logs', histogram_freq=0,write_graph=True,
    write_grads=False, write_images=False, embeddings_freq=0, embeddings_layer_names=None,
    embeddings_metadata=None, embeddings_data=None, update_freq='epoch')
keras_callbacks = [tensorboard]
# Train for a single epoch (memory constraints), validating on the held-out set.
# NOTE(review): callbacks receives [checkpoint, keras_callbacks] where the
# second element is itself a list — confirm this nesting is intended.
lstm_model.fit(X_train,y_train, epochs=1, batch_size=512,validation_data= (X_test,y_test),class_weight = class_weight,
               callbacks = [checkpoint,keras_callbacks])
# -
## evaluate on the held-out set
## convert softmax outputs to hard class labels
pred = np.argmax(lstm_model.predict(X_test),axis = 1).flatten().tolist()
## report the metrics
calc_metrics_class(pred,y_test)
# %tensorboard --logdir=LSTM_logs
# <div class="alert alert-block alert-warning">
#
# </div>
# ## Prediction for May
#
#
# Now we will see the prediction of our Model for the `Month of May`
## re-read the cleaned data
df = pd.read_csv('EMP_Cleaned_File.csv')
## derive a numeric month column from the order date
df['Date_Order'] = pd.to_datetime(df['Date_Order'])
df['month_cuttoff'] = [int(i.strftime('%m')) for i in df['Date_Order']]
## keep only the rows from the month of May
df = df[df['month_cuttoff'] == 5].reset_index(drop=True)
# +
## feature selection (same feature list as for training)
features = ['Customer_Age','OrderValue','Revenue_Goods','Delivery_Value','Day_Sin','Day_Cos','Month_Sin','Month_Cos','Gender','Category_Reporting','Marketing_Channel','cluster']
## categorical features
cat_features = ['Gender','Marketing_Channel']
# +
## make the encoding
# NOTE(review): encoders and the scaler below are re-fit on the May subset
# rather than reusing the ones fit during training — the resulting codes and
# scaling may not line up with what the models were trained on; verify.
ord_enc = LabelEncoder()
for i in cat_features:
    df[i] = LabelEncoder().fit_transform(df[i])
## keep the target encoder so predictions can be mapped back to label names
le = LabelEncoder()
df['Category_Reporting'] = le.fit_transform(df['Category_Reporting'].values)
# -
## split out inputs X and target y
X = df[features]
X = X.drop(columns = ['Category_Reporting']).values
y = df['Category_Reporting'].values
## scale all inputs into [0, 1]
scaler = MinMaxScaler(feature_range=(0, 1))
X = scaler.fit_transform(X)
## window into (samples, 2 timesteps, features)
X, y = lstm_data_transform(X, y, num_steps=2)
# _Get the prediction right and get the job!!_
# +
## LSTM predictions, mapped back to category names
predict_lstm= np.argmax(lstm_model.predict(X),axis = 1).flatten().tolist()
predict_lstm= le.inverse_transform(predict_lstm)
Counter(predict_lstm)
# +
## RNN predictions, mapped back to category names
predict_rnn= np.argmax(rnn_model.predict(X),axis = 1).flatten().tolist()
predict_rnn= le.inverse_transform(predict_rnn)
Counter(predict_rnn)
# -
# <div class="alert alert-block alert-info">
# <b>Prediction:</b>
#
# I predict customers will be more interested in `Funshirts & Hoodies - Apparel and Fashion - Accessories` in May 2021.
#
# </div>
# <div class="alert alert-block alert-success">
# <b>Success:</b>
# Both these models gives us better accuracy and results and I finally wait for our prediction to be accurate this summer to get a job...... :)
# </div>
|
Interview_Case_Studies/EMP_DataScientist_CS/04_DL_EMP.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Internship
#
# ## 5. To Explore Business Analytics (Level - Beginner & Intermediate)
#
# ● Perform ‘Exploratory Data Analysis’ on the provided dataset ‘SampleSuperstore’
# ● As a business manager, try to find out the weak areas where you can work to make more profit.
# ● What all business problems you can derive by exploring the data?
# ### Importing Libraries
import pandas as pd
import numpy as np
import seaborn as sb
import matplotlib.pyplot as plt
import numpy as np
# Load the Sample Superstore dataset.
# NOTE(review): the Windows path uses unescaped backslashes — a raw string
# (r"...") would be safer, but it is left unchanged here.
df=pd.read_csv("E:\AI-Application-Implementation\TSF-Internship\SampleSuperstore.csv")
df.head()
df.shape
df.columns
df.isnull().sum()
df.describe()
# ### Correlation Matrix
cor=df.corr()
sb.heatmap(cor, annot=True)
# ### Numerical Features
numerical_features = [i for i in df.columns if df[i].dtypes != 'O']
numerical_features
# ### Categorical Features
categorical_features = [i for i in df.columns if df[i].dtypes == 'O']
categorical_features
# Count the distinct values of every categorical column.
for i in categorical_features:
    print('Feature : {}\nNumber of unique values : {}\n'.format(i, len(df[i].unique())))
# +
# Annotated count plots for each categorical feature; "Sub Category" gets a
# wider figure and maps to the actual column name 'Sub-Category'.
plot=["Ship Mode","Segment","Region","Category","Sub Category"]
for i in plot:
    print("***********",i,"**********")
    if i=="Sub Category":
        plt.figure(figsize = (15, 6))
        sm = sb.countplot('Sub-Category', data = df,palette = "Set1")
        for p in sm.patches:
            sm.annotate(format(p.get_height(), '.1f'),
                        (p.get_x() + p.get_width() / 2.,
                         p.get_height()),
                        ha = 'center', va = 'center',
                        xytext = (0, 9),
                        textcoords = 'offset points')
        plt.show()
        break
    plt.figure(figsize = (9, 6))
    sm = sb.countplot(i, data = df,palette = "Set1")
    for p in sm.patches:
        sm.annotate(format(p.get_height(), '.1f'),
                    (p.get_x() + p.get_width() / 2.,
                     p.get_height()),
                    ha = 'center', va = 'center',
                    xytext = (0, 9),
                    textcoords = 'offset points')
    plt.show()
# -
# ## Sales with respect to Category
# +
s_group = df.groupby('Category')['Sales'].sum().reset_index()
print(s_group)
labels = df['Category'].unique()
plt.figure(figsize = (6, 6))
plt.pie(s_group['Sales'], autopct='%1.1f%%',labels = labels, explode = (0.03,0.03,0.03))
plt.show()
# -
# ## Creating Subgroups of Profit
# profit - whose profits are greater than 0
# neutral - whose profits are equal to 0
# loss - whose profits are less than 0
# NOTE(review): the `neutral` subgroup is never used after this cell.
profit = df[df['Profit'] > 0]
neutral = df[df['Profit'] == 0]
loss = df[df['Profit'] < 0]
# ## Profit with respect to Category
# +
p_group = profit.groupby('Category')['Profit'].sum().reset_index()
print(p_group)
labels = df['Category'].unique()
plt.figure(figsize = (6, 6))
plt.pie(p_group['Profit'],autopct='%1.1f%%',labels = labels, explode = (0.03,0.03,0.03))
plt.title('Profit with respect to Category', fontweight = 'bold', size = 15)
plt.show()
# -
#
#
#
#
# ## Profit with respect to Region
# +
rp_group = profit.groupby('Region')['Profit'].sum().reset_index()
print(rp_group)
labels = df['Region'].unique()
plt.figure(figsize = (6, 6))
plt.pie(rp_group['Profit'],autopct='%1.1f%%',labels = labels, explode = (0.03,0.03,0.03, 0.03))
plt.show()
# -
# ## Profit with respect to Segment
# +
sp_group = profit.groupby('Segment')['Profit'].sum().reset_index()
print(sp_group)
labels = df['Segment'].unique()
plt.figure(figsize = (6, 6))
plt.pie(sp_group['Profit'],autopct='%1.1f%%',labels = labels, explode = (0.03,0.03, 0.03))
plt.show()
# -
# ## Profit with respect to Discount
# +
# Compare mean profit of discounted vs non-discounted profitable orders.
disc = profit[profit['Discount'] > 0]
w_disc = profit[profit['Discount'] == 0]
labels = ['Discount', 'Without Discount']
size = [disc['Profit'].mean(), w_disc['Profit'].mean()]
plt.figure(figsize = (6, 6))
plt.pie(size, autopct='%1.1f%%', labels = labels, explode = (0.03, 0.03))
plt.show()
# -
# ## Profit with respect to Sub-Category
# +
sscb_group = profit.groupby('Sub-Category')['Profit'].sum().reset_index().sort_values(by = 'Profit', ascending=False)
plt.figure(figsize = (20, 8))
pcat = sb.barplot('Sub-Category', 'Profit', data = sscb_group)
for p in pcat.patches:
    pcat.annotate(format(p.get_height(), '.1f'),
                  (p.get_x() + p.get_width() / 2., p.get_height()),
                  ha = 'center', va = 'center',
                  xytext = (0, 9),
                  textcoords = 'offset points')
plt.show()
# -
# ## Cities with greater Profit
# +
sscbct_group = profit.groupby('City')['Profit'].sum().reset_index().sort_values(by = 'Profit', ascending=False)
sscbct_group = sscbct_group.head(5)
plt.figure(figsize = (9, 6))
pcatc = sb.barplot('City', 'Profit', data = sscbct_group)
for p in pcatc.patches:
    pcatc.annotate(format(p.get_height(), '.1f'),
                   (p.get_x() + p.get_width() / 2., p.get_height()),
                   ha = 'center', va = 'center',
                   xytext = (0, 9),
                   textcoords = 'offset points')
plt.show()
# -
# ## Loss with respect to Category
# +
# Loss totals are negative; flip the sign so the pie chart shows magnitudes.
lc_group = loss.groupby('Category')['Profit'].sum().reset_index()
lc_group['Profit'] = lc_group['Profit'].apply(lambda x: x*-1)
print(lc_group)
labels = df['Category'].unique()
plt.figure(figsize = (6, 6))
plt.pie(lc_group['Profit'], autopct='%1.1f%%',labels = labels, explode = (0.03,0.03,0.03))
plt.show()
# -
# ## Loss with respect to Region
lr_group = loss.groupby('Region')['Profit'].sum().reset_index()
lr_group['Profit'] = lr_group['Profit'].apply(lambda x: x*-1)
print(lr_group)
labels = df['Region'].unique()
plt.figure(figsize = (6, 6))
plt.pie(lr_group['Profit'], autopct='%1.1f%%',labels = labels, explode = (0.03,0.03,0.03, 0.03))
plt.show()
# ## Loss with respect to Segment
# +
lsc_group = loss.groupby('Segment')['Profit'].sum().reset_index()
lsc_group['Profit'] = lsc_group['Profit'].apply(lambda x: x*-1)
print(lsc_group)
labels = df['Segment'].unique()
plt.figure(figsize = (6, 6))
plt.pie(lsc_group['Profit'], autopct='%1.1f%%',labels = labels, explode = (0.03, 0.03, 0.03))
plt.show()
# -
# ## 10 Cities with greater loss
# +
sscbctl_group = loss.groupby('City')['Profit'].sum().reset_index().sort_values(by = 'Profit', ascending=False)
sscbctl_group['Profit'] = sscbctl_group['Profit'].apply(lambda x: x*-1)
sscbctl_group = sscbctl_group.head(10)
plt.figure(figsize = (12, 6))
pcatcl = sb.barplot('City', 'Profit', data = sscbctl_group)
for p in pcatcl.patches:
    pcatcl.annotate(format(p.get_height(), '.1f'),
                    (p.get_x() + p.get_width() / 2., p.get_height()),
                    ha = 'center', va = 'center',
                    xytext = (0, 9),
                    textcoords = 'offset points')
plt.show()
# -
# ## Conclusion
#
#
# ### Dataset Quality:
# The dataset was quite good and well organized. There were enough observations to analyze the business conditions and perform Exploratory Data Analysis, and there were no missing values either.
#
#
# #### Statistical Informations:
# #### Total Sales: 2297200.86
# #### Total Profit: 442528.31
# #### Total Loss: 156131.29
#
#
#
#
# #### Customer Behaviour:
#
# Most of the customer choose Standard Class over the other three shipping modes.
# We got a large amount of customers from the West and East region.
# Customers prefer Office Supplies Products from us most.
# Binders, Paper, Furnishings, Phones and Accessories are the top wanted items from our shop.
#
# #### Profit:
#
# We got highest sales and profit (41.6%) from Technology. Interestingly, we got highest profit by selling less number of items from Technology.
# As our customers are mostly from the West side, we got most of our profit from there.
# Our profit got decreased, when we offer any discount on a product. We have got 64.6% of our profit from the products without discount and only 36.4% of our profit from the products with discount
# Top 10 products which brought most of the profits: Binders, Copiers, Phones, Accessories, Chairs, Paper, Machines, Storage, Appliances, Furnishings
# Top 5 Cities with greater profit: New York City, Los Angeles, Seattle, San Francisco, Detroit
#
# #### Places need to be improved:
#
# Customers are preferring the Standard Class more. We should upgrade the other shipping modes and make them more customer-friendly.
# Furniture category brings most of the losses. We will have to look into it and solve the issues.
# We got fewer customers from the South with a higher loss value. In the Southern part of the country, we will have to be more customer-friendly and get to know their needs.
# As Discount couldn't improve profit scale, we should be more careful regarding our Discount policy.
# Top 10 Cities with greater loss: Coppell, Homestead, Elyria, Pensacola, Loveland, North Miami, Ormond Beach, Port Saint Lucie, Thomasville, Des Moines. We will have to look into these cities and find out the reasons for the losses.
|
5. To Explore Business Analytics.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="azdRK1MoR87I"
# !pip install transformers[sentencepiece] datasets
# + colab={"base_uri": "https://localhost:8080/", "height": 359, "referenced_widgets": ["0f82ca2682fb4db3ba350fae8a228494", "0398799725d3438b8ec0d0539e2fb61a", "6e98994eaeec4ade891aae120ed4f100", "ece4f4c698b04afea5058e183a7e0d05", "<KEY>", "43d244c2fda64b91a1ce0ffdca5421cf", "eacc44033a06429aa5cc8c54d190d943", "1fe1a293a45942a192fd410fc2319d25", "<KEY>", "915b948bb17440ddb54eef8b6d8de713", "952095683b9547bea8049fe837610185", "34bcf3a88bac420baf3e5b93407e8fb3", "b8ff3cea496d4addb4b0ea39c9296815", "d60f11d863074f899907f68c8d5771ca", "3a427996bf4b4ab39029c4a9b266d0b0", "724909ddd8da400ca235a974b17c92f7", "2c0d9f366d9a4e4b99767c6329e55cf0", "1dee34ca4fa741b8a7c31325118dc896", "77312b04e3af48fba29ade7b330c5f7d", "7a1ea104cfe74244a470d77b79355a2f", "2fab8f941ffd4bc1925ed9c977132be2", "58ba42ae99f44883a8446375371ab042"]} id="aedQ2BojSKd0" executionInfo={"status": "ok", "timestamp": 1648647862773, "user_tz": -420, "elapsed": 6306, "user": {"displayName": "<NAME>", "userId": "00567169550630174790"}} outputId="97d4296c-0d7f-4e93-90c2-d37332fced82"
from datasets import load_dataset

# Load the Spanish and English splits of the multilingual Amazon reviews corpus.
spanish_dataset = load_dataset("amazon_reviews_multi", "es")
english_dataset = load_dataset("amazon_reviews_multi", "en")
english_dataset
# + colab={"base_uri": "https://localhost:8080/"} id="UYiAPwTOSX2p" executionInfo={"status": "ok", "timestamp": 1648618007433, "user_tz": -420, "elapsed": 18, "user": {"displayName": "<NAME>", "userId": "00567169550630174790"}} outputId="7f377423-54c4-4f8a-d64c-e7fb9dd54f9d"
# Inspect the Spanish splits (notebook cell output).
spanish_dataset
# + colab={"base_uri": "https://localhost:8080/"} id="C5Lpxd0USdv5" executionInfo={"status": "ok", "timestamp": 1648618007433, "user_tz": -420, "elapsed": 6, "user": {"displayName": "<NAME>", "userId": "00567169550630174790"}} outputId="276ec46a-f7eb-4425-9690-142478247e62"
def show_samples(dataset, num_samples=3, seed=42):
    """Print the title and body of a few randomly sampled training reviews."""
    sampled = dataset["train"].shuffle(seed=seed).select(range(num_samples))
    for entry in sampled:
        print(f"\n'>> Title: {entry['review_title']}'")
        print(f"'>> Review: {entry['review_body']}'")


show_samples(english_dataset)
# + colab={"base_uri": "https://localhost:8080/"} id="1qiY0n1zSpXJ" executionInfo={"status": "ok", "timestamp": 1648618039468, "user_tz": -420, "elapsed": 506, "user": {"displayName": "<NAME>", "userId": "00567169550630174790"}} outputId="d5176ac7-4754-4ce6-e3e3-34f04f883d53"
# Switch to pandas output so the training split can be sliced as a DataFrame.
english_dataset.set_format("pandas")
english_df = english_dataset["train"][:]
# Show counts for top 20 products
english_df["product_category"].value_counts()[:20]
# + id="OXq6JCkZSynK" executionInfo={"status": "ok", "timestamp": 1648647873665, "user_tz": -420, "elapsed": 330, "user": {"displayName": "<NAME>", "userId": "00567169550630174790"}}
def filter_books(example):
    """Keep only reviews whose product category is a printed or digital book."""
    category = example["product_category"]
    return category in ("book", "digital_ebook_purchase")
# + id="aLIKApUSAbuF"
english_dataset.reset_format()
# + colab={"base_uri": "https://localhost:8080/"} id="2ltIPSSoAfej" executionInfo={"status": "ok", "timestamp": 1648647874600, "user_tz": -420, "elapsed": 5, "user": {"displayName": "<NAME>", "userId": "00567169550630174790"}} outputId="3a775361-5dec-4cc4-99d2-4a6c3502574e"
spanish_books = spanish_dataset.filter(filter_books)
english_books = english_dataset.filter(filter_books)
# + colab={"base_uri": "https://localhost:8080/", "height": 385, "referenced_widgets": ["bf5ec4a6ab2d43f1982d24a5818babe1", "a1118b162900440c9de04d691d8de1d1", "7e32f56f48da48b39cb3f86a31b6be68", "d3d554dec13246dab5f87e850458cce3", "d6d2b0a5db144d298bea32d93b69b9ff", "<KEY>", "93393f666edb45ba88941ca77ee5b6d1", "<KEY>", "<KEY>", "ef737940ede3416a92d6aafbd531d5fb", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "08ee72db6ecc4f91967f013aadd6346c", "<KEY>", "<KEY>", "9a8fa9ff4619481e98652c20e029adb0", "7d26ff6ae40745d7b97650adfb9e525f", "ee182b91212d48758ab1db4626a890d8", "2776739438eb4d86ac4536b04b6146bb", "<KEY>", "de42a0b90c514e6da248a2edfbdf9f28", "cdff419ed1794a28b9d7e363a2c6d206", "<KEY>", "<KEY>", "<KEY>", "42078d6993d34e27aea019f338c9aa2f", "bca4401dad874bd6a6fabe713a24fc81", "318ab3554d674eb88ce70626045d2ea7", "<KEY>", "<KEY>", "5d168ae9ab244c9e86e12401b5a8ad92", "<KEY>", "45fadd9e83a1417cb44c2437c295401e", "<KEY>", "<KEY>", "17f76ca1bca64723a5c397d9e660923d", "fbf1e9f7ce3d436a8ce0b40183f85343", "<KEY>", "35ef290d39954af4b5ee6409e129e53e", "<KEY>", "7b901339f66b41ba8ddae6e99ebe62c4", "7de37263676c4493b3ba2f96a6ba2f47", "<KEY>", "5dca4b1ee3f1495fb5c108a11821bef4", "08e6d3ad896d4f4d82e4ece61b311caf", "<KEY>", "<KEY>", "4fc9a6666fa245de8e481c57a93fe0e8", "<KEY>", "<KEY>", "<KEY>", "f85ea42712d7462387fdcfef23a58e72", "243beb9ab19745aabf4632d02c1fb425", "b93d3e7e55a843af903a0de71be9f6da", "a7ab811fd4cd46f79223732eed42d986", "76f4962ec1fb476b8de36863593f5d68", "c921a3e399a34764abac50eb1a107485", "38a94d4351874b27b89d28630bb9a8ce", "6428b12ccf544f9a9b9b58032e4a4330", "55e3b59016214fefba34811787de1319", "f07f38656a884cbabb1115b81ad2af67", "b35707a1c64249be9d4481d1fadbe196", "d55701f61d604f658d78392800701493", "0585332ff07d4b46910ca4272db93641"]} id="8TSderrES1GV" executionInfo={"status": "ok", "timestamp": 1648618083910, "user_tz": -420, "elapsed": 8026, "user": {"displayName": "<NAME>", "userId": "00567169550630174790"}} outputId="17441aba-14a4-4312-a267-d9d4508d40a6"
show_samples(english_books)
# + id="8JDt1L39An5B"
from datasets import concatenate_datasets, DatasetDict
books_dataset = DatasetDict()
for split in english_books.keys():
books_dataset[split] = concatenate_datasets(
[english_books[split], spanish_books[split]]
)
books_dataset[split] = books_dataset[split].shuffle(seed=42)
# + colab={"base_uri": "https://localhost:8080/"} id="IE-tB2Z2TATR" executionInfo={"status": "ok", "timestamp": 1648618134398, "user_tz": -420, "elapsed": 307, "user": {"displayName": "<NAME>", "userId": "00567169550630174790"}} outputId="f29a1bff-b28f-4b4b-b495-2d372937f162"
# Peek at a few examples
show_samples(books_dataset)
# + id="bou1J6LWTHxs"
books_dataset = books_dataset.filter(lambda x: len(x["review_title"].split()) > 2)
# + id="1XOScfJCTSJf" executionInfo={"status": "ok", "timestamp": 1648647899253, "user_tz": -420, "elapsed": 3478, "user": {"displayName": "<NAME>", "userId": "00567169550630174790"}}
from transformers import AutoTokenizer
model_checkpoint = "google/mt5-small"
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
# + colab={"base_uri": "https://localhost:8080/"} id="ORXw0-GCUCUp" executionInfo={"status": "ok", "timestamp": 1648618384733, "user_tz": -420, "elapsed": 379, "user": {"displayName": "<NAME>", "userId": "00567169550630174790"}} outputId="0bd46eea-c4de-4ad5-d9c4-2201e7d87bd6"
inputs = tokenizer("I loved reading the Hunger Games!")
inputs
# + colab={"base_uri": "https://localhost:8080/"} id="5T9TmbLnUHtY" executionInfo={"status": "ok", "timestamp": 1648618406228, "user_tz": -420, "elapsed": 297, "user": {"displayName": "<NAME>", "userId": "00567169550630174790"}} outputId="34f4fe03-f421-4008-fd38-068bb17ccaca"
tokenizer.convert_ids_to_tokens(inputs.input_ids)
# + id="kAI0YLyRUXLM" executionInfo={"status": "ok", "timestamp": 1648647899254, "user_tz": -420, "elapsed": 6, "user": {"displayName": "<NAME>", "userId": "00567169550630174790"}}
max_input_length = 512
max_target_length = 30
def preprocess_function(examples):
    """Tokenize review bodies as model inputs and review titles as target labels."""
    inputs = tokenizer(
        examples["review_body"],
        max_length=max_input_length,
        truncation=True,
    )
    # Targets must be tokenized in the tokenizer's target mode so that
    # seq2seq models receive correctly formatted label ids.
    with tokenizer.as_target_tokenizer():
        targets = tokenizer(
            examples["review_title"],
            max_length=max_target_length,
            truncation=True,
        )
    inputs["labels"] = targets["input_ids"]
    return inputs
# + colab={"base_uri": "https://localhost:8080/", "height": 104, "referenced_widgets": ["3898b63914024c45bd56cac18479956e", "<KEY>", "83289e9012ef412ab0d17f21820280b4", "47a928e9c0cd46ec9988fe16c52ef2a7", "<KEY>", "bd70e1766aca4e37a5437566556f9b8e", "3db593c1cc054a95ba53712c3aa56ea7", "<KEY>", "34feec3290444328adc0fe8f4f247335", "04834fd778db443e8dd8328319681ae5", "6fbd724e6b664961b59e2e61266efeb0"]} id="ohQyiNSyWd86" executionInfo={"status": "ok", "timestamp": 1648647903267, "user_tz": -420, "elapsed": 874, "user": {"displayName": "<NAME>", "userId": "00567169550630174790"}} outputId="229e467c-7e16-41bd-9f9b-89446994b28d"
tokenized_datasets = books_dataset.map(
preprocess_function,
batched=True,
remove_columns=books_dataset["train"].column_names
)
# + colab={"base_uri": "https://localhost:8080/"} id="kuzpAc8uXWVE" executionInfo={"status": "ok", "timestamp": 1648619381333, "user_tz": -420, "elapsed": 484, "user": {"displayName": "<NAME>", "userId": "00567169550630174790"}} outputId="32403cb5-daa6-4501-d0db-8937b0086ce5"
tokenized_datasets
# + id="I0xcZ2R_Wg9O"
generated_summary = "I absolutely loved reading the Hunger Games"
reference_summary = "I loved reading the Hunger Games"
# + id="24XT4P4YWlLR"
# !pip install rouge_score
# + id="zFZy9YxqWngY" executionInfo={"status": "ok", "timestamp": 1648647918124, "user_tz": -420, "elapsed": 1746, "user": {"displayName": "<NAME>", "userId": "00567169550630174790"}}
from datasets import load_metric
rouge_score = load_metric("rouge")
# + colab={"base_uri": "https://localhost:8080/"} id="CN0uSFq3WpxH" executionInfo={"status": "ok", "timestamp": 1648619070322, "user_tz": -420, "elapsed": 471, "user": {"displayName": "<NAME>", "userId": "00567169550630174790"}} outputId="cc8195c7-e0d7-4f97-ea1c-77681a0e6d3f"
scores = rouge_score.compute(
predictions=[generated_summary], references=[reference_summary]
)
scores
# + colab={"base_uri": "https://localhost:8080/"} id="aRYWA41sWyUS" executionInfo={"status": "ok", "timestamp": 1648647926821, "user_tz": -420, "elapsed": 389, "user": {"displayName": "<NAME>", "userId": "00567169550630174790"}} outputId="b8195ea8-27f9-4e4f-c9d0-48ea8ceb8cde"
import nltk
nltk.download("punkt")
# + colab={"base_uri": "https://localhost:8080/"} id="ZgjqdtjoW2E2" executionInfo={"status": "ok", "timestamp": 1648647930183, "user_tz": -420, "elapsed": 652, "user": {"displayName": "<NAME>", "userId": "00567169550630174790"}} outputId="320f30a7-7bec-4842-fafb-a68335ccdff5"
from nltk.tokenize import sent_tokenize
def three_sentence_summary(text):
    """Lead-3 baseline summary: the first three sentences of *text*."""
    leading = sent_tokenize(text)[:3]
    return "\n".join(leading)
print(three_sentence_summary(books_dataset["train"][1]["review_body"]))
# + id="Rqku_cUbW5un"
def evaluate_baseline(dataset, metric):
    """Score the lead-3 sentence baseline against the reference titles."""
    predictions = list(map(three_sentence_summary, dataset["review_body"]))
    return metric.compute(predictions=predictions, references=dataset["review_title"])
# + colab={"base_uri": "https://localhost:8080/"} id="Q8OcDX7aW7Hw" executionInfo={"status": "ok", "timestamp": 1648619141266, "user_tz": -420, "elapsed": 829, "user": {"displayName": "<NAME>", "userId": "00567169550630174790"}} outputId="8d7eae33-2f82-4344-bcee-d0dcee22e2b6"
import pandas as pd
score = evaluate_baseline(books_dataset["validation"], rouge_score)
rouge_names = ["rouge1", "rouge2", "rougeL", "rougeLsum"]
rouge_dict = dict((rn, round(score[rn].mid.fmeasure * 100, 2)) for rn in rouge_names)
rouge_dict
# + colab={"base_uri": "https://localhost:8080/", "height": 49, "referenced_widgets": ["37e39f1f90e34522a14469d708a33280", "cb6e037b3ade4e59a7d270ced91c24a2", "18c51caeed6842d9a00c346baa2d88cd", "7b3ae5ab1559403ea8a0e007435e940e", "8560339351c1452384e7abfe08b9b29c", "e7c878fde81d42fb8edf06b7ca571738", "841aa816cec046fbab58b4ecf92f9803", "4d6be3dde3ff4b8587addce21fd69d72", "8f3106929f6041248a0ebe281098d117", "2c67a558117b42298b5226768c8ab19d", "dc14d513d92b4a5e8c5f07aca53238ac"]} id="R8wLvNHgXJlA" executionInfo={"status": "ok", "timestamp": 1648648002567, "user_tz": -420, "elapsed": 64421, "user": {"displayName": "<NAME>", "userId": "00567169550630174790"}} outputId="483ba922-904a-43a6-8933-9937f1a24e61"
from transformers import AutoModelForSeq2SeqLM
model = AutoModelForSeq2SeqLM.from_pretrained(model_checkpoint)
# + id="oPKvFZb3XNfO" executionInfo={"status": "ok", "timestamp": 1648648009099, "user_tz": -420, "elapsed": 6537, "user": {"displayName": "<NAME>", "userId": "00567169550630174790"}}
from transformers import DataCollatorForSeq2Seq
data_collator = DataCollatorForSeq2Seq(tokenizer, model=model)
# + id="O5-5sAo-X6yF" executionInfo={"status": "ok", "timestamp": 1648648081071, "user_tz": -420, "elapsed": 351, "user": {"displayName": "<NAME>", "userId": "00567169550630174790"}}
from transformers import Seq2SeqTrainingArguments
train_batch_size = 8
eval_batch_size = 32
num_train_epochs = 5
# Show the training loss with every epoch
logging_steps = len(tokenized_datasets["train"]) // train_batch_size
args = Seq2SeqTrainingArguments(
output_dir="summarization_en_es",
evaluation_strategy="epoch",
learning_rate=5.6e-5,
per_device_train_batch_size=train_batch_size,
per_device_eval_batch_size=eval_batch_size,
weight_decay=0.01,
save_total_limit=2,
num_train_epochs=num_train_epochs,
predict_with_generate=True,
logging_steps=logging_steps,
push_to_hub=True,
)
# + id="yWJf25v-FVPQ" executionInfo={"status": "ok", "timestamp": 1648648089505, "user_tz": -420, "elapsed": 286, "user": {"displayName": "<NAME>", "userId": "00567169550630174790"}}
import numpy as np
def compute_metrics(eval_pred):
    """ROUGE f-measures (as percentages, rounded to 4 decimals) for a batch
    of generated summaries versus their reference summaries."""
    predictions, labels = eval_pred
    # Turn generated token ids back into text.
    decoded_preds = tokenizer.batch_decode(predictions, skip_special_tokens=True)
    # -100 marks ignored positions in the labels and cannot be decoded,
    # so swap it for the tokenizer's pad token id first.
    labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
    decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
    # ROUGE expects one sentence per line.
    decoded_preds = ["\n".join(sent_tokenize(text.strip())) for text in decoded_preds]
    decoded_labels = ["\n".join(sent_tokenize(text.strip())) for text in decoded_labels]
    scores = rouge_score.compute(
        predictions=decoded_preds,
        references=decoded_labels,
        use_stemmer=True,
    )
    # Keep the mid (median) f-measure of each ROUGE variant, as a percentage.
    return {name: round(value.mid.fmeasure * 100, 4) for name, value in scores.items()}
# + id="pi6nnZMpFbdn" executionInfo={"status": "ok", "timestamp": 1648648123539, "user_tz": -420, "elapsed": 4998, "user": {"displayName": "<NAME>", "userId": "00567169550630174790"}}
from transformers import Seq2SeqTrainer
trainer = Seq2SeqTrainer(
model,
args,
train_dataset=tokenized_datasets["train"],
eval_dataset=tokenized_datasets["validation"],
data_collator=data_collator,
tokenizer=tokenizer,
compute_metrics=compute_metrics,
)
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="8WERHxxDFeI2" executionInfo={"status": "ok", "timestamp": 1648651519175, "user_tz": -420, "elapsed": 3357331, "user": {"displayName": "<NAME>", "userId": "00567169550630174790"}} outputId="f7b6f084-8c92-4fa7-9b86-a83d6e8df4d7"
trainer.train()
# + colab={"base_uri": "https://localhost:8080/", "height": 245} id="ctqPqqDaFlJO" executionInfo={"status": "ok", "timestamp": 1648651537210, "user_tz": -420, "elapsed": 10304, "user": {"displayName": "<NAME>", "userId": "00567169550630174790"}} outputId="6a0003d7-aa21-4f42-ce06-30d36df7790a"
trainer.evaluate()
# + id="z2UV2OO7Zs2u"
from transformers import pipeline
model_checkpoint = "summarization_en_es"
summarizer = pipeline("summarization", model=model_checkpoint)
# + id="icEUYWFUZumW" executionInfo={"status": "ok", "timestamp": 1648651784746, "user_tz": -420, "elapsed": 264, "user": {"displayName": "<NAME>", "userId": "00567169550630174790"}}
def print_summary(idx):
    """Print the review, its reference title, and the model's summary for
    test example *idx*."""
    example = books_dataset["test"][idx]
    review = example["review_body"]
    title = example["review_title"]
    # Reuse the already-fetched review text instead of re-indexing the
    # dataset a third time for the summarizer input.
    summary = summarizer(review)[0]["summary_text"]
    print(f"'>>> Review: {review}'")
    print(f"\n'>>> Title: {title}'")
    print(f"\n'>>> Summary: {summary}'")
# + id="DPN8PE12Zwif" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1648651786865, "user_tz": -420, "elapsed": 819, "user": {"displayName": "<NAME>", "userId": "00567169550630174790"}} outputId="eb018b15-8040-4c7b-cd51-d1f0e0b9b2a8"
print_summary(100)
# + id="Aa3pEeLjZzkG" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1648651790703, "user_tz": -420, "elapsed": 1010, "user": {"displayName": "<NAME>", "userId": "00567169550630174790"}} outputId="c0823b72-0ee3-43c6-ca80-e9777bbdbe30"
print_summary(0)
|
huggingface_course/summarization.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # The surjection operad
#
# In this notebook we describe the $E_\infty$-operad $\mathcal X$ known as the surjection operad. It is equipped with a filtration by $E_n$-operads which we also describe. It was introduced in [[McCS]](#McCS) and also studied by [[BF]](#BF) with a different sign convention. We implement both conventions but describe the constructions only up to signs. We will assume familiarity with chain complexes and operads.
#
# **Contents**
#
# 1. [Symmetric module structure](#symmetric_module)
# 2. [Operadic composition](#operadic_composition)
# 3. [Complexity filtration](#complexity_filtration)
# 4. [References](#references)
#
# ## Symmetric module structure <a name="symmetric_module"></a>
#
# For a positive integer $r$ let $\mathcal X(r)_d$ be the free $R$-module generated by all functions from $\{1, \dots, d+r\}$ to $\{1, \dots, r\}$ modulo the $R$-submodule generated by degenerate functions, i.e., those which are either non-surjective or have a pair of equal consecutive values. We represent such maps as sequences of their values, e.g. $\big( s(1), \dots, s(n+r) \big)$. Up to signs, the boundary of a basis element $s$ in this complex is given
#
# \begin{equation*}
# \partial s = \sum_{i = 1}^{r+d} \pm s \circ \delta_i = \sum_{i = 1}^{r+d} \pm \big( s(1), \dots, \widehat{s(i)}, \dots, s(n+r) \big).
# \end{equation*}
#
# where $\delta_i \colon \{1, \dots, r+d-1\} \to \{1, \dots, r+d\}$ is the inclusion that misses $i$.
#
# There is a left action of $\mathrm S_r$ on $\mathcal X(r)$ which is up to signs defined on basis elements by $\pi s = \pi \circ s$.
#
# We introduce a subclass of `FreeModuleElement` modeling surjection operad elements. Instances of `SurjectionElement` inherit addition, subtraction, and left multiplication by integers. Furthermore, it implements a `boundary` method, and the left action by `SymmetricRingElement` objects. The choice of sign convention is determined by the attribute `convention`.
# + pycharm={"name": "#%%\n"}
from comch import SurjectionElement, SymmetricRingElement
key = (2,1,2,1,3)
x = SurjectionElement({key:1}, convention='Berger-Fresse')
y = SurjectionElement({key:1}, convention='McClure-Smith')
rho = SymmetricRingElement({(3,2,1):1})
print(f'rho = {rho}')
print(f'Berger-Fresse | McClure-Smith')
print(f'x = {x} | y = {y}')
print(f'rho * x = {rho * x} | rho * y = {rho * y}')
print(f'dx = {x.boundary()} | dy = {y.boundary()}')
# -
# ## Operadic composition <a name="operadic_composition"></a>
#
# \begin{equation*}
# {\circ}_{i}: X(r) \times X(s) \to X(r + s - 1).
# \end{equation*}
#
# can be described
#
# TBW
# + pycharm={"name": "#%%\n"}
x = SurjectionElement({(1, 2, 1, 3): 1})
y = SurjectionElement({(1, 2, 1): 1})
print(f'x = {x}\ny = {y}\nx o_2 y = {x.compose(y, 1)}')
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Complexity filtration <a name="complexity_filtration"></a>
#
# The complexity of a finite binary sequence (i.e. a sequence of two distinct values) is defined as the number of consecutive distinct elements in it. For example, (1,2,2,1) and (1,1,1,2) have complexities 2 and 1 respectively. The **complexity** of a basis surjection element is defined as the maximum value of the complexities of its binary subsequences. Notice that for arity 2, the complexity of an element agrees with its degree. It is proven in [McCS] that the subcomplex generated by basis surjection elements of complexity at most $n$ define a suboperad of $\mathcal X$ modeling an $E_{n+1}$-operad.
#
# The class `SurjectionElement` implements an attribute `complexity` modeling this concept.
# + pycharm={"name": "#%%\n"}
x = SurjectionElement({(1, 2, 1): 1})
y = SurjectionElement({(1, 2, 1, 3, 1): 1})
print(f'The complexity of {x} is {x.complexity}')
print(f'The complexity of {y} is {y.complexity}')
# + [markdown] pycharm={"name": "#%% md\n"}
# There is a preferred element in the symmetric orbit of a basis surjection element, it is the one satisfying that the first occurrences of each integer appear in increasing order.
#
# The class `SurjectionElement` implements the method `obit` that returns the preferred representative of the orbit containing *self*. This orbit can be computed with respect to the trivial or sign representation of $\Sigma_r$ on $R$, and also depends on the sign convention of the surjection element.
# + pycharm={"name": "#%%\n"}
key = (3,2,1,3,1)
x = SurjectionElement({key: 1}, convention='Berger-Fresse')
y = SurjectionElement({key: 1}, convention='McClure-Smith')
print(f' Berger-Fresse | McClure-Smith')
print(f'Trivial: {x.orbit("trivial")} | {y.orbit("trivial")}')
print(f'sign : {x.orbit("sign")} | {y.orbit("sign")}')
# + [markdown] pycharm={"name": "#%% md\n"}
# ## References <a name="references"></a>
#
# [McCS]<a name="McCS"></a> McClure, James E., and Jeffrey H. Smith. "Multivariable cochain operations and little 𝑛-cubes." Journal of the American Mathematical Society 16.3 (2003): 681-704.
#
# [BF]<a name="BF"></a> Berger, Clemens, and Benoit Fresse. "Combinatorial operad actions on cochains." Mathematical Proceedings of the Cambridge Philosophical Society. Vol. 137. No. 1. Cambridge University Press, 2004.
|
notebooks/surjection.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ##### 1. Consider the tuple tuple1=("A","B","C" ), what is the result of the following operation tuple1[-1]?
tuple1=("A","B","C" )
tuple1[-1]
# ###### Ans: 'C'
# ##### 2. Consider the tuple A=((1,),[2,3],[4]), that contains a tuple and list. What is the result of the following operation A[2]:
A=((1,),[2,3],[4])
A[2]
# ###### Ans: [4]
# ##### 3. Consider the tuple A=((11,12),[21,22]), that contains a tuple and list. What is the result of the following operation A[0][1]:
A=((11,12),[21,22])
A[0][1]
# ###### Ans: 12
# ##### 4. What is the result of the following operation: '1,2,3,4'.split(',')
'1,2,3,4'.split(',')
# ###### Ans: ['1', '2', '3', '4']
# ##### 5. The method append does the following:
[1].append([2, 3, 4, 5])
# ###### Ans: adds one element to a list
# ##### 6. What is an important difference between lists and tuples?
# ###### Ans: Lists are mutable tuples are not
# ##### 7. consider the following list : A=["hard rock",10,1.2]. what will list A contain after the following command is run: del(A[0])
A=["hard rock",10,1.2]
del(A[0])
print(A)
# ###### Ans: [10, 1.2]
# ##### 8. what is the syntax to clone the list A and assign the result to list B
# ###### Ans: B=A[:]
# ##### 9. what is the result of the following: len((“disco”,10))
# The expression was originally typed with typographic ("curly") quotes,
# len((“disco”,10)), which is a SyntaxError in Python. Only the version
# written with straight ASCII quotes is valid:
len(("disco", 10))
# ###### Ans: 2
|
Coursera/Python for Data Science-IBM/Quiz/Week-2/Lists-and-Tuples.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Serialization - saving, loading and checkpointing
#
# At this point we've already covered quite a lot of ground.
# We know how to manipulate data and labels.
# We know how to construct flexible models capable of expressing plausible hypotheses.
# We know how to fit those models to our dataset.
# We know of loss functions to use for classification and for regression,
# and we know how to minimize those losses with respect to our models' parameters.
# We even know how to write our own neural network layers in ``gluon``.
#
# But even with all this knowledge, we're not ready to build a real machine learning system.
# That's because we haven't yet covered how to save and load models.
# In reality, we often train a model on one device
# and then want to run it to make predictions on many devices simultaneously.
# In order for our models to persist beyond the execution of a single Python script,
# we need mechanisms to save and load NDArrays, ``gluon`` Parameters, and models themselves.
from __future__ import print_function
import mxnet as mx
from mxnet import nd, autograd
from mxnet import gluon
ctx = mx.cpu()
# ctx = mx.gpu()
# ## Saving and loading NDArrays
#
# To start, let's show how you can save and load a list of NDArrays for future use. Note that while it's possible to use a general Python serialization package like ``Pickle``, it's not optimized for use with NDArrays and will be unnecessarily slow. We prefer to use ``ndarray.save`` and ``ndarray.load``.
# +
X = nd.ones((100, 100))
Y = nd.zeros((100, 100))
import os
dir_name = 'checkpoints'
if not os.path.exists(dir_name):
os.makedirs(dir_name)
filename = os.path.join(dir_name, "test1.params")
nd.save(filename, [X, Y])
# -
# It's just as easy to load a saved NDArray.
A, B = nd.load(filename)
print(A)
print(B)
# We can also save a dictionary where the keys are strings and the values are NDArrays.
mydict = {"X": X, "Y": Y}
filename = os.path.join(dir_name, "test2.params")
nd.save(filename, mydict)
C = nd.load(filename)
print(C)
# ## Saving and loading the parameters of ``gluon`` models
#
# Recall from [our first look at the plumbing behind ``gluon`` blocks](P03.5-C01-plumbing.ipynb])
# that ``gluon`` wraps the NDArrays corresponding to model parameters in ``Parameter`` objects.
# We'll often want to store and load an entire model's parameters without
# having to individually extract or load the NDarrays from the Parameters via ParameterDicts in each block.
#
# Fortunately, ``gluon`` blocks make our lives very easy by providing a ``.save_params()`` and ``.load_params()`` methods. To see them in work, let's just spin up a simple MLP.
# Build a simple MLP: two 256-unit ReLU hidden layers and a single
# scalar output (a regression-style head).
num_hidden = 256
num_outputs = 1
net = gluon.nn.Sequential()
with net.name_scope():
    net.add(gluon.nn.Dense(num_hidden, activation="relu"))
    net.add(gluon.nn.Dense(num_hidden, activation="relu"))
    net.add(gluon.nn.Dense(num_outputs))
# Now, let's initialize the parameters by attaching an initializer and actually passing in a datapoint to induce shape inference.
net.collect_params().initialize(mx.init.Normal(sigma=1.), ctx=ctx)
net(nd.ones((1, 100), ctx=ctx))
# So this randomly initialized model maps a 100-dimensional vector of all ones to the number 362.53 (that's the number on my machine--your mileage may vary).
# Let's save the parameters, instantiate a new network, load them in and make sure that we get the same result.
filename = os.path.join(dir_name, "testnet.params")
net.save_params(filename)
net2 = gluon.nn.Sequential()
with net2.name_scope():
net2.add(gluon.nn.Dense(num_hidden, activation="relu"))
net2.add(gluon.nn.Dense(num_hidden, activation="relu"))
net2.add(gluon.nn.Dense(num_outputs))
net2.load_params(filename, ctx=ctx)
net2(nd.ones((1, 100), ctx=ctx))
# Great! Now we're ready to save our work.
# The practice of saving models is sometimes called *checkpointing*
# and it's especially important for a number of reasons.
# 1. We can preserve and syndicate models that are trained once.
# 2. Some models perform best (as determined on validation data) at some epoch in the middle of training. If we checkpoint the model after each epoch, we can later select the best epoch.
# 3. We might want to ask questions about our trained model that we didn't think of when we first wrote the scripts for our experiments. Having the parameters lying around allows us to examine our past work without having to train from scratch.
# 4. Sometimes people might want to run our models who don't know how to execute training themselves or can't access a suitable dataset for training. Checkpointing gives us a way to share our work with others.
# <!-- ## Serializing models themselves
#
# [PLACEHOLDER] -->
# ## Next
# [Convolutional neural networks from scratch](../chapter04_convolutional-neural-networks/cnn-scratch.ipynb)
# For whinges or inquiries, [open an issue on GitHub.](https://github.com/zackchase/mxnet-the-straight-dope)
|
chapter03_deep-neural-networks/serialization.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### create figure for hierarchy example
# 1. get latent distances vs sequential distances
# 2. get MI of markov model
# 3. get MI of recursively generated sequence - store the average latent distance for each of the sequential distances
# 4. Plot latent distance by MI
# 5. plot sequential distance by MI
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
from childes_mi.utils.paths import DATA_DIR, FIGURE_DIR, ensure_dir
from childes_mi.utils.general import flatten,save_fig
# #### Make an example tree
r = 3 # branching factor
h = 3 # height
btree = nx.balanced_tree(r=r,h=h)
n_final_nodes = r**h
list(btree.nodes)[:10]
list(btree.edges)[:10]
nx.shortest_path(btree, 0, 27)
pos=nx.drawing.nx_agraph.graphviz_layout(btree,prog='dot')
#pos[0] = (pos[0][0], pos[0][1]-32)
fig, ax = plt.subplots(figsize=(12,5))
nx.draw(btree,pos,with_labels=False,arrows=False, width = 3, ax=ax)
r = 2 # branching factor
h = 4 # height
btree = nx.balanced_tree(r=r,h=h)
n_final_nodes = r**h
pos=nx.drawing.nx_agraph.graphviz_layout(btree,prog='dot')
fig, ax = plt.subplots(figsize=(12,5))
nx.draw(btree,pos,with_labels=False,arrows=False, width = 3, ax=ax)
# #### get distances between nodes
import pandas as pd
from tqdm.autonotebook import tqdm
from joblib import parallel_backend
from joblib import Parallel, delayed
loc_df = pd.DataFrame(pos).T
loc_df.columns = ['x', 'y']
loc_df[:3]
r = 2 # branching factor
h = 10 # height
btree = nx.balanced_tree(r=r,h=h)
n_final_nodes = r**h
len(btree.nodes)
lowest_level_nodes = list(list(btree.nodes)[-n_final_nodes:])
lowest_level_nodes
len(lowest_level_nodes)
l = [
[(node1, node2, n1 - n2) for n2, node2 in enumerate(lowest_level_nodes) if n2 < n1]
for n1, node1 in enumerate(lowest_level_nodes)
]
pair_list = [item for sublist in l for item in sublist]
latent_vs_sequential_dist = pd.DataFrame(
pair_list, columns=["node1", "node2", "sequential_distance"]
)
len(pair_list)
with Parallel(n_jobs=1, prefer="threads") as parallel:
latent_distances = [
parallel(delayed(nx.shortest_path_length)(btree, row.node1, row.node2)
for idx, row in tqdm(
latent_vs_sequential_dist.iterrows(), total=len(latent_vs_sequential_dist)
)
)
]
latent_vs_sequential_dist['latent_distances'] = latent_distances[0]
row = latent_vs_sequential_dist.iloc[0]
nx.shortest_path_length(btree, row.node1, row.node2)
latent_vs_sequential_dist[:3]
# summarize distances
dist_summary = latent_vs_sequential_dist.groupby(['sequential_distance']).agg(['mean', 'std'])
dist_summary[:3]
# +
# fit logarithmic growth model
# +
fig, ax = plt.subplots(figsize=(5,5))
ax.scatter(dist_summary.index.values, dist_summary.latent_distances['mean'], color = 'k')
ax.set_xlim([0, 50])
ax.set_ylim([0, 16])
ax.set_xlabel('Sequential distance', fontsize=18)
ax.set_ylabel('Latent distance', fontsize=18)
for axis in ['top','bottom','left','right']:
ax.spines[axis].set_linewidth(3)
ax.spines[axis].set_color('k')
ax.grid(False)
ax.tick_params(which='both', direction='in', labelsize=14, pad=10)
ax.tick_params(which='major', length=10, width =3)
ax.tick_params(which='minor', length=5, width =2)
# +
import lmfit
def residuals(y_true, y_model, x, logscaled=False):
    """Absolute residuals between data and model; optionally computed in
    log space and down-weighted by 1/log(1 + x)."""
    if not logscaled:
        return np.abs(y_true - y_model)
    weight = 1 / (np.log(1 + x))
    return np.abs(np.log(y_true) - np.log(y_model)) * weight
def model_res(p, x, y, fit, model):
    """Residuals between data *y* and *model(p, x)*; residuals are taken in
    log space unless *fit* == 'lin'."""
    predicted = model(p, x)
    if fit == "lin":
        return residuals(y, predicted, x)
    return residuals(y, predicted, x, logscaled=True)
# fitting model
def fit_model_iter(model, n_iter=10, **kwargs):
""" re-fit model n_iter times and choose the best fit
chooses method based upon best-fit
"""
models = []
AICs = []
for iter in np.arange(n_iter):
results_model = model.minimize(**kwargs)
models.append(results_model)
AICs.append(results_model.aic)
return models[np.argmin(AICs)]
def get_y(model, results, x):
    """Evaluate *model* at *x* using the fitted parameter values stored in
    *results* (an lmfit-style result with a ``params`` mapping)."""
    fitted = {name: param.value for name, param in results.params.items()}
    return model(fitted, x)
def exp_decay(p, x):
    """Exponential decay: e_init * exp(-x * e_decay_const) + intercept."""
    decay = np.exp(-x * p["e_decay_const"])
    return p["e_init"] * decay + p["intercept"]
def log_growth(p, x):
    """Logarithmic growth: a * log(b * x) + intercept."""
    scaled = np.log(p['b'] * x)
    return p['a'] * scaled + p['intercept']
# decay types
def powerlaw_decay(p, x):
    """Power-law decay: p_init * x**p_decay_const + intercept."""
    scaled = x ** p["p_decay_const"]
    return p["p_init"] * scaled + p["intercept"]
p_log_growth = lmfit.Parameters()
p_log_growth.add_many(
("a", 2.5, True),
("b", 1, True),
("intercept", 1.0, True),
)
# -
# #### Fit log growth
fit='linear'
n_iter=1
method=["nelder", "leastsq", "least-squares"]
d = dist_summary.index.values[:50]
sig = dist_summary.latent_distances['mean'][:50]
plt.plot(d, sig)
results_lg_min = lmfit.Minimizer(
model_res, p_log_growth, fcn_args=(d, sig, fit, log_growth), nan_policy="omit"
)
results_lg = fit_model_iter(results_lg_min, n_iter=n_iter, **{"method": 'nelder'})
#y_lg = log_growth(p={'a':5,'b':0.5, 'intercept':1.0}, x=d)
d = np.arange(51)
y_lg = get_y(log_growth, results_lg, d)
print(results_lg.params)
FIGURE_DIR
# +
fig, ax = plt.subplots(figsize=(5,5))
ax.scatter(dist_summary.index.values, dist_summary.latent_distances['mean'], color = 'k')
ax.plot(d, y_lg, alpha = 0.5, color= 'k', lw=5)
ax.set_xlim([0, 50])
ax.set_ylim([0, 16])
ax.set_xlabel('Sequential distance', fontsize=18)
ax.set_ylabel('Latent distance', fontsize=18)
for axis in ['top','bottom','left','right']:
ax.spines[axis].set_linewidth(3)
ax.spines[axis].set_color('k')
ax.grid(False)
ax.tick_params(which='both', direction='in', labelsize=14, pad=10)
ax.tick_params(which='major', length=10, width =3)
ax.tick_params(which='minor', length=5, width =2)
ensure_dir(FIGURE_DIR/'model_fig')
save_fig(FIGURE_DIR/ 'model_fig' / 'latent-sequential-distance')
# +
### look at latent growth
# -
fig, ax = plt.subplots(figsize=(5,5))
ax.scatter(dist_summary.index.values, dist_summary.latent_distances['mean'], s=10, color = 'k')
ax.set_xscale('log')
#ax.set_xlim([1,50])
#ax.set_ylim([0, 16])
dist_summary.to_pickle('latent_vs_sequential_distance.pickle')
dist_summary
|
notebooks/hierarchy-model/create_graph.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/renanalencar/projeto-md-emocoes/blob/main/Assign9_BaseEstresse.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="5pOn8CDHEtl1"
# # 1. Dados
# + id="Uvx59ETwExt9" colab={"base_uri": "https://localhost:8080/"} outputId="c7ebc6b5-e0ed-4c73-c9c5-85e66eda50c8"
from google.colab import drive
drive.mount('/content/drive')
# + id="imjqTSPSE0N8"
# importar os pacotes necessários
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
# + id="tqzHS6G1E5Ii" colab={"base_uri": "https://localhost:8080/", "height": 297} outputId="30f54178-6608-46f6-accd-6e28f837be64"
arquivo_tedio = "/content/drive/Shareddrives/MINERAÇÃO DE DADOS ENG74291/Database/dadosEstresseB.xlsx"
df_tedio = pd.read_excel(arquivo_tedio)
# CSV apenas com as emoções. Pontos euclidianos foram removidos
# deixar no dataframe somente com as linhas correspondentes ao game_id 1
df_tedio = df_tedio.drop(columns=['game_name','p1', 'p2', 'p3', 'p4', 'p5', 'p6', 'p7', 'p8', 'p9', 'p10', 'p11', 'p12', 'p13', 'p14', 'p15', 'p16', 'p17', 'p18', 'p19', 'p20', 'p21', 'p22', 'p23','p24'])
filtro = df_tedio["game_id"] == 1
df_tedio = df_tedio[filtro]
# visualizar as 8 primeiras entradas do df
df_tedio.head(8)
# + id="_PgrEKH1E9DD" colab={"base_uri": "https://localhost:8080/"} outputId="12c441bb-decc-4121-c795-cf51c2ac5384"
# verificar o tamanho do df
print("Variáveis:\t {}".format(df_tedio.shape[1]))
print("Entradas:\t {}".format(df_tedio.shape[0]))
# + id="4a67XjVOE_Qr" colab={"base_uri": "https://localhost:8080/"} outputId="1311909b-a959-4591-ea35-01a23a6742a2"
# vizualisar os nomes das colunas
df_tedio.columns
# + id="deRAC6RfFBJU" colab={"base_uri": "https://localhost:8080/"} outputId="0c221fca-09b1-4a2d-cfaf-b813c847ca51"
# descobrir os tipos das variáveis
df_tedio.dtypes
# + [markdown] id="9UjhHXvXFGf0"
# ## 1.2 Criando classes para a base de dados
# + id="7X1hKMjtFHU1"
# função para classificar os dados como tédio (1) ou estresse (0)
def def_tedio(c):
    """Label a row 1 (tedium/stress) or 0 based on its emotion scores.

    A row is positive when both 'angry' and 'disgusted' reach 0.1,
    or when both 'sad' and 'surprised' reach 0.1.
    """
    angry_disgusted = c['angry'] >= 0.1 and c['disgusted'] >= 0.1
    sad_surprised = c['sad'] >= 0.1 and c['surprised'] >= 0.1
    return 1 if angry_disgusted or sad_surprised else 0
# + id="WySuYpemFKJ7" colab={"base_uri": "https://localhost:8080/", "height": 297} outputId="2fa721e8-1396-49e3-837b-6890d6cf9d27"
# criar a coluna 'target' para o df_tedio
df_tratado = pd.DataFrame(df_tedio)
df_tratado['target'] = df_tedio.apply(def_tedio, axis=1)
df_tratado.head(8)
# + id="XEOPbVPoFOFE" colab={"base_uri": "https://localhost:8080/"} outputId="c240c434-4f3c-460e-d938-4ba179723345"
# verificar o tipo de dado de cada coluna
df_tratado.dtypes
# + id="9YiHbL6uFRgR" colab={"base_uri": "https://localhost:8080/"} outputId="285c8709-de52-4eb5-f080-796add0f522c"
# verificar que colunas tem valores NaN (Not a Number)
df_tratado.isnull().any()
# + id="nvZK6hcDbLVb"
# Completando os registros que tem valor NaN com 0.0
df_tratado = df_tratado.fillna(0.0)
# + [markdown] id="1wmEViSFFYNp"
# ## 1.3 Conjunto de dados para treinamento e teste
# + id="qQwuUp-gFdmC"
# criar os conjuntos de dados e classes para treinamento e teste
from sklearn.model_selection import train_test_split
X_treinamento, X_teste, y_treinamento, y_teste = train_test_split(df_tratado.drop(columns=['target']), df_tratado.target, test_size=0.3)
# + [markdown] id="9GPD14CRFhNb"
# ## 1.4 Correlação
# Baseado em [Como selecionar as melhores features para seu modelo de Machine Learning](https://paulovasconcellos.com.br/como-selecionar-as-melhores-features-para-seu-modelo-de-machine-learning-2e9df83d062a)
# + colab={"base_uri": "https://localhost:8080/", "height": 445} id="wblj-4QlFlb9" outputId="ab825239-6ec4-4ac8-c41b-972539b93e32"
import seaborn as sns
plt.figure(figsize=(10, 7))
sns.heatmap(df_tratado.corr(),
annot = True,
fmt = '.2f',
cmap='Blues')
plt.title('Correlação entre variáveis do dataset de tédio')
plt.show()
# + [markdown] id="UCeDkz9mGLv-"
# O valor mostrado para cada correlação vai de -1 — que indica uma correlação negativa perfeita — a +1 — uma correlação positiva perfeita. Vale lembrar que a função .corr() traz, por padrão, a correlação de Pearson, mostrando um relacionamento linear entre as variáveis. Em casos onde há um relacionamento não-linear, a matriz pode não ser uma boa medida.
# + [markdown] id="cQLvtOuYGOgw"
# ## 1.5 Feature Importance
# O feature_importance_ retornar um array onde cada elemento dele é uma feature do seu modelo. Ele irá dizer, em proporções, quão importante aquela feature é para o modelo, onde quanto maior o valor, mais importante a feature é para o modelo.
# + colab={"base_uri": "https://localhost:8080/"} id="FziStAjzGRRe" outputId="2adf8130-ee3f-41e5-bf50-9b9a95636a79"
from sklearn.ensemble import RandomForestClassifier
clf_RFC = RandomForestClassifier()
clf_RFC.fit(X_treinamento, y_treinamento)
# Mostrando importância de cada feature
clf_RFC.feature_importances_
# + [markdown] id="QjOFxalYGUxc"
# É retornado um array com um elemento para cada feature do modelo. Se você somar todos eles, verá que o resultado será 1. Ao analisar esse array, podemos ver que a feature mais importante para o algoritmo Floresta Randômica foi a variável 'surprised'.
# + colab={"base_uri": "https://localhost:8080/", "height": 298} id="3GDFAXthGYAN" outputId="daa0d100-1623-418c-fcbf-dfdfbbd11d56"
importances = pd.Series(data=clf_RFC.feature_importances_, index=df_tratado.columns[0:9])
sns.barplot(x=importances, y=importances.index, orient='h').set_title('Importância de cada feature')
# + [markdown] id="doxegZRCGbg-"
# As vezes, os valores mostrados pelo feature_importances_ pode ser enviesado dependendo dos parâmetros definidos na criação do objeto. Evitar usar os parâmetros default do Floresta Randômica.
# + [markdown] id="KOde8tskHpqs"
# # 2. Regressões
# + [markdown] id="KqITYIOVGpYv"
# ## 2.1 Regressão Linear Simples e Multipla
#
#
# * [Linear Regression Example](https://scikit-learn.org/stable/auto_examples/linear_model/plot_ols.html)
# * [Implementando Regressão Linear Simples em Python](https://medium.com/data-hackers/implementando-regress%C3%A3o-linear-simples-em-python-91df53b920a8)
# * [Regressão Linear](https://www.kaggle.com/marilivb/4-regress-o-linear)
# * [https://www.datageeks.com.br/regressao-linear/](https://www.datageeks.com.br/regressao-linear/)
# + id="kn4Qc8-XGwZ6"
from sklearn import linear_model
from sklearn.metrics import mean_squared_error, r2_score
# + id="jO8xmDLZGz3u"
# Criar um objeto de regressão linear
regr = linear_model.LinearRegression()
# Treinar o modelo usando os conjuntos de treinamento
regr.fit(X_treinamento, y_treinamento)
# Fazer predições usando o conjunto de teste
y_pred = regr.predict(X_teste)
# + colab={"base_uri": "https://localhost:8080/"} id="exvw2aRYG2UC" outputId="6b272f7c-2972-47e6-8e39-53912aefea8d"
# Os coeficientes encontrados
print('Coeficientes: \n', regr.coef_)
# O Erro Médio Quadrático (EMQ)
print('Erro Médio Quadrático (EMQ): %.2f'
% mean_squared_error(y_teste, y_pred))
# O coeficiente de determinação: 1 é a predição perfeita
print('Coeficiente de determinação: %.2f'
% r2_score(y_teste, y_pred))
# + [markdown] id="QK1eG-s3G5xm"
# ### 2.1.1 Diagrama de Dispersão
# + colab={"base_uri": "https://localhost:8080/"} id="fM-WBfwnG7e0" outputId="f206f488-1d79-4ebe-b9a5-450589bd1032"
# plotar saídas
plt.scatter(X_teste.iloc[:,0].values, y_teste, color='black')
plt.plot(X_teste, y_pred, color='blue', linewidth=3)
plt.xticks(())
plt.yticks(())
plt.show()
# + [markdown] id="RV10sdS7HAxW"
# ## 2.2 Regressão Logística
#
#
# * [Logistic Regression using Python (scikit-learn)](https://towardsdatascience.com/logistic-regression-using-python-sklearn-numpy-mnist-handwriting-recognition-matplotlib-a6b31e2b166a)
# * [Regressão Logística e Métricas de Classificação em Python](http://neylsoncrepalde.github.io/2019-11-25-regressao_logistica_python/)
#
#
# + colab={"base_uri": "https://localhost:8080/"} id="oOakVAdEG_Sy" outputId="f7174841-de2a-4215-ce3e-665aa3f7aada"
from sklearn.linear_model import LogisticRegression
clf_LR = LogisticRegression(random_state=0).fit(X_treinamento, y_treinamento)
clf_LR.predict(X_teste)
# + colab={"base_uri": "https://localhost:8080/"} id="WKSvVatgHFbN" outputId="2a48bbc0-e6fc-4424-f8a1-28a8514a675b"
clf_LR.predict_proba(X_teste)
# + colab={"base_uri": "https://localhost:8080/"} id="naZMLGCAHG3q" outputId="815e4b08-acfc-4ea3-aab2-a18418d53a92"
clf_LR.score(X_treinamento, y_treinamento)
# + [markdown] id="1mOUK41FHM9G"
# ### 2.2.1 Diagrama de Dispersão
# + colab={"base_uri": "https://localhost:8080/", "height": 252} id="uuLRWhciHJlv" outputId="78ccba6f-c304-4e2a-f70d-e0118817e5e9"
# plotar saídas
plt.scatter(X_teste.iloc[:,0].values, y_teste, color='black')
plt.plot(X_teste, y_pred, color='blue', linewidth=3)
plt.xticks(())
plt.yticks(())
plt.show()
# + [markdown] id="Xj4iZ82YJiyH"
# # 3. Detecção de anomalias
# Referências:
#
#
# * [Anomaly Detection Techniques in Python](https://medium.com/learningdatascience/anomaly-detection-techniques-in-python-50f650c75aaf)
# * [4 Automatic Outlier Detection Algorithms in Python](https://machinelearningmastery.com/model-based-outlier-detection-and-removal-in-python/)
# * [Learn how to develop highly accurate models to detect anomalies using Artificial Neural Networks with the Tensorflow library in Python3.](https://outline.com/D8jZMf)
#
# + [markdown] id="50muSTiipaKj"
# ## 3.1 Métodos Estatísticos
# + [markdown] id="Gnm44wf_pdkD"
# ### 3.1.1 Paramétricos: Diagrama de Caixa
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="8axwjIOCdoJC" outputId="8910e95d-1ad7-4aa8-9ebe-0e2ae3cf1c9a"
boxplot = df_tratado.boxplot(column=['angry', 'disgusted', 'fear', 'sad', 'surprised', 'happy'])
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="I458mNlWuG-K" outputId="40a80575-e99c-4e2a-f775-655747a8cf72"
df_emocoes = df_tratado.copy(deep=True)
df_emocoes = df_emocoes.drop(columns=['game_id', 'uuid', 'timestamp', 'target'])
ax = sns.violinplot(data=df_emocoes)
# + [markdown] id="d2QSTDoapiDA"
# ### 3.1.2 Não Paramétricos: Análise de Histograma
# + colab={"base_uri": "https://localhost:8080/", "height": 268} id="gX_o3sSVi8zk" outputId="2130d8a3-38f5-496a-e9a2-ce70570d450a"
#ax = df_emocoes.plot.hist(bins=12, alpha=0.5)
ax = df_emocoes.plot.hist()
# + [markdown] id="8LYrjzY5plQs"
# ## 3.2 Métodos Algorítmicos
# + [markdown] id="D42mzAjkpqjt"
# ### 3.2.1 Proximidade: Local Outlier Factor (LOF)
# Referências:
#
# * [2.7. Novelty and Outlier Detection](https://scikit-learn.org/stable/modules/outlier_detection.html)
# * [sklearn.neighbors.LocalOutlierFactor](https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.LocalOutlierFactor.html)
# * [Outlier detection with Local Outlier Factor (LOF)](https://scikit-learn.org/stable/auto_examples/neighbors/plot_lof_outlier_detection.html)
# * [Anomaly detection with Local Outlier Factor (LOF)](https://towardsdatascience.com/anomaly-detection-with-local-outlier-factor-lof-d91e41df10f2)
# + id="HvyBtHbK6t3Z"
# + [markdown] id="hsguuBIQpt_F"
# ### 3.2.2 Redes Neurais: Redes Neurais Supervisionadas
# Referências:
#
#
# * [Comparing anomaly detection algorithms for outlier detection on toy datasets](https://scikit-learn.org/0.20/auto_examples/plot_anomaly_comparison.html)
# * [How to use machine learning for anomaly detection and condition monitoring](https://towardsdatascience.com/how-to-use-machine-learning-for-anomaly-detection-and-condition-monitoring-6742f82900d7)
#
#
# + [markdown] id="j_HWU1AKpwux"
# ## 3.3 Machine Learning
# + [markdown] id="4FLYQE11qJ-m"
# ### 3.3.1. Árvores de decisão
# + [markdown] id="Zn3DEFYiqNm7"
# ### 3.3.2 Florestas de isolamento (*Isolation Forest*)
# Referências:
#
# * [2.7. Novelty and Outlier Detection](https://scikit-learn.org/stable/modules/outlier_detection.html)
# * [sklearn.ensemble.IsolationForest](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.IsolationForest.html)
# * [Anomaly Detection with Isolation Forest & Visualization](https://towardsdatascience.com/anomaly-detection-with-isolation-forest-visualization-23cd75c281e2)
# * [Anomaly Detection Using Isolation Forest in Python](https://blog.paperspace.com/anomaly-detection-isolation-forest/)
#
# + colab={"base_uri": "https://localhost:8080/"} id="Q38oExSG6-II" outputId="808eccf9-ace1-4e2c-d7e5-fd4cf1ca74c0"
from sklearn.ensemble import IsolationForest
model=IsolationForest(n_estimators=50, max_samples='auto', contamination=float(0.1),max_features=1.0)
model.fit(df_emocoes[['angry']])
# + colab={"base_uri": "https://localhost:8080/"} id="i-wU8DRt7isD" outputId="0278fdbe-170d-46af-ef62-48ee7ea93791"
df = pd.DataFrame(df_emocoes['angry'])
df['scores']=model.decision_function(df[['angry']])
df['anomaly']=model.predict(df[['angry']])
df.head(20)
# + colab={"base_uri": "https://localhost:8080/"} id="SqH1L6gs8EGv" outputId="88cbbf62-edba-4b95-89fc-6ee0934e7d53"
anomaly=df.loc[df['anomaly']==-1]
anomaly_index=list(anomaly.index)
print(anomaly)
# + colab={"base_uri": "https://localhost:8080/"} id="ijIrQgZw8sLx" outputId="09fd157e-2a00-4905-9484-6ccffdd95884"
outliers_counter = len(df[df['angry'] > 0.5])
outliers_counter
# + colab={"base_uri": "https://localhost:8080/"} id="3xN4NinX81s-" outputId="5fba61c9-5667-4626-fcfd-560976575007"
print("Accuracy percentage:", 100*list(df['anomaly']).count(-1)/(outliers_counter))
# + colab={"base_uri": "https://localhost:8080/"} id="XoobMdZs9UPn" outputId="72070f6a-e75a-4d0e-8ed7-877aa49b0a59"
#specify the 12 metrics column names to be modelled
# NOTE(review): df_emocoes[0:7] slices the first 7 ROWS, not columns — if
# column selection was intended this should be df_emocoes.iloc[:, 0:7].
# Also, to_model_columns is never passed to the model below — confirm intent.
to_model_columns=df_emocoes[0:7]
to_model_columns
# Multivariate isolation forest over all columns of df_emocoes;
# random_state is fixed so the anomaly labels are reproducible.
clf=IsolationForest(n_estimators=100, max_samples='auto', contamination=float(.12), max_features=1.0, bootstrap=False, n_jobs=-1, random_state=42, verbose=0)
clf.fit(df_emocoes)
pred = clf.predict(df_emocoes)
# NOTE(review): this mutates df_emocoes in place, so later cells that fit
# PCA on df_emocoes will also see the added 'anomaly' column — verify.
df_emocoes['anomaly']=pred
# Rows predicted -1 are the anomalies
outliers=df_emocoes.loc[df_emocoes['anomaly']==-1]
outlier_index=list(outliers.index)
print(outlier_index)
#Find the number of anomalies and normal points here points classified -1 are anomalous
print(df_emocoes['anomaly'].value_counts())
# + colab={"base_uri": "https://localhost:8080/"} id="k_26CGxe_g9P" outputId="ebb16ff1-8dd1-405c-c96d-c986c8a775c3"
from sklearn.decomposition import PCA
pca = PCA(2)
pca.fit(df_emocoes)
res=pd.DataFrame(pca.transform(df_emocoes))
Z = np.array(res)
plt.title("IsolationForest")
plt.contourf( Z, cmap=plt.cm.Blues_r)
b1 = plt.scatter(res[0], res[1], c='green', s=20,label="normal points")
b1 =plt.scatter(res.iloc[outlier_index,0],res.iloc[outlier_index,1], c='green',s=20, edgecolor="red",label="predicted outliers")
plt.legend(loc="upper right")
plt.show()
# + colab={"base_uri": "https://localhost:8080/"} id="T9nOn40EArDG" outputId="ba5faaeb-c3f2-4dd4-d3b2-1138126d6c81"
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from mpl_toolkits.mplot3d import Axes3D
pca = PCA(n_components=3) # Reduce to k=3 dimensions
scaler = StandardScaler()
#normalize the metrics
X = scaler.fit_transform(df_emocoes)
X_reduce = pca.fit_transform(X)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_zlabel("x_composite_3")# Plot the compressed data points
ax.scatter(X_reduce[:, 0], X_reduce[:, 1], zs=X_reduce[:, 2], s=4, lw=1, label="inliers",c="green")# Plot x's for the ground truth outliers
ax.scatter(X_reduce[outlier_index,0],X_reduce[outlier_index,1], X_reduce[outlier_index,2],
lw=2, s=60, marker="x", c="red", label="outliers")
ax.legend()
plt.show()
|
2020.1/Grupo 2/code/entregas/Assign9_BaseEstresse.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.2 64-bit
# name: python382jvsc74a57bd031f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6
# ---
# +
# -*- coding: utf-8 -*-
from IPython.display import display
import numpy as np
import pandas as pd
#import pandas_profiling
#import pandas_bokeh
import matplotlib.pyplot as plt
import matplotlib
print(matplotlib.matplotlib_fname())
plt.rcParams['font.sans-serif'] = ['Calibri']
plt.rcParams['axes.unicode_minus'] = False
pd.set_option("display.max_colwidth", 1000, 'display.width', 1000)
#pandas_bokeh.output_notebook()
items = [
'嗜碱性粒细胞计数(BASO#)(10^9/L)',
'血小板平均体积(MPV)(fL)',
'中性粒细胞计数(NEUT#)(10^9/L)',
'中性粒细胞百分比(NEUT%)(%)',
'血小板压积(PCT)(%)',
'血小板分布宽度(PDW)(%)',
'大血小板比率(P-LCR)',
'血小板总数(PLT)(10^9/L)',
'红细胞计数(RBC)(10^12/L)',
'红细胞分布宽度CV(RDW-CV)(%)',
'红细胞分布宽度-SD(RDW-SD)(fL)',
'单核细胞百分比(MONO%)(%)',
'单核细胞计数(MONO#)(10^9/L)',
'平均红细胞体积(MCV)(fL)',
'嗜碱性粒细胞百分比(BASO%)(%)',
'C-反应蛋白(CRP)(mg/L)',
'嗜酸性粒细胞计数(EO#)(10^9/L)',
'嗜酸性粒细胞百分比(EO%)(%)',
'红细胞压积(HCT)(%)',
'血红蛋白(HGB)(g/L)',
'淋巴细胞计数(LYMPH#)(10^9/L)',
'淋巴细胞百分比(LYMPH%)(%)',
'平均血红蛋白含量(MCH)(pg)',
'平均血红蛋白浓度(MCHC)(g/L)',
'白细胞数目(WBC)(10^9/L)'
]
items_ref = [x + '_ref' for x in items]
df = pd.read_excel('杜子期血常规.xlsx', engine='openpyxl')
#display(df)
# Build a wide table: one column per column of df, indexed by the measurement
# names interleaved with their '<name>_ref' reference-range rows.
df_new = pd.DataFrame([], index=[rv for r in zip(items, items_ref) for rv in r])
# NOTE(review): DataFrame.iteritems() is deprecated in modern pandas (use
# .items()); the chained df_new[index][item] assignment may also trigger
# SettingWithCopyWarning — confirm it writes through on the pandas in use.
for index, row in df.iteritems():
    # start the column empty, then fill in the cells we recognise
    df_new[index] = ''
    for i, item in enumerate(row):
        if item in items:
            # the value follows the item name; the reference range follows the value
            df_new[index][item] = row[i + 1]
            df_new[index][item + '_ref'] = row[i + 2]
#df_new = df_new.T
display(df_new)
mydf = df_new.loc['血小板总数(PLT)(10^9/L)'].T
print(mydf.index)
mydf.index = mydf.index.to_numpy(dtype='datetime64')
mydf.plot()
#print(df_new.index)
#print(df_new['血小板总数(PLT)(10^9/L)'])
#print(df_new['淋巴细胞百分比(LYMPH%)(%)'])
#df_new.plot_bokeh(x=df_new.index, y=['血小板总数(PLT)(10^9/L)', '淋巴细胞百分比(LYMPH%)(%)'])
#print(df_new.index)
#print(df_new.columns)
#plt.plot(df_new.loc['血小板总数(PLT)(10^9/L)'])
#df_new.plot(x=df_new.columns, y=['血小板总数(PLT)(10^9/L)', '淋巴细胞百分比(LYMPH%)(%)'])
#for index, row in df_new.iteritems():
# try:
# row.plot(legend=True, figsize=(20, 5))
# #df.plot_bokeh.line(x=)
# except:
# pass
#df_new.profile_report()
#df_new.to_excel('result.xlsx')
#df_new.style.applymap(lambda v : 'background-color: %s' %'#FFCCFF' if v else'background-color: %s'% '#FFCCEE')
#with pd.ExcelWriter('df_style.xlsx', engine='openpyxl') as writer:
#writer = pd.ExcelWriter('result.xlsx', engine='openpyxl')
# df_new.to_excel(writer, index=True, sheet_name='sheet')
# -
|
seven.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from cgspan_mining.config import parser
from cgspan_mining.main import main
# %pylab inline
# Mine frequent subgraphs from the sample database and plot each result.
# NOTE(review): flag meanings assumed from cgspan conventions (-s = min
# support, -l = min pattern size, -p = plot, -w = write output) — confirm
# against cgspan_mining.config.parser.
args_str = '-s 2 -l 5 -p True -w True graphdata/graph.data.simple.5'
FLAGS, _ = parser.parse_known_args(args=args_str.split())
cgs = main(FLAGS)
# ## plot graphs in database
for g in cgs.graphs.values():
    g.plot()
|
main.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [Root]
# language: python
# name: Python [Root]
# ---
import re
import requests
import praw
import pickle
import itertools
from bs4 import BeautifulSoup
from time import sleep
# Scratch cells exploring string methods used later by the bot.
s = "Hello's"
print(s.title())  # str.title() also capitalises after the apostrophe
s = 'Fox'
lis = ["no", "way", "lol"]
# All capitalised/uncapitalised combinations of the words, joined by spaces —
# the same trick itemLookup uses to build candidate wiki URLs.
s = list(map(' '.join, itertools.product(*((c[0].upper() + c[1:], c) for c in lis))))
print(s)
# +
# Matches wiki-style [[item name]] references inside comment bodies.
m = re.compile(r"\[\[[^\]]*\]\]")


def supers(text):
    """Render *text* as reddit superscript: every word prefixed with '^'.

    The result always begins with a single space so it can be appended
    directly after other markdown.
    """
    words = text.split()
    return " " + " ".join("^" + word for word in words)
def findItem(name):
    """Look *name* up on the wiki, retrying with shorter word prefixes.

    Apostrophes are URL-encoded first; the full word sequence is tried,
    then one trailing word is dropped per attempt.  Returns the formatted
    item text from itemLookup, or "" when no prefix resolves.
    """
    tokens = name.replace("'", "%27").split()
    for end in range(len(tokens), 0, -1):
        found = itemLookup(tokens[:end])
        if found != "":
            return found
    return ""
def itemLookup(name):
    """Scrape the Path of Exile wiki page for *name* (a list of words) and
    return a reddit-markdown summary of the item, or "" when no page or
    item box exists.

    NOTE(review): performs live network I/O via requests and parses with
    BeautifulSoup; relies on module-level supers() and itertools.
    """
    # Try a URL
    # Wiki URLs are case-sensitive, so every capitalised/lower-case variant
    # of each word is generated and joined with '_' (MediaWiki style).
    possibleURLs = list(map('_'.join, itertools.product(*((c[0].upper() + c[1:].lower(), c.lower()) for c in name))))
    for possibleURL in possibleURLs:
        print(possibleURL)
        r = requests.get("http://www.pathofexile.gamepedia.com/" + possibleURL)
        if r.status_code != 404:
            break  # first variant that resolves wins
    if r.status_code == 404:
        # none of the capitalisation variants exists on the wiki
        return ""
    soup = BeautifulSoup(r.text, "html.parser")
    itemspan = soup.find("span", { "class" : "infobox-page-container"})
    # Break if an invalid page has been reached
    if not itemspan:
        return ""
    itemspan = itemspan.find("span", { "class" : "item-box" })
    # Get item title: concatenate the text of every child node
    itemTitleRaw = itemspan.find("span", { "class" : "header"}).children
    itemTitle = ""
    for item in itemTitleRaw:
        if item.string:
            itemTitle += item.string + " "
    itemTitle = itemTitle[:-1]  # drop the trailing space
    # Get item stats
    itemStats = itemspan.find("span", { "class" : "item-stats"}).find("span")
    # Get item mods
    itemMods = itemspan.findAll("span", { "class" : "-mod"})
    # Get image URL
    itemURL = itemspan.findAll("a", { "class" : "image"})[0].img["src"]
    # Print: assemble the reddit reply markdown
    response = ""
    response += "[**" + itemTitle + "**](" + itemURL + ")"
    response += "[[Wiki]](http://www.pathofexile.gamepedia.com/" + possibleURL + ")\n\n"
    # Print Stats: each stat line is rendered in superscript via supers()
    for item in itemStats.children:
        try:
            temp = ""
            for child in item.children:
                if child.string:
                    temp += supers(child.string)
            response += temp
            if temp == "":
                response += " ^| "  # separator when the node had no text
        except:
            # NOTE(review): bare except — presumably item is a plain
            # NavigableString with no .children, but any error is hidden here.
            response += supers(item)
        response += "\n\n"
    #Print Mods
    for i in range(len(itemMods)):
        for item in itemMods[i].children:
            if item.string:
                if len(itemMods) == 2 and i == 0:
                    # with exactly two mod blocks, the first is italicised
                    response += "*" + item.string + "*"
                else:
                    response += item.string
            else:
                response += "\n\n"
        response += "\n\n"
    return response
# -
findItem("sorcerer boots bob")
r = praw.Reddit('bot1')
def respond(lim, rate):
    """Poll r/test forever, replying to comments containing [[item]] tags.

    lim  -- number of newest submissions to scan on each pass
    rate -- seconds to sleep between passes
    Relies on module globals: r (praw.Reddit), m (compiled tag regex),
    findItem(), and the ids.pickle file of already-answered comment ids.
    Runs until interrupted; persists ids.pickle after every pass.
    """
    with open('ids.pickle', 'rb') as handle:
        ids = pickle.load(handle)
    while True:
        subreddit = r.subreddit("test")
        for submission in subreddit.new(limit=lim):
            # breadth-first walk over the submission's comment tree
            comment_queue = submission.comments[:]
            while comment_queue:
                com = comment_queue.pop(0)
                if "[[" in com.body and "]]" in com.body and com.id not in ids:
                    ids.append(com.id)
                    print("Found Comment:" + com.id)
                    reply = ""
                    # NOTE(review): the slice caps lookups at 10, but the
                    # footer text claims "up to 7" — confirm intended limit.
                    for item in m.findall(com.body)[:10]:
                        temp = findItem(item[2:-2])  # strip the [[ ]] delimiters
                        reply += temp
                        if temp != "":
                            reply += "\n\n---------\n\n"
                    if reply != "":
                        reply += "I am a bot. Reply to me with up to 7 [[item names]]."
                        com.reply(reply)
                    else:
                        print("False Reply ^")
                comment_queue.extend(com.replies)
        # persist the set of answered ids after each full pass
        with open('ids.pickle', 'wb') as handle:
            pickle.dump(ids, handle, protocol=pickle.HIGHEST_PROTOCOL)
        sleep(rate)
respond(50, 30)
|
.ipynb_checkpoints/POE Bot-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Lecture 3 Exercises
# ## Exercise 1: Data Types and Operators
# **Task 1.1:** Try how capitalization affects string comparison, e.g., compare "datascience" to "Datascience".
print(str("datascience" == "Datascience"))  # False: string == is case-sensitive
# **Task 1.2:** Try to compare floats using the `==` operator defined as expressions of integers, e.g., whether 1/3 is equal to 2/6. Does that work?
# It works
float(1/3) == float(2/6)
# **Task 1.3:** Write an expression that compares the "floor" value of a float to an integer, e.g., compare the floor of 1/3 to 0. There are two ways to calculate a floor value: using `int()` and using `math.floor()`. Are they equal? What is the data type of the returned values?
int(1/3) == 0
oneThirdInt= int(1/3)
print("int version's type:",type(oneThirdInt))
import math
oneThirdFloor= math.floor(1/3)
print("math package version's type:",type(oneThirdFloor))
# both int() and math.floor() return an int here, so the values compare equal
print("Are the int and math.floor versions equal?",oneThirdInt == oneThirdFloor)
print("Data type for comparison between oneThirdInt and oneThirdFloor:", type(oneThirdInt == oneThirdFloor))
# ## Exercise 3: Functions and If
# Write a function that takes two integers. If either of the numbers can be divided by the other without a remainder, print the result of the division. If none of the numbers can divide the other one, print an error message.
def func(a, b):
    """Divide one integer by the other when it leaves no remainder.

    Returns a / b (a float) when b evenly divides a, b / a when a evenly
    divides b, and an error-message string when neither divides the other.
    Zero divisors are guarded so the function cannot raise
    ZeroDivisionError (the original crashed on e.g. func(5, 0)).
    """
    if b != 0 and a % b == 0:
        return a / b
    elif a != 0 and b % a == 0:
        return b / a
    else:
        # neither number divides the other evenly (or both are zero)
        returnval = "Error! There should not be a remainder when division is performed."
        return returnval
print("Result when 6 is divided by 3 is:", func(3,6))
print("The function produces the same result regarless of the order the numbers are in:", func(6,3))
print("""The function will return an error when neither number can be divided by the other without a remainder,
like in the case of 5 and 2:""", func(5,2))
# ## Exercise 4: Lists
#
# * Create a list for the Rolling Stones: Mick, Keith, Charlie, Ronnie.
# * Create a slice of that list that contains only members of the original lineup (Mick, Keith, Charlie).
# * Add the stones lists to the the bands list.
# initializations
beatles = ["Paul", "John", "George", "Ringo"]
zeppelin = ["Jimmy", "Robert", "John", "John"]
bands = [beatles, zeppelin]
# The Rolling Stones lineup
rolling_stones= ["Mick", "Keith", "Charlie", "Ronnie"]
rolling_stones
# Original lineup only: the first three members
rolling_stones_slice = rolling_stones[0:3]
rolling_stones_slice
# Rebuild bands so it also contains the Stones
bands= [beatles, zeppelin, rolling_stones]
bands
# ## Exercise 5.1: While
#
# Write a while loop that computes the sum of the 100 first positive integers. I.e., calculate
#
# $1+2+3+4+5+...+100$
# Sum of the first 100 positive integers, using the builtin instead of an
# explicit accumulator loop.
total = sum(range(1, 101))
total
# ## Exercise 5.2: For
#
# Use a for loop to create an array that contains all even numbers in the range 0-50, i.e., an array: [2, 4, 6, ..., 48, 50]
# Even numbers in 0..50, collected with a list comprehension.
# (0 is included, matching the range the loop originally scanned.)
evens = [n for n in range(0, 51) if n % 2 == 0]
evens
# Create a new array for the Beatles main instruments: Ringo played drums, George played lead guitar, John played rhythm guitar and Paul played bass. Assume that the array position associated the musician with his instrument. Use a for loop to print:
#
# ```
# Paul: Bass
# John: Rythm Guitar
# George: Lead Guitar
# Ringo: Drums
# ```
instruments = ["Bass", "Rythm Guitar", "Lead Guitar", "Drums"]
# Pair each Beatle with his instrument positionally and print "Name: Instrument".
for member, instrument in zip(beatles, instruments):
    print(member + ": " + instrument)
# ## Exercise 6: Recursion
#
# Write a recursive function that calculates the factorial of a number.
def factorial(n):
    """Return n! computed recursively.

    The base case covers n <= 1, so factorial(0) == 1 (the mathematical
    convention).  The original base case was n == 1 only, which recursed
    forever for n < 1.
    """
    if n <= 1:
        return 1
    return n * factorial(n - 1)
# Example
factorial(5)
# ## Exercise 7: List Comprehension
# Write a list comprehension function that creates an array with the length of each word in the following sentence:
#
# "the quick brown fox jumps over the lazy dog"
#
# The result should be a list:
#
# ```python
# [3,5,...,3]
# ```
# setting up the array
sentence = "the quick brown fox jumps over the lazy dog"
word_list = sentence.split()
word_list
# one length per word, via a list comprehension
length_list = [len(word) for word in word_list]
length_list
|
03-basic-python-II/lecture-3-exercises.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from IPython.display import display, HTML
import spot
import buddy
spot.setup(show_default='.tavb')
def horiz(*args):
    """Display multiple arguments side by side in one HTML table row."""
    cells = ''.join('<td style="vertical-align: top;">' + arg.data + '</td>'
                    for arg in args)
    return HTML('<table><tr>' + cells + '</tr></table>')
# -
# # Anatomy of a product
#
# In this notebook, we write a Python function that constructs the product of two automata.
#
# This is obviously not a new feature: Spot can already make a product of two automata using its `product()` function.
# For instance:
a1 = spot.translate('X(a W b)')
a2 = spot.translate('G(Fc U b)')
prod = spot.product(a1, a2)
horiz(a1.show(), a2.show(), prod.show())
# The builtin `spot.product()` function produces an automaton whose language is the intersection of the two input languages. It does so by building an automaton that keeps track of the runs in the two input automata. The states are labeled by pairs of input states so that we can more easily follow what is going on, but those labels are purely cosmetic. The acceptance condition is the conjunction of the two acceptance condition, but the acceptance sets of one input automaton have been shifted to not conflict with the other automaton.
#
# In fact, that automaton printer has an option to shift those sets in its output, and this is perfect for illustrating products. For instance `a.show('+3')` will display `a1` with all its acceptance sets shifted by 3.
#
# Let's define a function for displaying the three automata involved in a product, using this shift option so we can follow what is going on with the acceptance sets.
# +
def show_prod(a1, a2, res):
    """Display a1, a2 and their product side by side.

    a2's acceptance sets are rendered shifted past a1's, matching the
    numbering used in the product automaton.
    """
    shift = a1.num_sets()
    display(horiz(a1.show(), a2.show('.tavb+{}'.format(shift)), res.show()))

show_prod(a1, a2, prod)
# -
# # Building a product
#
# Let's now rewrite `product()` in Python. We will do that in three steps.
#
#
# ## First attempt
#
# First, we build a product without taking care of the acceptance sets. We just want to get the general shape of the algorithm.
#
# We will build an automaton of type `twa_graph`, i.e., an automaton represented explicitely using a graph. In those automata, states are numbered by integers, starting from `0`. (Those states can also be given a different name, which is why the the `product()` shows us something that appears to be labeled by pairs, but the real identifier of each state is an integer.)
#
# We will use a dictionary to keep track of the association between a pair `(ls,rs)` of input states, and its number in the output.
# +
def product1(left, right):
    """First attempt: synchronised product of two twa_graph automata,
    ignoring acceptance sets (the result's acceptance defaults to 't').

    NOTE(review): does not register the result's atomic propositions —
    p1.ap() is empty, as discussed in the surrounding cells.
    """
    # A bdd_dict object associates BDD variables (that are
    # used in BDDs labeling the edges) to atomic propositions.
    bdict = left.get_dict()
    # If the two automata do not have the same BDD dict, then
    # we cannot easily detect compatible transitions.
    if right.get_dict() != bdict:
        raise RuntimeError("automata should share their dictionary")
    result = spot.make_twa_graph(bdict)
    # This will be our state dictionary
    sdict = {}
    # The list of output states for which we have not yet
    # computed the successors. Items on this list are triplets
    # of the form (ls, rs, p) where ls,rs are the state number in
    # the left and right automata, and p is the state number in
    # the output automaton.
    todo = []
    # Transform a pair of state number (ls, rs) into a state number in
    # the output automaton, creating a new state if needed. Whenever
    # a new state is created, we can add it to todo.
    def dst(ls, rs):
        pair = (ls, rs)
        p = sdict.get(pair)
        if p is None:
            p = result.new_state()
            sdict[pair] = p
            todo.append((ls, rs, p))
        return p
    # Setup the initial state. It always exists.
    result.set_init_state(dst(left.get_init_state_number(),
                              right.get_init_state_number()))
    # Build all states and edges in the product
    while todo:
        lsrc, rsrc, osrc = todo.pop()
        for lt in left.out(lsrc):
            for rt in right.out(rsrc):
                # an output edge exists only when the two labels are compatible
                cond = lt.cond & rt.cond
                if cond != buddy.bddfalse:
                    result.new_edge(osrc, dst(lt.dst, rt.dst), cond)
    return result

p1 = product1(a1, a2)
show_prod(a1, a2, p1)
# -
# Besides the obvious lack of acceptance condition (which defaults to `t`) and acceptance sets, there is a less obvious problem: we never declared the set of atomic propositions used by the result automaton. This as two consequences:
# - calling `p1.ap()` will return an empty set of atomic propositions
print(a1.ap())
print(a2.ap())
print(p1.ap())
# - the `bdd_dict` instance that is shared by the three automata knows that the atomic propositions `a` and `b` are used by automata `a1` and that `b` and `c` are used by `a2`. But it is unaware of `p1`. That means that if we delete automata `a1` and `a2`, then the `bdd_dict` will release the associated BDD variables, and attempting to print automaton `p1` will either crash (because it uses bdd variables that are not associated to any atomic proposition) or display different atomic propositions (in case the BDD variables have been associated to different propositions in the meantime).
#
# These two issues are fixed by either calling `p1.register_ap(...)` for each atomic proposition, or in our case `p1.copy_ap_of(...)` to copy the atomic propositions of each input automaton.
# ## Second attempt: a working product
#
# This fixes the list of atomtic propositions, as discussed above, and also sets the correct acceptance condition.
# The `set_acceptance` method takes two arguments: a number of sets, and an acceptance function. In our case, both of these arguments are readily computed from the number of states and acceptance functions of the input automata.
# +
def product2(left, right):
    """Synchronized product of two automata, with acceptance handling.

    Like product1, but also declares the atomic propositions of the result
    and builds the conjunction of both acceptance conditions (the sets of
    the right operand are shifted past those of the left operand).
    """
    bdict = left.get_dict()
    if right.get_dict() != bdict:
        raise RuntimeError("automata should share their dictionary")
    result = spot.make_twa_graph(bdict)
    # Copy the atomic propositions of the two input automata, so the result
    # is properly registered in the shared bdd_dict.
    result.copy_ap_of(left)
    result.copy_ap_of(right)
    sdict = {}  # maps (left state, right state) -> product state number
    todo = []   # product states whose outgoing edges remain to be built
    def dst(ls, rs):
        # Return the product state for (ls, rs), creating it on first use.
        pair = (ls, rs)
        p = sdict.get(pair)
        if p is None:
            p = result.new_state()
            sdict[pair] = p
            todo.append((ls, rs, p))
        return p
    result.set_init_state(dst(left.get_init_state_number(),
                              right.get_init_state_number()))
    # The acceptance sets of the right automaton will be shifted by this amount
    shift = left.num_sets()
    result.set_acceptance(shift + right.num_sets(),
                          left.get_acceptance() & (right.get_acceptance() << shift))
    while todo:
        lsrc, rsrc, osrc = todo.pop()
        for lt in left.out(lsrc):
            for rt in right.out(rsrc):
                cond = lt.cond & rt.cond
                if cond != buddy.bddfalse:
                    # membership of this transition in the new acceptance sets
                    acc = lt.acc | (rt.acc << shift)
                    result.new_edge(osrc, dst(lt.dst, rt.dst), cond, acc)
    return result
p2 = product2(a1, a2)
show_prod(a1, a2, p2)
# The result now reports the union of both sets of atomic propositions.
print(p2.ap())
# -
# ## Third attempt: a more usable product
#
# We could stop with the previous function: the result is a correct product from a theoretical point of view. However our function is still inferior to `spot.product()` in a couple of points:
# - states are not presented as pairs
# - the properties of the resulting automaton are not set
#
# The former point could be addressed by calling `set_state_names()` and passing an array of strings: if a state number is smaller than the size of that array, then the string at that position will be displayed instead of the state number in the dot output. However we can do even better by using `set_product_states()` and passing an array of pairs of states. Besides the output routines, some algorithms actually retrieve this vector of pair of states to work on the product.
#
# Regarding the latter point, consider for instance the deterministic nature of these automata. In Spot an automaton is deterministic if it is both existential (no universal branching) and universal (no non-deterministic branching). In our case we will restrict the algorithm to existential input (by asserting `is_existential()` on both operands), so we can consider that the `prop_universal()` property is an indication of determinism:
# NOTE(review): `prod` is not defined anywhere in this chunk -- presumably it
# is the spot.product() result from an earlier cell; verify before running.
print(a1.prop_universal())
print(a2.prop_universal())
print(prod.prop_universal())
print(p1.prop_universal())
# Because `a1` and `a2` are deterministic, their product is necessarily deterministic. This is a property that the `spot.product()` algorithm will preserve, but that our version does not *yet* preserve. We can fix that by adding
#
# if left.prop_universal() and right.prop_universal():
# result.prop_universal(True)
#
# at the end of our function. Note that this is **not** the same as
#
# result.prop_universal(left.prop_universal() and right.prop_universal())
#
# because the results the `prop_*()` family of functions take and return instances of `spot.trival` values. These `spot.trival`, can, as their name implies, take one amongst three values representing `yes`, `no`, and `maybe`. `yes` and `no` should be used when we actually know that the automaton is deterministic or not (not deterministic meaning that there actually exists some non determinitic state in the automaton), and `maybe` when we do not know.
#
# The one-liner above is wrong for two reasons:
#
# - if `left` and `right` are non-deterministic, their product could be deterministic, so calling prop_universal(False) would be wrong.
#
# - the use of the `and` operator on `trival` is misleading in non-Boolean context. The `&` operator would be the correct operator to use if you want to work in three-valued logic. Compare:
# Compare three-valued conjunction (&) with Python's short-circuiting `and`
# on spot.trival values: only `&` implements the expected 3-valued logic.
yes = spot.trival(True)
no = spot.trival(False)
maybe = spot.trival_maybe()
for u in (no, maybe, yes):
    for v in (no, maybe, yes):
        print("{u!s:>5} & {v!s:<5} = {r1!s:<5} {u!s:>5} and {v!s:<5} = {r2!s:<5}"
          .format(u=u, v=v, r1=(u&v), r2=(u and v)))
# The reason `maybe and no` is equal to `maybe` is that Python evaluate it like `no if maybe else maybe`, but when a trival is evaluated in a Boolean context (as in `if maybe`) the result is True only if the trival is equal to yes.
#
# So our
#
# if left.prop_universal() and right.prop_universal():
# result.prop_universal(True)
#
# is OK because the `if` body will only be entered of both input automata are known to be deterministic.
# However the question is in fact more general than just determinism: the product of two weak automata is weak, the product of two stutter-invariant automata is stutter-invariant, etc. So when writing an algorithm one should consider which of the [property bits](https://spot.lrde.epita.fr/hoa.html#property-bits) are naturally preserved by the algorithm, and set the relevant bits: this can save time later if the resulting automaton is used as input for another algorithm.
# +
def product3(left, right):
    """Synchronized product preserving state names and property bits.

    Same construction as product2, plus:
    - each product state is associated with its (left, right) pair via
      set_product_states(), and
    - the property bits the product naturally preserves are copied when
      they hold in both operands.
    """
    # the twa_graph.is_existential() method returns a Boolean, not a spot.trival
    if not (left.is_existential() and right.is_existential()):
        raise RuntimeError("alternating automata are not supported")
    bdict = left.get_dict()
    if right.get_dict() != bdict:
        raise RuntimeError("automata should share their dictionary")
    result = spot.make_twa_graph(bdict)
    result.copy_ap_of(left)
    result.copy_ap_of(right)
    pairs = [] # our array of state pairs
    sdict = {}  # maps (left state, right state) -> product state number
    todo = []   # product states whose outgoing edges remain to be built
    def dst(ls, rs):
        # Return the product state for (ls, rs), creating it on first use.
        pair = (ls, rs)
        p = sdict.get(pair)
        if p is None:
            p = result.new_state()
            sdict[pair] = p
            todo.append((ls, rs, p))
            pairs.append((ls, rs)) # name each state
        return p
    result.set_init_state(dst(left.get_init_state_number(),
                              right.get_init_state_number()))
    shift = left.num_sets()
    result.set_acceptance(shift + right.num_sets(),
                          left.get_acceptance() & (right.get_acceptance() << shift))
    while todo:
        lsrc, rsrc, osrc = todo.pop()
        for lt in left.out(lsrc):
            for rt in right.out(rsrc):
                cond = lt.cond & rt.cond
                if cond != buddy.bddfalse:
                    acc = lt.acc | (rt.acc << shift)
                    result.new_edge(osrc, dst(lt.dst, rt.dst), cond, acc)
    # Remember the origin of our states
    result.set_product_states(pairs)
    # Loop over all the properties we want to preserve if they hold in both automata.
    # `and` is safe here: the if-body is entered only when both are known true.
    for p in ('prop_universal', 'prop_complete', 'prop_weak', 'prop_inherently_weak',
              'prop_terminal', 'prop_stutter_invariant', 'prop_state_acc'):
        if getattr(left, p)() and getattr(right, p)():
            getattr(result, p)(True)
    return result
p3 = product3(a1, a2)
show_prod(a1, a2, p3)
# Determinism (prop_universal) is now preserved by the construction.
print(p3.prop_universal())
# -
# For development, it is useful to know that we can force the automaton printer to show the real state numbers (not the pairs) by passing option `1`, and that we can retrieve the associated pairs ourselves.
# Option '.1' forces raw state numbers in the dot output; the (left, right)
# pair each state came from can still be recovered via get_product_states().
display(p3.show('.1'))
pairs = p3.get_product_states()
for s in range(p3.num_states()):
    print("{}: {}".format(s, pairs[s]))
# # Timings
#
# As an indication of how slow it is to implement such an algorithm using the Python bindings of Spot, consider the following comparison:
# %timeit product3(a1, a2)
# %timeit spot.product(a1, a2)
# Depending on the machine where this notebook has been run, using the C++ version of the product can be 1 to 2 order of magnitude faster. This is due to all the type conversions (converting Python types to C++ types) that occurs everytime a function/method of Spot is called from Python. When calling high-level C++ functions (such as `spot.product()`) from Python, the overhead is negligible because most of the time is spent on the C++ side, actually executing the function. However when calling low-level functions (such as `new_edge()`, `new_state()`, `out()`) most of the time is spent converting the arguments from Python to C++ and the results from C++ to Python.
#
# Despite that speed difference, Python can be useful to prototype an algorithm before implementing it in C++.
|
spot/tests/python/product.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ! pip install wordcloud wikipedia
# +
import wikipedia
from wordcloud import WordCloud, STOPWORDS
import matplotlib.pyplot as plt
# Fetch the Wikipedia article text and render it as a word cloud.
page = wikipedia.page("Python programming")
text = page.content
cloud = WordCloud().generate(text)
plt.figure(figsize=(8,8), facecolor=None)
plt.imshow(cloud, interpolation="bilinear")
plt.axis("off")
plt.tight_layout(pad=0)
plt.show()
# Fixed output filename typo ("wordclod.png" -> "wordcloud.png").
cloud.to_file("wordcloud.png")
# -
|
my_cloud (3).ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + deletable=true editable=true
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from datetime import datetime
import sys
# %matplotlib inline
# Load the competition data: the submission template, per-product metadata,
# raw per-product quantities, and the pre-built per-day training table.
evaluation=pd.read_csv('submit.txt')
product_info=pd.read_csv('product_info.txt')
product_quantity=pd.read_csv('product_quantity.txt')
Mytrain_day=pd.read_csv('Mytrain_day.csv')
# + deletable=true editable=true
def random_read_train_data(k, table):
    """Sample k rows at random from `table` and split them into features/targets.

    Parameters
    ----------
    k : int
        Number of rows to sample (without replacement).
    table : pandas.DataFrame
        Training frame containing a 'ciiquantity' target column; every other
        column is treated as a feature.

    Returns
    -------
    (numpy.ndarray, numpy.ndarray)
        Feature matrix of shape (k, n_features) and target column vector of
        shape (k, 1).
    """
    sampled = table.take(np.random.permutation(len(table))[:k])
    # .to_numpy() replaces the long-removed DataFrame.as_matrix().
    features = sampled.drop('ciiquantity', axis=1).to_numpy()
    # Reshape targets into an explicit column vector so they line up with the
    # (batch, 1) network output.  (The original reshaped in place and then
    # discarded the result of a no-op .transpose() call.)
    targets = sampled['ciiquantity'].to_numpy().reshape(-1, 1)
    return features, targets
# + deletable=true editable=true
def add_layer(inputs, in_size, out_size, activation_function=None):
    """Append one fully-connected layer to the TF1 graph.

    inputs: tensor of shape [batch, in_size].
    Returns a [batch, out_size] tensor: the affine transform when
    activation_function is None, otherwise activation_function applied to it.
    """
    weights = tf.Variable(tf.random_normal([in_size, out_size]))
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    linear_out = tf.matmul(inputs, weights) + biases
    if activation_function is None:
        return linear_out
    return activation_function(linear_out)
# Hyper-parameters
in_shape=13  # number of input features per example
epochs=1000
batch=50000  # rows sampled per training step
l_r=0.01  # learning rate
x = tf.placeholder(tf.float32, shape=[None, in_shape])
y = tf.placeholder(tf.float32, [None,1])
# One hidden sigmoid layer (10 units) followed by a linear output unit.
l1 = add_layer(x, in_shape, 10, activation_function=tf.nn.sigmoid)
prediction = add_layer(l1, 10, 1, activation_function=None)
# Mean squared error over the batch.
loss = tf.reduce_mean(tf.reduce_sum(tf.square(y - prediction),
                     reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(l_r).minimize(loss)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# Training cycle
for epoch in range(epochs):
    start = datetime.now()
    #tf_features,tf_targets = cut_train_data(epoch*1000,(epoch+1)*1000)
    tf_features,tf_targets = random_read_train_data(batch,Mytrain_day)
    # "_text" looks like a typo for "_test": a second random sample used only
    # to report loss (it may overlap the training sample -- confirm intent).
    tf_features_text,tf_targets_text = random_read_train_data(batch,Mytrain_day)
    sess.run(train_step, feed_dict={x: tf_features, y: tf_targets})
    temploss = sess.run(loss, feed_dict={x: tf_features_text, y: tf_targets_text})
    end = datetime.now()
    sys.stdout.write('\rEpoch {:>2} Loss: {:.4f} time: {}'.format(epoch + 1,temploss,end-start))
# +
def getweek(product_date):
    """Map a 'YYYY-MM-DD...' date string to its weekday, Monday=1 .. Sunday=7."""
    year = int(product_date[0:4])
    month = int(product_date[5:7])
    day = int(product_date[8:10])
    return datetime(year, month, day).weekday() + 1
def addweekday(table):
    """One-hot encode the weekday of 'product_date' into week_1..week_7 columns.

    Drops the raw 'product_date' column from the returned frame.  Note that
    the input frame is mutated: a temporary 'week' column is added to it.
    """
    table['week'] = table['product_date'].apply(getweek)
    one_hot = pd.get_dummies(table['week'], prefix='week', drop_first=False)
    combined = pd.concat([table, one_hot], axis=1)
    return combined.drop('week', axis=1).drop('product_date', axis=1)
# + deletable=true editable=true
import calendar
def get_evaluation(x):
    """Build the per-day feature matrix for row `x` of the global `evaluation` frame.

    Expands the (product_id, product_month) pair into one row per day of that
    month, attaches the z-scored product_info features, one-hot encodes the
    weekday via addweekday(), and returns a plain ndarray ready to feed the
    network.  Relies on the module-level `evaluation`, `product_info`, `pd`
    and `addweekday`.
    """
    # .to_numpy() replaces the long-removed DataFrame.as_matrix().
    _date = evaluation[evaluation.index == x].product_month.to_numpy()[0]
    _id = evaluation[evaluation.index == x].product_id.to_numpy()[0]
    _yyyy = int(_date[0:4])
    _mm = int(_date[5:7])
    # Number of days in that month (handles leap years).
    _days = calendar.monthrange(_yyyy, _mm)[1]
    _table = pd.DataFrame({'product_date': range(_days)})
    # _date[0:8] is the 'YYYY-MM-' prefix; append the zero-padded day number.
    # (Lambda parameter renamed so it no longer shadows the outer `x`.)
    _table['product_date'] = _table['product_date'].apply(
        lambda d: _date[0:8] + str(d + 1).zfill(2))
    _table['product_id'] = _id
    _features = ['eval', 'eval2', 'eval3', 'eval4', 'voters', 'maxstock']
    for feature in _features:
        value = product_info[product_info.product_id == _id][feature].to_numpy()[0]
        # z-score the value against the whole product_info column
        mean, std = product_info[feature].mean(), product_info[feature].std()
        value = (value - mean) / std
        _table[feature] = value
    _table = addweekday(_table)
    _table = _table.drop('product_id', axis=1)
    return _table.to_numpy()
# Smoke-test the trained network on the first evaluation row.
y_array=np.sum(sess.run(prediction, feed_dict={x: get_evaluation(0)}))
print(y_array)
ciiquantity_month=np.zeros(evaluation.shape[0])
#'''
start = datetime.now()
# Predict a monthly total for every (product, month) row by summing the
# per-day network outputs.
for i in range(evaluation.shape[0]):
#for i in range(1000):
    # NOTE(review): .as_matrix() was removed in pandas 1.0 -- switch to
    # .to_numpy() when upgrading pandas.
    _date=evaluation[evaluation.index==i].product_month.as_matrix()[0]
    _mm=int(_date[5:7])
    month_data=get_evaluation(i)
    month_data_sum=np.sum(sess.run(prediction, feed_dict={x: month_data}))
    # October scaled by 2, every other month by 1.2 -- presumably hand-tuned
    # seasonal fudge factors; confirm against validation results.
    if(_mm==10):
        print(_mm)
        ciiquantity_month[i]=month_data_sum*2
    else:
        ciiquantity_month[i]=month_data_sum*1.2
    if i % 100 == 0:
        end = datetime.now()
        sys.stdout.write('\ri:{}/{} ciiquantity month:{} time:{}'.format(i+1,evaluation.shape[0],month_data_sum,end-start))
        start = datetime.now()
#'''
# Write the submission file and display the frame in the notebook.
evaluation['ciiquantity_month']=ciiquantity_month
evaluation.to_csv('my_ansower_quick_nn.csv',index=False)
evaluation
# -
|
mydata.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## What is NumPy?
#
# <ul><li>NumPy is a Python library used for working with arrays.</li>
#
# <li>It has functions for working in domain of linear algebra, fourier transform, and matrices.</li>
#
# <li>NumPy was created in 2005 by Travis Oliphant. It is an open source project and you can use it freely.</li></ul>
# ## Why Use NumPy?
#
# <ul><li>In Python we have lists that serve the purpose of arrays, but they are slow to process.</li>
#
# <li>NumPy aims to provide an array object that is up to 50x faster than traditional Python lists.</li>
#
# <li>The array object in NumPy is called <b>ndarray</b>, it provides a lot of supporting functions that make working with <b>ndarray</b> very easy.</li></ul>
# ## Import NumPy
#
# Once NumPy is installed, import it in your applications by adding the <b>import</b> keyword:
import numpy
# ## NumPy as np
#
# <ul><li>NumPy is usually imported under the np alias.</li>
# <li>Create an alias with the as keyword while importing:</li></ul>
import numpy as np
# ## Checking NumPy Version
#
# The version string is stored under the <b>__version__</b> attribute.
# +
import numpy as nk
print(nk.__version__)
# -
# ## Create a NumPy ndarray Object
# <ul><li>NumPy is used to work with arrays.</li>
# <li>The array object in NumPy is called <b>ndarray</b>.</li>
# <li>We can create a NumPy <b>ndarray</b> object by using the <b>array()</b> function.</li>
# +
import numpy as np
arr = np.array([101, 201, 301, 401, 501])
print(arr)
print(type(arr))
# -
# To create an <b>ndarray</b>, we can pass a list, tuple or any array-like object into the <b>array()</b> method, and it will be converted into an <b>ndarray</b>:
# +
import numpy as np
nameList = ['Angel', "Shemi", "Marvel", "Linda"]
ageTuple = (41, 32, 21, 19)
gradeDict = {"CSC102": 89, "MTH 102": 77, "CHM 102": 69, "GST 102": 99}
arr_nameList = np.array(nameList)
arr_ageTuple = np.array(ageTuple)
# NOTE(review): np.array on a dict does NOT convert its items -- it yields a
# 0-d object array wrapping the dict itself; use list(gradeDict.values()) to
# get an array of the grades.
arr_gradeDict = np.array(gradeDict)
print(arr_nameList)
print(arr_ageTuple)
print(arr_gradeDict)
# -
# ## Dimensions in Array
# A dimension in arrays is one level of array depth (nested arrays).
# ### 0-Dimension
# 0-D arrays, or Scalars, are the elements in an array. Each value in an array is a 0-D array.
# +
import numpy as np
classNum = int(input("How many students are in the CSC 102 class?"))
class_arr = np.array(classNum)
if (class_arr == 1):
print("There is only ", class_arr, "student in CSC 102 class" )
else:
print("There are", class_arr, "students in CSC 102 class" )
# -
# ### 1-D Arrays
# <ul><li>An array that has 0-D arrays as its elements is called uni-dimensional or 1-D array.</li>
# <li>These are the most common and basic arrays.</li>
# </ul>
# +
import numpy as np
arr = np.array([1, 2, 3, 4, 5])
print(arr)
# -
# ### 2-D Arrays
# <ul><li>An array that has 1-D arrays as its elements is called a 2-D array.</li>
# <li>These are often used to represent matrix or 2nd order tensors.</li></ul>
# +
import numpy as np
arr = np.array([[1, 2, 3], [4, 5, 6]])
print(arr)
# -
# ### 3-D arrays
# <ul><li>An array that has 2-D arrays (matrices) as its elements is called 3-D array.</li>
# <li>These are often used to represent a 3rd order tensor.</li></ul>
# +
import numpy as np
arr = np.array([[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]])
print(arr)
# -
# ## Check Number of Dimensions?
# NumPy Arrays provides the <b>ndim</b> attribute that returns an integer that tells us how many dimensions the array have
# +
import numpy as np
a = np.array(42)
b = np.array([[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]])
c = np.array([[1, 2, 3], [4, 5, 6]])
d = np.array([1, 2, 3, 4, 5])
print(a.ndim)
print(b.ndim)
print(c.ndim)
print(d.ndim)
# -
# ## Higher Dimensional Arrays
# <ul><li>An array can have any number of dimensions.</li>
# <li>When the array is created, you can define the number of dimensions by using the ndmin argument.</li>
# <li>In this array the innermost dimension (5th dim) has 4 elements, the 4th dim has 1 element that is the vector, the 3rd dim has 1 element that is the matrix with the vector, the 2nd dim has 1 element that is 3D array and 1st dim has 1 element that is a 4D array.</li></ul>
# +
import numpy as np
arr = np.array([1, 2, 3, 4], ndmin=6)
print(arr)
print('number of dimensions :', arr.ndim)
# -
# ## Access Array Elements
# +
import numpy as np
arr = np.array([1, 2, 3, 4])
print(arr[1])
# -
# ## Access 2-D Arrays
# +
import numpy as np
arr = np.array([[1,2,3,4,5], [6,7,8,9,10]])
print('5th element on 2nd row: ', arr[1, 4])
# -
# ## Access 3-D Arrays
# +
import numpy as np
arr = np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])
print(arr[0, 1, 2])
# -
# ## Negative Indexing
# Use negative indexing to access an array from the end.
# +
import numpy as np
arr = np.array([[1,2,3,4,5], [6,7,8,9,10]])
print('Last element from 2nd dim: ', arr[1, -1])
# -
# ## Slicing arrays
# <ul><li>Slicing in python means taking elements from one given index to another given index.</li>
# <li>We pass slice instead of index like this: <b>[start:end]</b>.</li>
# <li>We can also define the step, like this: <b>[start:end:step]</b>.</li>
# <li>If we don't pass start its considered 0</li>
# <li>If we don't pass end its considered length of array in that dimension</li>
# <li>If we don't pass step its considered 1</li></ul>
# +
# Slice elements from index 1 to index 5 from the following array:
import numpy as np
arr = np.array([1, 2, 3, 4, 5, 6, 7])
print(arr[1:5])
# +
# Slice elements from index 4 to the end of the array:
import numpy as np
arr = np.array([1, 2, 3, 4, 5, 6, 7])
print(arr[4:])
# +
# Slice elements from the beginning to index 4 (not included):
import numpy as np
arr = np.array([1, 2, 3, 4, 5, 6, 7])
print(arr[:4])
# -
# ## Checking the Data Type of an Array
# +
import numpy as np
int_arr = np.array([1, 2, 3, 4])
str_arr = np.array(['apple', 'banana', 'cherry'])
print(int_arr.dtype)
print(str_arr.dtype)
# -
# ## NumPy Array Copy vs View
# #### The Difference Between Copy and View
# <ul><li>The main difference between a copy and a view of an array is that the copy is a new array, and the view is just a view of the original array.</li>
# <li>The copy owns the data and any changes made to the copy will not affect original array, and any changes made to the original array will not affect the copy.</li>
# <li>The view does not own the data and any changes made to the view will affect the original array, and any changes made to the original array will affect the view.</li></ul>
# ### Copy
# +
import numpy as np
arr = np.array([1, 2, 3, 4, 5])
x = arr.copy()
arr[0] = 42
print(arr)
print(x)
# -
# ### View
# +
import numpy as np
arr = np.array([1, 2, 3, 4, 5])
x = arr.view()
arr[0] = 42
print(arr)
print(x)
# -
# ## Check if Array Owns its Data
# +
import numpy as np
arr = np.array([1, 2, 3, 4, 5])
x = arr.copy()
y = arr.view()
print(x.base)
print(y.base)
# -
# ## Get the Shape of an Array
# +
# Print the shape of a 2-D array:
import numpy as np
arr = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
print(arr.shape)
# +
import numpy as np
arr = np.array([1, 2, 3, 4], ndmin=5)
print(arr)
print('shape of array :', arr.shape)
# -
# ## Iterating Arrays
# +
#Iterate on each scalar element of the 2-D array:
import numpy as np
arr = np.array([[1, 2, 3], [4, 5, 6]])
for x in arr:
    for y in x:
        # NOTE(review): this prints each scalar y together with its whole row
        # x; print(y) alone would match the "each scalar element" prose above.
        print(y,x)
# +
# Iterate on the elements of the following 3-D array:
import numpy as np
arr = np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])
for x in arr:
print(x[0][1])
print(x[1][0])
# +
import numpy as np
arr = np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])
for x in arr:
for y in x:
for z in y:
print(z,y,x)
# -
# ## Joining NumPy Arrays
# We pass a sequence of arrays that we want to join to the concatenate() function, along with the axis. If axis is not explicitly passed, it is taken as 0.
# +
# Join two arrays
import numpy as np
arr1 = np.array([1, 2, 3])
arr2 = np.array([4, 5, 6])
arr = np.concatenate((arr1, arr2))
print(arr)
# -
# ## Splitting NumPy Arrays
# <ul><li>Splitting is reverse operation of Joining.</li>
# <li>Joining merges multiple arrays into one and Splitting breaks one array into multiple.</li>
# <li>We use <b>array_split()</b> for splitting arrays, we pass it the array we want to split and the number of splits.</li></ul>
# +
import numpy as np
arr = np.array([1, 2, 3, 4, 5, 6])
newarr = np.array_split(arr, 3)
print(newarr)
# +
# Access splitted arrays
import numpy as np
arr = np.array([1, 2, 3, 4, 5, 6])
newarr = np.array_split(arr, 3)
print(newarr[0])
print(newarr[1])
print(newarr[2])
# -
# ## Splitting 2-D Arrays
# +
import numpy as np
arr = np.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]])
newarr = np.array_split(arr, 3)
print(newarr)
# -
|
Week_8/.ipynb_checkpoints/Week 8 - Numerical Python (NumPy) Practice-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# # Recurrent Neural Network - Linear Model
# Linear model of `Recurrent Neural Network`:
# $$S_{k} = f(S_{k-1} * W_{rec} + X_k * W_x)$$
#
# The model we are going to train is to count how many 1's it sees on a binary input stream, and output the total count at the end of the sequence. Obviously, we know $w_{rec} = w_x = 1$. The point here is to understand the training process of RNN.
#
# It can also be visicualized as:
# <p align="center">
# <img src="https://raw.githubusercontent.com/weichetaru/weichetaru.github.com/master/notebook/machine-learning/img/SimpleRNN01.png"></p>
# Python imports
import numpy as np # Matrix and vector computation package
import matplotlib
import matplotlib.pyplot as plt # Plotting library
from matplotlib import cm # Colormaps
from matplotlib.colors import LogNorm # Log colormaps
# Allow matplotlib to plot inside this notebook
# %matplotlib inline
# Set the seed of the numpy random number generator so that the tutorial is reproducable
np.random.seed(seed=1)
# ### Prepare Dataset
# Create Dataset
nb_of_samples = 20
sequence_len = 10
# Create the input sequences: random binary strings of length sequence_len
X = np.zeros((nb_of_samples, sequence_len))
for idx in range(nb_of_samples):
    X[idx, :] = np.around(np.random.rand(sequence_len)).astype(int)
# Create the targets of each sequence: the number of ones it contains
t = np.sum(X, axis=1)
# ### Forward Step
# +
# Define the forward step functions
def update_state(xk, sk, wx, wRec):
    """
    Advance the recurrence one timestep: combine the current input (xk)
    with the previous state (sk) using the input weight (wx) and the
    recursive weight (wRec).
    """
    return wx * xk + wRec * sk
def forward_states(X, wx, wRec):
    """
    Unroll the linear RNN over the full input sequence X.

    Returns a matrix S of state activations with one extra leading column
    for the initial state s0 = 0; the final activations are in S[:, -1].
    """
    n_samples, n_steps = X.shape
    S = np.zeros((n_samples, n_steps + 1))
    for step in range(n_steps):
        S[:, step + 1] = update_state(X[:, step], S[:, step], wx, wRec)
    return S
def cost(y, t):
    """
    Mean squared error between the targets t and the outputs y.

    NOTE(review): divides by the module-level global `nb_of_samples` rather
    than len(y) -- assumes y always holds nb_of_samples outputs.
    """
    return np.sum((t - y) ** 2) / nb_of_samples
# -
# ### Backward Propagation
#
# Start from getting the gradient of cost function $\xi$ by $\partial \xi / \partial y$. Then use this gradient for backward propagation through time (i.e. layer by layer).
#
# The recurrent relation of gradient for each state during back propagation is:
#
# $$\frac{\partial \xi}{\partial S_{k-1}}
# = \frac{\partial \xi}{\partial S_{k}} \frac{\partial S_{k}}{\partial S_{k-1}}
# = \frac{\partial \xi}{\partial S_{k}} w_{rec}$$
#
# starts at:
# $$\frac{\partial \xi}{\partial y} = \frac{\partial \xi}{\partial S_{n}}$$
#
# The update rules for the weights are sum of the n and k states of:
# $$
# \frac{\partial \xi}{\partial w_x}
# = \sum_{k=0}^{n} \frac{\partial \xi}{\partial S_{k}} x_k
# \\
# \frac{\partial \xi}{\partial w_{rec}}
# = \sum_{k=1}^{n} \frac{\partial \xi}{\partial S_{k}} S_{k-1}
# $$
# +
def output_gradient(y, t):
    """
    Gradient of the MSE cost with respect to the network output y.

    NOTE(review): depends on the module-level global `nb_of_samples`.
    """
    return (y - t) * 2.0 / nb_of_samples
def backward_gradient(X, S, grad_out, wRec):
    """
    Propagate the output gradient (grad_out) backwards through time.

    Returns ((wx_grad, wRec_grad), grad_over_time): the accumulated
    parameter gradients and the per-state cost gradients.
    """
    n_steps = X.shape[1]
    # grad_over_time mirrors S: one column per state, the last column
    # seeded with the gradient at the output.
    grad_over_time = np.zeros((X.shape[0], n_steps + 1))
    grad_over_time[:, -1] = grad_out
    wx_grad = 0
    wRec_grad = 0
    # Walk the unrolled network from the last state back to the first.
    for k in reversed(range(1, n_steps + 1)):
        wx_grad += np.sum(grad_over_time[:, k] * X[:, k-1])
        wRec_grad += np.sum(grad_over_time[:, k] * S[:, k-1])
        # dxi/dS[k-1] = dxi/dS[k] * wRec
        grad_over_time[:, k-1] = grad_over_time[:, k] * wRec
    return (wx_grad, wRec_grad), grad_over_time
# -
# ### The instability of gradient in RNN
#
# Because
#
# $$\frac{\partial \xi}{\partial S_{k-1}}
# = \frac{\partial \xi}{\partial S_{k}} \frac{\partial S_{k}}{\partial S_{k-1}}
# = \frac{\partial \xi}{\partial S_{k}} w_{rec}$$
#
# For ex, The gradient of a state $S_k$ between a state mm timesteps back $S_{k−m}$ can then be written as:
#
# $$
# \frac{\partial S_{k}}{\partial S_{k-m}}
# = \frac{\partial S_{k}}{\partial S_{k-1}} * \cdots * \frac{\partial S_{k-m+1}}{\partial S_{k-1}} = w_{rec}^m
# $$
#
# So in our model the gradient grows exponentially if $|w_{rec}|>1$ (known as `exploding gradient`). And that the gradient shrinks exponentially if $|w_{rec}|<1$ (known as `vanishing gradient`). As you can see from the graph below.
# +
# Define plotting functions
# Define points to annotate (wx, wRec, color)
points = [(2,1,'r'), (1,2,'b'), (1,-2,'g'), (1,0,'c'), (1,0.5,'m'), (1,-0.5,'y')]
def get_cost_surface(w1_low, w1_high, w2_low, w2_high, nb_of_ws, cost_func):
    """Evaluate cost_func over an nb_of_ws x nb_of_ws grid of (w1, w2) weights."""
    w1_axis = np.linspace(w1_low, w1_high, num=nb_of_ws)  # Weight 1
    w2_axis = np.linspace(w2_low, w2_high, num=nb_of_ws)  # Weight 2
    ws1, ws2 = np.meshgrid(w1_axis, w2_axis)
    # Fill the cost matrix cell by cell for each weight combination.
    cost_ws = np.zeros((nb_of_ws, nb_of_ws))
    for row in range(nb_of_ws):
        for col in range(nb_of_ws):
            cost_ws[row, col] = cost_func(ws1[row, col], ws2[row, col])
    return ws1, ws2, cost_ws
def plot_surface(ax, ws1, ws2, cost_ws):
    """Plot the cost in function of the weights (log-scaled filled contours)."""
    # LogNorm: the cost spans many orders of magnitude across the surface.
    surf = ax.contourf(ws1, ws2, cost_ws, levels=np.logspace(-0.2, 8, 30), cmap=cm.pink, norm=LogNorm())
    ax.set_xlabel('$w_{in}$', fontsize=15)
    ax.set_ylabel('$w_{rec}$', fontsize=15)
    return surf
def plot_points(ax, points):
    """Plot the annotation points (wx, wRec, color) on the given axis."""
    for wx, wRec, c in points:
        ax.plot(wx, wRec, c+'o', linewidth=2)
def get_cost_surface_figure(cost_func, points):
    """Plot the cost surfaces together with the annotated points.

    Left subplot: overview over [-3, 3]^2; right subplot: zoom on [0, 2]^2.
    """
    # Plot figures
    fig = plt.figure(figsize=(10, 4))
    # Plot overview of cost function
    ax_1 = fig.add_subplot(1,2,1)
    ws1_1, ws2_1, cost_ws_1 = get_cost_surface(-3, 3, -3, 3, 100, cost_func)
    # The +1 shift keeps all values positive for the log-scaled color map.
    surf_1 = plot_surface(ax_1, ws1_1, ws2_1, cost_ws_1 + 1)
    plot_points(ax_1, points)
    ax_1.set_xlim(-3, 3)
    ax_1.set_ylim(-3, 3)
    # Plot zoom of cost function
    ax_2 = fig.add_subplot(1,2,2)
    ws1_2, ws2_2, cost_ws_2 = get_cost_surface(0, 2, 0, 2, 100, cost_func)
    surf_2 = plot_surface(ax_2, ws1_2, ws2_2, cost_ws_2 + 1)
    plot_points(ax_2, points)
    ax_2.set_xlim(0, 2)
    ax_2.set_ylim(0, 2)
    # Show the colorbar (shared by both subplots)
    fig.subplots_adjust(right=0.8)
    cax = fig.add_axes([0.85, 0.12, 0.03, 0.78])
    cbar = fig.colorbar(surf_1, ticks=np.logspace(0, 8, 9), cax=cax)
    cbar.ax.set_ylabel('$\\xi$', fontsize=15, rotation=0, labelpad=20)
    cbar.set_ticklabels(['{:.0e}'.format(i) for i in np.logspace(0, 8, 9)])
    fig.suptitle('Cost surface', fontsize=15)
    return fig
def plot_gradient_over_time(points, get_grad_over_time):
    """Plot the gradients of the annotated points and how they evolve over time.

    The symlog y-axis makes the exponential growth/decay of the gradient
    appear as straight lines.
    """
    fig = plt.figure(figsize=(6.5, 4))
    ax = plt.subplot(111)
    # Plot points
    for wx, wRec, c in points:
        grad_over_time = get_grad_over_time(wx, wRec)
        x = np.arange(-grad_over_time.shape[1]+1, 1, 1)
        plt.plot(x, np.sum(grad_over_time, axis=0), c+'-', label='({0}, {1})'.format(wx, wRec), linewidth=1, markersize=8)
    plt.xlim(0, -grad_over_time.shape[1]+1)
    # Set up plot axis
    plt.xticks(x)
    plt.yscale('symlog')
    plt.yticks([10**8, 10**6, 10**4, 10**2, 0, -10**2, -10**4, -10**6, -10**8])
    plt.xlabel('timestep k', fontsize=12)
    plt.ylabel('$\\frac{\\partial \\xi}{\\partial S_{k}}$', fontsize=20, rotation=0)
    plt.grid()
    plt.title('Unstability of gradient in backward propagation.\n(backpropagate from left to right)')
    # Set legend
    leg = plt.legend(loc='center left', bbox_to_anchor=(1, 0.5), frameon=False, numpoints=1)
    leg.set_title('$(w_x, w_{rec})$', prop={'size':15})
def get_grad_over_time(wx, wRec):
    """Helper func to only get the gradient over time from wx and wRec.

    Uses the module-level dataset X and targets t.
    """
    S = forward_states(X, wx, wRec)
    grad_out = output_gradient(S[:,-1], t).sum()
    _, grad_over_time = backward_gradient(X, S, grad_out, wRec)
    return grad_over_time
# +
# Plot cost surface and gradients
# Get and plot the cost surface figure with markers
fig = get_cost_surface_figure(lambda w1, w2: cost(forward_states(X, w1, w2)[:,-1] , t), points)
# Get the plots of the gradients changing by backpropagating.
plot_gradient_over_time(points, get_grad_over_time)
# Show figures
plt.show()
# -
# ### Resilient Backpropagation
# One way to handle the unstable gradients is by using a technique called `resilient backpropagation` (Rprop). The Rprop algorithm can be defined as:
#
# * Set initial weight update value $\Delta$ to a nonzero value.
# * For each parameter $w$:
# * if $sign(\partial \xi /\partial w(i)) \neq sign(\partial \xi /\partial w(i-1))$
# * Multiply the weight update value $\Delta$ by $\eta^-$, with $\eta^-<1$.
# $\Delta(i)=\Delta(i)∗\eta^-$
# * else if $sign(\partial \xi /\partial w(i)) = sign(\partial \xi /\partial w(i-1))$
#         * Multiply the weight update value $\Delta$ by $\eta^+$, with $\eta^+>1$.
# $\Delta(i)=\Delta(i)∗\eta^+$
#
# The hyperparameters are usually set as $\eta^+$=1.2 and $\eta^-$=0.5. Note that the weight update value $\Delta$ is similar to the momentum's velocity parameter, the difference is that the weight update value only reflects the size of the velocity for each parameter. The direction is determined by the sign of the current gradient.
# Define Rprop optimization function
def update_rprop(X, t, W, W_prev_sign, W_delta, eta_p, eta_n):
    """Perform one Rprop bookkeeping step.

    X: input data.
    t: targets.
    W: current weight parameters [wx, wRec].
    W_prev_sign: gradient signs from the previous iteration.
    W_delta: per-parameter update magnitudes (Delta); mutated in place.
    eta_p, eta_n: grow / shrink factors applied to Delta.

    Returns the (mutated) W_delta list and the current gradient signs.
    """
    # Forward and backward pass to obtain the current gradients.
    states = forward_states(X, W[0], W[1])
    out_grad = output_gradient(states[:, -1], t)
    W_grads, _ = backward_gradient(X, states, out_grad, W[1])
    W_sign = np.sign(W_grads)
    # Grow Delta while the gradient keeps its sign; shrink it when it flips.
    for idx in range(len(W)):
        W_delta[idx] *= eta_p if W_sign[idx] == W_prev_sign[idx] else eta_n
    return W_delta, W_sign
# +
# Perform Rprop optimisation

# Set hyperparameters
eta_p = 1.2  # increase factor while the gradient sign is stable
eta_n = 0.5  # decrease factor after a sign flip

# Set initial parameters
W = [-1.5, 2]  # [wx, wRec]
W_delta = [0.001, 0.001]  # Update values (Delta) for W
W_sign = [0, 0]  # Previous sign of W

ls_of_ws = [(W[0], W[1])]  # List of weights to plot
# Iterate over 500 iterations
for iteration in range(500):
    # Get the update values and sign of the last gradient
    W_delta, W_sign = update_rprop(X, t, W, W_sign, W_delta, eta_p, eta_n)
    # Update each weight parameter separately.  NOTE: a distinct index name is
    # used here — the original reused `i`, shadowing the outer loop counter.
    for j, _ in enumerate(W):
        W[j] -= W_sign[j] * W_delta[j]
    ls_of_ws.append((W[0], W[1]))  # Add weights to list to plot

print('Final weights are: wx = {0}, wRec = {1}'.format(W[0], W[1]))
# -
# Sanity check: the trained RNN should output (approximately) the number of
# ones in a binary input sequence.
test_inpt = np.asmatrix([[0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1]])
test_outpt = forward_states(test_inpt, W[0], W[1])[:,-1]  # final state = predicted count
print('Target output: {:d} vs Model output: {:.2f}'.format(test_inpt.sum(), test_outpt[0]))
# The sample code in this note comes from [peterroelants.github.io](http://peterroelants.github.io/), which provides more details on neural networks and deep learning. It's very informative and highly recommended. This note is more like my personal memo.
|
notebook/machine-learning/deep_learning-recurrent-neural-network.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="BRAp37uklN9X"
# # Class \#4 activities
# + [markdown] id="0dWbgIX_lSyQ"
# ## Calculating absolute value using if-elif-else statements
#
# Recall that absolute value means the magnitude, or distance, of a number from 0 without regard to its sign. For example:
#
# | 5 | = 5
#
# | –5 | = 5
#
# | 0 | = 0
#
# Use *if* and/or *elif* and/or *else* statements to calculate the absolute value of a variable '**x**'. Store its absolute value in a variable called '**abs_value**' and print it.
# + id="_MJ3DXpgnCQ3" executionInfo={"status": "ok", "timestamp": 1602799613147, "user_tz": 420, "elapsed": 591, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjCBYTiuomqOsCakND1k_5wj0kYvFY53Jt7kunt=s64", "userId": "11255944928409084259"}} outputId="79bb1a58-1d4c-44b1-dc79-12a592e7f52b" colab={"base_uri": "https://localhost:8080/", "height": 107}
# Absolute value: |x| is x for non-negative x, and -x otherwise.

# Version with if/else:
x = -5

if x >= 0:
    abs_value = x
else:
    abs_value = -1 * x # another way of writing this is simply -x

print(abs_value)

# Version with if/elif:
x = -5

if x >= 0:
    abs_value = x
elif x < 0:
    abs_value = -1 * x

print(abs_value)

# Bonus: version that can handle strings
x = 'hello'

# Guard against non-numeric input before comparing x with 0.
if type(x) is not int and type(x) is not float:
    print('Oops: x is not a number')
    abs_value = x
elif x >= 0:
    abs_value = x
else:
    abs_value = -1 * x

print(abs_value)
# + [markdown] id="6IBZNIPQEPj5"
# ## Converting units using for loops
#
# Recall that 0°C (Celsius) = 273.15 K (Kelvin). In other words:
#
# *temp_kelvin* = *temp_celsius* + (273.15 K)
#
# Convert the temperatures in the following lists from °C to K. Print the list of converted temperatures.
# + id="hQsIbBT2EQ2z" executionInfo={"status": "ok", "timestamp": 1602820179777, "user_tz": 420, "elapsed": 369, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjCBYTiuomqOsCakND1k_5wj0kYvFY53Jt7kunt=s64", "userId": "11255944928409084259"}} outputId="f4b6a764-846b-4599-dd09-d1d95845a7fe" colab={"base_uri": "https://localhost:8080/", "height": 402}
# Conversion used throughout: temp_kelvin = temp_celsius + 273.15

# Part 1: Use a single for loop.
temps_celsius = [-4.9,-3.8,-2.3,-1.1,0.2,2.4,5.3,6.1,12.7,13.1,15.1,19.9] # units: °C

# VERSION 1 using .append(), looping over indices
temps_kelvin = []
for index in range(len(temps_celsius)):
    # OPTION 1:
    converted_temp = temps_celsius[index] + 273.15
    temps_kelvin.append(converted_temp)
    # OPTION 2:
    # temps_kelvin.append(temps_celsius[index] + 273.15)
print(temps_kelvin)

# VERSION 2 also using .append(), but looping directly over the temperatures
temps_kelvin = []
for temp_celsius in temps_celsius:
    temps_kelvin.append(temp_celsius + 273.15)
print(temps_kelvin)

# VERSION 3 using .copy(), looping over indices
temps_kelvin = temps_celsius.copy()
for index in range(len(temps_kelvin)):
    # OPTION 1:
    converted_temp = temps_kelvin[index] + 273.15
    temps_kelvin[index] = converted_temp
    # OPTION 2:
    # temps_kelvin[index] = temps_kelvin[index] + 273.15
    # OPTION 3:
    # temps_kelvin[index] += temps_kelvin[index] + 273.15
print(temps_kelvin)

# VERSION 4 using enumerate()
temps_kelvin = temps_celsius.copy()
for index, temp in enumerate(temps_kelvin):
    temps_kelvin[index] = temp + 273.15
print(temps_kelvin)

# VERSION 5 using a list comprehension
temps_kelvin = [temp_celsius + 273.15 for temp_celsius in temps_celsius]
print(temps_kelvin)

###########################

# Part 2: Use nested for loops (two for loops, one inside the other).
temps_celsius = [[-4.9,-3.8,-2.3,-1.1],
                 [0.2,2.4,5.3,6.1],
                 [12.7,13.1,15.1,19.9]] # units: °C

# Bug fix: the original used temps_celsius.copy(), which is a *shallow* copy —
# the inner lists were shared, so writing into temps_kelvin also overwrote the
# source data in temps_celsius. Copy each row so the two structures are
# independent (printed output is unchanged).
temps_kelvin = [row.copy() for row in temps_celsius]
outer_dimension = len(temps_celsius)
inner_dimension = len(temps_celsius[0])
for outer_idx in range(outer_dimension):
    print('The outer index is',outer_idx)
    for inner_idx in range(inner_dimension):
        print('>>> The inner index is',inner_idx)
        new_temp = temps_celsius[outer_idx][inner_idx] + 273.15
        temps_kelvin[outer_idx][inner_idx] = new_temp
print(temps_kelvin)
|
materials/class/class_4_notebook.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Blackjack Game
#
# #### Our Blackjack House Rules
#
# - The deck is unlimited in size.
# - There are no jokers.
# - The Jack/Queen/King all count as 10.
# - The Ace can count as 11 or 1.
# - Use the following list as the deck of cards:
# - cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]
# - The cards in the list have equal probability of being drawn.
# - Cards are not removed from the deck as they are drawn.
# - The computer is the dealer
import random
from replit import clear
# +
def deal_card():
    """Return a random card value from the (unlimited) blackjack deck.

    Jack/Queen/King all count as 10 and the Ace starts as 11 (it may later be
    downgraded to 1 by calculate_score).
    """
    # `random` is imported at module level; the original re-imported it
    # locally on every call, which was redundant.
    cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]
    return random.choice(cards)
def calculate_score(listofcards):
    """Return the blackjack score for a hand.

    A two-card 21 (natural blackjack) is encoded as score 0.  When the hand
    busts and holds an Ace (11), the Ace is downgraded to 1 — note that this
    mutates the caller's list, which the game loop relies on.
    """
    total = sum(listofcards)
    if len(listofcards) == 2 and total == 21:
        return 0
    if total > 21 and 11 in listofcards:
        listofcards.remove(11)
        listofcards.append(1)
        total = sum(listofcards)
    return total
def compare(user_score, computer_score):
    """Return the result message for the user given both final scores.

    A score of 0 encodes a natural blackjack.  Checks are ordered: tie first,
    then blackjacks, then busts, then the plain score comparison.
    """
    if user_score == computer_score:
        return "Draw 🙃"
    elif computer_score == 0:
        return 'Lose, opponent has Blackjack 😱.'
    elif user_score == 0:
        return 'Win with a Blackjack 😎'
    elif user_score > 21:
        return 'You went over. You lose 😭'
    elif computer_score > 21:
        return 'Opponent went over. You win 😁'
    elif user_score > computer_score:
        # Bug fix: the original returned 'You win 😃' when the *computer* had
        # the higher score. With neither side bust, the higher score wins.
        return 'You win 😃'
    else:
        return 'You lose 😤'
def playthegame():
    """Play one full round of blackjack against the computer dealer.

    Interacts via input()/print() and relies on the module-level helpers
    deal_card(), calculate_score() and compare().  A score of 0 returned by
    calculate_score() encodes a natural blackjack.
    """
    logo = """
.------.            _     _            _    _            _    
|A_  _ |.          | |   | |          | |  (_)          | |   
|( \/ ).-----.     | |__ | | __ _  ___| | ___  __ _  ___| | __
| \  /|K /\  |     | '_ \| |/ _` |/ __| |/ / |/ _` |/ __| |/ /
|  \/ | /  \ |     | |_) | | (_| | (__|   <| | (_| | (__|   < 
`-----| \  / |     |_.__/|_|\__,_|\___|_|\_\ |\__,_|\___|_|\_\\
      |  \/ K|                            _/ |                
      `------'                           |__/           
"""
    print(logo)
    user_cards = []
    computer_cards = []
    game_ends = False
    # Deal two randomly chosen cards each to the user and the computer.
    user_cards.append(deal_card())
    user_cards.append(deal_card())
    computer_cards.append(deal_card())
    computer_cards.append(deal_card())

    while game_ends == False:
        # Compute and show the scores; only the dealer's first card is revealed.
        user_score = calculate_score(user_cards)
        computer_score = calculate_score(computer_cards)
        print(f"Your cards: {user_cards}, current score: {user_score}")
        print(f"Computer's first card: {computer_cards[0]}")

        # Round ends immediately on either blackjack (score 0) or a user bust;
        # otherwise the user chooses to hit ('y') or stand.
        if user_score == 0 or computer_score == 0 or user_score > 21:
            game_ends = True
        else:
            if input("Type 'y' to get another card, type 'n' to pass: ") == 'y':
                user_cards.append(deal_card())
            else:
                game_ends = True

    # Dealer keeps drawing while its total is below 17 and it does not already
    # hold a blackjack (score 0).
    while sum(computer_cards) < 17 and computer_score != 0:
        computer_cards.append(deal_card())
        computer_score = calculate_score(computer_cards)

    print(f" Your final hand: {user_cards}, final score: {user_score}")
    print(f" Computer's final hand: {computer_cards}, final score: {computer_score}")
    print(compare(user_score, computer_score))
# While the user keeps answering 'y', clear the console and start a new round.
while input("Do you want to play a game of Blackjack? Type 'y' or 'n': ") == "y":
    clear()
    playthegame()
# -

playthegame()
|
100 Days of Code The Complete Python Pro Bootcamp for 2022/Day 11 - Blackjack Game.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from gensim.models.word2vec import Word2Vec
import keras
from nltk.tokenize import word_tokenize
from nltk.stem import SnowballStemmer
import pandas as pd
data = pd.read_csv("../datasets/train.csv")
data = data.dropna()
# +
# Shared Snowball stemmer for English.
stemmer = SnowballStemmer('english')


def tokenize(text, stem=True):
    """Split *text* into word tokens, Snowball-stemming each one unless stem=False."""
    tokens = word_tokenize(text)
    if stem:
        return [stemmer.stem(token) for token in tokens]
    return tokens
# -
# Tokenize (and stem) both question columns.
data['question1_words'] = data.question1.apply(tokenize)
data['question2_words'] = data.question2.apply(tokenize)

# Train word2vec on all tokenized questions (128-dim vectors, 10 epochs) and
# save the embeddings in binary word2vec format.
sentences = list(data.question1_words.values) + list(data.question2_words.values)
model = Word2Vec(sentences, size=128, iter=10)

model.wv.save_word2vec_format('../models/word2vec.bin', binary=True)
|
core/embedding.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="2WPi0C6TGwm3" colab_type="text"
# # Basic Tutorial for TensorFlow
#
# + id="ntBLc93DGSfC" colab_type="code" colab={}
# !pip -q install aquvitae
# + id="K1bsXjbMI79G" colab_type="code" colab={}
# !wget -q --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --quiet --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1D28_TKauDVFO2nVlUeK_fuZX85zAO5o4' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1D28_TKauDVFO2nVlUeK_fuZX85zAO5o4" -O teacher.h5 && rm -rf /tmp/cookies.txt
# + id="u1ePiVBpGTyD" colab_type="code" colab={}
import tensorflow as tf
from aquvitae import dist, ST
# + id="xw_RtxlbGZjP" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="845109ef-bb3e-47b4-b3f2-17cc804c9299"
# Load CIFAR-10 and scale pixel values from [0, 255] to [0, 1].
dataset = tf.keras.datasets.cifar10

(x_train, y_train), (x_test, y_test) = dataset.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0

# + id="KGKcN11uGrm4" colab_type="code" colab={}
# Build shuffled/batched tf.data pipelines for training and evaluation.
train_ds = tf.data.Dataset.from_tensor_slices(
    (x_train, y_train)).shuffle(10000).batch(64)
test_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(64)

# + id="cJLDG5YQGt0Y" colab_type="code" colab={}
# Teacher: ResNet50 with downloaded pretrained weights; student: fresh
# MobileNetV2 that will be trained via knowledge distillation.
teacher = tf.keras.applications.ResNet50(weights=None, input_shape=(32, 32, 3), classes=10)
teacher.load_weights('teacher.h5')
student = tf.keras.applications.MobileNetV2(weights=None, input_shape=(32, 32, 3), classes=10)

# + id="1nFlA-hBSpx2" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="3bebf177-3676-4d59-c26b-087d534c7688"
# Distill the teacher into the student with soft-target KD (alpha=0.6, T=2.5).
student = dist(
    teacher=teacher,
    student=student,
    algo=ST(alpha=0.6, T=2.5),
    optimizer=tf.keras.optimizers.Adam(),
    train_ds=train_ds,
    test_ds=test_ds,
    iterations=15000
)
|
tutorials/BasicTutorialForTensorFlow.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/"} id="PYvHGli8ukum" executionInfo={"status": "ok", "timestamp": 1627923278562, "user_tz": -330, "elapsed": 430, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="13ebf5a3-cc81-48f5-a72e-da8c43407b2c"
import os
project_name = "reco-tut-spr"; branch = "main"; account = "sparsh-ai"
project_path = os.path.join('/content', project_name)
if not os.path.exists(project_path):
# !cp /content/drive/MyDrive/mykeys.py /content
import mykeys
# !rm /content/mykeys.py
path = "/content/" + project_name;
# !mkdir "{path}"
# %cd "{path}"
import sys; sys.path.append(path)
# !git config --global user.email "<EMAIL>"
# !git config --global user.name "recotut"
# !git init
# !git remote add origin https://"{mykeys.git_token}":x-oauth-basic@github.com/"{account}"/"{project_name}".git
# !git pull origin "{branch}"
# !git checkout main
else:
# %cd "{project_path}"
# + id="nz2yphhRkbhY"
# !git status
# !git add . && git commit -m 'commit' && git push origin main
# + id="4DRkSJS6doCK" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1627922633540, "user_tz": -330, "elapsed": 15555, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="cf307ca9-44dd-4abf-cc81-1e835ead723e"
# !pip install -q git+https://github.com/sparsh-ai/recochef
# + id="arnAepMAeH0m" executionInfo={"status": "ok", "timestamp": 1627923363965, "user_tz": -330, "elapsed": 477, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}}
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import gc
from recochef.datasets.santander import Santander
# %reload_ext google.colab.data_table
# + id="0ZZNOmlKePE4" executionInfo={"status": "ok", "timestamp": 1627923283488, "user_tz": -330, "elapsed": 11, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}}
data = Santander()
# + colab={"base_uri": "https://localhost:8080/"} id="zdwamklYgkg4" executionInfo={"status": "ok", "timestamp": 1627923436079, "user_tz": -330, "elapsed": 20644, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="29b87f97-8d5d-47f4-a070-a663e93d0536"
# %%time
# train = data.load_train()
train = pd.read_parquet('train.parquet.gz')
# + colab={"base_uri": "https://localhost:8080/", "height": 309} id="Zf568IexxqeI" executionInfo={"status": "ok", "timestamp": 1627923436082, "user_tz": -330, "elapsed": 29, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="ddf58b88-a2e0-466e-d9f4-257e1749f20b"
train.head()
# + [markdown] id="PJsQYXmBgpsL"
# Let's rename all the column names with english name to understand what's going on...
# + id="COrnc_76hW2N" executionInfo={"status": "ok", "timestamp": 1627923436084, "user_tz": -330, "elapsed": 26, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}}
train.columns = ['Month_status_date', 'Customer_ID', 'Employee_Index', 'Customer_country', 'Sex', 'Age', 'Join_date',
'New_customer', 'Relnshp_Mnths', 'Relnshp_flag','Last_date_Prim_Cust', 'Cust_type_beg_Mth', 'Cust_Reln_type_beg_mth',
'Residence_flag', 'Forigner_flag', 'Emp_spouse_flag', 'Channel_when_joined', 'Deceased_flag',
'Address_type', 'Customer_address', 'Address_detail', 'Activity_flag', 'Gross_household_income',
'Segment', 'Saving_account', 'Guarantees', 'Cur_account', 'Derivative_account', 'Payroll_account',
'Junior_account', 'Particular_acct1', 'Particular_acct2', 'Particular_acct3', 'Short_term_deposites',
'Med_term_deposites', 'Long_term_deposites', 'e-account', 'Funds', 'Mortgage', 'Pension', 'Loans',
'Taxes', 'Credit_card', 'Securities', 'Home_account', 'Payroll', 'Pensions', 'Direct_debit']
# + colab={"base_uri": "https://localhost:8080/"} id="OaEJ7mgIgogQ" executionInfo={"status": "ok", "timestamp": 1627919099382, "user_tz": -330, "elapsed": 736, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="6f54c747-fde4-475c-84b7-1f0951516194"
train.info()
# + colab={"base_uri": "https://localhost:8080/", "height": 640} id="WxiFBEgehcvs" executionInfo={"status": "ok", "timestamp": 1627919146282, "user_tz": -330, "elapsed": 899, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="2ce885ad-26bb-4721-b619-f28ea87577ea"
desc = train.describe()
desc.loc['Unique'] = [len(train[col].unique()) for col in desc.columns]
desc.loc["Missing"] = [train[col].isnull().sum() for col in desc.columns]
desc.loc['Datatype'] = [train[col].dtype for col in desc.columns]
desc.T
# + id="9SxG84Dmx-IG" executionInfo={"status": "ok", "timestamp": 1627923307956, "user_tz": -330, "elapsed": 1858, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}}
# test = data.load_test()
test = pd.read_parquet('test.parquet.gz')
# + colab={"base_uri": "https://localhost:8080/", "height": 292} id="5KKRZR0eh0y5" executionInfo={"status": "ok", "timestamp": 1627923163313, "user_tz": -330, "elapsed": 28, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="9524050d-9e99-4fe9-f1a4-3cb0b00db2de"
test.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="_z5B02Djktj6" executionInfo={"status": "ok", "timestamp": 1627923307958, "user_tz": -330, "elapsed": 16, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="7dde3101-90d4-4d2c-a89a-3bab475a3781"
test.columns = ['Month_status_date', 'Customer_ID', 'Employee_Index', 'Customer_country', 'Sex', 'Age', 'Join_date',
'New_customer', 'Relnshp_Mnths', 'Relnshp_flag','Last_date_Prim_Cust', 'Cust_type_beg_Mth', 'Cust_Reln_type_beg_mth',
'Residence_flag', 'Forigner_flag', 'Emp_spouse_flag', 'Channel_when_joined', 'Deceased_flag',
'Address_type', 'Customer_address', 'Address_detail', 'Activity_flag', 'Gross_household_income',
'Segment']
desc = test.describe()
desc.loc['Unique'] = [len(test[col].unique()) for col in desc.columns]
desc.loc["Missing"] = [test[col].isnull().sum() for col in desc.columns]
desc.loc['Datatype'] = [test[col].dtype for col in desc.columns]
desc.T
# + [markdown] id="W0iiBpbhk1r9"
# > Note: we have far less numeric features in test data. This is because we do not have any of the 24 products information in test data, as the objective of the project is to predict the products a customer is going to buy
# + id="JSTaWABEk2oW" executionInfo={"status": "ok", "timestamp": 1627923307959, "user_tz": -330, "elapsed": 13, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}}
eng_num_features = ['Employee_Index','Age','New_customer', 'Relnshp_Mnths', 'Relnshp_flag','Residence_flag',
'Forigner_flag', 'Emp_spouse_flag','Deceased_flag', 'Activity_flag', 'Gross_household_income']
eng_target_features = ['Saving_account', 'Guarantees', 'Cur_account', 'Derivative_account', 'Payroll_account',
'Junior_account', 'Particular_acct1', 'Particular_acct2', 'Particular_acct3', 'Short_term_deposites',
'Med_term_deposites', 'Long_term_deposites', 'e-account', 'Funds', 'Mortgage', 'Pension', 'Loans',
'Taxes', 'Credit_card', 'Securities', 'Home_account', 'Payroll', 'Pensions', 'Direct_debit']
span_eng_feat_dict = {'fecha_dato': 'Month_status_date', 'ncodpers': 'Customer_ID', 'ind_empleado': 'Employee_Index',
'pais_residencia':'Customer_country', 'sexo': 'Sex', 'age': 'Age', 'fecha_alta': 'Join_date',
'ind_nuevo': 'New_customer', 'antiguedad':'Relnshp_Mnths', 'indrel': 'Relnshp_flag',
'ult_fec_cli_1t': 'Last_date_Prim_Cust', 'indrel_1mes': 'Cust_type_beg_Mth', 'tiprel_1mes':'Cust_Reln_type_beg_mth',
'indresi': 'Residence_flag', 'indext': 'Forigner_flag', 'conyuemp': 'Emp_spouse_flag', 'canal_entrada':'Channel_when_joined',
'indfall': 'Deceased_flag','tipodom':'Address_type', 'cod_prov':'Customer_address','nomprov': 'Address_detail',
'ind_actividad_cliente': 'Activity_flag', 'renta': 'Gross_household_income', 'segmento' :'Segment' }
# + colab={"base_uri": "https://localhost:8080/"} id="V-I4T84dlHvR" executionInfo={"status": "ok", "timestamp": 1627919849517, "user_tz": -330, "elapsed": 1794, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="e81d9f9d-4ce1-4f92-8559-cd8c342a4f9c"
print("Unique customers in train:", len(train['Customer_ID'].unique()))
print("Unique customers in test:", len(test['Customer_ID'].unique()))
print("Common customers in train and test:", len(set(train['Customer_ID'].unique()).intersection(set(test['Customer_ID'].unique()))))
# + [markdown] id="hDS40PvvlUVA"
# > Tip: Happy to see that every customer in test is also there in train data
# + [markdown] id="BI2D3sVfyQv2"
# Let's first take a random sample, because it would be hard to do eda with the full data
# + id="7kUfUrXNyZPD" executionInfo={"status": "ok", "timestamp": 1627923439810, "user_tz": -330, "elapsed": 3749, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}}
train = train.sample(frac=0.1)
# + colab={"base_uri": "https://localhost:8080/"} id="hUGRhWqkyvbh" executionInfo={"status": "ok", "timestamp": 1627923439815, "user_tz": -330, "elapsed": 25, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="7329e083-1e80-45ef-fef3-00e9cdd2f015"
gc.collect()
# + [markdown] id="RE_WG3KjlVt7"
# > Note: Let's first explore all numeric features
# + colab={"base_uri": "https://localhost:8080/"} id="rfCSHXgxlkQZ" executionInfo={"status": "ok", "timestamp": 1627923439816, "user_tz": -330, "elapsed": 22, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="da547704-afad-4556-917c-d1c1a6a7aed4"
train["Age"] = train["Age"].replace(to_replace = ' NA', value = np.nan)
train["Age"] = train["Age"].astype("float")
train["Age"].isnull().sum()
# + colab={"base_uri": "https://localhost:8080/", "height": 223} id="lG3rYgwzluLh" executionInfo={"status": "ok", "timestamp": 1627923441097, "user_tz": -330, "elapsed": 1298, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="57464d5e-9aff-4d46-c160-059abb97698f"
age_series = train.Age.value_counts()
plt.figure(figsize=(20,4), dpi=80)
sns.barplot(age_series.index.astype('int'), age_series.values)
plt.ylabel('Number of customers', fontsize=12)
plt.xlabel('Age', fontsize=10)
plt.show()
# + [markdown] id="cWrsgUqBmFGI"
# **Observations**
#
# - We have a bimodal distribution for the age. Let's see if we can find any reason for this.
# - Also we have customer ages from 0 to 164.
# - Looks like there might be some products for small children under 18, some product for young generation.
# - It is not possible to have customers aged 164. Let's cap the age at 100.
# + colab={"base_uri": "https://localhost:8080/", "height": 256} id="4-6xB7MCnbkz" executionInfo={"status": "ok", "timestamp": 1627923442062, "user_tz": -330, "elapsed": 974, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="64eac178-b010-4514-9fa0-24383f5b416e"
#Age vs Segment
train.groupby(["Segment", "Age"])["Customer_ID"].nunique("Customer_ID").unstack()
# + [markdown] id="x0fuFDAInf8z"
# Looks like only **PARTICULARS** segment has age group <16. This segment might be served some specific products...
#
# Lets look at segments for young generations only
# + colab={"base_uri": "https://localhost:8080/", "height": 152} id="oobJpcd3oDdL" executionInfo={"status": "ok", "timestamp": 1627923461447, "user_tz": -330, "elapsed": 686, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="ec2ee4a9-8a00-4a0c-db6b-f12c89206ed0"
young = train[(train["Age"] > 18) & (train["Age"] < 30)]
young.groupby(["Segment", "Age"])["Customer_ID"].nunique("Customer_ID").unstack()
# + [markdown] id="qxJPeG4hpg7J"
# Let's have a look at the box plot...
# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="bys9ZsB8pj-w" executionInfo={"status": "ok", "timestamp": 1627923464680, "user_tz": -330, "elapsed": 1614, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="3c9c9fb8-20f7-4fee-e60c-7460ffc171df"
sns.boxplot(train["Age"].values, train["Segment"])
plt.xlabel("Age")
plt.title("Age Box plot")
plt.show()
# + [markdown] id="MvM27lkopdnP"
# The customers in the university segment seem to have a median age of 24 years, while the other two segments have median ages of 46 and 52.
# + [markdown] id="kyt7q4PzoNYX"
# Looks like these are university students, as most of the young customers belong to university segment... It makes sense why we have a bimodal distribution. 3 things to notice here:-
# 1. We have some population under 18 with bank accounts; these may be students or junior account holders whose parents have created an account for them.
# 2. We seem to have a group of people between 18 and 30 who could be students or early job starters. This segment has very high number of people than working people.
# 3. There are some people with age 164. It's better to cap the age at 100.
#
#
# + id="ohriMi7NobBy" executionInfo={"status": "ok", "timestamp": 1627923497497, "user_tz": -330, "elapsed": 508, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}}
# Cap unrealistic ages (>100) using the median of the 30-100 working-age band,
# then fill any remaining missing ages with the column mean and cast to int.
train.loc[train.Age > 100,"Age"] = train.loc[(train.Age >= 30) & (train.Age <= 100),"Age"].median(skipna=True)
train["Age"].fillna(train["Age"].mean(),inplace=True)
train["Age"] = train["Age"].astype(int)
# + [markdown] id="ZeCuK3O_qiT2"
# Let's have a look at the distribution of age vs all the products
# + id="SadMWhHtqliR" colab={"base_uri": "https://localhost:8080/", "height": 720} executionInfo={"status": "ok", "timestamp": 1627923542928, "user_tz": -330, "elapsed": 8969, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="5d7fd15a-1ef7-40ce-c649-da820c661270"
fig, ax = plt.subplots(4,6, figsize=(30,15))
fig.suptitle('Distribution of Age vs All products')
for i, col in enumerate(eng_target_features):
sns.boxplot(train[col], train["Age"].values, ax=ax[i//6][i%6])
plt.xlabel(col)
plt.ylabel("Age")
plt.title("Age Box plot")
plt.show()
# + [markdown] id="lww3Ii6_qwr4"
# This boxplot confirms our belief about the median ages of customers holding the various types of accounts. We can see that customers with a junior account are very young.
# + [markdown] id="1z1No7m2qxvC"
# Let's explore new_customer column
# + colab={"base_uri": "https://localhost:8080/"} id="1gM39MlivfSL" executionInfo={"status": "ok", "timestamp": 1627923580953, "user_tz": -330, "elapsed": 824, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="c4d1fb9a-368e-4e91-8178-c8257d91e129"
train["New_customer"].value_counts(dropna=False)
# + id="1eUJojcxw02P" colab={"base_uri": "https://localhost:8080/", "height": 241} executionInfo={"status": "ok", "timestamp": 1627923600452, "user_tz": -330, "elapsed": 1281, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="ccb08de1-d3ec-400e-9c5e-ca9e5dc03e7e"
missing_new_cust = train[train["New_customer"].isnull()]
missing_new_cust.sort_values(by="Customer_ID").head()
# + [markdown] id="dqZtQ1Z-w5RI"
# New_customer, Relationship_months and Join_date are all corelated variables.Customer would have joined before the observation period, but we do not have any information on that.Hence, I think, it is best to impute the join date first by finding the first month_status_date.
# + id="m77Wn8RFw7_K" colab={"base_uri": "https://localhost:8080/", "height": 326} executionInfo={"status": "ok", "timestamp": 1627923698714, "user_tz": -330, "elapsed": 9635, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="f08aa89e-9cd6-4144-971a-4d70e8c35e36"
# Impute missing Join_date with the customer's first observed status month.
First_month = train.groupby(["Customer_ID"])["Month_status_date"].first()
train = train.merge(First_month, on="Customer_ID", how = "outer")
train.loc[train["Join_date"].isnull(), "Join_date"] = train["Month_status_date_y"]
# Bug fix: DataFrame.drop is not in-place, so the original call discarded its
# result and the helper column was never removed. Drop it for real, then show
# the first rows.
train = train.drop("Month_status_date_y", axis=1)
train.head(5)
# + [markdown] id="8Z-GPwAnz4Su"
# Let's calculate relationship months
# + colab={"base_uri": "https://localhost:8080/"} id="PMJwi9o-0JdA" executionInfo={"status": "ok", "timestamp": 1627923934975, "user_tz": -330, "elapsed": 1511, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="9cdfd162-7e59-4679-a2a6-ac693724912e"
from datetime import datetime
train["Join_date"] = pd.to_datetime(train["Join_date"])
train["Month_status_date_x"] = pd.to_datetime(train["Month_status_date_x"])
# Strip stray whitespace so the 'NA' sentinel string compares correctly.
train["Relnshp_Mnths"] = train["Relnshp_Mnths"].str.strip()
# For 'NA' rows, derive the relationship length from the date difference.
# 2678400000000000 ns == 31 days, i.e. one "month" expressed in nanoseconds
# (pandas timedelta64 values are stored in ns).
# NOTE(review): dividing a timedelta Series by a plain int yields a scaled
# timedelta, not a number — confirm the resulting mixed-type column is what
# downstream code expects.
train.loc[train["Relnshp_Mnths"]=='NA',"Relnshp_Mnths"] = (train.loc[train["Relnshp_Mnths"]=='NA']["Month_status_date_x"] - train.loc[train["Relnshp_Mnths"]=='NA']["Join_date"])/2678400000000000
train["Relnshp_Mnths"].value_counts().head()
# + id="cPjrydRp1f0R" executionInfo={"status": "ok", "timestamp": 1627924075114, "user_tz": -330, "elapsed": 2476, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}}
# Customers still missing the flag were first seen during the observation
# window, so mark them as new (1).
# Fix: the original chained assignment
#     train["New_customer"] = train.loc[mask, "New_customer"] = 1
# assigned 1 to *every* row of the column (both targets of the chain get 1),
# wiping out the existing 0/1 values instead of filling only the nulls.
train.loc[train["New_customer"].isnull(), "New_customer"] = 1
# + colab={"base_uri": "https://localhost:8080/", "height": 191} id="L1mjNg161gG-" executionInfo={"status": "ok", "timestamp": 1627924102533, "user_tz": -330, "elapsed": 2496, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="ea2cb58e-672c-49e9-9370-1e241556408c"
pd.crosstab(train["Relnshp_flag"], train["Relnshp_Mnths"])
# + [markdown] id="M9wr5c6r1m1D"
# The newer the customer, the higher the likelihood that Relnshp_flag is 99. However, fewer than 0.1% of new customers have a Relnshp_flag of 99, so it is better to impute the missing values with the most frequent value.
# + id="i3sI7aCl1uKa" executionInfo={"status": "ok", "timestamp": 1627924132232, "user_tz": -330, "elapsed": 995, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}}
# Impute missing relationship flags with the most frequent value (1).
train.loc[train["Relnshp_flag"].isnull(), "Relnshp_flag"] = 1
# + [markdown] id="2L-rHCZR1ucB"
# Great.. 4 more variables have been imputed with values!!!
# + colab={"base_uri": "https://localhost:8080/"} id="BJSIG1ny1yDH" executionInfo={"status": "ok", "timestamp": 1627924169101, "user_tz": -330, "elapsed": 674, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="e31845f9-820d-41ef-8c99-46341ec0591a"
train["Employee_Index"].value_counts()
# + [markdown] id="FkcQMATe13r7"
# Employee index: A active, B ex employed, F filial, N not employee, P passive
# + colab={"base_uri": "https://localhost:8080/"} id="ieupswJa2Bs8" executionInfo={"status": "ok", "timestamp": 1627924211361, "user_tz": -330, "elapsed": 430, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="f267b217-5889-4e17-f6be-3c3a77ae57b2"
train["Employee_Index"].isnull().sum()
# + [markdown] id="mLdBcndJ2CDz"
# We do not have any more information about the employee status, so it will be safe to impute the employee index to be the most frequent value
# + colab={"base_uri": "https://localhost:8080/"} id="pRGiYG0n2FY3" executionInfo={"status": "ok", "timestamp": 1627924233486, "user_tz": -330, "elapsed": 774, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="4b9a3c77-5c22-40f6-81bf-0302f441c4a3"
# Most customers are not employees, so impute missing values with the mode 'N'.
train.loc[train["Employee_Index"].isnull(), "Employee_Index"] = 'N'
train["Employee_Index"].isnull().sum()
# + [markdown] id="54D2pwmt2HdF"
# Customer's Country residence
# + colab={"base_uri": "https://localhost:8080/"} id="Tlpbry6W2Ol6" executionInfo={"status": "ok", "timestamp": 1627924263570, "user_tz": -330, "elapsed": 775, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="e1014ddc-4ef8-4e63-e999-377d06dea5ce"
train["Customer_country"].value_counts().head()
# + colab={"base_uri": "https://localhost:8080/"} id="ov7o7B0c2Oxq" executionInfo={"status": "ok", "timestamp": 1627924269128, "user_tz": -330, "elapsed": 520, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="35aacfb5-5653-435d-cd41-252583b12f3f"
train["Customer_country"].isnull().sum()
# + [markdown] id="1ZteRxRg2QI7"
# Lets check if we have customer's address information in the data.
#
#
# + colab={"base_uri": "https://localhost:8080/", "height": 299} id="Bhg4h6g82R3P" executionInfo={"status": "ok", "timestamp": 1627924286626, "user_tz": -330, "elapsed": 523, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="4b4d657c-20a9-4429-d38c-61fea60f36ec"
train.loc[train["Customer_country"].isnull(), ["Address_detail", "Customer_address"]].head(10)
# + [markdown] id="JT8JL-di2UdJ"
# Nope.. we do not have customer's data. So again, we can impute the customer country as the most frequent country which is Spain
# + colab={"base_uri": "https://localhost:8080/"} id="QCUwcpk52VGc" executionInfo={"status": "ok", "timestamp": 1627924301286, "user_tz": -330, "elapsed": 462, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="0af435d1-c52b-46f3-911f-c4ef1d533910"
# Impute missing countries with the mode: 'ES' (Spain).
train.loc[train["Customer_country"].isnull(), "Customer_country"] = 'ES'
train["Customer_country"].isnull().sum()
# + [markdown] id="e943fmqf2YCF"
# Residence flag and forigner flag
# + colab={"base_uri": "https://localhost:8080/", "height": 131} id="tdOgz8Ql2gZF" executionInfo={"status": "ok", "timestamp": 1627924339563, "user_tz": -330, "elapsed": 717, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="cf6e18c7-6a55-4993-c581-687110cc8b3a"
pd.crosstab(train["Residence_flag"], train["Forigner_flag"])
# + colab={"base_uri": "https://localhost:8080/", "height": 110} id="uDgJhkcz2hSY" executionInfo={"status": "ok", "timestamp": 1627924361544, "user_tz": -330, "elapsed": 533, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="29f809d4-c03b-4bcb-c3f9-03b6d1751f10"
pd.crosstab(train.loc[train["Residence_flag"]=='S',"Customer_country"], train["Forigner_flag"])
# + [markdown] id="s3pBbPbW2muF"
# As we have imputed the missing Customer_country values as Spain, it is safe to impute the residence flag as 'S' and the foreigner flag as 'N', since only about 4% of the Spanish customers have the foreigner flag set.
# + id="7Oq673482x9Z"
# 'S' (Sí) matches the imputed Spanish residence; foreigner flag defaults to 'N'.
train.loc[train["Residence_flag"].isnull(), "Residence_flag"] = "S"
train.loc[train["Forigner_flag"].isnull(), "Forigner_flag"] = "N"
# + [markdown] id="2O8Jqa9Y3DRU"
# ## To be continued...
# + [markdown] id="QgjF_dQ13CT0"
# https://nbviewer.jupyter.org/github/Sahoopa/My-Projects/blob/master/Santander_Data_Exploration_EDA_Submission.ipynb
# + [markdown] id="AjH7hpoy3C0A"
# https://nbviewer.jupyter.org/github/Sahoopa/My-Projects/blob/master/Santander%20Data%20Prep.ipynb
# + [markdown] id="E_i_8Psi3erM"
# https://nbviewer.jupyter.org/github/Sahoopa/My-Projects/blob/master/Santander%20Product%20Recommendation/Santander%20Data%20Prep%20-%20Part2.ipynb
# + [markdown] id="IolvOIDu3MOS"
# https://nbviewer.jupyter.org/github/Sahoopa/My-Projects/blob/master/Santander%20Product%20Recommendation/Santander_Data_Exploration_EDA%20-%20Part1.ipynb
|
_docs/nbs/reco-tut-spr-02-eda.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Updating Constraint Matrices
#
# To implement Graph SLAM, a matrix and a vector (omega and xi, respectively) are introduced. The matrix is square and labelled with all the robot poses (xi) and all the landmarks (Li). Every time you make an observation, for example, as you move between two poses by some distance `dx` and can relate those two positions, you can represent this as a numerical relationship in these matrices.
#
# Below you can see a matrix representation of omega and a vector representation of xi.
#
# <img src='images/omega_xi.png' width=20% height=20% />
#
#
# ### Solving for x, L
#
# To "solve" for all these poses and landmark positions, we can use linear algebra; all the positional values are in the vector `mu` which can be calculated as a product of the inverse of omega times xi.
#
# ---
#
# ## Constraint Updates
#
# In the below code, we construct `omega` and `xi` constraint matrices, and update these according to landmark sensor measurements and motion.
#
# #### Sensor Measurements
#
# When you sense a distance, `dl`, between a pose and a landmark, l, update the constraint matrices as follows:
# * Add `[[1, -1], [-1, 1]]` to omega at the indices for the intersection of `xt` and `l`
# * Add `-dl` and `dl` to xi at the rows for `xt` and `l`
#
# Larger values (e.g. 2 instead of 1) would indicate a greater "strength" (confidence) of the measurement; here every constraint uses strength 1.
#
# You'll see three new `dl`'s as new inputs to our function `Z0, Z1, Z2`, below.
#
# #### Motion
# When your robot moves by some amount `dx` update the constraint matrices as follows:
# * Add `[[1, -1], [-1, 1]]` to omega at the indices for the intersection of `xt` and `xt+1`
# * Add `-dx` and `dx` to xi at the rows for `xt` and `xt+1`
#
# ## QUIZ: Include three new sensor measurements for a single landmark, L.
# +
import numpy as np
def mu_from_positions(initial_pos, move1, move2, Z0, Z1, Z2):
    """Build the Graph SLAM constraint matrices and solve for all positions.

    Variables are ordered [x0, x1, x2, L] (three robot poses plus one
    landmark), so omega is 4x4 and xi is 4x1.

    Parameters
    ----------
    initial_pos : initial robot pose x0.
    move1, move2 : motions x0 -> x1 and x1 -> x2.
    Z0, Z1, Z2 : measured distances from x0, x1, x2 to the landmark L.

    Returns
    -------
    mu : 4x1 matrix [x0, x1, x2, L], computed as inverse(omega) * xi.
    """
    # Initialize constraint matrices with zeros.
    omega = np.zeros((4, 4))
    xi = np.zeros((4, 1))

    # Initial pose constraint: x0 = initial_pos.
    omega[0][0] = 1
    xi[0] = initial_pos

    # First motion constraint: x1 - x0 = move1.
    omega += [[1., -1., 0., 0.],
              [-1., 1., 0., 0.],
              [0., 0., 0., 0.],
              [0., 0., 0., 0.]]
    xi += [[-move1],
           [move1],
           [0.],
           [0.]]

    # Second motion constraint: x2 - x1 = move2.
    # Fix: the original never used `move2`, so the x1/x2 motion link was
    # missing from the graph (and the Z0 block below was mislabeled as the
    # "second motion").
    omega += [[0., 0., 0., 0.],
              [0., 1., -1., 0.],
              [0., -1., 1., 0.],
              [0., 0., 0., 0.]]
    xi += [[0.],
           [-move2],
           [move2],
           [0.]]

    # Sensor measurement from x0 to the landmark: L - x0 = Z0.
    omega += [[1., 0., 0., -1.],
              [0., 0., 0., 0.],
              [0., 0., 0., 0.],
              [-1., 0., 0., 1.]]
    xi += [[-Z0],
           [0.],
           [0.],
           [Z0]]

    # Sensor measurement from x1 to the landmark: L - x1 = Z1.
    omega += [[0., 0., 0., 0.],
              [0., 1., 0., -1.],
              [0., 0., 0., 0.],
              [0., -1., 0., 1.]]
    xi += [[0.],
           [-Z1],
           [0.],
           [Z1]]

    # Sensor measurement from x2 to the landmark: L - x2 = Z2.
    omega += [[0., 0., 0., 0.],
              [0., 0., 0., 0.],
              [0., 0., 1., -1.],
              [0., 0., -1., 1.]]
    xi += [[0.],
           [0.],
           [-Z2],
           [Z2]]

    # Display final omega and xi.
    print('Omega: \n', omega)
    print('\n')
    print('Xi: \n', xi)
    print('\n')

    # Solve the linear system: mu = inverse(omega) * xi.
    # np.matrix is kept for parity with the exercise instructions; the same
    # result could be obtained with np.linalg.solve(omega, xi).
    omega_inv = np.linalg.inv(np.matrix(omega))
    mu = omega_inv * xi
    return mu
# -
# Call the solver and print the resulting mu = [x0, x1, x2, L].
mu = mu_from_positions(-3, 5, 3, 10, 5, 2)
print('Mu: \n', mu)
|
4_7_SLAM/2_1. Include Landmarks, exercise.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Module 1: Introduction to Exploratory Analysis
# <a href="https://drive.google.com/file/d/1r4SBY6Dm6xjFqLH12tFb-Bf7wbvoIN_C/view" target="_blank">
# <img src="http://www.deltanalytics.org/uploads/2/6/1/4/26140521/screen-shot-2019-01-05-at-4-48-15-pm_orig.png" width="500" height="400">
# </a>
#
# [(Page 17)](https://drive.google.com/file/d/1r4SBY6Dm6xjFqLH12tFb-Bf7wbvoIN_C/view)
# What we'll be doing in this notebook:
# -----
#
# 1. Checking variable type
# 2. Checking for missing variables
# 3. Number of observations in the dataset
# 4. Descriptive statistics
# ### Import packages
# +
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from datetime import datetime
import dateutil.parser
# The command below means that the output of multiple commands in a cell will be output at once
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
# The command below tells jupyter to display up to 80 columns, this keeps everything visible
pd.set_option('display.max_columns', 80)
pd.set_option('expand_frame_repr', True)
# Show figures in notebook
# %matplotlib inline
# -
# ### Import dataset
# We read in our dataset
# +
# Location of the loans dataset; fetched from GitHub if not present locally.
path = '../data/'
filename = 'loans.csv'

try:
    df = pd.read_csv(path+filename)
except FileNotFoundError:
    # Data not found locally: download it from GitHub.
    # Fix: use subprocess.run with an argument list (shell=False) instead of
    # os.system with an interpolated f-string — no shell-injection surface,
    # and check=True raises if the clone fails instead of failing silently.
    import subprocess
    subprocess.run(
        ['git', 'clone', '--single-branch', '--depth=1',
         'https://github.com/DeltaAnalytics/machine_learning_for_good_data',
         path],
        check=True,
    )
    df = pd.read_csv(path+filename)
# -
# In the cell below, we take a random sample of 2 rows to get a feel for the data.
df.sample(n=2)
# ### 1) Type Checking
# <a id='type_check'></a>
# Type is very important in Python programing, because it affects the types of functions you can apply to a series. There are a few different types of data you will see regularly (see [this](https://en.wikibooks.org/wiki/Python_Programming/Data_Types) link for more detail):
# * **int** - a number with no decimal places. example: loan_amount field
# * **float** - a number with decimal places. example: partner_id field
# * **str** - str is short for string. This type formally defined as a sequence of unicode characters. More simply, string means that the data is treated as a word, not a number. example: sector
# * **boolean** - can only be True or False. There is not currently an example in the data, but we will be creating a gender field shortly.
# * **datetime** - values meant to hold time data. Example: posted_date
#
# Let's check the type of our variables using the examples we saw in the cell above.
# Here are all of the columns
df.columns.tolist()
# Find the dtype, aka datatype, for a column
df['id_number'].dtype
# Try this - Pick a couple of columns and check their type on your own
# ### 2) Do I have missing values?
#
# <a id='missing_check'></a>
# If we have missing data, is the missing data at random or not? If data is missing at random, the data distribution is still representative of the population. You can probably ignore the missing values as an inconvenience. However, if the data is systematically missing, the analysis you do may be biased. You should carefully consider the best way to clean the data, it may involve dropping some data.
# We want to see how many values are missing in certain variable columns. One way to do this is to count the number of null observations.
#
# For this, we wrote a short function to apply to the dataframe.
#
# We print out the first few observations, but you can remove the .head() to print out all columns.
# +
#Create a new function:
def num_missing(x):
    """Return the number of null entries in the Series/column *x*."""
    return x.isnull().sum()
#Applying per column:
print("Missing values per column:")
## Check how many are missing by column, and then check which ones have any missing values
print(df.apply(num_missing, axis=0).where(lambda x : x != 0).dropna().head(20))
#axis=0 defines that function is to be applied on each column
# -
# ### 3) Sanity Checks
# <a id='obs_check'></a>
# **Does the dataset match what you expected to find?**
# - is the range of values what you would expect. For example, are all loan_amounts above 0.
# - do you have the number of rows you would expect
# - is your data for the date range you would expect. For example, is there a strange year in the data like 1880.
# - are there unexpected spikes when you plot the data over time
#
#
# In the command below we find out the number of loans and number of columns by using the function shape. You can also use len(df.index) to find the number of rows.
print(f'There are {df.shape[0]} observations and {df.shape[1]} features')
# Remember, each row is an observation and each column is a potential feature.
#
# Remember we need large about of data for machine learning.
# ### 4) Descriptive statistics of the dataset
#
# <a id='desc_stats'></a>
# The "describe" command below provides key summary statistics for each numeric column.
df.describe()
# In order to get the same summary statistics for categorical columns (string) we need to do a little data wrangling. The first line of code filters for all columns that are a data type object. As we know from before this means they are considered to be a string. The final row of code provides summary statistics for these character fields.
# Select the object-dtype (string) columns, then summarize them:
# count / unique / top / freq.
categorical = df.dtypes[df.dtypes == "object"].index
df[categorical].describe()
# In the table above, there are 4 really useful fields:
#
# 1) **count** - total number of fields populated (Not empty).
#
# 2) **unique** - tells us how many different unique ways this field is populated. For example 4 in description.languages tells us there are 4 different language descriptions.
#
# 3) **top** - tells us the most popular data point. For example, the top activity in this dataset is Farming which tells us most loans are in Farming.
#
# 4) **freq** - tells us that how frequent the most popular category is in our dataset. For example, 'en' (English) is the language almost all descriptions (description.languages) are written in (118,306 out of 118,316).
# What is next
# -----
#
# In the next section, we move on to exploratory data analysis (EDA).
# <br>
# <br>
# <br>
#
# ----
|
module_1_introduction/1_3_loading_and_understanding_data.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Decorators
#
#
# Decorators can be thought of as functions which modify the *functionality* of another function. They help to make your code shorter and more "Pythonic".
#
# To properly explain decorators we will slowly build up from functions. Make sure to run every cell in this Notebook for this lecture to look the same on your own computer.<br><br>So let's break down the steps:
#
# ## Functions Review
def func():
    """Toy function for the review: always returns 1."""
    return 1
func()
# ## Scope Review
# Remember from the nested statements lecture that Python uses Scope to know what a label is referring to. For example:
# +
# Module-level name, visible in globals() but not in any function's locals().
s = 'Global Variable'
def check_for_locals():
    """Print this function's local namespace (empty: nothing is bound here)."""
    print(locals())
# -
# Remember that Python functions create a new scope, meaning the function has its own namespace to find variable names when they are mentioned within the function. We can check for local variables and global variables with the <code>locals()</code> and <code>globals()</code> functions. For example:
print(globals())
# Here we get back a dictionary of all the global variables, many of them are predefined in Python. So let's go ahead and look at the keys:
print(globals().keys())
# Note how **s** is there, the Global Variable we defined as a string:
globals()['s']
# Now let's run our function to check for local variables that might exist inside our function (there shouldn't be any)
check_for_locals()
# Great! Now lets continue with building out the logic of what a decorator is. Remember that in Python **everything is an object**. That means functions are objects which can be assigned labels and passed into other functions. Lets start with some simple examples:
def hello(name='Jose'):
    """Return a greeting string for *name* (defaults to 'Jose')."""
    greeting = 'Hello ' + name
    return greeting
hello()
# Assign another label to the function. Note that we are not using parentheses here because we are not calling the function **hello**, instead we are just passing a function object to the **greet** variable.
# Bind a second name to the same function object (no call: no parentheses).
greet = hello
greet
greet()
# So what happens when we delete the name **hello**?
del hello
hello()  # NameError: the name 'hello' no longer exists
greet()  # still works: 'greet' references the original function object
# Even though we deleted the name **hello**, the name **greet** *still points to* our original function object. It is important to know that functions are objects that can be passed to other objects!
# ## Functions within functions
# Great! So we've seen how we can treat functions as objects, now let's see how we can define functions inside of other functions:
def hello(name='Jose'):
    """Demonstrate nested functions: define greet/welcome locally, print both."""
    print('The hello() function has been executed')

    def welcome():
        return "\t This is inside the welcome() function"

    def greet():
        return '\t This is inside the greet() function'

    # greet's message first, then welcome's, matching the narration below.
    for message in (greet(), welcome()):
        print(message)
    print("Now we are back inside the hello() function")
hello()
welcome()  # NameError: welcome exists only inside hello()'s local scope
# Note how due to scope, the welcome() function is not defined outside of the hello() function. Now lets learn about returning functions from within functions:
# ## Returning Functions
def hello(name='Jose'):
    """Return one of the inner function objects (not its result).

    Returns greet when *name* == 'Jose', welcome otherwise; the caller
    decides when to invoke the returned function.
    """
    def greet():
        return '\t This is inside the greet() function'

    def welcome():
        return "\t This is inside the welcome() function"

    return greet if name == 'Jose' else welcome
# Now let's see what function is returned if we set x = hello(), note how the empty parentheses means that name has been defined as Jose.
# name defaults to 'Jose', so hello() hands back the greet function object.
x = hello()
x
# Great! Now we can see how x is pointing to the greet function inside of the hello function.
print(x())  # Calling x() executes greet and prints its string.
# Let's take a quick look at the code again.
#
# In the <code>if</code>/<code>else</code> clause we are returning <code>greet</code> and <code>welcome</code>, not <code>greet()</code> and <code>welcome()</code>.
#
# This is because when you put a pair of parentheses after it, the function gets executed; whereas if you don’t put parentheses after it, then it can be passed around and can be assigned to other variables without executing it.
#
# When we write <code>x = hello()</code>, hello() gets executed and because the name is Jose by default, the function <code>greet</code> is returned. If we change the statement to <code>x = hello(name = "Sam")</code> then the <code>welcome</code> function will be returned. We can also do <code>print(hello()())</code> which outputs *This is inside the greet() function*.
# ## Functions as Arguments
# Now let's see how we can pass functions as arguments into other functions:
# +
def hello():
    """Return a fixed greeting; used below as an argument to other()."""
    return 'Hi Jose!'
def other(func):
print('Other code would go here')
print(func())
# -
other(hello)
# Great! Note how we can pass the functions as objects and then use them within other functions. Now we can get started with writing our first decorator:
# ## Creating a Decorator
# In the previous example we actually manually created a Decorator. Here we will modify it to make its use case clear:
# +
def new_decorator(func):
    """Decorator that prints before and after running *func*.

    Generalized from the original: the wrapper now forwards arbitrary
    positional/keyword arguments and propagates *func*'s return value (the
    original wrapper accepted no arguments and always returned None), and
    functools.wraps preserves the wrapped function's name and docstring.
    Backward compatible: zero-argument, None-returning functions behave
    exactly as before.
    """
    from functools import wraps

    @wraps(func)
    def wrap_func(*args, **kwargs):
        print("Code would be here, before executing the func")
        result = func(*args, **kwargs)
        print("Code here will execute after the func()")
        return result
    return wrap_func
def func_needs_decorator():
    """Plain function to be wrapped manually by new_decorator below."""
    print("This function is in need of a Decorator")
# -
func_needs_decorator()
# Reassign func_needs_decorator: manual decoration — wrap, then rebind the name.
func_needs_decorator = new_decorator(func_needs_decorator)
func_needs_decorator()
# So what just happened here? A decorator simply wrapped the function and modified its behavior. Now let's understand how we can rewrite this code using the @ symbol, which is what Python uses for Decorators:
# The @ syntax is equivalent to the manual wrap-and-rebind shown earlier.
@new_decorator
def func_needs_decorator():
    """Same function, decorated via the @ syntax."""
    print("This function is in need of a Decorator")
func_needs_decorator()
# **Great! You've now built a Decorator manually and then saw how we can use the @ symbol in Python to automate this and clean our code. You'll run into Decorators a lot if you begin using Python for Web Development, such as Flask or Django!**
|
Python/01-Decorators.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="p1WoKPoU6oQ5"
# # Normalizing and encoding the data
# + id="dlHoej2OL05O"
# Jupyter magic methods
# For auto-reloading when external modules are changed
# %load_ext autoreload
# %autoreload 2
# For showing plots inline
# %matplotlib inline
import numpy as np
import pandas as pd
pd.set_option('float_format', '{:f}'.format)
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_theme()
# + id="MtVd2k9EQhUt"
# We will use a set random seed when calling df.sample for reproducibility
PD_RANDOM_STATE = 1
# + id="xP-fLYu-L2q2" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1640530990925, "user_tz": 300, "elapsed": 2212, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjiiYFf8nm-IpkI8yUgW_CqJJ76xMZhUUU94FCJabg=s64", "userId": "15364959356591490410"}} outputId="7be1f84f-92a0-499b-d493-53bfb67e1d83"
# Give the notebook access to the rest of your google drive files.
from google.colab import drive
drive.mount("/content/drive", force_remount=True)
# Enter the relevant foldername
FOLDERNAME = '/content/drive/My Drive/ML/arxiv_vixra/'
assert FOLDERNAME is not None, "[!] Enter the foldername."
# For importing .py modules stored in FOLDERNAME or a subdirectory:
import sys
sys.path.append(FOLDERNAME)
# + [markdown] id="0A1TndeGMfAG"
# # Importing Data
#
# Only use the training set.
# + id="dsmJg51pL3rm" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1640530992875, "user_tz": 300, "elapsed": 1964, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjiiYFf8nm-IpkI8yUgW_CqJJ76xMZhUUU94FCJabg=s64", "userId": "15364959356591490410"}} outputId="aaf2ce61-8cae-46be-e05c-0826253aba67"
balanced_train_file = 'balanced_filtered_data_train.feather'
large_train_file = 'large_filtered_data_train.feather'
SUBDIR = 'data/data_splits/'
balanced_path = FOLDERNAME + SUBDIR + balanced_train_file
large_file = FOLDERNAME + SUBDIR + large_train_file
# !cp '{large_file}' .
# !cp '{balanced_path}' .
# !ls
# + id="rYOBJWyvL1O0" colab={"base_uri": "https://localhost:8080/", "height": 204} executionInfo={"status": "ok", "timestamp": 1640530993015, "user_tz": 300, "elapsed": 151, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjiiYFf8nm-IpkI8yUgW_CqJJ76xMZhUUU94FCJabg=s64", "userId": "15364959356591490410"}} outputId="85053925-b924-4e17-b0cb-1e3606670da0"
balanced_train_df = pd.read_feather(balanced_train_file, columns=['title', 'abstract', 'source'])
balanced_train_df.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="2fGp_OuN9WpZ" executionInfo={"status": "ok", "timestamp": 1640530997716, "user_tz": 300, "elapsed": 4704, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjiiYFf8nm-IpkI8yUgW_CqJJ76xMZhUUU94FCJabg=s64", "userId": "15364959356591490410"}} outputId="b4b9e79b-c8f2-46a7-b9b9-f9fcdd26cc82"
large_train_df = pd.read_feather(large_train_file, columns=['title', 'abstract', 'source'])
large_train_df.head()
# + [markdown] id="qTmvuHOviSOH"
# # Importance of Normalizing
# + [markdown] id="I46x5D1cmTSh"
# Let's build up the sets of characters for arxiv and vixra papers for each dataset. Just look at the balanced dataset for now.
# + id="7qgSWkNMlOTr"
# Split the balanced training set by source into {'arxiv': df, 'vixra': df}.
balanced_train_df_dic = {key: balanced_train_df[balanced_train_df['source'] == key] for key in {'arxiv', 'vixra'}}
# + [markdown] id="XbukHQCq_eAx"
# We perform some mild normalization at this stage by using `lower` on the text. In particular, we don't `strip`.
# + id="B6zt67sk_a5g"
def mild_normalizer(s):
    """Mild text normalization: lowercase only (no stripping, no ASCII folding)."""
    return s.lower()
# + id="lMM6_nCzmlfk"
# Collect the set of (mildly normalized) characters occurring in each
# source/field combination, keyed as '<source>_<field>_chars'.
balanced_train_char_mild_norm_dic = {}
for key, df in balanced_train_df_dic.items():
    for text_col in ('title', 'abstract'):
        temp_char_set = set(' ') # Start with a space in the set.
        for text in df[text_col]:
            for char in text:
                new_char = mild_normalizer(char)
                temp_char_set.add(new_char)
        balanced_train_char_mild_norm_dic[f'{key}_{text_col}_chars'] = temp_char_set
# + [markdown] id="_L95z0_0oaX_"
# Check out the number of unique characters in each slice of the dataset:
# + colab={"base_uri": "https://localhost:8080/"} id="Bt2BaoE0oELR" executionInfo={"status": "ok", "timestamp": 1640531006778, "user_tz": 300, "elapsed": 48, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjiiYFf8nm-IpkI8yUgW_CqJJ76xMZhUUU94FCJabg=s64", "userId": "15364959356591490410"}} outputId="ed14022b-7420-4bae-ce45-f21aa7df01b4"
print(*((key, len(val)) for key, val in balanced_train_char_mild_norm_dic.items()), sep='\n')
# + id="-fJyf9HB_1eh"
# Characters unique to one source, per field: asymmetric set differences,
# stored under keys like 'vixra_title_only'.
balanced_train_char_mild_norm_differences_dic = {}
for key1, key2 in (('vixra_title_chars', 'arxiv_title_chars'),
                   ('arxiv_title_chars', 'vixra_title_chars'),
                   ('vixra_abstract_chars', 'arxiv_abstract_chars'),
                   ('arxiv_abstract_chars', 'vixra_abstract_chars')
                   ):
    balanced_train_char_mild_norm_differences_dic[f"{key1.replace('_chars', '')}_only"] = [char for char in balanced_train_char_mild_norm_dic[key1] - balanced_train_char_mild_norm_dic[key2]]
# + colab={"base_uri": "https://localhost:8080/"} id="Aigcb_7V_1ej" executionInfo={"status": "ok", "timestamp": 1640531006780, "user_tz": 300, "elapsed": 12, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjiiYFf8nm-IpkI8yUgW_CqJJ76xMZhUUU94FCJabg=s64", "userId": "15364959356591490410"}} outputId="5eac2a8b-3e96-49c4-f1ce-01be12edec16"
for key, item in balanced_train_char_mild_norm_differences_dic.items():
print(key, item)
# + [markdown] id="7gnvWptk_1em"
# List all instances where a character appears only in arxiv or vixra and does so more than 1 percent of the time.
# + colab={"base_uri": "https://localhost:8080/"} id="UZetTVsV_1em" executionInfo={"status": "ok", "timestamp": 1640531019537, "user_tz": 300, "elapsed": 12764, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjiiYFf8nm-IpkI8yUgW_CqJJ76xMZhUUU94FCJabg=s64", "userId": "15364959356591490410"}} outputId="5696b489-ce92-4780-9d5b-0b14172d7087"
# For each source-exclusive character, report it when it occurs in more than
# 1 percent of that source's titles/abstracts.
for key, char_list in balanced_train_char_mild_norm_differences_dic.items():
    for char in char_list:
        key_split = key.split('_')  # e.g. 'vixra_title_only' -> ['vixra', 'title', 'only']
        percent = 100 * balanced_train_df_dic[key_split[0]][key_split[1]].apply(lambda s: char in mild_normalizer(s)).mean()
        if percent > 1:
            print(f'{(char, ord(char))} appears in {percent:.3f} percent of {key_split[0]} {key_split[1]}s')
# + [markdown] id="Eevi0eqvtCW3"
# Let's look at where the `\n` and `\r` chars appear in arxiv titles and vixra abstracts, respectively.
# + colab={"base_uri": "https://localhost:8080/"} id="pTwJ5O0ftQ7H" executionInfo={"status": "ok", "timestamp": 1640531019707, "user_tz": 300, "elapsed": 177, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjiiYFf8nm-IpkI8yUgW_CqJJ76xMZhUUU94FCJabg=s64", "userId": "15364959356591490410"}} outputId="37ba9f0f-420e-4271-fce9-2e71fdaa093c"
for title in balanced_train_df_dic['arxiv']['title'][balanced_train_df_dic['arxiv']['title'].apply(lambda x: '\n' in x)].sample(5, random_state=PD_RANDOM_STATE):
print(title.__repr__(), '\n') # For printing raw text.
print(50 * '-')
for text in balanced_train_df_dic['vixra']['abstract'][balanced_train_df_dic['vixra']['abstract'].apply(lambda x: '\r' in x)].sample(5, random_state=PD_RANDOM_STATE):
print(text.__repr__(), '\n') # For printing raw text.
# + [markdown] id="1A_SyZpNC8jP"
# While some of this information might have signal, a lot of it is also not able to be seen by humans and would make the comparison unfair, e.g. the difference between underscores or appearance of carriage returns `\r`
# + [markdown] id="zD13YR8vDUct"
# The bigger problem, it turns out, is the lack of `.strip()`-ing white space, particularly in the arxiv abstracts: every arxiv abstract ends in \n, while none of the vixra ones do. They also seem to start with a space
# + colab={"base_uri": "https://localhost:8080/"} id="oiX9cncPDe3b" executionInfo={"status": "ok", "timestamp": 1640531019707, "user_tz": 300, "elapsed": 17, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjiiYFf8nm-IpkI8yUgW_CqJJ76xMZhUUU94FCJabg=s64", "userId": "15364959356591490410"}} outputId="371c4547-e741-48d4-c3de-d6b6187635b5"
# Fraction of texts that end with '\n' / start with a space, per source/field.
# NOTE(review): the labels say "Percent" but .mean() returns a fraction in
# [0, 1] — confirm whether a *100 was intended.
for key, df in balanced_train_df_dic.items():
    for field in ('title', 'abstract'):
        print(f"Percent of \\n's appearing at end of {key} {field}s:",df[field].apply(lambda s: s[-1] == '\n').mean())
for key, df in balanced_train_df_dic.items():
    for field in ('title', 'abstract'):
        print(f'Percent of blank spaces appearing at start of {key} {field}s:',df[field].apply(lambda s: s[0] == ' ').mean())
print(50 * '-')
# Spot-check raw arxiv abstracts (repr exposes the trailing '\n' / leading space).
for text in balanced_train_df_dic['arxiv']['abstract'].sample(5, random_state=PD_RANDOM_STATE):
    print(text.__repr__(), '\n') # For printing raw text.
# + [markdown] id="wIpB9xiXxwdw"
# Look at the other fields to check for similar issues. Nothing obvious.
# + colab={"base_uri": "https://localhost:8080/"} id="L4cE_3LixRnk" executionInfo={"status": "ok", "timestamp": 1640531019708, "user_tz": 300, "elapsed": 12, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjiiYFf8nm-IpkI8yUgW_CqJJ76xMZhUUU94FCJabg=s64", "userId": "15364959356591490410"}} outputId="c97a04c8-459b-4195-8bf5-45471eba2851"
for text in balanced_train_df_dic['arxiv']['title'].sample(5, random_state=PD_RANDOM_STATE):
print(text.__repr__(), '\n') # For printing raw text.
print(50 * '-')
for text in balanced_train_df_dic['vixra']['title'].sample(5, random_state=PD_RANDOM_STATE):
print(text.__repr__(), '\n') # For printing raw text.
print(50 * '-')
for text in balanced_train_df_dic['vixra']['abstract'].sample(5, random_state=PD_RANDOM_STATE):
print(text.__repr__(), '\n') # For printing raw text.
# + [markdown] id="7ooms6jiEJNM"
# This lets any ML architecture easily distinguish arxiv abstracts, if not handled.
# + [markdown] id="e2vXKM7jETiu"
# To this end, let us apply a much stronger normalization where all text is converted to ASCII, spaces around non-alphanumeric chars are inserted, and all control chars are mapped to a blank space. We import `text_normalizer` from our custom package as the strong normalizer.
# + id="9IpODhMsEe5F"
# %%capture text_normalizer_import --no-stderr
# !pip install unidecode wandb pytorch-lightning
from arxiv_vixra_models import text_normalizer
from unidecode import unidecode
# + [markdown] id="4eOtcRmr6zPT"
# My `text_normalizer` is pretty brutal. Examples of the replacements:
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="iYywy2ab66Pm" executionInfo={"status": "ok", "timestamp": 1640531025202, "user_tz": 300, "elapsed": 14, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjiiYFf8nm-IpkI8yUgW_CqJJ76xMZhUUU94FCJabg=s64", "userId": "15364959356591490410"}} outputId="e157c2d8-6359-45b3-c2c6-f85ab47a31ff"
# Side-by-side table: characters that appear only in vixra titles (under mild
# normalization) and what the strong normalizer maps each of them to.
# display() is the notebook built-in.
with pd.option_context('display.max_rows', None):
    display(pd.DataFrame({'original': [ch for ch in balanced_train_char_mild_norm_differences_dic['vixra_title_only']],
                        'normalized': [text_normalizer(ch) for ch in balanced_train_char_mild_norm_differences_dic['vixra_title_only']]})
           )
# + id="H9S59UvBEs8K"
# Collect, per (source, field) slice, the set of characters that survive
# strong normalization. Keys look like 'arxiv_title_chars'.
balanced_train_char_strong_norm_dic = {}
for source, source_df in balanced_train_df_dic.items():
    for column in ('title', 'abstract'):
        # Seed with a space so it is always present in the set.
        chars_seen = {' '}
        for text in source_df[column]:
            for raw_char in text:
                # text_normalizer may map one non-ASCII char to several ASCII
                # chars; set.update adds every character of the returned string.
                chars_seen.update(text_normalizer(raw_char))
        balanced_train_char_strong_norm_dic[f'{source}_{column}_chars'] = chars_seen
# + [markdown] id="t0xcOZB0Es8K"
# Check out the number of unique characters in each slice of the dataset:
# + colab={"base_uri": "https://localhost:8080/"} id="hVWu-vm7Es8K" executionInfo={"status": "ok", "timestamp": 1640531071626, "user_tz": 300, "elapsed": 32, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjiiYFf8nm-IpkI8yUgW_CqJJ76xMZhUUU94FCJabg=s64", "userId": "15364959356591490410"}} outputId="1b8bcb78-4930-41d3-a1b4-230577a16204"
print(*((key, len(val)) for key, val in balanced_train_char_strong_norm_dic.items()), sep='\n')
# + id="vy0n4mVyEs8L"
# Characters that occur in one source/field slice but not in its counterpart,
# e.g. 'vixra_title_only' = (vixra title chars) - (arxiv title chars).
_slice_pairs = (('vixra_title_chars', 'arxiv_title_chars'),
                ('arxiv_title_chars', 'vixra_title_chars'),
                ('vixra_abstract_chars', 'arxiv_abstract_chars'),
                ('arxiv_abstract_chars', 'vixra_abstract_chars'))
balanced_train_char_strong_norm_differences_dic = {
    f"{left.replace('_chars', '')}_only": list(
        balanced_train_char_strong_norm_dic[left]
        - balanced_train_char_strong_norm_dic[right]
    )
    for left, right in _slice_pairs
}
# + colab={"base_uri": "https://localhost:8080/"} id="611c8-yFEs8M" executionInfo={"status": "ok", "timestamp": 1640531071814, "user_tz": 300, "elapsed": 194, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjiiYFf8nm-IpkI8yUgW_CqJJ76xMZhUUU94FCJabg=s64", "userId": "15364959356591490410"}} outputId="40e548f7-082b-468b-e524-6bab9ecfe024"
for key, item in balanced_train_char_strong_norm_differences_dic.items():
print(key, item)
# + [markdown] id="3kMMLpIUEs8M"
# Percentages:
# + id="O9YXrGp2Es8M"
# For each slice-exclusive character, report what percentage of that slice's
# documents contain it (after strong normalization).
for key, char_list in balanced_train_char_strong_norm_differences_dic.items():
    # key looks like '<source>_<field>_only'. Split once per key: the original
    # recomputed key.split('_') inside the inner loop even though it is
    # invariant over the characters.
    source, field = key.split('_')[:2]
    for char in char_list:
        percent = 100 * balanced_train_df_dic[source][field].apply(lambda s: char in text_normalizer(s)).mean()
        print(f'{(char, ord(char))} appears in {percent:.3f} percent of {source} {field}s')
# + [markdown] id="y6osNDMkKvRq"
# This corresponds to only a handful of titles in the whole set.
# + [markdown] id="DNenhy0UK2Ew"
# The char set we are using above is just all lower, non-control ASCII chars:
# + id="HVyxMFxSLK_W" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1640531071816, "user_tz": 300, "elapsed": 17, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjiiYFf8nm-IpkI8yUgW_CqJJ76xMZhUUU94FCJabg=s64", "userId": "15364959356591490410"}} outputId="6a6b132d-4fe5-432c-c371-315fc5c407e1"
import unicodedata
# All lower-cased ASCII code points; upper-case letters collapse onto their
# lower-case forms, so this has fewer than 128 members.
ascii_all_lower = {chr(code).lower() for code in range(128)}
# Control characters: Unicode general category beginning with 'C' (\t, \n, DEL, ...).
ascii_control = {ch for ch in ascii_all_lower if unicodedata.category(ch).startswith('C')}
# Letters and digits.
ascii_all_lower_alpha_num = set(filter(str.isalnum, ascii_all_lower))
# Everything else that is printable: punctuation plus the space character.
ascii_punctuation = ascii_all_lower - ascii_control - ascii_all_lower_alpha_num
normalized_char_set = ascii_all_lower_alpha_num | ascii_punctuation
print(normalized_char_set)
print(f'{len(normalized_char_set)} total characters.')
# + [markdown] id="PkttkrJogXHy"
# Export as a feather file.
# + id="Ia4Tj3ayghaf"
# Rebuild the full normalized character set as the union over every
# source/field slice, then persist it as a char -> index table.
normalized_char_set = set().union(*balanced_train_char_strong_norm_dic.values())
# sorted() accepts the set directly; the original wrapped it in a redundant
# list() first (ruff C414).
normalized_char_list = sorted(normalized_char_set)
# Sorting places ' ' (0x20) first among these chars; padding will be performed
# based on this assumption.
assert normalized_char_list[0] == ' '
normalized_char_dic = {char: idx for idx, char in enumerate(normalized_char_list)}
normalized_char_df = pd.DataFrame(list(normalized_char_dic.items()), columns=['char', 'idx'])
normalized_char_df.to_feather(FOLDERNAME + 'data/data_splits/normalized_char_set.feather')
# + [markdown] id="Dly_4WpeLt_5"
# # Normalizing the Text
#
# Go through and strongly normalize the text everywhere, via `text_normalizer`.
# + [markdown] id="TEbxNIi7gMXl"
# Examine the output of the normalizer.
# + colab={"base_uri": "https://localhost:8080/"} id="8X_rOOP6fRkL" executionInfo={"status": "ok", "timestamp": 1640531071976, "user_tz": 300, "elapsed": 16, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjiiYFf8nm-IpkI8yUgW_CqJJ76xMZhUUU94FCJabg=s64", "userId": "15364959356591490410"}} outputId="2b373c8d-1333-4c31-9bc8-050dce539d29"
# Before/after comparison of the strong normalizer on a handful of abstracts.
text_normalizer_test_samples = balanced_train_df.head(10).abstract
for text in text_normalizer_test_samples:
    print(text + '\n', text_normalizer(text), 20 * '-', sep='\n')
# + [markdown] id="UOBFxAv4YTX2"
# Normalize and export all data sets
# + colab={"base_uri": "https://localhost:8080/"} id="YtcrvhJ-MWZr" executionInfo={"status": "ok", "timestamp": 1640531463478, "user_tz": 300, "elapsed": 391505, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjiiYFf8nm-IpkI8yUgW_CqJJ76xMZhUUU94FCJabg=s64", "userId": "15364959356591490410"}} outputId="bb3fd80f-e055-4d27-9bd0-70218067b023"
# Normalize title/abstract in every split and write '*_normalized_*' copies.
all_file_names = ('balanced_filtered_data_train.feather',
                  'balanced_filtered_data_validation.feather',
                  'balanced_filtered_data_test.feather',
                  'large_filtered_data_train.feather'
                  )
df_list = []
for f in all_file_names:
    path = FOLDERNAME + SUBDIR + f
    # Copy from Drive into the local runtime for faster reads.
    # !cp '{path}' .
    df_list.append(pd.read_feather(f, columns=['title', 'abstract', 'source']))
    temp_df = df_list[-1]
    # Normalize in place: df_list keeps references to the mutated frames.
    temp_df['title'] = temp_df['title'].apply(text_normalizer)
    temp_df['abstract'] = temp_df['abstract'].apply(text_normalizer)
    # e.g. 'balanced_filtered_data_train' -> 'balanced_filtered_normalized_data_train'
    idx = f.find('_data')
    out_name = f[:idx] + '_normalized' + f[idx:]
    temp_df.to_feather(FOLDERNAME + SUBDIR + out_name)
    # Report only after the write completes: the original printed 'saved'
    # before calling to_feather, which could claim success for a failed write.
    print(f'saved {out_name}')
# + [markdown] id="ovrYMa0UYh3v"
# We will work with the below training sets for the remainder of the notebook.
# + id="X_bQckbGAKnm"
# Look the frames up by filename rather than position so that reordering
# all_file_names above cannot silently swap the splits.
balanced_train_normalized_df = df_list[all_file_names.index('balanced_filtered_data_train.feather')]
balanced_validation_normalized_df = df_list[all_file_names.index('balanced_filtered_data_validation.feather')]
large_train_normalized_df = df_list[all_file_names.index('large_filtered_data_train.feather')]
# + colab={"base_uri": "https://localhost:8080/"} id="0QkT2DVx3GqD" executionInfo={"status": "ok", "timestamp": 1640531463481, "user_tz": 300, "elapsed": 40, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjiiYFf8nm-IpkI8yUgW_CqJJ76xMZhUUU94FCJabg=s64", "userId": "15364959356591490410"}} outputId="05f9457b-0f89-42cc-dc96-dacb7181ce7e"
len(balanced_train_normalized_df)
# + [markdown] id="x2mtfUlRROkw"
# The normalization process (the insertion of spaces around punctuation, in particular) increases the char length and word count of the various fields that we have previously cut on. Check that the increase is not unacceptably drastic.
# + colab={"base_uri": "https://localhost:8080/"} id="JpxO_8sSQc4l" executionInfo={"status": "ok", "timestamp": 1640531463855, "user_tz": 300, "elapsed": 399, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjiiYFf8nm-IpkI8yUgW_CqJJ76xMZhUUU94FCJabg=s64", "userId": "15364959356591490410"}} outputId="92fbd66a-91ba-4164-cf2b-6ee7230fbae9"
# Fraction of rows exceeding the char-length / word-count cuts used earlier,
# measured after strong normalization (which inserts spaces and lengthens text).
print(balanced_train_normalized_df['title'].apply(lambda x: len(x) > 128).mean(),
      balanced_train_normalized_df['abstract'].apply(lambda x: len(x) > 2048).mean(),
      balanced_train_normalized_df['title'].apply(lambda x: len(x.split()) > 16).mean(),
      balanced_train_normalized_df['abstract'].apply(lambda x: len(x.split()) > 256).mean(),
      sep='\n')
# + [markdown] id="0_j2FzjOWUuy"
# The overflow for char len is less than 1%, but for word counts it's ~10%, which is not great. Increasing the threshold by 50% brings the cut down below 1%, so we will increase the `seq_len` threshold for embedding models by a similar factor during training, so as not to unnecessarily cut out additional data.
# + colab={"base_uri": "https://localhost:8080/"} id="IPwZ4i_CUxxS" executionInfo={"status": "ok", "timestamp": 1640531464194, "user_tz": 300, "elapsed": 348, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjiiYFf8nm-IpkI8yUgW_CqJJ76xMZhUUU94FCJabg=s64", "userId": "15364959356591490410"}} outputId="68b906ec-049e-4638-9002-cc014b495cb4"
# Same check with the word-count thresholds relaxed by 50%.
print(balanced_train_normalized_df['title'].apply(lambda x: len(x.split()) > 1.5 * 16).mean(),
      balanced_train_normalized_df['abstract'].apply(lambda x: len(x.split()) > 1.5 * 256).mean(),
      sep='\n')
# + [markdown] id="vUwp17F3gdVX"
# We also create versions of the training data which have their source column randomized, which will be used to test whether there is any accidental cheating in the network.
# + id="MwHkZGdZgv-C"
# Control data set: identical text but with the 'source' labels shuffled.
balanced_train_normalized_randomized_source_df = balanced_train_normalized_df.copy()
# Assign via .to_numpy() so the shuffled order is applied positionally;
# assigning the shuffled Series directly would be silently undone by
# pandas index alignment.
balanced_shuffled_sources = balanced_train_normalized_randomized_source_df['source'].sample(frac=1, random_state=PD_RANDOM_STATE)
balanced_train_normalized_randomized_source_df['source'] = balanced_shuffled_sources.to_numpy()
balanced_train_normalized_randomized_source_df.to_feather(FOLDERNAME + SUBDIR + 'balanced_filtered_normalized_data_randomized_source_train.feather')
# + id="t59K610XZweP"
# Same label-shuffle control for the validation split.
balanced_validation_normalized_randomized_source_df = balanced_validation_normalized_df.copy()
# Assign via .to_numpy() so the shuffled order is applied positionally;
# assigning the shuffled Series directly would be silently undone by
# pandas index alignment.
validation_shuffled_sources = balanced_validation_normalized_randomized_source_df['source'].sample(frac=1, random_state=PD_RANDOM_STATE)
balanced_validation_normalized_randomized_source_df['source'] = validation_shuffled_sources.to_numpy()
balanced_validation_normalized_randomized_source_df.to_feather(FOLDERNAME + SUBDIR + 'balanced_filtered_normalized_data_randomized_source_validation.feather')
# + id="jfjijpGTRQhl"
# Same label-shuffle control for the large training split.
large_train_normalized_randomized_source_df = large_train_normalized_df.copy()
# Assign via .to_numpy() so the shuffled order is applied positionally;
# assigning the shuffled Series directly would be silently undone by
# pandas index alignment.
large_shuffled_sources = large_train_normalized_randomized_source_df['source'].sample(frac=1, random_state=PD_RANDOM_STATE)
large_train_normalized_randomized_source_df['source'] = large_shuffled_sources.to_numpy()
large_train_normalized_randomized_source_df.to_feather(FOLDERNAME + SUBDIR + 'large_filtered_normalized_data_randomized_source_train.feather')
# + [markdown] id="D_-ZMIUSuPi8"
# Sanity check that the indices are really shuffled. The numbers in the tuple should be close to each other, since 'vixra' never appears in arxiv abstracts.
#
# + colab={"base_uri": "https://localhost:8080/"} id="VQEaLFIou2n6" executionInfo={"status": "ok", "timestamp": 1640531478612, "user_tz": 300, "elapsed": 1863, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjiiYFf8nm-IpkI8yUgW_CqJJ76xMZhUUU94FCJabg=s64", "userId": "15364959356591490410"}} outputId="a45dd8d1-6a12-48a2-89d8-635350656bd8"
# Sanity check that the source labels are really shuffled: after a genuine
# shuffle the two printed numbers should be close to each other.
for df in (balanced_train_normalized_randomized_source_df,
           large_train_normalized_randomized_source_df):
    # P(source == 'arxiv' | 'vixra' in abstract). The word 'vixra' never
    # appears in real arxiv abstracts, so before shuffling this would be ~0.
    vixra_in_arxiv_abstract_frac = (df[df['abstract'].apply(lambda x: 'vixra' in x)]['source'] == 'arxiv').mean()
    # Overall fraction of rows labeled 'arxiv'. (The original called this a
    # "vixra-to-arxiv ratio", which it is not -- it is the arxiv fraction.)
    arxiv_frac = (df['source'] == 'arxiv').mean()
    print(vixra_in_arxiv_abstract_frac, arxiv_frac)
# + [markdown] id="9QrBtOvEM9UO"
# ## Vocabularies
#
# Get the unique words in titles and abstracts from the various datasets along with their counts. We create dataframes with `word` and `count` columns, since we may want to filter on frequency.
# + [markdown] id="h8R-ZL9bfqVR"
# Titles:
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="XgphJVA1-93J" executionInfo={"status": "ok", "timestamp": 1640531478615, "user_tz": 300, "elapsed": 24, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjiiYFf8nm-IpkI8yUgW_CqJJ76xMZhUUU94FCJabg=s64", "userId": "15364959356591490410"}} outputId="30e15112-8b85-41db-e7e0-8c0cbd0a32be"
from collections import Counter

# Count every whitespace-separated token across the normalized balanced titles.
# Iterating the Series directly also avoids Series.iteritems(), which was
# removed in pandas 2.0.
balanced_title_normalized_word_counter = Counter(
    word
    for title in balanced_train_normalized_df['title']
    for word in title.split()
)
# most_common() yields (word, count) pairs sorted by descending count with ties
# in first-seen order -- the same ordering the original produced with a stable
# sorted() on counts.
balanced_title_normalized_vocab_df = pd.DataFrame(
    balanced_title_normalized_word_counter.most_common(), columns=['word', 'count'])
balanced_title_normalized_vocab_df.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="ghoc0oM9TUzi" executionInfo={"status": "ok", "timestamp": 1640531487986, "user_tz": 300, "elapsed": 9386, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjiiYFf8nm-IpkI8yUgW_CqJJ76xMZhUUU94FCJabg=s64", "userId": "15364959356591490410"}} outputId="1b3a2591-1918-4e9e-c3f4-4e74d0c715d3"
from collections import Counter

# Count every whitespace-separated token across the normalized large-split
# titles. Iterating the Series directly also avoids Series.iteritems(), which
# was removed in pandas 2.0.
large_title_normalized_word_counter = Counter(
    word
    for title in large_train_normalized_df['title']
    for word in title.split()
)
# most_common() yields (word, count) pairs sorted by descending count with ties
# in first-seen order -- identical to the original stable sorted() on counts.
large_title_normalized_vocab_df = pd.DataFrame(
    large_title_normalized_word_counter.most_common(), columns=['word', 'count'])
large_title_normalized_vocab_df.head()
# + [markdown] id="q-7t4-2oT9KJ"
# Abstracts
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="gw4vHfDuTswx" executionInfo={"status": "ok", "timestamp": 1640531490083, "user_tz": 300, "elapsed": 2100, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjiiYFf8nm-IpkI8yUgW_CqJJ76xMZhUUU94FCJabg=s64", "userId": "15364959356591490410"}} outputId="c91f4407-4376-415f-a845-0b6b084f7f91"
from collections import Counter

# Count every whitespace-separated token across the normalized balanced
# abstracts. Iterating the Series directly also avoids Series.iteritems(),
# which was removed in pandas 2.0.
balanced_abstract_normalized_word_counter = Counter(
    word
    for abstract in balanced_train_normalized_df['abstract']
    for word in abstract.split()
)
# most_common() yields (word, count) pairs sorted by descending count with ties
# in first-seen order -- identical to the original stable sorted() on counts.
balanced_abstract_normalized_vocab_df = pd.DataFrame(
    balanced_abstract_normalized_word_counter.most_common(), columns=['word', 'count'])
balanced_abstract_normalized_vocab_df.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="IP4X5o6vTswz" executionInfo={"status": "ok", "timestamp": 1640531595284, "user_tz": 300, "elapsed": 105205, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjiiYFf8nm-IpkI8yUgW_CqJJ76xMZhUUU94FCJabg=s64", "userId": "15364959356591490410"}} outputId="f5f0112b-ea7c-4ea2-8af4-d0a4efe6075d"
from collections import Counter

# Count every whitespace-separated token across the normalized large-split
# abstracts. Iterating the Series directly also avoids Series.iteritems(),
# which was removed in pandas 2.0.
large_abstract_normalized_word_counter = Counter(
    word
    for abstract in large_train_normalized_df['abstract']
    for word in abstract.split()
)
# most_common() yields (word, count) pairs sorted by descending count with ties
# in first-seen order -- identical to the original stable sorted() on counts.
large_abstract_normalized_vocab_df = pd.DataFrame(
    large_abstract_normalized_word_counter.most_common(), columns=['word', 'count'])
large_abstract_normalized_vocab_df.head()
# + [markdown] id="X56q29-PUNhA"
# ### Look into the statistics of the word sets
# + [markdown] id="TfvKpr4xUhE6"
# Titles
# + colab={"base_uri": "https://localhost:8080/"} id="KTH0HmNLUSPG" executionInfo={"status": "ok", "timestamp": 1640531595285, "user_tz": 300, "elapsed": 6, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjiiYFf8nm-IpkI8yUgW_CqJJ76xMZhUUU94FCJabg=s64", "userId": "15364959356591490410"}} outputId="7a998a15-24af-4180-c7fb-5e3e79c0494c"
balanced_title_normalized_vocab_df['count'].describe()
# + [markdown] id="Bs2oHKJ1Ub-X"
# Filtering on words which appear more than twice already hugely cleaves the data.
# + colab={"base_uri": "https://localhost:8080/"} id="hPVI54FWSxgV" executionInfo={"status": "ok", "timestamp": 1640531595562, "user_tz": 300, "elapsed": 281, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjiiYFf8nm-IpkI8yUgW_CqJJ76xMZhUUU94FCJabg=s64", "userId": "15364959356591490410"}} outputId="e4a35a5b-aca0-46b2-eb3f-e917dcad787e"
(balanced_title_normalized_vocab_df['count'] > 2).mean()
# + colab={"base_uri": "https://localhost:8080/", "height": 418} id="AV7s8TTyTyRp" executionInfo={"status": "ok", "timestamp": 1640531595563, "user_tz": 300, "elapsed": 15, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjiiYFf8nm-IpkI8yUgW_CqJJ76xMZhUUU94FCJabg=s64", "userId": "15364959356591490410"}} outputId="706b5994-d177-4658-e69e-f7199c7c269e"
balanced_title_normalized_vocab_df[balanced_title_normalized_vocab_df['count'] < 2]
# + [markdown] id="IPSy0476UmtU"
# Abstracts
# + colab={"base_uri": "https://localhost:8080/"} id="wjuPQOY2Umz4" executionInfo={"status": "ok", "timestamp": 1640531595563, "user_tz": 300, "elapsed": 13, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjiiYFf8nm-IpkI8yUgW_CqJJ76xMZhUUU94FCJabg=s64", "userId": "15364959356591490410"}} outputId="83ac1ed2-0421-41b4-aa0c-0920068c22a6"
balanced_abstract_normalized_vocab_df['count'].describe()
# + [markdown] id="HKGJIAxfUmz6"
# Filtering on words which appear more than twice already hugely cleaves the data.
# + colab={"base_uri": "https://localhost:8080/"} id="JpuClDnqUmz6" executionInfo={"status": "ok", "timestamp": 1640531595564, "user_tz": 300, "elapsed": 11, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjiiYFf8nm-IpkI8yUgW_CqJJ76xMZhUUU94FCJabg=s64", "userId": "15364959356591490410"}} outputId="23fb5ea1-a499-4feb-fc9b-e579c047b750"
(balanced_abstract_normalized_vocab_df['count'] > 2).mean()
# + colab={"base_uri": "https://localhost:8080/", "height": 418} id="01QT1KblUmz7" executionInfo={"status": "ok", "timestamp": 1640531595564, "user_tz": 300, "elapsed": 8, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjiiYFf8nm-IpkI8yUgW_CqJJ76xMZhUUU94FCJabg=s64", "userId": "15364959356591490410"}} outputId="bca487c8-40a2-464f-dc6b-561fb57a1f62"
balanced_abstract_normalized_vocab_df[balanced_abstract_normalized_vocab_df['count'] < 2]
# + [markdown] id="sVRjQyzFAyU0"
# Abstracts:
# + [markdown] id="pXlc28uZm_ti"
# Save to feather format.
# + id="BEKly3Tamzcy"
# Persist the four vocabularies (word + count columns) for the training notebooks.
balanced_title_normalized_vocab_df.to_feather(FOLDERNAME + 'data/data_splits/balanced_title_normalized_vocab.feather')
balanced_abstract_normalized_vocab_df.to_feather(FOLDERNAME + 'data/data_splits/balanced_abstract_normalized_vocab.feather')
large_title_normalized_vocab_df.to_feather(FOLDERNAME + 'data/data_splits/large_title_normalized_vocab.feather')
large_abstract_normalized_vocab_df.to_feather(FOLDERNAME + 'data/data_splits/large_abstract_normalized_vocab.feather')
# + [markdown] id="o2nMpL-Ck1xn"
# Create the Zipf's law plots
# + id="-iiklWAMirti" colab={"base_uri": "https://localhost:8080/", "height": 301} executionInfo={"status": "ok", "timestamp": 1640531598212, "user_tz": 300, "elapsed": 1264, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjiiYFf8nm-IpkI8yUgW_CqJJ76xMZhUUU94FCJabg=s64", "userId": "15364959356591490410"}} outputId="ea39f108-2cc5-4b52-9f02-3dde55d9fb96"
# Zipf's-law check for the balanced title vocabulary: ln(frequency) vs ln(rank)
# should be roughly linear. Rows are already sorted by descending count, so
# array position == rank - 1.
zipf_balanced_title_data = np.array(balanced_title_normalized_vocab_df['count'])
zipf_balanced_title_data = np.log(zipf_balanced_title_data / zipf_balanced_title_data.sum())
zipf_balanced_title_plot = sns.scatterplot(y=zipf_balanced_title_data,
                                x=np.log(np.arange(1, len(zipf_balanced_title_data) + 1)),
                                marker='$-$',
                                edgecolor='none')
zipf_balanced_title_plot.set_title("Zipf's Law: balanced Titles")
zipf_balanced_title_plot.set_xlabel('ln(rank)')
zipf_balanced_title_plot.set_ylabel('ln(frequency)')
zipf_balanced_title_plot.get_figure().savefig(FOLDERNAME + 'figures/zipf_balanced_title_plot.svg')
# + colab={"base_uri": "https://localhost:8080/", "height": 301} id="04XSbdldFKjy" executionInfo={"status": "ok", "timestamp": 1640531598951, "user_tz": 300, "elapsed": 741, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjiiYFf8nm-IpkI8yUgW_CqJJ76xMZhUUU94FCJabg=s64", "userId": "15364959356591490410"}} outputId="8a13ca8a-de3f-4928-9a11-b3c8671d2403"
# Zipf's-law check for the balanced abstract vocabulary (see title plot above
# for the construction; counts are pre-sorted so position == rank - 1).
zipf_balanced_abstract_data = np.array(balanced_abstract_normalized_vocab_df['count'])
zipf_balanced_abstract_data = np.log(zipf_balanced_abstract_data / zipf_balanced_abstract_data.sum())
zipf_balanced_abstract_plot = sns.scatterplot(y=zipf_balanced_abstract_data,
                                x=np.log(np.arange(1, len(zipf_balanced_abstract_data) + 1)),
                                marker='$-$',
                                edgecolor='none')
zipf_balanced_abstract_plot.set_title("Zipf's Law: balanced abstracts")
zipf_balanced_abstract_plot.set_xlabel('ln(rank)')
zipf_balanced_abstract_plot.set_ylabel('ln(frequency)')
zipf_balanced_abstract_plot.get_figure().savefig(FOLDERNAME + 'figures/zipf_balanced_abstract_plot.svg')
# + colab={"base_uri": "https://localhost:8080/", "height": 301} id="m8aRuo6rFX9w" executionInfo={"status": "ok", "timestamp": 1640531600607, "user_tz": 300, "elapsed": 1661, "user": {"displayName": "<NAME>", "photoUrl": "<KEY>", "userId": "15364959356591490410"}} outputId="94128c8e-c193-4f8d-d039-048fb1bd09aa"
# Zipf's-law check for the large-split title vocabulary (counts pre-sorted,
# so array position == rank - 1).
zipf_large_title_data = np.array(large_title_normalized_vocab_df['count'])
zipf_large_title_data = np.log(zipf_large_title_data / zipf_large_title_data.sum())
zipf_large_title_plot = sns.scatterplot(y=zipf_large_title_data,
                                x=np.log(np.arange(1, len(zipf_large_title_data) + 1)),
                                marker='$-$',
                                edgecolor='none')
zipf_large_title_plot.set_title("Zipf's Law: large Titles")
zipf_large_title_plot.set_xlabel('ln(rank)')
zipf_large_title_plot.set_ylabel('ln(frequency)')
zipf_large_title_plot.get_figure().savefig(FOLDERNAME + 'figures/zipf_large_title_plot.svg')
# + colab={"base_uri": "https://localhost:8080/", "height": 301} id="_xZS5viYGN8u" executionInfo={"status": "ok", "timestamp": 1640531603927, "user_tz": 300, "elapsed": 3323, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/<KEY>", "userId": "15364959356591490410"}} outputId="93207cbb-a38d-4d8e-f36c-c7fa4a85c328"
# Zipf's-law check for the large-split abstract vocabulary (counts pre-sorted,
# so array position == rank - 1).
zipf_large_abstract_data = np.array(large_abstract_normalized_vocab_df['count'])
zipf_large_abstract_data = np.log(zipf_large_abstract_data / zipf_large_abstract_data.sum())
zipf_large_abstract_plot = sns.scatterplot(y=zipf_large_abstract_data,
                                x=np.log(np.arange(1, len(zipf_large_abstract_data) + 1)),
                                marker='$-$',
                                edgecolor='none')
zipf_large_abstract_plot.set_title("Zipf's Law: large abstracts")
zipf_large_abstract_plot.set_xlabel('ln(rank)')
zipf_large_abstract_plot.set_ylabel('ln(frequency)')
zipf_large_abstract_plot.get_figure().savefig(FOLDERNAME + 'figures/zipf_large_abstract_plot.svg')
|
data_processing/data_normalization_and_tokenization.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Installations and Data loading
# !pip install xgboost
# !pip install lightgbm
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from xgboost import XGBClassifier
import lightgbm as ltb
from sklearn.tree import ExtraTreeClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import precision_score,recall_score
from sklearn.feature_selection import RFE
# NOTE(review): absolute Windows paths make this notebook machine-specific;
# consider a relative data directory or a config variable.
train = pd.read_csv('C:\\Users\\Deep\\Desktop\\GA projects\\train_loan.csv')
train
test = pd.read_csv('C:\\Users\\Deep\\Desktop\\GA projects\\test_loan.csv')
test
# ### Exploratory Data Analysis
train.columns
train.shape
train.info()
# NOTE(review): DataFrame.skew() on a frame with non-numeric columns needs
# numeric_only=True on recent pandas -- confirm the pinned version.
train.skew()
train.isnull().sum()
train.nunique()
# ### Feature Engineering
# Expand first_payment_date into year/month features.
train['fpd_year'] = pd.DatetimeIndex(train['first_payment_date']).year
train['fpd_month'] = pd.DatetimeIndex(train['first_payment_date']).month
train
# Target-mean encode the lender: per-institution mean of the delinquency label
# m13. (The original computed and printed this identical Series twice in a row;
# the duplicate was removed.)
# NOTE(review): mean-encoding on the full training set leaks the target into
# the feature -- consider out-of-fold encoding.
mean_encode = train.groupby('financial_institution')['m13'].mean()
print(mean_encode)
train.loc[:,'financial_institution_mean_encode'] = train['financial_institution'].map(mean_encode)
train.head()
# ### Dropping Redundant Columns
# Drop identifier/date/raw-categorical columns that are now redundant: the
# dates were expanded into fpd_year/fpd_month and financial_institution was
# mean-encoded above. (`axis=1` removed: it is redundant -- and ignored --
# when `columns=` is given.)
train.drop(columns=['loan_id','origination_date','first_payment_date','financial_institution'],inplace=True)
train
# One-hot encode the remaining categorical columns (source, loan_purpose, ...).
train = pd.get_dummies(train)
train
# ### Applying ML Algorithms
# Instantiate the candidate classifiers with default hyperparameters.
rfr = RandomForestClassifier()
lr = LogisticRegression()
xgb = XGBClassifier()
et = ExtraTreeClassifier()
gr = GradientBoostingClassifier()
# Model registry: index -> (estimator, display name).
exp_tup = {0: (lr, 'LogisticRegressor'), 1: (rfr, 'RandomForestClassifier'), 2: (xgb, 'XGBClassifier'), 3: (et, 'ExtraTreeClassifier'), 4: (gr, 'GradientBoostingClassifier')}
# `columns=` keyword instead of the original positional `axis` argument
# (positional axis in DataFrame.drop is deprecated and removed in pandas 2.0).
x = train.drop(columns='m13')
y = train['m13']
x_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.7, random_state=7)
from imblearn.over_sampling import SMOTE
# Oversample the minority (delinquent) class on the training split only.
smote = SMOTE(random_state=0)
# NOTE(review): SMOTE.fit_sample was removed in imbalanced-learn 0.8 in favour
# of fit_resample -- confirm the pinned imblearn version.
x_sample, y_sample = smote.fit_sample(x_train, y_train)
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
# Fit every registered model on the SMOTE-balanced sample and collect
# precision / recall / accuracy / F1 on the held-out test split.
res_ls = []
for model, name in exp_tup.values():  # dict preserves insertion order (0..4)
    model.fit(x_sample, y_sample)
    y_pred = model.predict(x_test)
    res_ls.append((name,
                   precision_score(y_test, y_pred),
                   recall_score(y_test, y_pred),
                   accuracy_score(y_test, y_pred),
                   f1_score(y_test, y_pred)))
# Build the comparison table in one step: the original assigned columns after
# construction and then made a second, redundant DataFrame copy for `result`.
res_df = pd.DataFrame(res_ls, columns=['model', 'precision', 'recall', 'accuracy', 'f_score'])
# Rank models by F1 (best handles the class imbalance of m13).
res_df.sort_values('f_score', ascending=False, inplace=True)
result = res_df
result
# ### Test Dataset Treatment and Results
# Apply the same feature engineering to the test set. mean_encode was fit on
# the training data above, so no test-set leakage is introduced here.
test['fpd_year'] = pd.DatetimeIndex(test['first_payment_date']).year
test['fpd_month'] = pd.DatetimeIndex(test['first_payment_date']).month
test.loc[:,'financial_institution_mean_encode'] = test['financial_institution'].map(mean_encode)
test.drop(columns=['loan_id','origination_date','first_payment_date','financial_institution'],axis=1,inplace=True)
# NOTE(review): pd.get_dummies on the test set alone can produce a different
# column set than the training frame if a category is missing -- the explicit
# `features` list below only partially guards against this; confirm alignment.
test = pd.get_dummies(test)
test.columns
# Feature columns shared by the engineered train and test frames.
features = ['Unnamed: 0', 'interest_rate', 'unpaid_principal_bal', 'loan_term',
       'loan_to_value', 'number_of_borrowers', 'debt_to_income_ratio',
       'borrower_credit_score', 'insurance_percent',
       'co-borrower_credit_score', 'insurance_type', 'm1', 'm2', 'm3', 'm4',
       'm5', 'm6', 'm7', 'm8', 'm9', 'm10', 'm11', 'm12', 'fpd_year',
       'fpd_month', 'financial_institution_mean_encode', 'source_X',
       'source_Y', 'source_Z', 'loan_purpose_A23', 'loan_purpose_B12',
       'loan_purpose_C86']
target = 'm13'
# Re-balance on the FULL training data this time (not just the 70% split)
# before fitting the final model.
# NOTE(review): fit_sample was removed in imbalanced-learn 0.8 (fit_resample).
smote = SMOTE(random_state=0)
train_sample,target_sample = smote.fit_sample(train[features],train[target])
clfmodel = RandomForestClassifier()
clfmodel.fit(train_sample,target_sample)
predictions = clfmodel.predict(test[features])
# Submission keyed by the original row identifier column.
submission = pd.DataFrame({'Unnamed: 0':test['Unnamed: 0'],'m13':predictions})
submission
submission['m13'].value_counts()
submission
submission.to_csv('gea2sub.csv', index=False)
|
loan_delinquency_project.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] colab_type="text" id="Ic4_occAAiAT"
# ##### Copyright 2018 The TensorFlow Authors.
# + cellView="form" colab={} colab_type="code" id="ioaprt5q5US7"
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + cellView="form" colab={} colab_type="code" id="yCl0eTNH5RS3"
#@title MIT License
#
# Copyright (c) 2017 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# + [markdown] colab_type="text" id="ItXfxkxvosLH"
# # Text classification with movie reviews
# + [markdown] colab_type="text" id="hKY4XMc9o8iB"
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://www.tensorflow.org/beta/tutorials/keras/basic_text_classification"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
# </td>
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/r2/tutorials/keras/basic_text_classification.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/r2/tutorials/keras/basic_text_classification.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
# </td>
# <td>
# <a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/r2/tutorials/keras/basic_text_classification.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
# </td>
# </table>
# + [markdown] colab_type="text" id="Eg62Pmz3o83v"
#
# This notebook classifies movie reviews as *positive* or *negative* using the text of the review. This is an example of *binary*—or two-class—classification, an important and widely applicable kind of machine learning problem.
#
# We'll use the [IMDB dataset](https://www.tensorflow.org/api_docs/python/tf/keras/datasets/imdb) that contains the text of 50,000 movie reviews from the [Internet Movie Database](https://www.imdb.com/). These are split into 25,000 reviews for training and 25,000 reviews for testing. The training and testing sets are *balanced*, meaning they contain an equal number of positive and negative reviews.
#
# This notebook uses [tf.keras](https://www.tensorflow.org/guide/keras), a high-level API to build and train models in TensorFlow. For a more advanced text classification tutorial using `tf.keras`, see the [MLCC Text Classification Guide](https://developers.google.com/machine-learning/guides/text-classification/).
# + colab={} colab_type="code" id="2ew7HTbPpCJH"
from __future__ import absolute_import, division, print_function, unicode_literals

try:
    # # %tensorflow_version only exists in Colab.
    # %tensorflow_version 2.x
    # jupytext turns the Colab magic above into a comment, which left this
    # try-body empty -- a SyntaxError when the file is run as a script.
    # `pass` keeps the construct valid everywhere.
    pass
except Exception:
    pass
import tensorflow as tf
from tensorflow import keras

import numpy as np

print(tf.__version__)
# + [markdown] colab_type="text" id="iAsKG535pHep"
# ## Download the IMDB dataset
#
# The IMDB dataset comes packaged with TensorFlow. It has already been preprocessed such that the reviews (sequences of words) have been converted to sequences of integers, where each integer represents a specific word in a dictionary.
#
# The following code downloads the IMDB dataset to your machine (or uses a cached copy if you've already downloaded it):
# + colab={} colab_type="code" id="zXXx5Oc3pOmN"
# The IMDB dataset ships with Keras: 50,000 reviews pre-encoded as integer word ids.
imdb = keras.datasets.imdb
# num_words=10000 keeps only the 10,000 most frequent words; rarer ids are dropped.
(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=10000)
# + [markdown] colab_type="text" id="odr-KlzO-lkL"
# The argument `num_words=10000` keeps the top 10,000 most frequently occurring words in the training data. The rare words are discarded to keep the size of the data manageable.
# + [markdown] colab_type="text" id="l50X3GfjpU4r"
# ## Explore the data
#
# Let's take a moment to understand the format of the data. The dataset comes preprocessed: each example is an array of integers representing the words of the movie review. Each label is an integer value of either 0 or 1, where 0 is a negative review, and 1 is a positive review.
# + colab={} colab_type="code" id="y8qCnve_-lkO"
print("Training entries: {}, labels: {}".format(len(train_data), len(train_labels)))
# + [markdown] colab_type="text" id="RnKvHWW4-lkW"
# The text of reviews have been converted to integers, where each integer represents a specific word in a dictionary. Here's what the first review looks like:
# + colab={} colab_type="code" id="QtTS4kpEpjbi"
print(train_data[0])
# + [markdown] colab_type="text" id="hIE4l_72x7DP"
# Movie reviews may be different lengths. The below code shows the number of words in the first and second reviews. Since inputs to a neural network must be the same length, we'll need to resolve this later.
# + colab={} colab_type="code" id="X-6Ii9Pfx6Nr"
len(train_data[0]), len(train_data[1])
# + [markdown] colab_type="text" id="4wJg2FiYpuoX"
# ### Convert the integers back to words
#
# It may be useful to know how to convert integers back to text. Here, we'll create a helper function to query a dictionary object that contains the integer to string mapping:
# + colab={} colab_type="code" id="tr5s_1alpzop"
# A dictionary mapping words to an integer index
word_index = imdb.get_word_index()
# The first indices are reserved
# Shift every id up by 3 so ids 0-3 are free for the special tokens below.
word_index = {k:(v+3) for k,v in word_index.items()}
word_index["<PAD>"] = 0
word_index["<START>"] = 1
word_index["<UNK>"] = 2  # unknown
word_index["<UNUSED>"] = 3
# Inverse mapping (id -> word), used to turn encoded reviews back into text.
reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])
def decode_review(text, index_to_word=None):
    """Turn a sequence of word ids back into a human-readable string.

    Parameters
    ----------
    text : iterable of int
        An encoded review (sequence of word ids).
    index_to_word : dict, optional
        Mapping from id to word.  Defaults to the module-level
        ``reverse_word_index`` built above, so existing callers are
        unaffected; passing it explicitly makes the function reusable
        (and testable) with any vocabulary.

    Returns
    -------
    str
        The decoded words joined by spaces; unknown ids become ``'?'``.
    """
    if index_to_word is None:
        index_to_word = reverse_word_index
    return ' '.join(index_to_word.get(i, '?') for i in text)
# + [markdown] colab_type="text" id="U3CNRvEZVppl"
# Now we can use the `decode_review` function to display the text for the first review:
# + colab={} colab_type="code" id="s_OqxmH6-lkn"
decode_review(train_data[0])
# + [markdown] colab_type="text" id="lFP_XKVRp4_S"
# ## Prepare the data
#
# The reviews—the arrays of integers—must be converted to tensors before fed into the neural network. This conversion can be done a couple of ways:
#
# * Convert the arrays into vectors of 0s and 1s indicating word occurrence, similar to a one-hot encoding. For example, the sequence [3, 5] would become a 10,000-dimensional vector that is all zeros except for indices 3 and 5, which are ones. Then, make this the first layer in our network—a Dense layer—that can handle floating point vector data. This approach is memory intensive, though, requiring a `num_words * num_reviews` size matrix.
#
# * Alternatively, we can pad the arrays so they all have the same length, then create an integer tensor of shape `max_length * num_reviews`. We can use an embedding layer capable of handling this shape as the first layer in our network.
#
# In this tutorial, we will use the second approach.
#
# Since the movie reviews must be the same length, we will use the [pad_sequences](https://www.tensorflow.org/api_docs/python/tf/keras/preprocessing/sequence/pad_sequences) function to standardize the lengths:
# + colab={} colab_type="code" id="2jQv-omsHurp"
# Pad (or truncate) every review to exactly 256 tokens, appending <PAD> ids at
# the end, so both splits become rectangular integer tensors.
_padding_options = dict(value=word_index["<PAD>"], padding='post', maxlen=256)
train_data = keras.preprocessing.sequence.pad_sequences(train_data, **_padding_options)
test_data = keras.preprocessing.sequence.pad_sequences(test_data, **_padding_options)
# + [markdown] colab_type="text" id="VO5MBpyQdipD"
# Let's look at the length of the examples now:
# + colab={} colab_type="code" id="USSSBnkE-lky"
len(train_data[0]), len(train_data[1])
# + [markdown] colab_type="text" id="QJoxZGyfjT5V"
# And inspect the (now padded) first review:
# + colab={} colab_type="code" id="TG8X9cqi-lk9"
print(train_data[0])
# + [markdown] colab_type="text" id="LLC02j2g-llC"
# ## Build the model
#
# The neural network is created by stacking layers—this requires two main architectural decisions:
#
# * How many layers to use in the model?
# * How many *hidden units* to use for each layer?
#
# In this example, the input data consists of an array of word-indices. The labels to predict are either 0 or 1. Let's build a model for this problem:
# + colab={} colab_type="code" id="xpKOoWgu-llD"
# Input vocabulary size: reviews were encoded with the 10,000 most frequent
# words, so the embedding table needs one row per possible word id.
vocab_size = 10000
model = keras.Sequential([
    keras.layers.Embedding(vocab_size, 16),        # word id -> learned 16-d vector
    keras.layers.GlobalAveragePooling1D(),         # average over the sequence axis
    keras.layers.Dense(16, activation='relu'),     # hidden representation
    keras.layers.Dense(1, activation='sigmoid'),   # P(review is positive)
])
model.summary()
# + [markdown] colab_type="text" id="6PbKQ6mucuKL"
# The layers are stacked sequentially to build the classifier:
#
# 1. The first layer is an `Embedding` layer. This layer takes the integer-encoded vocabulary and looks up the embedding vector for each word-index. These vectors are learned as the model trains. The vectors add a dimension to the output array. The resulting dimensions are: `(batch, sequence, embedding)`.
# 2. Next, a `GlobalAveragePooling1D` layer returns a fixed-length output vector for each example by averaging over the sequence dimension. This allows the model to handle input of variable length, in the simplest way possible.
# 3. This fixed-length output vector is piped through a fully-connected (`Dense`) layer with 16 hidden units.
# 4. The last layer is densely connected with a single output node. Using the `sigmoid` activation function, this value is a float between 0 and 1, representing a probability, or confidence level.
# + [markdown] colab_type="text" id="0XMwnDOp-llH"
# ### Hidden units
#
# The above model has two intermediate or "hidden" layers, between the input and output. The number of outputs (units, nodes, or neurons) is the dimension of the representational space for the layer. In other words, the amount of freedom the network is allowed when learning an internal representation.
#
# If a model has more hidden units (a higher-dimensional representation space), and/or more layers, then the network can learn more complex representations. However, it makes the network more computationally expensive and may lead to learning unwanted patterns—patterns that improve performance on training data but not on the test data. This is called *overfitting*, and we'll explore it later.
# + [markdown] colab_type="text" id="L4EqVWg4-llM"
# ### Loss function and optimizer
#
# A model needs a loss function and an optimizer for training. Since this is a binary classification problem and the model outputs a probability (a single-unit layer with a sigmoid activation), we'll use the `binary_crossentropy` loss function.
#
# This isn't the only choice for a loss function, you could, for instance, choose `mean_squared_error`. But, generally, `binary_crossentropy` is better for dealing with probabilities—it measures the "distance" between probability distributions, or in our case, between the ground-truth distribution and the predictions.
#
# Later, when we are exploring regression problems (say, to predict the price of a house), we will see how to use another loss function called mean squared error.
#
# Now, configure the model to use an optimizer and a loss function:
# + colab={} colab_type="code" id="Mr0GP-cQ-llN"
# Binary classification with a single sigmoid output -> binary cross-entropy;
# Adam is a solid default optimizer.
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])
# + [markdown] colab_type="text" id="hCWYwkug-llQ"
# ## Create a validation set
#
# When training, we want to check the accuracy of the model on data it hasn't seen before. Create a *validation set* by setting apart 10,000 examples from the original training data. (Why not use the testing set now? Our goal is to develop and tune our model using only the training data, then use the test data just once to evaluate our accuracy).
# + colab={} colab_type="code" id="-NpcXY9--llS"
# Hold out the first 10,000 reviews (and their labels) for validation; the
# remaining 15,000 are used for actual weight updates.
validation_size = 10000
x_val, partial_x_train = train_data[:validation_size], train_data[validation_size:]
y_val, partial_y_train = train_labels[:validation_size], train_labels[validation_size:]
# + [markdown] colab_type="text" id="35jv_fzP-llU"
# ## Train the model
#
# Train the model for 40 epochs in mini-batches of 512 samples. This is 40 iterations over all samples in the `x_train` and `y_train` tensors. While training, monitor the model's loss and accuracy on the 10,000 samples from the validation set:
# + colab={} colab_type="code" id="tXSGrjWZ-llW"
# 40 full passes over the 15,000 training reviews in mini-batches of 512,
# reporting loss/accuracy on the held-out validation set after each epoch.
history = model.fit(partial_x_train,
                    partial_y_train,
                    epochs=40,
                    batch_size=512,
                    validation_data=(x_val, y_val),
                    verbose=1)
# + [markdown] colab_type="text" id="9EEGuDVuzb5r"
# ## Evaluate the model
#
# And let's see how the model performs. Two values will be returned. Loss (a number which represents our error, lower values are better), and accuracy.
# + colab={} colab_type="code" id="zOMKywn4zReN"
# Returns [test loss, test accuracy] for the 25,000 held-back reviews.
results = model.evaluate(test_data, test_labels)
print(results)
# + [markdown] colab_type="text" id="z1iEXVTR0Z2t"
# This fairly naive approach achieves an accuracy of about 87%. With more advanced approaches, the model should get closer to 95%.
# + [markdown] colab_type="text" id="5KggXVeL-llZ"
# ## Create a graph of accuracy and loss over time
#
# `model.fit()` returns a `History` object that contains a dictionary with everything that happened during training:
# + colab={} colab_type="code" id="VcvSXvhp-llb"
# Per-epoch metrics recorded by fit(): 'loss', 'accuracy' and their
# 'val_' counterparts.
history_dict = history.history
history_dict.keys()
# + [markdown] colab_type="text" id="nRKsqL40-lle"
# There are four entries: one for each monitored metric during training and validation. We can use these to plot the training and validation loss for comparison, as well as the training and validation accuracy:
# + colab={} colab_type="code" id="nGoYf2Js-lle"
import matplotlib.pyplot as plt

acc = history_dict['accuracy']
val_acc = history_dict['val_accuracy']
loss = history_dict['loss']
val_loss = history_dict['val_loss']
epochs = range(1, len(acc) + 1)

def _plot_history(train_series, val_series, label, ylabel, title):
    """Plot one metric: dots ('bo') for training, a solid line ('b') for validation."""
    plt.plot(epochs, train_series, 'bo', label='Training ' + label)
    plt.plot(epochs, val_series, 'b', label='Validation ' + label)
    plt.title(title)
    plt.xlabel('Epochs')
    plt.ylabel(ylabel)
    plt.legend()
    plt.show()

_plot_history(loss, val_loss, 'loss', 'Loss', 'Training and validation loss')
# + colab={} colab_type="code" id="6hXx-xOv-llh"
plt.clf() # clear figure
_plot_history(acc, val_acc, 'acc', 'Accuracy', 'Training and validation accuracy')
# + [markdown] colab_type="text" id="oFEmZ5zq-llk"
#
# In this plot, the dots represent the training loss and accuracy, and the solid lines are the validation loss and accuracy.
#
# Notice the training loss *decreases* with each epoch and the training accuracy *increases* with each epoch. This is expected when using a gradient descent optimization—it should minimize the desired quantity on every iteration.
#
# This isn't the case for the validation loss and accuracy—they seem to peak after about twenty epochs. This is an example of overfitting: the model performs better on the training data than it does on data it has never seen before. After this point, the model over-optimizes and learns representations *specific* to the training data that do not *generalize* to test data.
#
# For this particular case, we could prevent overfitting by simply stopping the training after twenty or so epochs. Later, you'll see how to do this automatically with a callback.
|
site/en/r2/tutorials/keras/basic_text_classification.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="mHk5rsUalwiO"
from tensorflow.keras.datasets.mnist import load_data
# + colab={"base_uri": "https://localhost:8080/"} id="_942GZVWqw42" outputId="410a0348-7dd9-4c01-c406-bb79ac77af00"
# 60,000 training and 10,000 test images of handwritten digits (28x28 pixels).
(x_train, y_train), (x_test, y_test) = load_data(path='mnist.npz')
x_train.shape, y_train.shape, x_test.shape, y_test.shape
# + id="0rqe6ZlnrEpO"
# Peek at one label and its raw pixel matrix.
y_train[4], x_train[4]
# + colab={"base_uri": "https://localhost:8080/", "height": 283} id="unnNAtFnsQtP" outputId="e88e2ad8-419a-4a43-a1a6-b9ed67af2e4e"
import matplotlib.pyplot as plt
plt.imshow(x_train[50000])
print(y_train[50000], type(y_train[50000]))
# + colab={"base_uri": "https://localhost:8080/"} id="5hotxXv9tCXf" outputId="9b5e2b72-2d08-4d4e-af8d-2c6bc07e36c1"
# Flatten each 28x28 image to a 784-vector and scale pixel values to [0, 1].
x_train = x_train.reshape(-1, 28*28) / 255
x_train.shape
# + colab={"base_uri": "https://localhost:8080/"} id="YEoS0yY7uqsv" outputId="1dc6fa8a-9c7d-439b-cfad-cdb8837f33c6"
x_test = x_test.reshape(-1, 28*28) / 255
x_test.shape
# + colab={"base_uri": "https://localhost:8080/"} id="_XOzr4enwGIx" outputId="ae1e1ef4-7a2f-4692-eaa9-ba62a7c17ad3"
y_train[2:10], y_train.shape
# + colab={"base_uri": "https://localhost:8080/"} id="4N_GijBdxpY9" outputId="aaa25817-eada-4a89-ca9d-f92cc9b9f9b7"
import numpy as np
# Labels are the digit classes 0-9.
np.unique(y_train)
# + [markdown] id="7b5vGJEHwJTx"
# # apply model
# + id="WLh86xv8wLh4"
import tensorflow as tf
# + colab={"base_uri": "https://localhost:8080/"} id="9YBsou_kwRCQ" outputId="64a13960-491b-4534-a82f-ba1191b0d7b3"
# Fully connected classifier: 784 input pixels -> two 64-unit ReLU layers
# -> 10 softmax outputs (one probability per digit class).
layer_stack = [
    tf.keras.Input(shape=(784,)),                      # input: flattened 28x28 image
    tf.keras.layers.Dense(64, activation='relu'),      # hidden
    tf.keras.layers.Dense(64, activation='relu'),
    # 3+ classes -> softmax output (binary would use a single sigmoid).
    tf.keras.layers.Dense(10, activation='softmax'),   # output
]
model = tf.keras.models.Sequential(layer_stack)
# Loss cheat-sheet:
#   regression              -> mse
#   binary classification   -> binary_crossentropy
#   3+ class classification -> categorical_crossentropy
#                              (sparse_* variant when labels are integer ids)
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['acc'])
# + colab={"base_uri": "https://localhost:8080/"} id="lZyOcQJ8_PjH" outputId="9c2c5231-6668-469a-a1cf-85b5e6a33415"
hist = model.fit(x_train, y_train, epochs=100, validation_split=0.3)
# + [markdown] id="Njg34SrrKvXJ"
# # evaluation
# + colab={"base_uri": "https://localhost:8080/"} id="1ITt8KuxLIrm" outputId="71d03563-e5b2-4cd5-df85-0102ced2cd4c"
# Per-epoch metrics recorded by fit(): loss/acc and their val_ counterparts.
hist.history.keys()
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="4E8n7yyk_npO" outputId="681f62b1-b377-40a1-8e64-9b875dac667d"
import matplotlib.pyplot as plt
# Training vs. validation loss; divergence indicates overfitting.
plt.plot(hist.history['loss'])
plt.plot(hist.history['val_loss'], 'b-')
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="K3kgroEWLaAC" outputId="4951d070-7bb3-4f1a-a083-98a046d4ca8e"
# Training vs. validation accuracy.
plt.plot(hist.history['acc'])
plt.plot(hist.history['val_acc'], 'r-')
plt.show()
# + [markdown] id="waUJbDwZMpro"
# # service
# + colab={"base_uri": "https://localhost:8080/"} id="RefBdd7SMoLs" outputId="2f011395-7820-4fe2-9990-815a04242f1e"
# Ground-truth label for the sample predicted below.
y_test[30]
# + colab={"base_uri": "https://localhost:8080/"} id="-HbDMb7yMtX8" outputId="35dcfc3a-e4fc-4b6e-9197-0f3666f1ebde"
import numpy as np
# np.set_printoptions(precision=8)
# Slice with 30:31 (not plain 30) to keep the leading batch dimension.
pred = model.predict(x_test[30:31])
pred
# + colab={"base_uri": "https://localhost:8080/"} id="97CMDgZFnj0c" outputId="e69e1e9b-a986-47b2-f71b-492dd6aa10d7"
# SavedModel directory format.
model.save('./model_save')
# + id="rQKR06dl6kd6"
# Single-file HDF5 format.
model.save('./model_save01.h5')
# + colab={"base_uri": "https://localhost:8080/"} id="t7zobRL87Hmk" outputId="12c3edb8-91fb-4035-8675-0908375cb5a3"
model_load = tf.keras.models.load_model('./model_save01.h5')
model_load
# + colab={"base_uri": "https://localhost:8080/"} id="XXfPjE7S8Y4-" outputId="07a18044-fbda-4644-ff59-7b20748a8d0d"
# Sanity check: the reloaded model should reproduce the prediction above.
load_pred = model_load.predict(x_test[30:31])
load_pred
# + id="8XqPMuZu9Qe-"
|
mnist_classification.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Naive Bayes Lab
# ## Text Classification with Naive Bayes
#
#
# <h3>1. Reading files and storing their contents in 2 string arrays.</h3>
# Root of the movie-review corpus (neg/ and pos/ subfolders of .txt files).
data_dir = "../../data_ml_2020/movies_reviews"
import os
cwd = os.getcwd()
# NOTE(review): chdir into the directory we are already in is a no-op; it can
# likely be removed.
os.chdir(cwd)
print(os.listdir(data_dir))
# First, we put reviews from negative folder into neg_lst string array and put reviews from positive folder into pos_lst string array
# +
def _read_reviews(folder):
    """Return the contents of every .txt file in *folder* as a list of strings.

    File names are iterated in sorted order so the resulting list is
    deterministic across runs (os.listdir order is platform-dependent).
    """
    reviews = []
    for name in sorted(os.listdir(folder)):
        if name.endswith('.txt'):
            with open(os.path.join(folder, name)) as f:
                reviews.append(f.read())
    return reviews

# Load the two classes; the same loader replaces the previously duplicated loops.
neg_lst = _read_reviews(data_dir + "/neg")
print("Num of negative reviews: {}".format(len(neg_lst)))
pos_lst = _read_reviews(data_dir + "/pos")
print("Num of positive reviews: {}".format(len(pos_lst)))
# -
# <h3>2. Assigning class labels based on directory and combining small lists into one big list</h3>
#
# We assign 1 for negative reviews and 0 for positive reviews. We also combine two lists into one list. At first, there were two lists: pos_lst and neg_lst. So we combine it into 1 big list X. Also, we combine the 1's and 0's together into one list Y. Y contains class label (ie. 1 for negative and 0 for positive)
# +
import numpy as np
# Class labels: 1 = negative review, 0 = positive review.
Y_neg = np.ones((len(neg_lst),)) # ones for negative
Y = np.concatenate((Y_neg, np.zeros((len(pos_lst),)))) # zeros for positive
# The raw review texts, in the same order as the labels in Y.
X = np.concatenate((neg_lst,pos_lst))
# -
# <h3>3. Split Data</h3>
#
# We then split the big list into training and testing set for both X and Y.
# +
from sklearn.model_selection import train_test_split
#split dataset into train and test data
# 80/20 split; random_state pins the shuffle so runs are reproducible.
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2,random_state=20)
# -
# <h3>4. Transformation</h3>
#
# We then apply transformations. After assigning a unique number to each word in the text, we count the occurrence of each word. In other words, CountVectorizer transforms a given text into a vector on the basis of the count of each word in each document. So CountVectorizer creates a matrix where each row represents a document and each column represents a unique word. The value at a particular cell tells us how many times the word (represented by the column) occurs in the document (represented by the row).
#
# Right now, if we weigh these words equally, we would find words like 'they' which occurs almost in every texts as important. So we need to find a way to overcome this problem.
#
# We can use TFidfTransformer.
# We pass the array of counts (matrix) from CountVectorizer to Term Frequency Inverse Document Frequency (TfidfTransformer), which counts the number of times a word appears in a document and we give weights to the word so that word such as "the" will be less significant even though it occurs many times in many documents.
#
#
# +
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
# Bag-of-words counts: one row per review, one column per vocabulary word.
count_vect = CountVectorizer()
X_train_count = count_vect.fit_transform(X_train)
# Reweight the raw counts by tf-idf so ubiquitous words ("the", "they") stop
# dominating the feature space.
tfidf_transformer = TfidfTransformer()
X_train_tfidf = tfidf_transformer.fit_transform(X_train_count)
# -
# <h3>5. Training the Model</h3>
#
# We will then build a model using the training dataset. I use ComplementNB as it works well with text classification, and we see that the data is imbalanced because there are 1000 negative reviews and 1005 positive reviews. ComplementNB will give higher accuracy when the dataset is imbalanced compared to MultinomialNB and GaussianNB.
#
# ComplementNB is an adapted form of MultinomialNB. MultinomialNB does not work well on imbalanced datasets, meaning datasets where the number of examples of one class is higher than the number of examples of another class. In this case, we have an imbalanced dataset since the number of positive reviews > the number of negative reviews. Even though the number of positive reviews is not that much greater than the number of negative reviews, I would consider using ComplementNB. So basically, as the difference is not that great, we could still use MultinomialNB.
#
# Actually I have tried out both the MultinomialNB and ComplementNB and finds that ComplementNB was able to produce a little bit better accuracy (results are shown below) so I plan to use ComplementNB.
# +
from sklearn.naive_bayes import MultinomialNB
from sklearn.naive_bayes import ComplementNB
#clf = MultinomialNB().fit(X_train_tfidf, Y_train)
# ComplementNB handles the (mild) class imbalance in this corpus better than
# MultinomialNB; see the accuracy comparison noted in the markdown below.
clf = ComplementNB().fit(X_train_tfidf, Y_train)
# +
# to get accuracy for training set
acc_train = clf.score(X_train_tfidf, Y_train)
print("train accuracy: {} ".format(acc_train))
# testing with accuracy_score
predicted = clf.predict(X_train_tfidf)
from sklearn.metrics import accuracy_score
print("train accuracy", accuracy_score(Y_train, predicted))
# +
# first transform
# Use transform (not fit_transform) so the test set is projected onto the
# vocabulary/idf weights learned from the training set only.
X_test_counts = count_vect.transform(X_test)
X_test_tfidf = tfidf_transformer.transform(X_test_counts)
# to get accuracy for testing set
acc_test = clf.score(X_test_tfidf, Y_test)
print("test accuracy: {} ".format(acc_test))
# testing with accuracy_score
predicted = clf.predict(X_test_tfidf)
print("test accuracy", accuracy_score(Y_test, predicted))
# -
# <h4>Note:</h4>
# I have tried it with both multinomialNB and ComplementNB. As shown below, we see that ComplementNB gives a better accuracy for test set than multinomialNB.
#
# with MultinomialNB ->
#
# train accuracy: 0.9675810473815462
#
# test accuracy: 0.8254364089775561
#
#
# with ComplementNB ->
#
# train accuracy: 0.9706982543640897
#
# test accuracy: 0.8354114713216958
#
# <h3>6. Testing with New Reviews</h3>
#
# <p>
# I have taken 5 new movie reviews from IMDB and the ones I have taken have ratings up to 10.
# So we use them to see how our classifier will perform on this new dataset.
#
# The link for new reviews is
# <a href="https://drive.google.com/drive/folders/1XRjgANGakdyKKMXL9zZFz67mtwIt7lCr?usp=sharing">here
# </a>:
#
# </p>
# Five fresh IMDB reviews (not part of the training corpus) used as a
# hold-out sanity check.
new_data_dir = "../../data_ml_2020/movies_reviews/new_movies_reviews"
new_reviews_lst = []
# sorted() keeps the review order stable so predictions line up with the
# numbered files described below.
for path in sorted(os.listdir(new_data_dir)):
    if path.endswith('.txt'):
        with open(new_data_dir + '/' + path) as f:
            new_reviews_lst.append(f.read())
# +
# Same transform-only pipeline as for the held-out test set.
X_new_counts = count_vect.transform(new_reviews_lst)
X_new_tfidf = tfidf_transformer.transform(X_new_counts)
predicted = clf.predict(X_new_tfidf)
print(predicted)
# 0's for positive and 1's for negative
# -
# <h4>Note:</h4>
# <p>
# I consider above half (ie. above 5/10) to be positive and the rest to be negative
# </p>
#
# <h4>Result</h4>
#
# [0. 0. 0. 1. 0.]
#
# <ul>
# <li>
# 1st file ->
#
# Rating: 7/10
# predicted: <b>positive</b>
# actual: <b>positive</b>
# </li>
#
# <li>
# 2nd file ->
#
# Rating: 10/10;
# predicted: <b>positive</b>
# actual: <b>positive</b>
# </li>
#
# <li>
# 3rd file ->
#
# Rating: 3/10;
# predicted: <b>positive</b>
# actual: <b>negative</b> ;
# </li>
#
# <li>
# 4th file ->
#
# Rating: 3/10;
# predicted: <b>negative</b> ;
# actual: <b>negative</b>
# </li>
#
# <li>
# 5th file ->
#
# Rating: 8/10;
# predicted: <b>positive</b>. ;
# actual: <b>positive</b>
# </li>
#
# </ul>
#
# <p>Out of all 5 new reviews, the classifier was able to predict 4 correctly.</p>
#
# <p>
# The classifier was not able to predict correctly for the 3rd text file. When I look at that review, I see things like "A 3-star is probably the best rating that I can give here." So I think the classifier associates 'best' as positive even though that person only gives 3 out of 10 stars. Then that person explains why 3 star was given. When explaining that, there are some positive words and maybe that's why the classifier thinks it's a positive review whereas it's actually a negative review. That person also states "IMDB won't allow me to give a ZERO star for the worst movie of all time".
# </p>
#
#
#
# <h3>7. References</h3>
#
# https://scikit-learn.org/stable/modules/generated/sklearn.naive_bayes.ComplementNB.html#sklearn.naive_bayes.ComplementNB
#
# https://heartbeat.fritz.ai/understanding-naive-bayes-its-applications-in-text-classification-part-1-ec9caea4baae
#
# https://medium.com/analytics-vidhya/tf-idf-term-frequency-technique-easiest-explanation-for-text-classification-in-nlp-with-code-8ca3912e58c3
#
# https://www.geeksforgeeks.org/complement-naive-bayes-cnb-algorithm/
#
# For new review 1: https://www.imdb.com/review/rw0893947/?ref_=ur_urv
#
# For new review 2: https://www.imdb.com/review/rw2764760/?ref_=tt_urv
#
# For new review 3: https://www.imdb.com/review/rw6094710/?ref_=ur_urv
#
# For new review 4: https://www.imdb.com/review/rw3315212/?ref_=tt_urv
#
# For new review 5: https://www.imdb.com/review/rw2770824/?ref_=tt_urv
|
Text_Classification_with_Naive_Bayes.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import json
# Load a raw dump of Coinbase historic rates:
# {product_id: [[time, low, high, open, close, volume], ...]}.
with open('../tmp/history.2018.log', 'r') as f:
    data = json.loads(f.read())
# -
data.keys()
data['BAT-USDC'][:10]
# +
import collections
import json
from coind.tickers import ticker_date_format
historical_sample = collections.namedtuple('HistoricalSample',
                                           ('time', 'product_id', 'price', 'low', 'high', 'volume'))


def stream_historical(path, date_format=None):
    """Yield ticker-style JSON strings rebuilt from a historic-rates dump.

    The file at *path* holds ``{product_id: [[time, low, high, open, close,
    volume], ...]}``.  Samples from all products are merged, sorted
    chronologically, and re-emitted as one JSON ticker message per yield so
    they can be replayed through code that normally consumes a live ticker
    stream.

    date_format defaults to ``ticker_date_format`` from ``coind.tickers``;
    it is a parameter so the function can be reused (and tested) without
    that module-level constant.
    """
    # Imported here because this cell runs before the later cell that does
    # ``from datetime import datetime`` -- without this local import the
    # fromtimestamp() call below raised NameError on a top-to-bottom run.
    from datetime import datetime

    if date_format is None:
        date_format = ticker_date_format

    with open(path, 'r') as f:
        data = json.loads(f.read())

    stream = []
    for product in data:
        for msg in data[product]:
            # Error payloads are dicts; iterating one yields the key
            # 'message', which we skip.
            if msg == 'message':
                continue
            time, low, high, _, close, volume = msg
            time = datetime.fromtimestamp(time)
            sample = historical_sample(time, product, close, low, high, volume)
            stream.append(sample)

    # Replay order must be chronological across all products.
    stream.sort(key=lambda s: s.time)

    for sample in stream:
        ticker = {
            "best_ask": None, "best_bid": None, "high_24h": sample.high, "last_size": None, "low_24h": sample.low,
            "open_24h": None, "price": sample.price, "product_id": sample.product_id, "sequence": None,
            "side": None, "time": sample.time.strftime(date_format), "trade_id": None, "type": "ticker",
            "volume_24h": sample.volume, "volume_30d": None,
        }
        yield json.dumps(ticker, sort_keys=True)
path = '../tmp/history.2015.log'
# NOTE(review): the load cell above reads history.2018.log and the next cell
# heads history.2019.stream.log -- the year here appears to be edited by hand
# per run; confirm which dump you mean to convert.
with open('../tmp/history.2015.stream.log', 'w') as f:
    for tick in stream_historical(path):
        f.write(f'{tick}\n')
# -
# !head ../tmp/history.2019.stream.log
# NOTE(review): `samples` is never defined in this notebook -- these two
# cells raise NameError unless it was created interactively; they look like
# leftovers from an earlier revision.
samples[:10]
len(samples)
# !tail ../data/spec/stream.0.log
import cbpro
public_client = cbpro.PublicClient()
# +
# One day of ETH-USD candles at 5-minute (300 s) granularity.
rates = public_client.get_product_historic_rates('ETH-USD',
    '2019-01-01 00:00:00-05:00', '2019-01-02 00:00:00-05:00', granularity=300)
# -
rates
# +
from datetime import datetime
print(len(rates))
# The first column of each candle is its epoch timestamp.
for r in rates:
    print(datetime.fromtimestamp(r[0]))
# -
|
notebooks/historical.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# ## Table Tutorial
#
# [Table](https://hail.is/docs/0.2/hail.Table.html) is Hail's distributed analogue of a data frame or SQL table. It will be familiar if you've used R or `pandas`, but `Table` differs in 3 important ways:
#
# - It is distributed. Hail tables can store far more data than can fit on a single computer.
# - It carries global fields.
# - It is keyed.
#
# A `Table` has two different kinds of fields:
#
# - global fields
# - row fields
# + [markdown] slideshow={"slide_type": "slide"}
# ### Importing and Reading
#
# Hail can [import](https://hail.is/docs/0.2/methods/impex.html) data from many sources: TSV and CSV files, JSON files, FAM files, databases, Spark, etc. It can also *read* (and *write*) a native Hail format.
#
# You can read a dataset with [hl.read_table](https://hail.is/docs/0.2/methods/impex.html#hail.methods.read_table). It takes a path and returns a `Table`. `ht` stands for Hail Table.
#
# We've provided a method to download and import [the MovieLens dataset](https://grouplens.org/datasets/movielens/100k/) of movie ratings in the Hail native format. Let's read it!
#
# <NAME> and <NAME>. 2015. The MovieLens Datasets: History and Context. ACM Transactions on Interactive Intelligent Systems (TiiS) 5, 4, Article 19 (December 2015), 19 pages. DOI=https://dx.doi.org/10.1145/2827872.
# -
import hail as hl
# Starts the backing Spark context; call once per session.
hl.init()
# Downloads the MovieLens-100k tables into data/ in Hail's native format.
hl.utils.get_movie_lens('data/')
# + slideshow={"slide_type": "fragment"}
users = hl.read_table('data/users.ht')
# + [markdown] slideshow={"slide_type": "slide"}
# ### Exploring Tables
#
# The [describe](https://hail.is/docs/0.2/hail.Table.html#hail.Table.describe) method prints the structure of a table: the fields and their types.
# + slideshow={"slide_type": "slide"}
users.describe()
# + [markdown] slideshow={"slide_type": "slide"}
# You can view the first few rows of the table using [show](https://hail.is/docs/0.2/hail.Table.html#hail.Table.show).
#
# 10 rows are displayed by default. Try changing the code in the cell below to `users.show(5)`.
# + slideshow={"slide_type": "slide"}
users.show()
# + [markdown] slideshow={"slide_type": "slide"}
# You can [count](https://hail.is/docs/0.2/hail.Table.html#hail.Table.count) the rows of a table.
# + slideshow={"slide_type": "fragment"}
users.count()
# + [markdown] slideshow={"slide_type": "slide"}
# You can access fields of tables with the Python attribute notation `table.field`, or with index notation `table['field']`. The latter is useful when the field names are not valid Python identifiers (if a field name includes a space, for example).
# + slideshow={"slide_type": "slide"}
users.occupation.describe()
# + slideshow={"slide_type": "slide"}
users['occupation'].describe()
# + [markdown] slideshow={"slide_type": "slide"}
# `users.occupation` and `users['occupation']` are [Hail Expressions](https://hail.is/docs/0.2/expressions.html)
#
# Let's peek at them using `show`. Notice that the key is shown as well!
# + slideshow={"slide_type": "slide"}
users.occupation.show()
# + [markdown] slideshow={"slide_type": "slide"}
# ### Exercise
#
# The movie dataset has two other tables: `movies.ht` and `ratings.ht`. Load these tables and have a quick look around.
# -
|
hail/python/hail/docs/tutorials/03-tables.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import re
from multiprocessing import Pool
from bs4 import BeautifulSoup
from bs4.element import NavigableString as n_str
from lxml import etree
from scraping import standard_works
from text_processing import flatten
# -
from time import time

# Benchmark standard_works() across a range of pool sizes (7..11).
for i in range(7, 12):
    start = time()
    standard_works(size=i)
    # BUG FIX: original printed `start - time()`, which is always a
    # NEGATIVE duration; elapsed time is `time() - start`.
    print(f"For processor size {i} the time was {time() - start}")
# +
# %%time
corpus = standard_works()
# +
# Report any chapters that failed to scrape (collected under the
# "failures" collection node of the corpus XML).
errors = corpus.xpath('//collection[@name="failures"]')
if not errors:
    print("No errors detected.")
else:
    failures = etree.fromstring(etree.tostring(errors[0]))
    failures = failures.xpath('//chapter')
    print(f"The number of failures was: {len(failures)}")
    print(etree.tostring(failures[0]))
# +
# Examines lengths of the available chapters
lengths = sorted(
    [
        (len(flatten(c)), c.attrib['source'])
        for c in corpus.xpath('//chapter')
    ],
    key=lambda x: x[0]
)
print(
    f"The shortest chapter is {lengths[0]}"
)
# +
# Sanity check for encoding problems: chapters whose flattened text
# contains any non-ASCII characters.
nonascii = [
    c
    for c in corpus.xpath('//chapter')
    if not flatten(c).isascii()
]
print(f"Number of nonascii chapters is: {len(nonascii)}")
if nonascii:
    print(etree.tostring(nonascii[0]))
# +
# Examination of a specific chapter
# NOTE(review): `process_chapter` and `get` are not imported in this
# notebook (only `standard_works` and `flatten` are). Presumably they are
# `scraping.process_chapter` and `requests.get`; as written this cell
# raises NameError -- confirm and add the imports.
url = "https://www.churchofjesuschrist.org/study/scriptures/ot/title-page?lang=eng"
chapter = process_chapter(
    get(url),
    url,
    url.split('scriptures')[1].split('/'),
)[0]
print(chapter)
chapter = etree.fromstring(chapter)
verse = 6
verse = chapter.xpath(f'//verse[@number={verse}]')[0]
verse.text
|
Scrape Corpus.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Interactive Map of Playgrounds in Houston, TX
#
# Playing outside, climbing, jumping, swinging and laughing is not only one of the most quintessential childhood activities, it is an indispensable need for children during their development - physically, cognitively and socially. This is especially important today, with children spending less time engaged in outdoor play, and more time glued to screens. Schools are another culprit - trading valuable time spent playing during recess for increased indoor classroom time in an effort to boost test-scores. This strategy, although seemingly common sense, is counter to the findings of numerous research studies showing increased academic performance and better behavioural outcomes in children who are provided with longer (30-60 minutes) recess breaks in the middle of the day.
#
# In addition, the concomitant rise in childhood obesity and development of near-sightedness along with decreasing time spent playing outside is alarming, but the solution is simple.
#
# 
#
#
# ### Increase outdoor playtime for children.
#
# One strategy would be to rally parents to petition their school boards and School Health Advisory Councils to increase recess for their children, and to protect this time from being reduced in favor of cramming for tests. One children's advocacy group, [Children at Risk](https://childrenatrisk.org/wp-content/uploads/2018/07/CHILDREN-AT-RISK-School-Recess-Report.pdf), is working on just that. Unfortunately, changing school policies is likely a very slow and tedious process, and parents may want to take measures into their own hands, and take their children to playgrounds more often.
#
# The Houston Texas Parks and Recreation Department has a list of all public Houston playgrounds with a small description of the kinds of equipment present. I have created a simple interactive map to help parents find playgrounds near them.
#
# *For more information on the effects of outdoor playtime on children's health and performance, please see references at the bottom.*
#
# +
import requests
import lxml.html as lh
import pandas as pd
import numpy as np
from geopy.geocoders import Nominatim
import math
from time import sleep
#PART 1: Get the playground data from the Houston TX Parks & Recreation Department website
#Scraping tables from a website adapted from https://towardsdatascience.com/web-scraping-html-tables-with-python-c9baba21059
def check_table(url):
    """Return the cell counts of the first 12 table rows at *url*.

    Handy for eyeballing whether a page's <tr> rows have a uniform number
    of columns before committing to scraping them.
    """
    response = requests.get(url)
    document = lh.fromstring(response.content)
    rows = document.xpath('//tr')
    return [len(row) for row in rows[:12]]
def get_playgrounds(url, skip = 0):
    """Scrape the playground table at *url* into a pandas DataFrame.

    Rows are read from the page's <tr> elements; after dropping *skip*
    leading rows, the first remaining row supplies the column headers.

    NOTE(review): cells in column 0 are never appended (the `if i>0`
    guard below), so the first header keeps an empty value list;
    pd.DataFrame raises on unequal column lengths unless that column is
    genuinely header-only on the live pages -- confirm.
    """
    #Create a handle, page, to handle the contents of the website
    page = requests.get(url)
    #Store the contents of the website under doc
    doc = lh.fromstring(page.content)
    #Parse data that are stored between <tr>..</tr> of HTML
    tr_elements = doc.xpath('//tr')
    tr_elements = tr_elements[skip:]
    #Create empty list
    col=[]
    i=0
    #For each row, store each first element (header) and an empty list
    for t in tr_elements[0]:
        i+=1
        name=t.text_content()
        col.append((name,[]))
    #Since our first row is the header, data is stored on the second row onwards
    for j in range(1,len(tr_elements)):
        #T is our j'th row
        T=tr_elements[j]
        #If row is not of size 5, the //tr data is not from our table
        if len(T)!=5:
            break
        #i is the index of our column
        i=0
        #Iterate through each element of the row
        for t in T.iterchildren():
            data=t.text_content()
            #Skip the first cell of each row
            if i>0:
                #Convert any numerical value to integers
                try:
                    data=int(data)
                except:
                    pass
                #Append the data to the empty list of the i'th column
                col[i][1].append(data)
            #Increment i for the next column
            i+=1
    #Create a dataframe
    Dict={title:column for (title,column) in col}
    return pd.DataFrame(Dict)
#urls of playground pages
urls = ['https://www.houstontx.gov/parks/playgroundsA-F.html', 'https://www.houstontx.gov/parks/playgroundsG-N.html',
        'https://www.houstontx.gov/parks/playgroundsO-Z.html']
#Get playground tables into dataframes
# (skip=1 drops the first <tr> on each page -- presumably a banner/header
# row outside the data table; confirm against the live pages)
play_1 = get_playgrounds(urls[0], skip = 1)
play_2 = get_playgrounds(urls[1], skip = 1)
play_3 = get_playgrounds(urls[2], skip = 1)
#Combine dataframes
playgrounds = pd.concat([play_1, play_2, play_3])
playgrounds.reset_index(drop = True, inplace = True)
# +
##PART 2: Clean the data and gather geo data for mapping
#Create columns for more data
playgrounds["Zipcode"] = 0
playgrounds["CityState"] = "Houston, TX"
playgrounds["FullAddress"] = ""
# Latitude/Longitude start as NaN; the geocoding loop below fills them in
# where a lookup succeeds, so NaN afterwards means "could not geolocate".
playgrounds["Latitude"] = np.nan
playgrounds["Longitude"] = np.nan
#Detach Zipcodes from the Addresses: when an address ends in a 5-digit
#zip, move the zip into its own column and trim it off the address.
for i in range(len(playgrounds.Address)):
    if playgrounds.Address[i][-5:].isdigit():
        playgrounds.at[i,"Zipcode"] = playgrounds.at[i,"Address"][-5:]
        playgrounds.at[i,"Address"] = playgrounds.at[i,"Address"][:-6]
        # BUG FIX: str.rstrip() returns a NEW string (strings are
        # immutable); the original discarded the result, leaving any
        # trailing whitespace in place. Assign it back.
        playgrounds.at[i,"Address"] = playgrounds.at[i,"Address"].rstrip()
#Get the latitudes and longitudes of all the playgrounds for mapping
geolocator = Nominatim(user_agent="Houston_Playgrounds")
for i in range(len(playgrounds.Address)):
    # NOTE(review): after the detach loop, Zipcode holds either int 0 or a
    # 5-char string; math.isnan() raises TypeError on a string -- confirm
    # this guard behaves as intended on real data.
    if math.isnan(playgrounds.Zipcode[i]):
        continue
    location = geolocator.geocode(playgrounds.Address[i]+" "+playgrounds.CityState[i])
    if location is not None:
        playgrounds.at[i,"FullAddress"] = location.address
        playgrounds.at[i,"Latitude"] = location.latitude
        playgrounds.at[i,"Longitude"] = location.longitude
    else:
        # Fall back to geocoding by facility name when the street address fails.
        location = geolocator.geocode(playgrounds["Facility Name"][i]+", Houston, TX")
        if location is not None:
            playgrounds.at[i,"FullAddress"] = location.address
            playgrounds.at[i,"Latitude"] = location.latitude
            playgrounds.at[i,"Longitude"] = location.longitude
    # NOTE(review): Nominatim's usage policy asks for at most 1 request per
    # second; 0.10s between requests may be too aggressive.
    sleep(0.10)
playgrounds.head(10)
# +
#Not all of the playgrounds were able to be geolocated
#Grab all addresses that do not have latitudes and longitudes
# (rows that would need manual geocoding / correction)
need_address = (playgrounds
                .where((playgrounds["Latitude"].isnull()== True) & (playgrounds["Address"].isnull() == False))
                .dropna(subset = ["Address"]))
need_address.reset_index(inplace = True)
# +
#PART 3: Create the map of playgrounds
def plot_playgrounds(df):
    """Render an interactive folium map of the playgrounds in *df*.

    Rows with any missing values (e.g. failed geocodes) are dropped first.
    Relies on the module-level `geolocator` to center the map on Houston.
    Markers are orange for playgrounds with swings, teal otherwise.
    """
    import folium
    playgrounds = df.copy()
    playgrounds.dropna(inplace = True)
    playgrounds.reset_index(inplace = True, drop = True)
    # generate a new map
    location = geolocator.geocode("Houston, TX")
    folium_map = folium.Map(location=[location.latitude, location.longitude],
                            zoom_start=11,
                            tiles="CartoDB positron",
                            width='100%')
    # for each row in the data, add a circle marker
    for index, row in playgrounds.iterrows():
        # generate the popup message that is shown on click.
        popup_text = "<b>{}</b><br> {}<br><br> Play equipment: {}<br> Swings: {}<br> Other equipment: {}"
        popup_text = popup_text.format(row["Facility Name"],
                                       row["Address"],
                                       row["Play Equip."],
                                       row["Swings"],
                                       row["Other Equip."])
        # radius of circles
        radius = 5
        # choose the color of the marker
        if row["Swings"] == "X":
            color="#E37222" # tangerine
        else:
            color="#0A8A9F" # teal
        # add marker to the map
        folium.CircleMarker(location=(row["Latitude"],
                                      row["Longitude"]),
                            radius=radius,
                            color=color,
                            popup=popup_text,
                            fill=True).add_to(folium_map)
    return folium_map

playgrounds_map = plot_playgrounds(playgrounds)
# +
#Create a draggable legend for the map
#Solution adapted from http://nbviewer.jupyter.org/gist/talbertc-usgs/18f8901fc98f109f2b71156cf3ac81cd
from branca.element import Template, MacroElement
# The template below is a Jinja2 macro: branca renders it into the exported
# HTML, giving the map a floating legend that jQuery UI makes draggable.
template = """
{% macro html(this, kwargs) %}
<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>jQuery UI Draggable - Default functionality</title>
<link rel="stylesheet" href="//code.jquery.com/ui/1.12.1/themes/base/jquery-ui.css">
<script src="https://code.jquery.com/jquery-1.12.4.js"></script>
<script src="https://code.jquery.com/ui/1.12.1/jquery-ui.js"></script>
<script>
$( function() {
$( "#maplegend" ).draggable({
start: function (event, ui) {
$(this).css({
right: "auto",
top: "auto",
bottom: "auto"
});
}
});
});
</script>
</head>
<body>
<div id='maplegend' class='maplegend'
style='position: absolute; z-index:9999; border:2px solid grey; background-color:rgba(255, 255, 255, 0.8);
border-radius:6px; padding: 10px; font-size:16px; right: 20px; bottom: 20px;'>
<div class='legend-title'>Houston Playgrounds</div>
<div class='legend-scale'>
<ul class='legend-labels'>
<li><span style='background:#0A8A9F;opacity:0.7;'></span>Basic equipment</li>
<li><span style='background:#E37222;opacity:0.7;'></span>Swings</li>
<div class = 'legend-footer'>Data from Houston Parks & Rec. Dept</div>
</ul>
</div>
</div>
</body>
</html>
<style type='text/css'>
.maplegend .legend-title {
text-align: left;
margin-bottom: 5px;
font-weight: bold;
font-size: 90%;
}
.maplegend .legend-footer {
text-align: left;
margin-top: 5px;
margin-bottom: 0px;
font-size: 50%;
}
.maplegend .legend-scale ul {
margin: 0;
margin-bottom: 5px;
padding: 0;
float: left;
list-style: none;
}
.maplegend .legend-scale ul li {
font-size: 80%;
list-style: none;
margin-left: 0;
line-height: 18px;
margin-bottom: 2px;
}
.maplegend ul.legend-labels li span {
display: block;
float: left;
height: 16px;
width: 30px;
margin-right: 5px;
margin-left: 0;
border: 1px solid #999;
}
.maplegend .legend-source {
font-size: 80%;
color: #777;
clear: both;
}
.maplegend a {
color: #777;
}
</style>
{% endmacro %}"""
macro = MacroElement()
# NOTE(review): `_template` is a private branca attribute; there is no
# public setter, but this is the widely used workaround for custom legends.
macro._template = Template(template)
playgrounds_map.get_root().add_child(macro)
# +
# Saving the map to an html file so it can be shared/embedded outside the notebook
outfp = "Houston_playgrounds_map.html"
# Save the map
playgrounds_map.save(outfp)
# -
# ### References
#
# **Benefits of Recess:**
#
# * American Academy of Pediatrics, “Policy Statement: The Crucial Role of Recess in School,” Pediatrics 131, no. 1 (2013): 186.
# * <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>, “The School Health Policies and Programs Study (SHPPS): Context, Methods, General Findings, and Future Efforts,” Journal of School Health 65 (1995): 339.
# * Promoting Better Health for Young People through Physical Activity and Sports (Washington, DC: U.S. Department of Health and Human Services and U.S. Department of Education), app. 7, www.thenewpe.com/advocacy/promotingPA.pdf.
# * <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>, “Impact of Recess on Classroom Behavior: Group Effects and Individual Differences,” Journal of Educational Research 92 (1998): 121–126.
# * Centers for Disease Control and Prevention, “School Health Policies and Practices Study: 2014 Overview” (Atlanta: U.S. Department of Health and Human Services, 2015), 1.
# * <NAME>, <NAME>, and <NAME>, “School Recess and Group Classroom Behavior,” Pediatrics 123 (2009): 431–436.
# * Centers for Disease Control and Prevention, “Guidelines for School and Community Programs to Promote Lifelong Physical Activity among Young People,” Morbidity and Mortality Weekly Report 46, no. RR-6 (March 7, 1997): 12.
# * <NAME>, <NAME>, and <NAME>, “The Liink Project: Implementation of a Recess and Character Development Pilot Study with Grades K & 1 Children,” Texas Association for Health, Physical Education, Recreation & Dance Journal 84, no. 2 (Summer 2016): 14–17, 35.
# * <NAME>, <NAME>, <NAME>, and <NAME>, “Long-Term Effects of a Playground Markings and Physical Structures on Children’s Recess Physical Activity Levels,” Preventative Medicine 44 (2007): 393–397.
# * <NAME> and <NAME>, “Outdoor Playing = Outdoor Learning,” Educational Facility Planner 49, nos. 2–3 (2016): 16–20.
# * <NAME>, <NAME>, and <NAME>, “Withholding Recess from Elementary School Students: Policies Matter,” Journal of School Health 83 (2013): 533–541.
#
# **Physical Activity and Academic Performance in Children:**
# * <NAME>, Pontifex MB, O’Leary KC, Scudder MR, Wu CT, Castelli DM, Hillman CH. The effects of an afterschool physical activity program on working memory in preadolescent children, Developmental Science. 2011; 14(5): 1046-1058.
# * <NAME>, <NAME>, <NAME>, <NAME>, <NAME>. Physical activity and mental performance in preadolescents: effects of acute exercise on free-recall memory, Mental Health and Physical Activity. 2009. 2(1): 16-22.
# * Strong WB, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>. Evidence based physical activity for school-age youth, The Journal of Pediatrics. 2005. 146(6): 732-737. * <NAME>, Etnier JL, The relationship between physical activity and cognition in children: a meta-analysis, Pediatric Exercise Science. 2003. 15(3): 243-256.
# * Aadland KN, Moe VF, Aadland E, <NAME>, Resaland GK, Ommundsen Y, Relationships between physical activity, sedentary time, aerobic fitness, motor skills and executive function and academic performance in children, Mental Health and Physical Activity. 2017. 12: 10-18.
# * Haapala EA et al, Physical activity and sedentary time in relation to academic achievement in children, Journal of Science and Medicine in Sport. 2017. 20(6): 583-589.
# * Kao SC, Westfall DR, Parks AC, Pontifex MB, Hillman CH. Muscular and aerobic fitness, working memory, and academic achievement in children, Medicine and Science in Sports and Exercise. 2016.
#
# **Myopia Development and Sunlight Exposure:**
# * <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, et al. Outdoor activity reduces the prevalence of myopia in children. Ophthalmology. 2008;115(8):1279–85.
# * <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, et al. Effect of time spent outdoors at school on the development of myopia among children in China: a randomized clinical trial. JAMA. 2015;314(11):1142–8.
# * Wu PC, Tsai CL, Wu HL, Yang YH, Kuo HK. Outdoor activity during class recess reduces myopia onset and progression in school children. Ophthalmology. 2013;120(5): 1080–5.
# * Yi JH, Li RR. Influence of near-work and outdoor activities on myopia progression in school children. Zhongguo Dang Dai Er Ke Za Zhi. 2011;13(1):32–5.
# * Read SA, Collins MJ, <NAME>J. Light Exposure and Eye Growth in Childhood. Invest Ophthalmol Vis Sci. 2015;56(11):6779–87.
|
assets/code/HoustonPlaygrounds.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Tuples support the same slice syntax as lists; every slice returns a NEW tuple.
names = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14)
names
# elements at indices 4..9 (the stop index is exclusive)
names[4:10]
# first ten elements
names[:10]
# from index 4 to the end
names[4:]
# full shallow copy
names[:]
# reversed copy (negative step walks backwards)
names[::-1]
# every second element, starting at index 0
names[::2]
# every second element, starting at index 1
names[1::2]
|
09. Python Tuples/07. Slicing of Tuple.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### having upper and lower limit
def isValidBST(self, root):
    """Return True if *root* is a valid binary search tree.

    Implements the upper/lower-limit approach the section title promises
    (the original body was an unimplemented `pass` stub): each node's
    value must lie in an open interval (lower, upper); descending left
    tightens the upper bound, descending right tightens the lower bound.
    Duplicate values are rejected (strict inequality).
    """
    def in_bounds(node, lower, upper):
        if node is None:
            return True  # an empty subtree is trivially valid
        if not (lower < node.val < upper):
            return False
        return (in_bounds(node.left, lower, node.val)
                and in_bounds(node.right, node.val, upper))

    return in_bounds(root, float('-inf'), float('inf'))
# ### Having Inorder traversal
def isValidBST(self, root):
    """Check whether *root* is a valid binary search tree.

    A binary tree is a BST exactly when its in-order traversal is
    strictly increasing; this walks the tree iteratively with an explicit
    stack and compares each visited value against the previous one.
    """
    stack, node, prev = [], root, None
    while stack or node is not None:
        # Slide down the left spine, remembering the path.
        while node is not None:
            stack.append(node)
            node = node.left
        node = stack.pop()
        # Equal or decreasing values violate the strict BST ordering.
        if prev is not None and node.val <= prev:
            return False
        prev = node.val
        node = node.right
    return True
# ### brute force approach
def isValidBST(self, root):
    """Validate a BST by re-searching every node from the root.

    Each node must be reachable by a standard BST search for its value;
    a misplaced or duplicate value breaks that search path and fails
    validation. O(n log n) on balanced trees, O(n^2) worst case.
    """
    def reachable(current, target):
        # Follow the BST search path from *current* toward *target*.
        while current is not None:
            if current is target:
                return True
            if current.val > target.val:
                current = current.left
            elif current.val < target.val:
                current = current.right
            else:
                # Equal value on a different node: the search dead-ends.
                return False
        return False

    def every_node_ok(node):
        if node is None:
            return True
        return (reachable(root, node)
                and every_node_ok(node.left)
                and every_node_ok(node.right))

    return every_node_ok(root)
|
098_Validate_Binary_Search_Tree.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # NumPy
# ## Introduction to NumPy Arrays
# Arrays are the main data structure used in machine learning. In Python, arrays from the NumPy library, called N-dimensional arrays or the ndarray, are used as the primary data structure for representing data.
# ### NumPy N-dimensional Array
# NumPy is a Python library that can be used for scientific and numerical applications and is the tool to use for linear algebra operations. The main data structure in NumPy is the ndarray, which is a shorthand name for N-dimensional array. When working with NumPy, data in an ndarray is simply referred to as an array. It is a fixed-sized array in memory that contains data of the same type, such as integers or floating point values.
#
# The data type supported by an array can be accessed via the dtype attribute on the array.
# The dimensions of an array can be accessed via the shape attribute that returns a tuple
# describing the length of each dimension.
# +
# Create array
import numpy as np
l=[1.6,8.9,3.6,5.6]
arr=np.array(l)
# display array
print(arr)
# display array shape
print(arr.shape)
# array datatype
print(arr.dtype)
# -
# ### Functions to Create Arrays
# ### Empty
# The `empty()` function will create a new array of the specified shape. The argument to the
# function is an array or tuple that specifies the length of each dimension of the array to create. The values or content of the created array will be random and will need to be assigned before use.
# empty() does NOT zero memory -- the printed values are whatever bytes
# happened to be in the allocation.
arr=np.empty(3)
print(arr)
# arr=np.empty((3,4))
arr=np.empty([3,4])
print(arr)
# ### Zeros
# The `zeros()` function will create a new array of the specified size with the contents filled with zero values. The argument to the function is an array or tuple that specifies the length of each dimension of the array to create.
# create a zero array (the commented variants are equivalent spellings)
arr=np.zeros(3)
# arr=np.zeros(3,)
# arr=np.zeros((3))
# arr=np.zeros((3,))
arr2=np.zeros([3,5])
print(arr)
print(arr2)
# ### Ones
# The `ones()` function will create a new array of the specified size with the contents filled with one values. The argument to the function is an array or tuple that specifies the length of each dimension of the array to create.
arr=np.ones(5)
arr2=np.ones([4,6])
arr3=np.ones([2,3,4])
print(arr)
print(arr2)
print(arr3)
# ### Combining Arrays
# NumPy provides many functions to create new arrays from existing arrays.
# ### Vertical Stack
# Given two or more existing arrays, you can stack them vertically using the `vstack()` function.
# +
# create array with vstack: two 1-D arrays of length 4 become a (2, 4) array
arr=np.array([1,2,3,4])
arr2=np.array([4,5,6,7])
arr3=np.vstack((arr,arr2))
print(arr3)
print(arr3.shape)
# -
# stacking two (2, 4) arrays vertically yields (4, 4)
arr=np.array([[1,2,3,4],[1,2,3,4]])
arr2=np.array([[1,2,3,4],[1,2,3,4]])
arr3=np.vstack((arr,arr2))
print(arr3)
print(arr3.shape)
# ### Horizontal Stack
# Given two or more existing arrays, you can stack them horizontally using the `hstack()` function.
arr=np.ones(5)
arr2=np.ones(5)
arr3=np.hstack((arr,arr2))
print(arr)
print(arr2)
print(arr3)
print(arr3.shape)
# hstack of two (5, 5) arrays concatenates columns -> (5, 10)
arr=np.ones((5,5))
arr2=np.ones((5,5))
arr3=np.hstack((arr,arr2))
print(arr)
print(arr2)
print(arr3)
print(arr3.shape)
arr=np.ones((5,5))
arr2=np.zeros((5,5))
arr3=np.hstack((arr,arr2))
print(arr)
print(arr2)
print(arr3)
print(arr3.shape)
arr=np.zeros((5,5))
arr2=np.ones((5,5))
arr3=np.hstack((arr,arr2))
print(arr)
print(arr2)
print(arr3)
print(arr3.shape)
# ### One-Dimensional List to Array
data=[2,6,5,7,8,9,34,55]
arr=np.array(data)
print(arr)
print(type(arr))
# ## List of Lists to Array
data=[[2,6,5,7,8,9,34,55],
      [2,6,5,7,8,9,34,55],
      [2,6,5,7,8,9,34,55]
      ]
arr=np.array(data)
print(arr)
print(type(arr))
# ## Array Indexing
# Once your data is represented using a NumPy array, you can access it using indexing.
# ### One-Dimensional Indexing
data=np.array([4,6,8,9,55,44])
# index data (negative indices count from the end, as with Python lists)
print(data[0])
print(data[2])
print(data[-1])
print(data[-4])
# ### Two-Dimensional Indexing
# Indexing two-dimensional data is similar to indexing one-dimensional data, except that a comma is used to separate the index for each dimension.
# define array
data = np.array([
    [11, 22,33],
    [33, 44,55],
    [55, 66,77]])
# index data
print(data[0,0])
# index row of two-dimensional array
print(data[0,])
# ### One-Dimensional Slicing
# slice a one-dimensional array
data = np.array([11, 22, 33, 44, 55])
print(data[:])
print(data[2:4])
# negative slicing of a one-dimensional array (last two elements)
data = np.array([11, 22, 33, 44, 55])
print(data[-2:])
# ### Two-Dimensional Slicing
# +
# split input and output data: X = all columns but the last, y = last column
data = np.array([
    [11, 22, 33,45],
    [44, 55, 66,75],
    [77, 88, 99,105]])
# separate data
X, y = data[:, :-1], data[:, -1]
print(X)
print(y)
# -
# split train and test data by slicing rows at a split point
data = np.array([
    [11, 22, 33,45],
    [44, 55, 66,75],
    [77, 88, 99,105],
    np.random.random(4),
    np.random.randn(4)
    ])
# separate data
split = 3
train,test = data[:split,:],data[split:,:]
print(train)
print(test)
# ## Array Reshaping
#
# ### Data Shape
# NumPy arrays have a shape attribute that returns a tuple of the length of each dimension of
# the array. For example:
# shape of one-dimensional array: a 1-tuple like (5,)
data=np.random.randn(5)
print(data.shape)
# shape of two-dimensional array
data=np.array([np.random.randn(5),
               np.random.randn(5),
               np.random.randn(5)])
print(data.shape)
# row and column shape of two-dimensional array
data = np.array([
    np.random.randn(5),
    np.random.randn(5),
    np.random.randn(5)
    ])
print( ' Rows: %d ' % data.shape[0])
print( ' Cols: %d ' % data.shape[1])
# ### Reshape 1D to 2D Array
# reshape 1D array to 2D: (n,) -> (n, 1), i.e. a single-column matrix
data = np.array([11, 22, 33, 44, 55,66,77,88])
print(data)
print(data.shape)
# reshape
data = data.reshape((data.shape[0], 1))
print(data)
print(data.shape)
# ### Reshape 2D to 3D Array
# reshape 2D array to 3D by appending a trailing axis of length 1
data=np.random.random((2,3))
print(data)
print(data.shape)
# reshape
data = data.reshape((data.shape[0], data.shape[1], 1))
print(data)
print(data.shape)
# ### # reshape 3D to 4D array
# reshape 3D array to 4D (same trick: add a trailing length-1 axis)
data=np.random.random((2,3,3))
print(data)
print(data.shape)
# reshape
data = data.reshape((data.shape[0], data.shape[1],data.shape[2], 1))
print(data)
print(data.shape)
# ## NumPy Array Broadcasting
# Arrays with different sizes cannot be added, subtracted, or generally be used in arithmetic. A way to overcome this is to duplicate the smaller array so that it has the dimensionality and size as the larger array. This is called array broadcasting and is available in NumPy when performing array arithmetic.
# ### Scalar and One-Dimensional Array
# A single value or scalar can be used in arithmetic with a one-dimensional array.
# broadcast scalar to one-dimensional array: the scalar is applied elementwise
from numpy import array
# define array
a = array([1, 2, 3,4,5])
print(a)
# define scalar
b = 2
print(b)
# broadcast
c = a + b
print(c)
# ## Scalar and Two-Dimensional Array
# A scalar value can be used in arithmetic with a two-dimensional array.
# broadcast scalar to two-dimensional array
A=np.random.random((3,4))
print(A)
# define scalar
b = 2
print(b)
# broadcast
C = A + b
print(C)
# ### One-Dimensional and Two-Dimensional Arrays
# A one-dimensional array can be used in arithmetic with a two-dimensional array. For example, we can imagine a two-dimensional array A with 2 rows and 3 columns added to a one-dimensional array b with 3 values.
# b has length 4, matching A's trailing dimension, so it is added to every row
A=np.random.random((3,4))
print(A)
# define one-dimensional array
b = np.ones(4)
print(b)
# broadcast
C = A + b
print(C)
# +
# broadcasting error: shapes (2, 3) and (2,) are incompatible, so
# uncommenting this cell raises ValueError
# A = array([
# [1, 2, 3],
# [1, 2, 3]])
# print(A.shape)
# define one-dimensional array
# b = array([1, 2])
# print(b.shape)
# attempt broadcast
# C = A + b
# print(C)
# -
|
linear_algebra/numpy_intro.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.dates import date2num
import datetime
# load raw data frames
raw_covid_cases_df = pd.read_csv("../data/RAW_us_confirmed_cases.csv")
raw_mask_mandates_df = pd.read_csv("../data/mask-mandates-by-county.csv")
raw_mask_use_df = pd.read_csv("../data/mask-use-by-county.csv")
raw_covid_cases_df
raw_mask_mandates_df
raw_mask_use_df
# +
# The analysis is scoped to a single county; these constants drive every
# filter and date-range restriction below.
state = "New Jersey"
state_abrev = "NJ"
county = "Bergen County"
county_abrev = "Bergen"
start_date = datetime.datetime(2020, 2, 1, 0, 0) # feb 1st 2020
end_date = datetime.datetime(2021, 10, 15, 0, 0) # oct 15 2021
# -
# ## Data cleaning
# We are dealing with only a subset of the data for a given county. We filter down the confirmed COVID cases dataset, masking mandate dataset and masking survey dataset to that particular region only.
# +
# filter down confirmed cases datasets
# NOTE(review): the cases table is matched on the bare county name
# ("Bergen" in Admin2) while the mandate table uses the full name
# ("Bergen County") -- confirm both columns use those conventions.
cases_df = raw_covid_cases_df.loc[(raw_covid_cases_df['Province_State'] == state) & (raw_covid_cases_df['Admin2'] == county_abrev)].head(1)
# filter down mask mandate datasets
mandates_df = raw_mask_mandates_df[(raw_mask_mandates_df['State_Tribe_Territory'] == state_abrev) & (raw_mask_mandates_df['County_Name'] == county)]
# get the fips code for Bergen NJ (used to join against the mask-use survey)
fips = cases_df['FIPS'].astype(int).values[0]
# filter down mask use datasets
masking_df = raw_mask_use_df[raw_mask_use_df['COUNTYFP'] == fips]
# -
#
# CONFIRMED CASES DATASET DATA CLEANUP
#
# remove unnecessary columns for confirmed cases & transpose
cases_df = cases_df.reset_index().drop(['index','Province_State', 'Admin2', 'UID', 'iso2', 'iso3', 'code3', 'FIPS', 'Country_Region', 'Lat', 'Long_', 'Combined_Key'], axis=1)
# after the transpose, each former date column becomes a row whose 'index'
# holds the date string and whose column 0 holds the cumulative count
cases_df = cases_df.transpose().reset_index()
# confirmed cases dataset : rename & filter for specific timeframe
cases_df['index'] = pd.to_datetime(cases_df['index'])
cases_df = cases_df.rename(columns={'index': 'doy', 0: 'cases_count'})
cases_df = cases_df[(cases_df['doy'] >= start_date) & (cases_df['doy'] <= end_date)]
#
# MASK MANDATES DATASET DATA CLEANUP
#
mandates_df = mandates_df.drop(['State_Tribe_Territory', 'County_Name', 'FIPS_State', 'FIPS_County', 'Source_of_Action', 'URL', 'Citation'], axis=1)
mandates_df = mandates_df.reset_index().drop(['index'], axis=1)
mandates_df['date'] = pd.to_datetime(mandates_df['date'])
mandates_df = mandates_df[(mandates_df['date'] >= start_date) & (mandates_df['date'] <= end_date)]
# save data (cleaned per-county extracts for downstream notebooks)
cases_df.to_csv("../data_clean/cases.csv")
mandates_df.to_csv("../data_clean/mandates.csv")
masking_df.to_csv("../data_clean/masking.csv")
# Plotting the number of cases in the `cases_df` dataset, we see that the reported number of cases is actually the cumulative total number of cases per day. We are interested in looking at the derivative, that is, the increase day over day of COVID cases. To do that, we need to clean up the dataset.
# Sanity plot: cumulative counts should be monotonically increasing.
plt.plot(cases_df.doy, cases_df.cases_count)
# +
def derivative(row):
    """Day-over-day increase in the cumulative case count for *row*'s date.

    When no record exists for the previous calendar day, the prior count
    is treated as zero, so the first date in range reports its full
    cumulative total.
    """
    previous_day = row['doy'] - pd.Timedelta(days=1)
    previous = cases_df[cases_df['doy'] == previous_day].head(1)
    previous_count = (
        previous['cases_count'].astype(int).values[0]
        if previous.shape[0] == 1
        else 0
    )
    return row['cases_count'] - previous_count

cases_df['dod_increase'] = cases_df.apply(derivative, axis=1)
# -
# Plot the daily (not cumulative) new-case counts computed above.
plt.figure(figsize=(16,5))
plt.title("Day-over-day confirmed COVID 19 cases delta")
plt.plot(cases_df.doy, cases_df.dod_increase)
# Let's plot the dates from which masking became in place alongside that graph in order to have a proper estimate of the incubation period.
# +
def policy_change(row):
    """Label rows where the public mask requirement differs from the prior day.

    Days with no record for the previous date are compared against 'No'
    (i.e. no mandate assumed before the data begins).
    """
    previous_day = row['date'] - pd.Timedelta(days=1)
    current_order = row['Face_Masks_Required_in_Public']
    previous_order = 'No'
    previous_rows = mandates_df[mandates_df['date'] == previous_day].head(1)
    if previous_rows.shape[0] == 1:
        previous_order = previous_rows['Face_Masks_Required_in_Public'].astype(str).values[0]
    return "change" if current_order != previous_order else "no_change"

mandates_df['policy_change'] = mandates_df.apply(policy_change, axis=1)
mandates_df[mandates_df['policy_change'] == "change"]
# -
# The face-mask mandate changed only once — from required to not required — on 2021-05-28 for Bergen County, NJ.
# +
plt.figure(figsize=(16,5))
plt.title("Day-over-day NEW confirmed COVID 19 cases delta")
plt.plot(cases_df.doy, cases_df.dod_increase)
# Shade the mandate periods: green while masks were required in public,
# red from the repeal (2021-05-28) to the end of the analysis window.
plt.axvspan(date2num(datetime.datetime(2020,4,10)), date2num(datetime.datetime(2021,5,28)), color="green", alpha=0.3, label="Masking required in public")
plt.axvspan(date2num(datetime.datetime(2021,5,28)), date2num(end_date), color="red", alpha=0.3, label="Masking NOT required in public")
plt.legend()
# -
# It seems like the first introduction of a masking policy mandate in 2020/04/10 contributed to flattening the curve, at least in the increase of cases a few days after it got introduced. Removal of the masking policy was OK considering that there were no constant increases around May 2021.
#
# Something worth noting here: there is a delay between the time of infection and the time a case is confirmed. Hence the effect of the masking policy can be seen a few days after it was introduced. We are interested in knowing that delay, on average, based on the data. Hence we zoom in on the 2020/04/10 area to figure out that delay.
# +
plt.figure(figsize=(16,5))
plt.title("Day-over-day NEW confirmed COVID 19 cases delta")
plt.plot(cases_df.doy, cases_df.dod_increase)
# Zoom into April 2020 to estimate the lag between the mandate taking
# effect (2020-04-10) and the first visible drop in new cases.
plt.xlim(date2num(datetime.datetime(2020,4,1)), date2num(datetime.datetime(2020,5,10)))
plt.axvspan(date2num(datetime.datetime(2020,4,10)), date2num(datetime.datetime(2021,5,28)), color="green", alpha=0.3, label="Masking required in public")
plt.axvspan(date2num(datetime.datetime(2020,4,10)), date2num(datetime.datetime(2020,4,25)), color="blue", alpha=0.3, label="Delay between time of infection and the time a case is confirmed")
plt.legend()
# -
# We're particularly interested in the delay after which we can see the masking mandate have an effect on the daily increase of COVID 19 cases. That is, what is the time delay after which we can see a decrease in the number of new daily cases once the masking mandate is implemented.
#
# Here we can see that after the masking mandate got implemented on **2020-04-10**, the first decrease in new cases of COVID 19 we can observe in the data is roughly around **2020-04-25**. Which means that, according to the data observed, the average delay between the time of infection and the time a case is confirmed is roughly **14 days**. This also follows the CDC observations.
masking_df
# It also seems like the vast majority of people in Bergen County are okay complying with masking mandates, making this data more reliable.
#
# We can now look at the graph of new cases increase difference day over day with the masking mandate taking into consideration the 14 incubation period shift.
# +
def derivative(row):
    """Return the change in `dod_increase` between this row's day and the day before.

    Looks up yesterday's row in the module-level `cases_df`; when no row exists
    for the previous day, its count is treated as 0.
    """
    prev_day = row['doy'] - pd.Timedelta(days=1)
    prev_rows = cases_df[cases_df['doy'] == prev_day].head(1)
    if prev_rows.shape[0] == 1:
        prev_count = prev_rows['dod_increase'].astype(int).values[0]
    else:
        prev_count = 0
    return row['dod_increase'] - prev_count
cases_df['dod_increase_slope'] = cases_df.apply(derivative, axis=1)
cases_df
# +
plt.figure(figsize=(16,5))
plt.plot(cases_df.doy, cases_df.dod_increase_slope)
plt.axvspan(date2num(datetime.datetime(2020,4,10)), date2num(datetime.datetime(2020,4,25)), color="gray", alpha=0.1, label="Masking mandate incubation period")
plt.axvspan(date2num(datetime.datetime(2020,4,25)), date2num(datetime.datetime(2021,5,28)), color="green", alpha=0.3, label="Masking mandate in place")
plt.legend()
# -
# The datapoints below are the change of slope. This gives us a better (standardized) way of knowing whether, overall, the infection rates are worsening or improving. A positive value implies that the situation is worsening (more daily infections compared to the day prior). A negative value implies that the situation is improving (fewer daily infections compared to the day prior).
#
# Now let's merge both datasets with the mobility reports from Google and Apple.
# +
# load mobility reports
google_mobility_df = pd.read_csv("../data/COVID19_mobility/google_reports/mobility_report_US.csv")
apple_mobility_df = pd.read_csv("../data/COVID19_mobility/apple_reports/apple_mobility_report_US.csv")
google_mobility_df = google_mobility_df[google_mobility_df['county'] == county]
apple_mobility_df = apple_mobility_df[apple_mobility_df["county_and_city"] == county]
# -
google_mobility_df
apple_mobility_df
# +
# merge apple and google mobility datasets together
mobility_df = pd.merge(left=google_mobility_df, right=apple_mobility_df, how='inner', on='date')
# merge transit mobility scores
mobility_df['transit_avg'] = mobility_df[['transit stations', 'transit']].mean(axis=1)
# drop columns
mobility_df = mobility_df.drop(columns=['state_x', 'state_y', 'county', 'county_and_city', 'geo_type', 'transit', 'transit stations'])
# rename columns
mobility_df = mobility_df.rename(columns={'transit_avg': 'transit'})
mobility_df['date'] = mobility_df['date'].apply(pd.to_datetime)
# -
# Here we merge the daily mobility report with the cases.
# merge the cases data w/ the mobility data
cases_df = pd.merge(left=cases_df, right=mobility_df, left_on='doy', right_on='date')
cases_df = cases_df.drop(columns=['date'])
cases_df = cases_df.rename(columns={
"retail and recreation": "retail_rec",
"grocery and pharmacy": "grocery_pharmacy",
})
cases_df
# It doesn't really make sense to look at the difference in increases over time since it takes into account the logistics delay of entering data into the system. Instead we proceed by averaging the number of new cases in buckets to get a clearer picture. We set the bucket size to 14 to counterbalance that incubation period.
# +
doy_cases_df = cases_df.set_index("doy")
doy_cases_df = doy_cases_df.resample("14D").agg({
'dod_increase': 'mean',
'dod_increase_slope': 'mean',
'retail_rec': 'mean',
'grocery_pharmacy': 'mean',
'parks': 'mean',
'workplaces': 'mean',
'residential': 'mean',
'driving': 'mean',
'walking': 'mean',
'transit': 'mean'
})
doy_cases_df.to_csv("../data_clean/doy_cases.csv")
doy_cases_df
# +
plt.figure(figsize=(16,5))
plt.plot(doy_cases_df.index.to_pydatetime(), doy_cases_df.dod_increase_slope)
plt.title("Median increase in the slope of confirmed COVID 19 cases bucketized in 14D buckets")
plt.axhline(y=0, color="red")
plt.axvspan(date2num(datetime.datetime(2020,4,10)), date2num(datetime.datetime(2021,5,28)), color="green", alpha=0.3, label="Masking mandate in place")
plt.axvspan(date2num(datetime.datetime(2021,5,28)), date2num(end_date), color="red", alpha=0.3, label="Masking NOT required in public")
plt.legend()
# -
# We can see in the graph below that introducing the masking mandate and having the people comply to it was enough to flatten the curve and at least stop this constant increases in new COVID 19 cases day over day. You see the number of cases falling after the first introduction of the masking mandate and the increase in the average number of new cases every day stay around the 0 line after the introduction of the masking mandate.
# +
fig, ax = plt.subplots(1, figsize=(16,8))
fig.suptitle("Efficacy of masking mandates, when followed, on new COVID 19 infections for Bergen County NJ")
ax.bar(doy_cases_df.index.to_pydatetime(), doy_cases_df.dod_increase_slope, label="Slope changes", width=10)
ax.plot(doy_cases_df.index.to_pydatetime(), doy_cases_df.dod_increase, label="Mean bi-weekly new confirmed COVID 19 cases")
ax.set_title("Mean increase/decrease in new COVID 19 confirmed cases in 14 days buckets")
ax.axhline(y=0, color="red")
ax.axvspan(date2num(datetime.datetime(2020,4,10)), date2num(datetime.datetime(2021,5,28)), color="green", alpha=0.3, label="Masking mandate in place")
ax.axvspan(date2num(datetime.datetime(2021,5,28)), date2num(end_date), color="red", alpha=0.3, label="Masking NOT required in public")
ax.axvline(x=date2num(datetime.datetime(2021,3,17)), color="yellow", label="General availability of COVID 19 vaccines in NJ")
ax.axvline(x=date2num(datetime.datetime(2020,4,10)), color="purple", label="Masking mandate gets introduced in NJ")
ax.set_ylabel("Mean number of new daily infections (14 days averaged)")
ax.legend()
# -
# Let's plot the daily new cases alongside the mobility data to see if we can find any correlation that might be interesting.
# +
fig, ax = plt.subplots(1, figsize=(16,8))
fig.suptitle("Mobility areas % change and new COVID-19 infection increases")
ax.bar(doy_cases_df.index.to_pydatetime(), doy_cases_df.dod_increase_slope, label="Slope changes in new infections", width=10)
ax.plot(doy_cases_df.index.to_pydatetime(), doy_cases_df.driving, label="Driving % change")
ax.plot(doy_cases_df.index.to_pydatetime(), doy_cases_df.walking, label="Walking % change")
ax.plot(doy_cases_df.index.to_pydatetime(), doy_cases_df.parks, label="Parks % change")
ax.plot(doy_cases_df.index.to_pydatetime(), doy_cases_df.workplaces, label="Workplaces % change")
ax.plot(doy_cases_df.index.to_pydatetime(), doy_cases_df.residential, label="Residential % change")
ax.plot(doy_cases_df.index.to_pydatetime(), doy_cases_df.transit, label="Transit % change")
ax.set_title("Mean increase/decrease in new COVID 19 confirmed cases in 14 days buckets")
ax.axhline(y=0, color="red")
ax.axvspan(date2num(datetime.datetime(2021,3,17)), date2num(datetime.datetime(2022, 1, 1)), color="green", alpha=0.3, label="Vaccinated population")
ax.set_ylabel("Mean % change in mobility")
ax.legend(loc="lower right", bbox_to_anchor=(0.5, -0.35))
# +
from matplotlib import gridspec
fig = plt.figure(figsize=(16,32))
gs = gridspec.GridSpec(9, 1, height_ratios=[2, 1, 1, 1, 1, 1, 1, 1, 1])
ax0 = plt.subplot(gs[0])
ax0.bar(doy_cases_df.index.to_pydatetime(), doy_cases_df.dod_increase_slope, label="Slope changes", width=10)
ax0.plot(doy_cases_df.index.to_pydatetime(), doy_cases_df.dod_increase, label="Mean bi-weekly new confirmed COVID 19 cases")
# retail_rec grocery_pharmacy parks workplaces residential
# driving
ax1 = plt.subplot(gs[1], sharex=ax0)
ax1.bar(doy_cases_df.index.to_pydatetime(), doy_cases_df.dod_increase_slope, label="Slope changes", width=10)
ax1.plot(doy_cases_df.index.to_pydatetime(), doy_cases_df.driving, label="Driving % change")
ax1.set_title("Driving mobility % change and slope change in new COVID-19 cases")
ax1.axvline(x=date2num(datetime.datetime(2021,3,17)), color="yellow", label="General availability of COVID 19 vaccines in NJ")
# transit
ax2 = plt.subplot(gs[2], sharex=ax0)
ax2.bar(doy_cases_df.index.to_pydatetime(), doy_cases_df.dod_increase_slope, label="Slope changes", width=10)
ax2.plot(doy_cases_df.index.to_pydatetime(), doy_cases_df.transit, label="Transit % change")
ax2.set_title("Transit mobility % change and slope change in new COVID-19 cases")
ax2.axvline(x=date2num(datetime.datetime(2021,3,17)), color="yellow", label="General availability of COVID 19 vaccines in NJ")
# walking
ax3 = plt.subplot(gs[3], sharex=ax0)
ax3.bar(doy_cases_df.index.to_pydatetime(), doy_cases_df.dod_increase_slope, label="Slope changes", width=10)
ax3.plot(doy_cases_df.index.to_pydatetime(), doy_cases_df.walking, label="Walking % change")
ax3.set_title("Walking mobility % change and slope change in new COVID-19 cases")
ax3.axvline(x=date2num(datetime.datetime(2021,3,17)), color="yellow", label="General availability of COVID 19 vaccines in NJ")
# retail_rec
ax4 = plt.subplot(gs[4], sharex=ax0)
ax4.bar(doy_cases_df.index.to_pydatetime(), doy_cases_df.dod_increase_slope, label="Slope changes", width=10)
ax4.plot(doy_cases_df.index.to_pydatetime(), doy_cases_df.retail_rec, label="Retail & Recreation % change")
ax4.set_title("Retail & recreation mobility % change and slope change in new COVID-19 cases")
ax4.axvline(x=date2num(datetime.datetime(2021,3,17)), color="yellow", label="General availability of COVID 19 vaccines in NJ")
# grocery_pharmacy
ax5 = plt.subplot(gs[5], sharex=ax0)
ax5.bar(doy_cases_df.index.to_pydatetime(), doy_cases_df.dod_increase_slope, label="Slope changes", width=10)
ax5.plot(doy_cases_df.index.to_pydatetime(), doy_cases_df.grocery_pharmacy, label="Groceries & Pharmacies % change")
ax5.set_title("Groceries & pharmacies mobility % change and slope change in new COVID-19 cases")
ax5.axvline(x=date2num(datetime.datetime(2021,3,17)), color="yellow", label="General availability of COVID 19 vaccines in NJ")
# parks
ax6 = plt.subplot(gs[6], sharex=ax0)
ax6.bar(doy_cases_df.index.to_pydatetime(), doy_cases_df.dod_increase_slope, label="Slope changes", width=10)
ax6.plot(doy_cases_df.index.to_pydatetime(), doy_cases_df.parks, label="Parks % change")
ax6.set_title("Parks mobility % change and slope change in new COVID-19 cases")
ax6.axvline(x=date2num(datetime.datetime(2021,3,17)), color="yellow", label="General availability of COVID 19 vaccines in NJ")
# workplaces
ax7 = plt.subplot(gs[7], sharex=ax0)
ax7.bar(doy_cases_df.index.to_pydatetime(), doy_cases_df.dod_increase_slope, label="Slope changes", width=10)
ax7.plot(doy_cases_df.index.to_pydatetime(), doy_cases_df.workplaces, label="Workplaces % change")
ax7.set_title("Workplaces mobility % change and slope change in new COVID-19 cases")
ax7.axvline(x=date2num(datetime.datetime(2021,3,17)), color="yellow", label="General availability of COVID 19 vaccines in NJ")
# residential
ax8 = plt.subplot(gs[8], sharex=ax0)
ax8.bar(doy_cases_df.index.to_pydatetime(), doy_cases_df.dod_increase_slope, label="Slope changes", width=10)
ax8.plot(doy_cases_df.index.to_pydatetime(), doy_cases_df.residential, label="Residential % change")
ax8.set_title("Residential mobility % change and slope change in new COVID-19 cases")
ax8.axvline(x=date2num(datetime.datetime(2021,3,17)), color="yellow", label="General availability of COVID 19 vaccines in NJ")
# +
from matplotlib import gridspec
fig = plt.figure(figsize=(16,16))
gs = gridspec.GridSpec(4, 1, height_ratios=[1, 1, 1, 1])
# retail_rec grocery_pharmacy parks workplaces residential
# driving, transit, walking
ax1 = plt.subplot(gs[0], sharex=ax0)
ax1.bar(doy_cases_df.index.to_pydatetime(), doy_cases_df.dod_increase_slope, label="Slope changes", width=10)
ax1.plot(doy_cases_df.index.to_pydatetime(), doy_cases_df.driving, label="Driving % change")
ax1.plot(doy_cases_df.index.to_pydatetime(), doy_cases_df.transit, label="Transit % change")
ax1.plot(doy_cases_df.index.to_pydatetime(), doy_cases_df.walking, label="Walking % change")
ax1.set_title("Driving, transit, walking % change and slope change in new COVID-19 cases")
ax1.axvline(x=date2num(datetime.datetime(2021,3,17)), color="yellow", label="General availability of COVID 19 vaccines in NJ")
ax1.legend(loc="lower right")
# retail_rec, grocery_pharmacy
ax2 = plt.subplot(gs[1], sharex=ax0)
ax2.bar(doy_cases_df.index.to_pydatetime(), doy_cases_df.dod_increase_slope, label="Slope changes", width=10)
ax2.plot(doy_cases_df.index.to_pydatetime(), doy_cases_df.retail_rec, label="Retail & Recreation % change")
ax2.plot(doy_cases_df.index.to_pydatetime(), doy_cases_df.grocery_pharmacy, label="Groceries & Pharmacies % change")
ax2.set_title("Retail, Recreation, Grocery, Pharmacy mobility % change and slope change in new COVID-19 cases")
ax2.axvline(x=date2num(datetime.datetime(2021,3,17)), color="yellow", label="General availability of COVID 19 vaccines in NJ")
ax2.legend(loc="lower right")
# parks
ax3 = plt.subplot(gs[2], sharex=ax0)
ax3.bar(doy_cases_df.index.to_pydatetime(), doy_cases_df.dod_increase_slope, label="Slope changes", width=10)
ax3.plot(doy_cases_df.index.to_pydatetime(), doy_cases_df.parks, label="Parks % change")
ax3.set_title("Parks mobility % change and slope change in new COVID-19 cases")
ax3.axvline(x=date2num(datetime.datetime(2021,3,17)), color="yellow", label="General availability of COVID 19 vaccines in NJ")
ax3.legend(loc="lower right")
# workplaces, residential
ax4 = plt.subplot(gs[3], sharex=ax0)
ax4.bar(doy_cases_df.index.to_pydatetime(), doy_cases_df.dod_increase_slope, label="Slope changes", width=10)
ax4.plot(doy_cases_df.index.to_pydatetime(), doy_cases_df.workplaces, label="Workplaces % change")
ax4.plot(doy_cases_df.index.to_pydatetime(), doy_cases_df.residential, label="Residential % change")
ax4.set_title("Workplaces, Residential mobility % change and slope change in new COVID-19 cases")
ax4.axvline(x=date2num(datetime.datetime(2021,3,17)), color="yellow", label="General availability of COVID 19 vaccines in NJ")
ax4.legend(loc="lower right")
# -
doy_cases_df.corr()
|
src/data-512-a7.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
# -
import torch
import matplotlib.pyplot as plt
# [Jump_to opening comments and overview of lesson 10](https://course.fast.ai/videos/?lesson=10&t=108)
# ## Callbacks
# ### Callbacks as GUI events
# [Jump_to lesson 10 video](https://course.fast.ai/videos/?lesson=10&t=432)
import ipywidgets as widgets
def f(o):
    """Demo click handler: ignore the widget argument and print a greeting."""
    print('hi')
# From the [ipywidget docs](https://ipywidgets.readthedocs.io/en/stable/examples/Widget%20Events.html):
#
# - *the button widget is used to handle mouse clicks. The on_click method of the Button can be used to register function to be called when the button is clicked*
w = widgets.Button(description='Click me')
# +
# widgets.Button??
# -
w.__dict__
w.on_click(f)
w
# *NB: When callbacks are used in this way they are often called "events".*
#
# Did you know what you can create interactive apps in Jupyter with these widgets? Here's an example from [plotly](https://plot.ly/python/widget-app/):
#
# 
# ### Creating your own callback
# [Jump_to lesson 10 video](https://course.fast.ai/videos/?lesson=10&t=680)
from time import sleep
def slow_calculation():
    """Sum of squares 0..4 (returns 30), sleeping 1 s per step to mimic slow work."""
    total = 0
    for step in range(5):
        total = total + step * step
        sleep(1)
    return total
slow_calculation()
def slow_calculation(cb=None):
    """Sum of squares 0..4 with a 1 s pause per step; call `cb(step)` after each step."""
    total = 0
    for step in range(5):
        total = total + step * step
        sleep(1)
        if cb:
            cb(step)
    return total
def show_progress(epoch):
    """Progress callback: announce that `epoch` has completed."""
    message = f"Awesome! We've finished epoch {epoch}!"
    print(message)
slow_calculation(show_progress)
# ### Lambdas and partials
# [Jump_to lesson 10 video](https://course.fast.ai/videos/?lesson=10&t=811)
slow_calculation(lambda o: print(f"Awesome! We've finished epoch {o}!"))
def show_progress(exclamation, epoch):
    """Progress callback: announce epoch completion with a custom exclamation prefix."""
    print("{}! We've finished epoch {}!".format(exclamation, epoch))
slow_calculation(lambda o: show_progress("OK I guess", o))
def make_show_progress(exclamation):
    """Build an epoch callback that prints `exclamation` before the progress message."""
    def _cb(epoch):
        print(f"{exclamation}! We've finished epoch {epoch}!")
    return _cb
slow_calculation(make_show_progress("Nice!"))
def make_show_progress(exclamation):
    """Close over `exclamation` and return a progress-printing callback."""
    return lambda epoch: print(f"{exclamation}! We've finished epoch {epoch}!")
slow_calculation(make_show_progress("Nice!"))
f2 = make_show_progress("Terrific")
slow_calculation(f2)
slow_calculation(make_show_progress("Amazing"))
from functools import partial #to use only part of the function input
slow_calculation(partial(show_progress, "OK I guess"))
f2 = partial(show_progress, "OK I guess")
# ### Callbacks as callable classes
# [Jump_to lesson 10 video](https://course.fast.ai/videos/?lesson=10&t=1122)
class ProgressShowingCallback():
    """Callable callback object; calling it prints a progress message for an epoch."""

    def __init__(self, exclamation="Awesome"):
        # Prefix used in every progress message.
        self.exclamation = exclamation

    def __call__(self, epoch):
        print(f"{self.exclamation}! We've finished epoch {epoch}!")
cb = ProgressShowingCallback("Just super")
slow_calculation(cb)
# ### Multiple callback funcs; `*args` and `**kwargs`
# [Jump_to lesson 10 video](https://course.fast.ai/videos/?lesson=10&t=1194)
def f(*args, **kwargs):
    """Demo: show how arbitrary positional and keyword arguments are captured."""
    print("args: {}; kwargs: {}".format(args, kwargs))
f(3, 'a', thing1="hello")
# NB: We've been guilty of over-using kwargs in fastai - it's very convenient for the developer, but is annoying for the end-user unless care is taken to ensure docs show all kwargs too. kwargs can also hide bugs (because it might not tell you about a typo in a param name). In [R](https://www.r-project.org/) there's a very similar issue (R uses `...` for the same thing), and matplotlib uses kwargs a lot too.
def slow_calculation(cb=None):
    """Sum of squares 0..4 (1 s pause per step), notifying `cb` before/after each step."""
    total = 0
    for epoch in range(5):
        if cb:
            cb.before_calc(epoch)
        total = total + epoch * epoch
        sleep(1)
        if cb:
            cb.after_calc(epoch, val=total)
    return total
class PrintStepCallback():
    """Callback that logs generic start/done messages, ignoring all arguments."""

    def __init__(self):
        pass

    def before_calc(self, *args, **kwargs):
        print("About to start")

    def after_calc(self, *args, **kwargs):
        print("Done step")
slow_calculation(PrintStepCallback())
class PrintStatusCallback():
    """Callback that reports the epoch before a step and the running value after it."""

    def __init__(self):
        pass

    def before_calc(self, epoch, **kwargs):
        print("About to start: {}".format(epoch))

    def after_calc(self, epoch, val, **kwargs):
        print("After {}: {}".format(epoch, val))
slow_calculation(PrintStatusCallback())
# ### Modifying behavior
# [Jump_to lesson 10 video](https://course.fast.ai/videos/?lesson=10&t=1454)
def slow_calculation(cb=None):
    """Sum of squares 0..4 (1 s pause per step) with optional callback hooks.

    `cb.before_calc(i)` runs before a step when defined; a truthy return from
    `cb.after_calc(i, res)` stops the loop early.
    """
    total = 0
    for step in range(5):
        before = getattr(cb, 'before_calc', None) if cb else None
        if before:
            before(step)
        total = total + step * step
        sleep(1)
        after = getattr(cb, 'after_calc', None) if cb else None
        if after and after(step, total):
            print("stopping early")
            break
    return total
class PrintAfterCallback():
    """After each step, report the running value; signal early stop once it exceeds 10."""

    def after_calc(self, epoch, val):
        print("After {}: {}".format(epoch, val))
        if val > 10:
            return True
slow_calculation(PrintAfterCallback())
class SlowCalculator():
    """Sum-of-squares calculator whose state (`res`) is exposed to its callback.

    The callback object may define `before_calc(calc, i)` / `after_calc(calc, i)`;
    a truthy return from `after_calc` stops the calculation early.
    """

    def __init__(self, cb=None):
        self.cb = cb
        self.res = 0

    def callback(self, cb_name, *args):
        """Invoke hook `cb_name` on the callback (if present), passing this calculator."""
        if not self.cb:
            return
        hook = getattr(self.cb, cb_name, None)
        if hook:
            return hook(self, *args)

    def calc(self):
        """Accumulate squares 0..4 into `self.res`, sleeping one second per step."""
        for step in range(5):
            self.callback('before_calc', step)
            self.res += step * step
            sleep(1)
            if self.callback('after_calc', step):
                print("stopping early")
                break
class ModifyingCallback():
    """Callback that inspects *and mutates* the calculator it observes.

    Requests an early stop (returns True) once `calc.res` exceeds 10, and
    doubles small intermediate results (`res < 3`).
    """

    def after_calc(self, calc, epoch):
        print(f"After {epoch}: {calc.res}")
        if calc.res > 10:
            return True
        if calc.res < 3:
            calc.res = calc.res * 2
calculator = SlowCalculator(ModifyingCallback())
calculator.calc()
calculator.res
# ## `__dunder__` thingies
# Anything that looks like `__this__` is, in some way, *special*. Python, or some library, can define some functions that they will call at certain documented times. For instance, when your class is setting up a new object, python will call `__init__`. These are defined as part of the python [data model](https://docs.python.org/3/reference/datamodel.html#object.__init__).
#
# For instance, if python sees `+`, then it will call the special method `__add__`. If you try to display an object in Jupyter (or lots of other places in Python) it will call `__repr__`.
# [Jump_to lesson 10 video](https://course.fast.ai/videos/?lesson=10&t=1647)
class SloppyAdder():
    """Number wrapper whose `+` is deliberately off: each addition adds an extra 0.01."""

    def __init__(self, o):
        self.o = o

    def __add__(self, b):
        total = self.o + b.o + 0.01
        return SloppyAdder(total)

    def __repr__(self):
        return str(self.o)
a = SloppyAdder(1)
b = SloppyAdder(2)
a+b
# Special methods you should probably know about (see data model link above) are:
#
# - `__getitem__`
# - `__getattr__`
# - `__setattr__`
# - `__del__`
# - `__init__`
# - `__new__`
# - `__enter__`
# - `__exit__`
# - `__len__`
# - `__repr__`
# - `__str__`
# ## Variance and stuff
# ### Variance
# Variance is the average of how far away each data point is from the mean. E.g.:
# [Jump_to lesson 10 video](https://course.fast.ai/videos/?lesson=10&t=2133)
t = torch.tensor([1.,2.,4.,18])
m = t.mean(); m
(t-m).mean()
# Oops. We can't do that. Because by definition the positives and negatives cancel out. So we can fix that in one of (at least) two ways:
(t-m).pow(2).mean()
(t-m).abs().mean()
# But the first of these is now a totally different scale, since we squared. So let's undo that at the end.
(t-m).pow(2).mean().sqrt()
# They're still different. Why?
#
# Note that we have one outlier (`18`). In the version where we square everything, it makes that much bigger than everything else.
#
# `(t-m).pow(2).mean()` is referred to as **variance**. It's a measure of how spread out the data is, and is particularly sensitive to outliers.
#
# When we take the sqrt of the variance, we get the **standard deviation**. Since it's on the same kind of scale as the original data, it's generally more interpretable. However, since `sqrt(1)==1`, it doesn't much matter which we use when talking about *unit variance* for initializing neural nets.
#
# `(t-m).abs().mean()` is referred to as the **mean absolute deviation**. It isn't used nearly as much as it deserves to be, because mathematicians don't like how awkward it is to work with. But that shouldn't stop us, because we have computers and stuff.
#
# Here's a useful thing to note about variance:
(t-m).pow(2).mean(), (t*t).mean() - (m*m)
# You can see why these are equal if you want to work thru the algebra. Or not.
#
# But, what's important here is that the latter is generally much easier to work with. In particular, you only have to track two things: the sum of the data, and the sum of squares of the data. Whereas in the first form you actually have to go thru all the data twice (once to calculate the mean, once to calculate the differences).
#
# Let's go steal the LaTeX from [Wikipedia](https://en.wikipedia.org/wiki/Variance):
#
# $$\operatorname{E}\left[X^2 \right] - \operatorname{E}[X]^2$$
# ### Covariance and correlation
# Here's how Wikipedia defines covariance:
#
# $$\operatorname{cov}(X,Y) = \operatorname{E}{\big[(X - \operatorname{E}[X])(Y - \operatorname{E}[Y])\big]}$$
# [Jump_to lesson 10 video](https://course.fast.ai/videos/?lesson=10&t=2414)
t
# Let's see that in code. So now we need two vectors.
# +
# `u` is twice `t`, plus a bit of randomness
u = t*2
u *= torch.randn_like(t)/10+0.95
plt.scatter(t, u);
# -
torch.randn_like(t)
prod = (t-t.mean())*(u-u.mean()); prod
prod.mean()
v = torch.randn_like(t)
plt.scatter(t, v);
((t-t.mean())*(v-v.mean())).mean()
# It's generally more conveniently defined like so:
#
# $$\operatorname{E}\left[X Y\right] - \operatorname{E}\left[X\right] \operatorname{E}\left[Y\right]$$
cov = (t*v).mean() - t.mean()*v.mean(); cov
# From now on, you're not allowed to look at an equation (or especially type it in LaTeX) without also typing it in Python and actually calculating some values. Ideally, you should also plot some values.
#
# Finally, here is the Pearson correlation coefficient:
#
# $$\rho_{X,Y}= \frac{\operatorname{cov}(X,Y)}{\sigma_X \sigma_Y}$$
cov / (t.std() * v.std())
# It's just a scaled version of the same thing. Question: *Why is it scaled by standard deviation, and not by variance or mean or something else?*
# ## Softmax
# Here's our final `logsoftmax` definition:
# [Jump_to lesson 10 video](https://course.fast.ai/videos/?lesson=10&t=2674)
def log_softmax(x):
    """Numerically-naive log-softmax over the last dimension: x - log(sum(exp(x)))."""
    log_denominator = x.exp().sum(-1, keepdim=True).log()
    return x - log_denominator
# which is:
#
# $$\hbox{logsoftmax(x)}_{i} = x_{i} - \log \sum_{j} e^{x_{j}}$$
#
# And our cross entropy loss is:
# $$-\log(p_{i})$$
# ## Browsing source code
# [Jump_to lesson 10 video](https://course.fast.ai/videos/?lesson=10&t=1782)
# - Jump to tag/symbol by with (with completions)
# - Jump to current tag
# - Jump to library tags
# - Go back
# - Search
# - Outlining / folding
|
nbs/dl2/05a_foundations.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
from model import learn_opinion_dynamics
# Smoke-test drive of the opinion-dynamics model on a tiny hand-built example.
# NOTE(review): the tuple layouts below are undocumented here — presumably each
# inner list encodes an edge/weight record consumed by learn_opinion_dynamics;
# confirm against model.py.
u_v_t_weights = ([0, 1, 0, 1], [1, 0, 1, 1])
v_a_t_weights = ([0, 0, 0, 1], [2, 1, 0, 1], [0, 0, 1, 1], [2, 1, 1, 1])
# N/Q/T sizes are defined by the model module (TODO confirm their meaning there).
res = learn_opinion_dynamics(N=3, Q=2, T=2, u_v_t_weights=u_v_t_weights, v_a_t_weights=v_a_t_weights)
print(res.w)  # learned weights
res.X  # last expression in the cell: displayed by the notebook
|
Reddit_Analysis/Learnable Opinions on Networks/Untitled.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6 - AzureML
# language: python
# name: python3-azureml
# ---
import pandas as pd
import numpy as np
# ## Definitions
def assign_curve_extra_pts(unified_info, course_name, curve_extra_pts, grade_to_get_pts):
    """Select rows for `course_name` and add an 'Extra Points' column.

    Rows whose Grade lies in [0, grade_to_get_pts] (inclusive) receive
    `curve_extra_pts`; all other rows receive 0.

    Fixes vs. the original:
    - work on an explicit `.copy()` so the input frame is left untouched and
      pandas' SettingWithCopyWarning (assignment into a `.loc` slice) is avoided;
    - replace `between(..., inclusive=True)` — boolean `inclusive` was removed
      in pandas 2.0 — with an explicit, version-portable boolean mask.

    Returns the filtered copy with the new column.
    """
    course_rows = unified_info.loc[unified_info['Course Name'] == course_name].copy()
    eligible = (course_rows['Grade'] >= 0) & (course_rows['Grade'] <= grade_to_get_pts)
    course_rows['Extra Points'] = np.where(eligible, curve_extra_pts, 0)
    return course_rows
def merging_df(dataframes_list):
    """Concatenate the given DataFrames, print shape and tail for inspection, and return the result."""
    combined = pd.concat(dataframes_list)
    for snapshot in (combined.shape, combined.tail(10)):
        print(snapshot)
    return combined
def xcolumnlookup(lookup_value, lookup_array, return_array, if_not_found: str = ''):
    """VLOOKUP-style helper.

    Return the first entry of `return_array` at a position where `lookup_array`
    equals `lookup_value`.  When nothing matches, return `if_not_found`, or the
    string "Not Found" when no fallback was supplied.
    """
    matches = return_array[lookup_array == lookup_value]
    if matches.empty:
        return "Not Found" if if_not_found == '' else if_not_found
    return matches.tolist()[0]
# ## Import information
unified_courses_df = pd.read_csv("./_output/0_unified_courses_data.csv", header='infer')
output = {}
output['Columns'] = unified_courses_df.columns.values.tolist()
pd.set_option('display.max_colwidth', -1)
pd.DataFrame(data=output)
output = {}
output['Columns'] = unified_courses_df.dtypes
pd.set_option('display.max_colwidth', -1)
pd.DataFrame(data=output)
unified_courses_df.head(10)
unified_courses_df.shape
# ## Calculations
math_with_extra_pts = assign_curve_extra_pts(unified_courses_df, "Math", 10, 70)
math_with_extra_pts.head(10)
chem_with_extra_pts = assign_curve_extra_pts(unified_courses_df, "Chemistry", 15, 60)
chem_with_extra_pts.head(10)
bio_with_extra_pts = assign_curve_extra_pts(unified_courses_df, "Biology", 5, 50)
bio_with_extra_pts.head(10)
# ## Merging Data
courses_with_extra = [bio_with_extra_pts, math_with_extra_pts, chem_with_extra_pts]
unified_extra_pts = merging_df(courses_with_extra)
# ## Califications Summarize
unified_extra_pts.shape
# Aggregate per student: keep the first course name seen and sum the grades.
calif_sum = unified_extra_pts.groupby('Student Contact',as_index=False).agg({'Course Name':'first', 'Grade':'sum'})
calif_sum.shape
calif_sum.head(3)
### Adding extra points
# NOTE(review): `calif_sum` has a fresh 0..n-1 index from the groupby while
# `unified_extra_pts` keeps its pre-concat index, so this addition pairs rows
# by index label, not by student — verify this alignment is really intended.
calif_sum['Grade + Extra'] = calif_sum['Grade'] + unified_extra_pts['Extra Points']
calif_sum.head(10)
unified_extra_pts.head(10)
unified_extra_pts_no_duplicates = unified_extra_pts.copy()
unified_extra_pts_no_duplicates.shape
unified_extra_pts_no_duplicates.drop(columns=['Grade', 'Course Name', 'Extra Points'], axis=1, inplace=True)
unified_extra_pts_no_duplicates.shape
unified_extra_pts_no_duplicates = unified_extra_pts_no_duplicates.reset_index(drop=True)
unified_extra_pts_no_duplicates.head(10)
output = {}
output['Types'] = unified_extra_pts_no_duplicates.dtypes
pd.set_option('display.max_colwidth', -1)
pd.DataFrame(data=output)
output = {}
output['Columns'] = unified_extra_pts_no_duplicates.columns.values.tolist()
pd.set_option('display.max_colwidth', -1)
pd.DataFrame(data=output)
# Bug fix: drop_duplicates returns a new DataFrame; the original call discarded
# the result, so the de-duplication never took effect.  Assign it back.
# (The 'Student Name ' column name carries a trailing space, as produced upstream.)
unified_extra_pts_no_duplicates = unified_extra_pts_no_duplicates.drop_duplicates(subset=['Student Name '])
unified_extra_pts_no_duplicates.shape
unified_extra_pts_no_duplicates.head(10)
calif_sum['Student Name'] = calif_sum['Student Contact'].apply(xcolumnlookup, args= (unified_extra_pts['Student Contact'], unified_extra_pts['Student Name ']))
calif_sum.head(13)
reordered_df = calif_sum[['Student Name','Student Contact', 'Course Name', 'Grade', 'Grade + Extra']]
reordered_df.head(15)
# ## Exporting Data
reordered_df.to_csv(path_or_buf='./_output/sum_grades.csv', index=False)
# ## END
|
2_data_wrangling_college_grades_e.g/1_Courses_Pts_Summarize_Grades.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Receiver operating characteristic curves
#
# Suppose that we have a binary classification task, in which we have to map a 'sample' $\mathbf{x}$ to a class $y \in \{ 0, 1\}$. We may use a machine learning method to learn a function $\hat{y} = h(\mathbf{x})$.
#
# How can we determine how good a binary classifier is at its task?
#
# One way in which one could do this is to simply take the percentage of times that the classifier is correct. So, if we apply the function $h$ to a test set of samples $\mathcal{X}$ with cardinality $N$, we could calculate the performance $p$:
#
# $p = 100 \frac{\left( \sum_{i=1}^{N} \delta(y_i^*, \hat{y_i}) \right)}{N}$,
#
# where $\delta$ is the Kronecker delta that returns 1 if its arguments are identical and 0 otherwise, and $y_i^*$ is the ground-truth class of sample $\mathbf{x}_i \in \mathcal{X}$. The main problem with this measure is that it may be very easy to obtain a really good performance if the classes $\{ 0, 1\}$ are _skewed_. For instance, if the classifier is to classify an image patch as containing a face or not, typically the chance that there is a face is much smaller than that there is no face in the patch. This means that a very high performance can be obtained just by means of always classifying the patch as having no face.
#
# In order to deal with potentially skewed classes, one could split the evaluation for each class. How many of the positive samples ($y=1$) are actually classified as positive ($\hat{y}=1$)? This ratio is called the _true positive ratio_, $\mathrm{TPR}$.
#
# $\mathrm{TPR} = \left( \sum_{i=1}^{M} \delta(\hat{y_i}, 1) \right) / M$,
#
# where $M$ is the number of positive samples and only the positive samples $\mathbf{x_i} \in \mathcal{X}_{\mathrm{P}}$ are evaluated. Knowing the true positive ratio also provides one with the number of positive samples that were wrongly classified as negatives, i.e., the _false negative ratio_ $\mathrm{FNR} = 1 - \mathrm{TPR}$. A similar reasoning can be applied to the negative samples, where we typically determine the ratio of negative samples that were wrongly classified as positive, i.e., the _false positive ratio_ $\mathrm{FPR}$:
#
# $\mathrm{FPR} = \left( \sum_{i=1}^{L} \delta(\hat{y_i}, 1) \right) / L$,
#
# where $L$ is the number of negative samples and only the negative samples $\mathbf{x_i} \in \mathcal{X}_{\mathrm{N}}$ are evaluated. Here, one can obtain the _true negative ratio_ simply by calculating $\mathrm{TNR} = 1- \mathrm{FPR}$.
#
# Calculating the true positive ratio and false positive ratio already gives a much better idea of the performance of a classifier than just the ratio of correct samples. Still, it remains hard to compare two classifiers $h_1$ and $h_2$. For instance, what classifier is better if $\mathrm{TPR}_1 = 0.9$, $\mathrm{FPR}_1 = 0.2$, $\mathrm{TPR}_2 = 0.8$, and $\mathrm{FPR}_2 = 0.01$? One will likely choose between these two classifiers based on the _costs_ of false negatives and false positives. If a false negative means that a drone will not detect a face and may fly into a person, then a false negative may be more costly than a false positive that would just result in the drone stopping more often. One would then probably pick classifier 1, as it has fewer false negatives.
#
# Still, in order to compare the two classifiers in a fair way, it would be ideal if they could be tested at the same operating point, e.g., the same $\mathrm{TPR}$ or the same $\mathrm{FPR}$. This is actually possible, if the function $h$ originally outputs continuous values, and $\hat{y}$ is based on thresholding this function:
#
# $\hat{y_i} = \delta \left( h(\mathbf{x}_i) > \tau, \mathrm{true} \right) $,
#
# with $h$ a continuous function and $\tau$ the threshold. In that case, we can test the function $h$ at different operating points by varying the threshold $\tau$. If it is lower than the minimal $h(\mathbf{x}_i)$, all samples will be classified as positive, so $\mathrm{TPR}=1$ and $\mathrm{FPR}=1$. If it is higher than the maximal $h(\mathbf{x}_i)$, all samples will be classified as negative, so $\mathrm{TPR}=0$ and $\mathrm{FPR}=0$. Varying the threshold between these two extremes will lead to a _receiver operating characteristic curve_ (ROC curve). Making an ROC curve for two classifiers allows to better compare them at different operating points.
# ## How to make an ROC curve
#
# We can make an ROC curve for any continuous function $h$, be it simple like the norm ($h = \lvert \rvert \mathbf{x_i} \lvert \rvert^2$) or complex like a deep neural network. A naive approach, then, to making an ROC curve is to vary the threshold $\tau$ from the lowest possible value of $h$ to the highest possible value of $h$ (if both are applicable), by making fixed small steps. However, typically, we make an ROC curve with the help of a dataset $\mathcal{X}$. It is then more efficient to evaluate $h$ on all the samples in the data set, sort the values $\hat{y_i}$ while keeping track of their ground-truths $y^*_i$, and then set the threshold $\tau$ to all the values that actually occur in the dataset. Then we have a maximum resolution for the ROC curve, while performing a minimum number of evaluations. This way of making a ROC curve is used in the function `get_ROC_curve` in `ROC.py` <A HREF="https://github.com/guidoAI/ROC_notebook/blob/master/ROC.py" TARGET="_blank">(link to file)</A>.
#
# In this notebook, you will be constructing a ROC curve based on a tiny dataset for the task of _sky segmentation_ (used in [2] for autonomous obstacle avoidance). The tiny dataset consists of one image, in which the pixels have been classified as either 'sky' (positive) or 'ground' (negative).
#
# Please run the code below to see the image and its classification.
#
# %matplotlib inline
import ROC
# Tiny one-image sky-segmentation dataset: the photo plus its per-pixel labels.
im_name = 'CroppedImage.bmp';
segmentation_name = 'TreeSegmentation.bmp';
# RGB: image array, Cl: per-pixel class labels, x/y: pixel-coordinate arrays
# (exact shapes/dtypes are defined by the project-local ROC module — see ROC.py).
RGB, Cl, x, y = ROC.get_images_and_grid(im_name, segmentation_name);
# <font color='red'><B>Exercise 1.</B></font>
# The last line of the code above: `RGB, Cl, x, y = ROC.get_images_and_grid(im_name, segmentation_name);` returned the following elements: an RGB image `RGB`, the classifications in image format `Cl`, and the $x$- and $y$-coordinates in the image (`x` and `y`, as arrays). You are going to use these outputs of the function in order to construct your own 'classifiers' and determine the ROC curves for these classifiers.
#
# 1. Run the code below. Can you change the code (by using other channels of `RGB`, or by using `x` or `y`, etc.) so that the ROC curve becomes really good? Why is it good do you think?
# 2. Can you change the code so that the ROC curve becomes really bad? Why is it so bad do you think?
#
# +
import matplotlib.pyplot as plt

# Change ONLY the following line: it selects the feature used as the
# continuous classifier h (here: the red channel of the image).
Values = RGB[:, :, 0]

# Flatten to one value per pixel, row-major, matching the label layout.
Values = Values.flatten()

# Sweep the threshold over all observed values to obtain the ROC curve.
TP, FP = ROC.get_ROC_curve(Values, Cl)

# Plot true-positive rate against false-positive rate.
plt.figure()
plt.plot(FP, TP, 'b')
plt.ylabel('TP')
plt.xlabel('FP')
# -
# ## References
#
# [1] <NAME>. (2006). An introduction to ROC analysis. Pattern recognition letters, 27(8), 861-874. – Sections 1-5 are relevant to the course.
#
# [2] <NAME>., <NAME>., <NAME>., & <NAME>. (2011, March). Sky segmentation approach to obstacle avoidance. In Aerospace Conference, 2011 IEEE (pp. 1-16). IEEE.
# ## Answers
#
# Exercise 1.
#
# 1. One can change the line to: ``Values = RGB[:,:,2];``. This gives really good results, as the 3rd channel (index = 2) is the blue channel, and in the given image the sky is blue. So the feature used for thresholding then corresponds well to the concept of the class 'sky'. Another pretty good ROC curve is given by ``Values = -y;``. The reason for this is that in the given image, the sky is in the upper part of the image, so the (negative) y-coordinate captures very well the concept of sky in this limited data set.
#
# 2. One can change the line to ``Values = x;``. This gives really bad results, as the x coordinate does not allow for any reasonable threshold. The sky is both left and right in the image, so the x-coordinate does not capture very well the concept of sky.
|
ROC.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="SsSUG2ffdRIC"
# # Packages and constants <a class="anchor" id="packages-constants"></a>
# + id="hZwNtGD2dRIC" executionInfo={"status": "ok", "timestamp": 1626829721860, "user_tz": 420, "elapsed": 3412, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjBIQDv_3y0-AcP-Q3CnINVqa-uRyxiVqVccRVRpSg=s64", "userId": "08884588606127160502"}}
# Install packages
# !pip install tellurium -q
# Import packages
import tellurium as te # Python-based modeling environment for kinetic models
# + [markdown] id="SxAiizgmIHYL"
# # Demo
# + id="3ygZTcCpohUZ" colab={"base_uri": "https://localhost:8080/", "height": 265} executionInfo={"status": "ok", "timestamp": 1626829724167, "user_tz": 420, "elapsed": 2311, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjBIQDv_3y0-AcP-Q3CnINVqa-uRyxiVqVccRVRpSg=s64", "userId": "08884588606127160502"}} outputId="4f0a9452-7c7b-411f-b46a-fdf49ff85ab5"
# Load the Munz et al. (2000) zombie-outbreak model (BIOMD0000000882)
# from the BioModels repository as a RoadRunner instance.
r = te.loads("https://www.ebi.ac.uk/biomodels/model/download/BIOMD0000000882.7?filename=munz2000.xml")
r.simulate()  # integrate with tellurium's default time course
r.plot()      # plot all species trajectories
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="WiQGzISPnDih" executionInfo={"status": "ok", "timestamp": 1626829781237, "user_tz": 420, "elapsed": 2392, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjBIQDv_3y0-AcP-Q3CnINVqa-uRyxiVqVccRVRpSg=s64", "userId": "08884588606127160502"}} outputId="becdbae5-93ce-4eb4-f7ed-a7c451e0e417"
# Re-load the model and re-run with the initial zombie population forced
# to zero, to see how the dynamics change without an initial outbreak seed.
r = te.loads("https://www.ebi.ac.uk/biomodels/model/download/BIOMD0000000882.7?filename=munz2000.xml")
r.Zombie = 0  # override the initial condition of the Zombie species
r.simulate()
r.plot()
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="BK-ZIH5BnJnu" executionInfo={"status": "ok", "timestamp": 1626830194950, "user_tz": 420, "elapsed": 3649, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjBIQDv_3y0-AcP-Q3CnINVqa-uRyxiVqVccRVRpSg=s64", "userId": "08884588606127160502"}} outputId="fc486519-3a3e-4915-8187-abcf9810cbb8"
# Same zero-zombie scenario, simulated over successively longer horizons
# (20, 200, 2000, 20000 time units) to probe long-term behaviour;
# reset() restores the initial conditions between runs.
r = te.loads("https://www.ebi.ac.uk/biomodels/model/download/BIOMD0000000882.7?filename=munz2000.xml")
r.Zombie = 0
r.simulate(0, 20)
r.plot()
r.reset()
r.simulate(0, 200)
r.plot()
r.reset()
r.simulate(0, 2000)
r.plot()
r.reset()
r.simulate(0, 20000)
r.plot()
r.reset()
# + colab={"base_uri": "https://localhost:8080/"} id="MjMtMHtIuM_6" executionInfo={"status": "ok", "timestamp": 1626829724168, "user_tz": 420, "elapsed": 29, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjBIQDv_3y0-AcP-Q3CnINVqa-uRyxiVqVccRVRpSg=s64", "userId": "08884588606127160502"}} outputId="8cdae978-20af-42fe-c607-d95096af9493"
print(r.getAntimony())
# + id="E6bsyWCGuQZ7" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1626829724168, "user_tz": 420, "elapsed": 18, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjBIQDv_3y0-AcP-Q3CnINVqa-uRyxiVqVccRVRpSg=s64", "userId": "08884588606127160502"}} outputId="0a31364f-93d6-4cd3-a2d3-6fe815802bd6"
import sys  # FIX: sys.exit() below raised NameError -- sys was never imported
import libsbml

# Round-trip the current model through libSBML, expanding SBML function
# definitions so the kinetic laws become plain expressions.
sbmldoc = libsbml.readSBMLFromString(r.getSBML())
props = libsbml.ConversionProperties()
props.addOption("expandFunctionDefinitions", True)
if sbmldoc.convert(props) != libsbml.LIBSBML_OPERATION_SUCCESS:
    print("[Error] Conversion failed...")
    sys.exit(1)
newsbml = libsbml.writeSBMLToString(sbmldoc)

# Reload the converted SBML and show the expanded model as Antimony.
r = te.loads(newsbml)
print(r.getAntimony())
# + colab={"base_uri": "https://localhost:8080/", "height": 154} id="SvSkZ8fOLdlX" executionInfo={"status": "ok", "timestamp": 1626829724169, "user_tz": 420, "elapsed": 10, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjBIQDv_3y0-AcP-Q3CnINVqa-uRyxiVqVccRVRpSg=s64", "userId": "08884588606127160502"}} outputId="e22c6471-4603-49a2-dd92-b48dd41c9e43"
newsbml
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="z8-SadiHLv33" executionInfo={"status": "ok", "timestamp": 1626829724711, "user_tz": 420, "elapsed": 549, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjBIQDv_3y0-AcP-Q3CnINVqa-uRyxiVqVccRVRpSg=s64", "userId": "08884588606127160502"}} outputId="deb3f692-35be-4e03-8851-f3977ee1c1fc"
# Hand-written Antimony version of the zombie model (SIR-style classes:
# Susceptible / Zombie / Removal) with explicit compartment, unit and
# annotation blocks. NOTE: the triple-quoted string is Antimony model
# source, not Python -- its // comments belong to the model language.
antstr = """
model *New_Model()
// Compartments and Species:
compartment compartment_;
species Susceptible in compartment_, Zombie in compartment_, Removal in compartment_;
// Reactions:
Birth: => Susceptible; compartment_*p;
Zombification: => Zombie; compartment_*(beta*Susceptible*Zombie + zeta*Removal);
Susceptible_to_Zombie_or_death: Susceptible => ; compartment_*(beta*Susceptible*Zombie + delta*Susceptible);
Removal_of_zombie_or_susceptible: => Removal; compartment_*(alpha*Susceptible*Zombie + delta*Susceptible);
Resurrection_to_Zombie: Removal => ; compartment_*zeta*Removal;
Death_from_Zombie: Zombie => ; compartment_*alpha*Susceptible*Zombie;
// Species initializations:
Susceptible = 500;
Zombie = 1;
Removal = 0;
// Compartment initializations:
compartment_ = 1;
// Variable initializations:
alpha = 0.005;
beta = 0.0095;
delta = 0.0001;
zeta = 0.0001;
p = 0.05;
// Other declarations:
const compartment_, alpha, beta, delta, zeta, p;
// Unit definitions:
unit volume = 1e-3 litre;
unit time_unit = 86400 second;
unit substance = 1e-3 mole;
// Display Names:
time_unit is "time";
Susceptible_to_Zombie_or_death is "Susceptible to Zombie or death";
Removal_of_zombie_or_susceptible is "Removal of zombie or susceptible";
Resurrection_to_Zombie is "Resurrection to Zombie";
Death_from_Zombie is "Death from Zombie";
// CV terms:
Susceptible instance "http://identifiers.org/ncit/C16505"
Removal instance "http://identifiers.org/ncit/C64914"
Birth property "http://identifiers.org/ncit/C25155"
Susceptible_to_Zombie_or_death property "http://identifiers.org/ncit/C16505"
Removal_of_zombie_or_susceptible property "http://identifiers.org/ncit/C64914"
Resurrection_to_Zombie property "http://identifiers.org/ncit/C37987"
Death_from_Zombie instance "http://identifiers.org/go/GO:0016265"
end
"""
# Compile the Antimony string, simulate with defaults, and plot.
r = te.loada(antstr)
r.simulate()
r.plot()
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="ZEnWP6kYMIK_" executionInfo={"status": "ok", "timestamp": 1626829725302, "user_tz": 420, "elapsed": 599, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjBIQDv_3y0-AcP-Q3CnINVqa-uRyxiVqVccRVRpSg=s64", "userId": "08884588606127160502"}} outputId="26e176fd-aaa5-429d-d619-8d363aff61bb"
antstr = """
model *New_Model()
// Reactions:
Birth: => Susceptible; p;
Zombification: => Zombie;(beta*Susceptible*Zombie + zeta*Removal);
Susceptible_to_Zombie_or_death: Susceptible => ; (beta*Susceptible*Zombie + delta*Susceptible);
Removal_of_zombie_or_susceptible: => Removal; (alpha*Susceptible*Zombie + delta*Susceptible);
Resurrection_to_Zombie: Removal => ; zeta*Removal;
Death_from_Zombie: Zombie => ; alpha*Susceptible*Zombie;
// Species initializations:
Susceptible = 500;
Zombie = 1;
Removal = 0;
// Compartment initializations:
compartment_ = 1;
// Variable initializations:
alpha = 0.005;
beta = 0.0095;
delta = 0.0001;
zeta = 0.0001;
p = 0.05;
// Other declarations:
const compartment_, alpha, beta, delta, zeta, p;
// Unit definitions:
unit volume = 1e-3 litre;
unit time_unit = 86400 second;
unit substance = 1e-3 mole;
// Display Names:
time_unit is "time";
Susceptible_to_Zombie_or_death is "Susceptible to Zombie or death";
Removal_of_zombie_or_susceptible is "Removal of zombie or susceptible";
Resurrection_to_Zombie is "Resurrection to Zombie";
Death_from_Zombie is "Death from Zombie";
// CV terms:
Susceptible instance "http://identifiers.org/ncit/C16505"
Removal instance "http://identifiers.org/ncit/C64914"
Birth property "http://identifiers.org/ncit/C25155"
Susceptible_to_Zombie_or_death property "http://identifiers.org/ncit/C16505"
Removal_of_zombie_or_susceptible property "http://identifiers.org/ncit/C64914"
Resurrection_to_Zombie property "http://identifiers.org/ncit/C37987"
Death_from_Zombie instance "http://identifiers.org/go/GO:0016265"
end
"""
# Run the simplified variant defined in `antstr` above (same reactions,
# but without the explicit compartment_ factor; compartment_ = 1, so the
# dynamics are unchanged).
r = te.loada(antstr)
r.simulate()
r.plot()
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="Sjj3a6LwMdgv" executionInfo={"status": "ok", "timestamp": 1626830951041, "user_tz": 420, "elapsed": 1143, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjBIQDv_3y0-AcP-Q3CnINVqa-uRyxiVqVccRVRpSg=s64", "userId": "08884588606127160502"}} outputId="a307d8a7-e09f-4795-f909-a9e4e85b1e92"
antstr = """
model *New_Model()
// Reactions:
//Birth: => Susceptible; p;
//Zombification: => Zombie;(beta*Susceptible*Zombie + zeta*Removal);
//Susceptible_to_Zombie_or_death: Susceptible => ; (beta*Susceptible*Zombie + delta*Susceptible);
//Removal_of_zombie_or_susceptible: => Removal; (alpha*Susceptible*Zombie + delta*Susceptible);
//Resurrection_to_Zombie: Removal => ; zeta*Removal;
//Death_from_Zombie: Zombie => ; alpha*Susceptible*Zombie;
Birth: => Susceptible; p
Death: Susceptible => Removal; delta*Susceptible
Infection: Susceptible => Infected; beta*Susceptible*Zombie
Zombification: Infected => Zombie; rho*Infected
Inf_death: Infected => Removal; delta * Infected
Zombie_death_and_res: Zombie => Removal; alpha * Susceptible * Zombie - zeta * Removal
Cure: Zombie => Susceptible; cure * Zombie
// Species initializations:
Susceptible = 0;
Zombie = 1;
Removal = 0;
Infected = 0;
// Compartment initializations:
compartment_ = 1;
// Variable initializations:
alpha = 0.005;
beta = 0.0095;
delta = 0.0001;
zeta = 0.0001;
rho = 0.01
p = 0.05;
cure = 0.00003
// Other declarations:
const compartment_, alpha, beta, delta, zeta, p;
// Unit definitions:
unit volume = 1e-3 litre;
unit time_unit = 86400 second;
unit substance = 1e-3 mole;
end
"""
# Run the extended model defined in `antstr` above (adds a latent
# Infected class, a zombification rate rho, and a Cure reaction), over
# successively longer horizons to probe long-term behaviour.
r = te.loada(antstr)
r.simulate(0, 20)
r.plot()
r.reset()
r.simulate(0, 200)
r.plot()
r.reset()
r.simulate(0, 2000)
r.plot()
r.reset()
r.simulate(0, 20000)
r.plot()
# + id="G2xrVSCOMqSd" executionInfo={"status": "ok", "timestamp": 1626829725304, "user_tz": 420, "elapsed": 8, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjBIQDv_3y0-AcP-Q3CnINVqa-uRyxiVqVccRVRpSg=s64", "userId": "08884588606127160502"}}
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="pBunmUSHrwqA" executionInfo={"status": "ok", "timestamp": 1626831062578, "user_tz": 420, "elapsed": 1179, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjBIQDv_3y0-AcP-Q3CnINVqa-uRyxiVqVccRVRpSg=s64", "userId": "08884588606127160502"}} outputId="6e054874-e6d3-4e45-e720-a620289d9777"
antstr = """
model *New_Model()
// Reactions:
//Birth: => Susceptible; p;
//Zombification: => Zombie;(beta*Susceptible*Zombie + zeta*Removal);
//Susceptible_to_Zombie_or_death: Susceptible => ; (beta*Susceptible*Zombie + delta*Susceptible);
//Removal_of_zombie_or_susceptible: => Removal; (alpha*Susceptible*Zombie + delta*Susceptible);
//Resurrection_to_Zombie: Removal => ; zeta*Removal;
//Death_from_Zombie: Zombie => ; alpha*Susceptible*Zombie;
Birth: => Susceptible; p*Susceptible/500
Death: Susceptible => Removal; delta*Susceptible
Infection: Susceptible => Infected; beta*Susceptible*Zombie
Zombification: Infected => Zombie; rho*Infected
Inf_death: Infected => Removal; delta * Infected
Zombie_death_and_res: Zombie => Removal; alpha * Susceptible * Zombie - zeta * Removal
Cure: Zombie => Susceptible; cure * Zombie
// Species initializations:
Susceptible = 500;
Zombie = 1;
Removal = 0;
Infected = 0;
// Compartment initializations:
compartment_ = 1;
// Variable initializations:
alpha = 0.005;
beta = 0.0095;
delta = 0.0001;
zeta = 0.0001;
rho = 0.01
p = 0.05;
cure = 0.00003
// Other declarations:
const compartment_, alpha, beta, delta, zeta, p;
// Unit definitions:
unit volume = 1e-3 litre;
unit time_unit = 86400 second;
unit substance = 1e-3 mole;
end
"""
# Run the final variant defined in `antstr` above: birth rate is made
# density-dependent (p*Susceptible/500) and the susceptible population
# starts at 500; again simulated over increasing horizons.
r = te.loada(antstr)
r.simulate(0, 20)
r.plot()
r.reset()
r.simulate(0, 200)
r.plot()
r.reset()
r.simulate(0, 2000)
r.plot()
r.reset()
r.simulate(0, 20000)
r.plot()
# + id="99WdiinXr4ch"
|
archived_lectures/Network-Modeling-Summer-School-2021/Model-Repositories-and-Source-Control/2) Biomodels-Demo-last night.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
## Python course for data scientists in a moderate level, supported by Wagatsuma Lab@Kyutech
#
# The MIT License (MIT): Copyright (c) 2022 <NAME> and Wagatsuma Lab@Kyutech
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
#
# # @Time : 2022-03-27
# # @Author : <NAME>
# # @Site : https://github.com/hirowgit/2B3_python_owl_logic_database_course
# # @IDE : Python 3.7.7 (default, Mar 10 2020, 15:43:27) [Clang 10.0.0 (clang-1000.11.45.5)] on darwin
# # @File : lec1_step6.py
# +
# Practice 2-3 (page 24/28)
# https://www.slideshare.net/tadahirotaniguchi0624/2-46861654
# -
# Undirected practice graph as an adjacency list. A goal node 'G' is not
# present in this graph, so the breadth-first search below simply visits
# every reachable node.
TargetGraph = {
    'S': ['A', 'B'],
    'A': ['S', 'C', 'D'],
    'B': ['S', 'C'],
    'C': ['A', 'B', 'D'],
    'D': ['A', 'C'],
}

OpenList = ['S']   # FIFO frontier of nodes waiting to be expanded
ClosedList = []    # nodes already expanded, in visiting order

while OpenList:
    state = OpenList.pop(0)   # dequeue the oldest frontier node
    if state in ClosedList:   # guard against re-expanding a node
        continue
    ClosedList.append(state)
    print(state)
    if state == 'G':          # stop if the goal is ever reached
        break
    # Enqueue unvisited neighbours that are not already queued.
    # FIX: the original flattened OpenList with a nested comprehension
    # that iterated strings character-by-character; it only worked
    # because every node name is a single character. extend() is
    # correct for node names of any length.
    OpenList.extend(node for node in TargetGraph[state]
                    if node not in ClosedList and node not in OpenList)
print('completed')
# Second practice graph (a tree rooted at 'A') as an adjacency list.
# There is no 'Goal' node, so the search enumerates the whole tree.
TargetGraph = {
    'A': ['B', 'C'],
    'B': ['A', 'D', 'E'],
    'C': ['A', 'F', 'G', 'H'],
    'D': ['B', 'I'],
    'E': ['B'],
    'F': ['C'],
    'G': ['C', 'J'],
    'H': ['C'],
    'I': ['D'],
    'J': ['G'],
}

OpenList = ['A']   # FIFO frontier
ClosedList = []    # visited nodes, in breadth-first order
k = 1              # step counter printed next to each visited node

while OpenList:
    state = OpenList.pop(0)
    if state in ClosedList:
        continue
    ClosedList.append(state)
    print(str(k) + ": " + state)
    if state == 'Goal':
        break
    # FIX: replaces the fragile string-flattening of the original, which
    # relied on every node name being a single character.
    OpenList.extend(node for node in TargetGraph[state]
                    if node not in ClosedList and node not in OpenList)
    k = k + 1
print('completed')
|
.ipynb_checkpoints/lec1_step6-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/DugguAditya/dmdwassignment/blob/main/Assignment_3.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/", "height": 402} id="dns3S7FMIL0O" outputId="98489d25-fbeb-4cb6-fde6-064eb7891350"
import pandas as pd
# Load the Toyota used-car dataset directly from the course GitHub repo.
path="https://raw.githubusercontent.com/DugguAditya/dmdwassignment/main/Toyota.csv"
data =pd.read_csv(path)
# Display the full DataFrame (last expression of the notebook cell).
data
# + colab={"base_uri": "https://localhost:8080/"} id="mYJjeUKKL3QU" outputId="9b08abf7-b6e2-407e-efdb-e3058a45553c"
type(data)
# + colab={"base_uri": "https://localhost:8080/"} id="bdWQt9C0MLXz" outputId="990afb41-38d9-4f68-983b-a3489dea5ac0"
data.shape
# + colab={"base_uri": "https://localhost:8080/"} id="4YBTLonJMT7W" outputId="d695004e-3c78-4f52-8dc7-075fa90d0124"
data.info()
# + colab={"base_uri": "https://localhost:8080/"} id="DxLIrLFaMYnn" outputId="aea6c622-ffb2-4397-eb58-f18600d77eb7"
data.index
# + colab={"base_uri": "https://localhost:8080/"} id="4hxHyPqjMei_" outputId="53fff9b2-a599-4832-a608-80da1d161ff4"
data.columns
# + colab={"base_uri": "https://localhost:8080/", "height": 195} id="nSwRruc3MnKr" outputId="7ec1483e-8902-4843-c329-3eea107808cc"
data.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 195} id="BPmN7QY9NUbY" outputId="41694e9f-e0e4-42c2-cc9e-cc5bdc4d6aee"
data.tail()
# + colab={"base_uri": "https://localhost:8080/", "height": 195} id="TNw5MFkONbcP" outputId="f27f034a-b194-4cd7-b02b-cec630b7ef83"
data.head(5)
# + colab={"base_uri": "https://localhost:8080/", "height": 343} id="Z28xpUC8Ner0" outputId="223c9c2e-e50d-43ce-b3d8-ff57dc45645f"
data[['Price',"Age"]].head(10)
# + colab={"base_uri": "https://localhost:8080/"} id="UMYXjIaYNiOx" outputId="800bbe5d-a8c2-4bf9-f699-10667c63fb75"
data.isnull().sum()
# + colab={"base_uri": "https://localhost:8080/"} id="xM775VlDNlP1" outputId="0675bdcb-f219-408c-94aa-1625df9e6695"
# Drop every row that contains at least one missing value, then confirm
# that no nulls remain per column.
data.dropna(inplace=True)
data.isnull().sum()
# + colab={"base_uri": "https://localhost:8080/"} id="aBVhYshONoZR" outputId="38992fac-248e-482d-87ad-5d4a02d13d72"
data.shape
# + colab={"base_uri": "https://localhost:8080/", "height": 343} id="qJrfL2_tOE0b" outputId="ea853212-4070-414f-cc9b-a606011215c0"
data.head(10)
# + colab={"base_uri": "https://localhost:8080/"} id="Yv7orWylPY0M" outputId="8c354c1b-b6b2-4516-edb0-67c9d5d71d38"
data['MetColor'].mean()
# + colab={"base_uri": "https://localhost:8080/"} id="_uvQJksrPi39" outputId="86259996-8bf7-40da-8d21-eb5905e23168"
data['MetColor'].head()
# + colab={"base_uri": "https://localhost:8080/"} id="SuF4D_c6PuLU" outputId="b12ea5d4-5a24-48df-abd2-2e21828f0125"
import numpy as np
# Fill missing metallic-colour flags with the column mean (display only;
# without inplace=True the DataFrame itself is not modified).
# FIX: np.NaN was removed in NumPy 2.0 -- np.nan is the canonical name.
data['MetColor'].replace(np.nan, data['MetColor'].mean()).head()
# + colab={"base_uri": "https://localhost:8080/", "height": 343} id="LH8nilaSQQ9t" outputId="7613ba43-70fb-4c99-eb76-2b7e36a90b76"
data.head(10)
# + colab={"base_uri": "https://localhost:8080/"} id="jO9wxiOdQUkE" outputId="8fda0dc0-e4a8-4b23-9ecc-adaac33cf811"
data['CC'].mean()
# + colab={"base_uri": "https://localhost:8080/"} id="7ydBlnG0QaTL" outputId="3ecf64de-5f30-4b44-d597-9189eb940892"
data['CC'].head()
# + colab={"base_uri": "https://localhost:8080/", "height": 639} id="xIwZwo2eQgqo" outputId="687471e5-dcd7-4fbc-971b-3b17da90d388"
data[['Age',"KM"]].head(20)
# + id="CbsmQvniQnEv"
|
Assignment_3.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# # Array creation routines
# ## Ones and zeros
import numpy as np

# Create a new array of 2*2 integers, without initializing entries.
# NOTE: np.empty does not zero memory -- the contents are arbitrary.
np.empty([2, 2], int)

# Let X = np.array([[1,2,3], [4,5,6]], np.int32).
# Create a new array with the same shape and type as X.
X = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
np.empty_like(X)

# Create a 3*3 (2-D) array with ones on the diagonal and zeros elsewhere.
np.eye(3)
np.identity(3)

# Create a new array of 3*2 float numbers, filled with ones.
np.ones([3, 2], float)

# Let x = np.arange(4, dtype=np.int64). Create an array of ones with the same shape and type as x.
x = np.arange(4, dtype=np.int64)
np.ones_like(x)

# Create a new array of 3*2 float numbers, filled with zeros.
np.zeros((3, 2), float)

# Let x = np.arange(4, dtype=np.int64). Create an array of zeros with the same shape and type as x.
x = np.arange(4, dtype=np.int64)
np.zeros_like(x)

# Create a new array of 2*5 uints, filled with 6.
np.full((2, 5), 6, dtype=np.uint)
np.ones([2, 5], dtype=np.uint) * 6

# Let x = np.arange(4, dtype=np.int64). Create an array of 6's with the same shape and type as x.
x = np.arange(4, dtype=np.int64)
np.full_like(x, 6)
np.ones_like(x) * 6

# ## From existing data
# Create an array of [1, 2, 3].
np.array([1, 2, 3])

# Let x = [1, 2]. Convert it into an array.
x = [1, 2]
np.asarray(x)

# Let X = np.array([[1, 2], [3, 4]]). Convert it into a matrix.
# (np.matrix is discouraged in modern NumPy but kept here to match the
# exercise; asmatrix avoids copying when possible.)
X = np.array([[1, 2], [3, 4]])
np.asmatrix(X)

# Let x = [1, 2]. Convert it into an array of `float`.
x = [1, 2]
# FIX: np.asfarray was removed in NumPy 2.0; np.asarray with an explicit
# dtype is the supported spelling.
np.asarray(x, dtype=float)
np.asarray(x, float)

# Let x = np.array([30]). Convert it into a scalar of its single element, i.e. 30.
x = np.array([30])
# FIX: np.asscalar was removed in NumPy 1.23; ndarray.item() is the
# supported replacement.
x.item()
x[0]

# Let x = np.array([1, 2, 3]). Create an array copy of x, which has a different id from x.
x = np.array([1, 2, 3])
y = np.copy(x)
print(id(x), x)
print(id(y), y)

# ## Numerical ranges
# Create an array of 2, 4, 6, 8, ..., 100.
np.arange(2, 101, 2)

# Create a 1-D array of 50 evenly spaced elements between 3. and 10., inclusive.
np.linspace(3., 10, 50)

# Create a 1-D array of 50 elements spaced evenly on a log scale between 3. and 10., exclusive.
np.logspace(3., 10., 50, endpoint=False)

# ## Building matrices
# Let X = np.array([[ 0, 1, 2, 3],
#                   [ 4, 5, 6, 7],
#                   [ 8, 9, 10, 11]]).
# Get the diagonal of X, that is, [0, 5, 10].
X = np.array([[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]])
np.diag(X)
X.diagonal()

# Create a 2-D array whose diagonal equals [1, 2, 3, 4] and 0's elsewhere.
np.diagflat([1, 2, 3, 4])

# Create an array which looks like below.
# array([[ 0., 0., 0., 0., 0.],
#        [ 1., 0., 0., 0., 0.],
#        [ 1., 1., 0., 0., 0.]])
np.tri(3, 5, -1)

# Create an array which looks like below.
# array([[ 0, 0, 0],
#        [ 4, 0, 0],
#        [ 7, 8, 0],
#        [10, 11, 12]])
np.tril(np.arange(1, 13).reshape(4, 3), -1)

# Create an array which looks like below.
# array([[ 1, 2, 3],
#        [ 4, 5, 6],
#        [ 0, 8, 9],
#        [ 0, 0, 12]])
np.triu(np.arange(1, 13).reshape(4, 3), -1)
|
1_Array_creation_routines_Solution.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Introduction to TensorFlow Data Validation
#
#
# ## Learning Objectives
#
# 1. Review TFDV methods
# 2. Generate statistics
# 3. Visualize statistics
# 4. Infer a schema
# 5. Update a schema
#
#
#
# ## Introduction
# This lab is an introduction to TensorFlow Data Validation (TFDV), a key component of TensorFlow Extended. This lab serves as a foundation for understanding the features of TFDV and how it can help you understand, validate, and monitor your data.
#
# TFDV can be used for generating schemas and statistics about the distribution of every feature in the dataset. Such information is useful for comparing multiple datasets (e.g. training vs inference datasets) and reporting:
#
# * statistical differences in the feature distributions.
# TFDV also offers visualization capabilities for comparing datasets based on the Google PAIR Facets project.
#
# Each learning objective will correspond to a __#TODO__ in the [student lab notebook](https://github.com/GoogleCloudPlatform/training-data-analyst/blob/master/courses/machine_learning/deepdive2/production_ml/labs/tfdv_basic_spending.ipynb) -- try to complete that notebook first before reviewing this solution notebook.
# + [markdown] colab_type="text" id="AsHg6SD2nO1v"
# ### Import Libraries
# + [markdown] colab_type="text" id="AsHg6SD2nO1v"
# **Run the below cell. Restart the kernel (Kernel > Restart kernel > Restart).**
# **Re-run the below cell and proceed further.**
# -
# !pip install pyarrow==2.0.0
# !pip install numpy==1.19.2
# !pip install tensorflow-data-validation
# +
import pandas as pd
import tensorflow_data_validation as tfdv
import sys
import warnings

# Silence noisy deprecation/compatibility warnings from TFDV's dependencies.
warnings.filterwarnings('ignore')

print('Installing TensorFlow Data Validation')
# !pip install -q tensorflow_data_validation[visualization]
print('TFDV version: {}'.format(tfdv.version.__version__))

# Confirm that we're using Python 3.
# FIX: use ==, not `is` -- identity comparison against an int literal is
# implementation-dependent and raises a SyntaxWarning on modern Python.
assert sys.version_info.major == 3, 'Oops, not running Python 3. Use Runtime > Change runtime type'
# + [markdown] colab_type="text" id="Fnm6Mj3vTGLm"
# ### Load the Consumer Spending Dataset
#
# We will download our dataset from Google Cloud Storage. The columns in the dataset are:
#
# * 'Graduated': Whether or not the person is a college graduate
# * 'Work Experience': The number of years in the workforce
# * 'Family Size': The size of the family unit
# * 'Spending Score': The spending score for consumer spending
# -
# TODO
# Training split: one row per consumer with the Graduated, Work Experience,
# Family Size and Spending Score columns described above.
score_train = pd.read_csv('data/score_train.csv')
score_train.head()  # peek at the first rows
# TODO
# Held-out split with the same columns; compared against the training
# statistics later in the notebook.
score_test = pd.read_csv('data/score_test.csv')
score_test.head()
score_train.info()  # dtypes and non-null counts per column
# #### Review the methods present in TFDV
# check methods present in tfdv
# TODO
# FIX: dir() already returns a list, so wrapping it in a comprehension
# only made a redundant copy (flake8-comprehensions C416).
dir(tfdv)
# ### Describing data with TFDV
# The usual workflow when using TFDV during training is as follows:
#
#
# 1. Generate statistics for the data
# 2. Use those statistics to generate a schema for each feature
# 3. Visualize the schema and statistics and manually inspect them
# 4. Update the schema if needed
#
# ### Compute and visualize statistics
#
# First we'll use [`tfdv.generate_statistics_from_csv`](https://www.tensorflow.org/tfx/data_validation/api_docs/python/tfdv/generate_statistics_from_csv) to compute statistics for our training data. (ignore the snappy warnings)
#
# TFDV can compute descriptive [statistics](https://github.com/tensorflow/metadata/blob/v0.6.0/tensorflow_metadata/proto/v0/statistics.proto) that provide a quick overview of the data in terms of the features that are present and the shapes of their value distributions.
#
# Internally, TFDV uses [Apache Beam](https://beam.apache.org/)'s data-parallel processing framework to scale the computation of statistics over large datasets. For applications that wish to integrate deeper with TFDV (e.g., attach statistics generation at the end of a data-generation pipeline), the API also exposes a Beam PTransform for statistics generation.
#
# **NOTE: Compute statistics**
# * [tfdv.generate_statistics_from_csv](https://www.tensorflow.org/tfx/data_validation/api_docs/python/tfdv/generate_statistics_from_csv)
# * [tfdv.generate_statistics_from_dataframe](https://www.tensorflow.org/tfx/data_validation/api_docs/python/tfdv/generate_statistics_from_dataframe)
# * [tfdv.generate_statistics_from_tfrecord](https://www.tensorflow.org/tfx/data_validation/api_docs/python/tfdv/generate_statistics_from_tfrecord)
# #### Generate Statistics from a Pandas DataFrame
# Compute data statistics for the input pandas DataFrame.
# TODO
# The returned statistics object feeds visualize_statistics and
# infer_schema later in the notebook.
stats = tfdv.generate_statistics_from_dataframe(dataframe=score_train)
# Now let's use [`tfdv.visualize_statistics`](https://www.tensorflow.org/tfx/data_validation/api_docs/python/tfdv/visualize_statistics), which uses [Facets](https://pair-code.github.io/facets/) to create a succinct visualization of our training data:
#
# * Notice that numeric features and categorical features are visualized separately, and that charts are displayed showing the distributions for each feature.
# * Notice that features with missing or zero values display a percentage in red as a visual indicator that there may be issues with examples in those features. The percentage is the percentage of examples that have missing or zero values for that feature.
# * Notice that there are no examples with values for `pickup_census_tract`. This is an opportunity for dimensionality reduction!
# * Try clicking "expand" above the charts to change the display
# * Try hovering over bars in the charts to display bucket ranges and counts
# * Try switching between the log and linear scales, and notice how the log scale reveals much more detail about the `payment_type` categorical feature
# * Try selecting "quantiles" from the "Chart to show" menu, and hover over the markers to show the quantile percentages
# Visualize the input statistics using Facets.
# TODO
# Renders an interactive overview inside the notebook; numeric and
# categorical features are shown in separate sections.
tfdv.visualize_statistics(stats)
# #### TFDV generates different types of statistics based on the type of features.
#
# **For numerical features, TFDV computes for every feature:**
# * Count of records
# * Number of missing (i.e. null values)
# * Histogram of values
# * Mean and standard deviation
# * Minimum and maximum values
# * Percentage of zero values
#
# **For categorical features, TFDV provides:**
# * Count of values
# * Percentage of missing values
# * Number of unique values
# * Average string length
# * Count for each label and its rank
# ### Let's compare the score_train and the score_test datasets
# +
# Recompute statistics for both splits and render them side by side so
# train/test distribution differences are easy to spot.
train_stats = tfdv.generate_statistics_from_dataframe(dataframe=score_train)
test_stats = tfdv.generate_statistics_from_dataframe(dataframe=score_test)
tfdv.visualize_statistics(
    lhs_statistics=train_stats, lhs_name='TRAIN_DATASET',
    rhs_statistics=test_stats, rhs_name='NEW_DATASET')
# + [markdown] colab_type="text" id="KVR02-y4V0uM"
# ### Infer a schema
#
# Now let's use [`tfdv.infer_schema`](https://www.tensorflow.org/tfx/data_validation/api_docs/python/tfdv/infer_schema) to create a schema for our data. A schema defines constraints for the data that are relevant for ML. Example constraints include the data type of each feature, whether it's numerical or categorical, or the frequency of its presence in the data. For categorical features the schema also defines the domain - the list of acceptable values. Since writing a schema can be a tedious task, especially for datasets with lots of features, TFDV provides a method to generate an initial version of the schema based on the descriptive statistics.
#
# Getting the schema right is important because the rest of our production pipeline will be relying on the schema that TFDV generates to be correct.
# -
# #### Generating Schema
# Once statistics are generated, the next step is to generate a schema for our dataset. This schema will map each feature in the dataset to a type (float, bytes, etc.). Also define feature boundaries (min, max, distribution of values and missings, etc.).
#
# Link to infer schema
# https://www.tensorflow.org/tfx/data_validation/api_docs/python/tfdv/infer_schema
#
# With TFDV, we generate schema from statistics using
# + colab={} colab_type="code" id="6LLkRJThVr9m"
# Infers schema from the input statistics.
# TODO
# Infer an initial schema (feature types, presence, domains) from the
# statistics computed above, then print the raw proto.
schema = tfdv.infer_schema(statistics=stats)
print(schema)
# -
# The schema also provides documentation for the data, and so is useful when different developers work on the same data. Let's use [`tfdv.display_schema`](https://www.tensorflow.org/tfx/data_validation/api_docs/python/tfdv/display_schema) to display the inferred schema so that we can review it.
# Display the inferred schema as a table (type, presence, valency, domain).
tfdv.display_schema(schema=schema)
# #### TFDV provides a API to print a summary of each feature schema using
#
# In this visualization, the columns stand for:
#
# **Type** indicates the feature datatype.
#
# **Presence** indicates whether the feature must be present in 100% of examples (required) or not (optional).
#
# **Valency** indicates the number of values required per training example.
#
# **Domain and Values** indicates The feature domain and its values
#
# In the case of categorical features, single indicates that each training example must have exactly one category for the feature.
# ### Updating the Schema
# As stated above, **Presence** indicates whether the feature must be present in 100% of examples (required) or not (optional). Currently, all of our features except for our target label are shown as "optional". We need to make our features all required except for "Work Experience". We will need to update the schema.
# TFDV lets you update the schema according to your domain knowledge of the data if you are not satisfied by the auto-generated schema. We will update three use cases: Making a feature required, adding a value to a feature, and change a feature from a float to an integer.
# #### Change optional features to required.
# Mark Graduated, Profession and Family_Size as required (present in all examples)
# Setting presence.min_fraction to 1.0 marks a feature as "required":
# it must be present in 100% of the examples.
Graduated_feature = tfdv.get_feature(schema, 'Graduated')
Graduated_feature.presence.min_fraction = 1.0
Profession_feature = tfdv.get_feature(schema, 'Profession')
Profession_feature.presence.min_fraction = 1.0
Family_Size_feature = tfdv.get_feature(schema, 'Family_Size')
Family_Size_feature.presence.min_fraction = 1.0
tfdv.display_schema(schema)
# #### Update a feature with a new value
# Let's add "self-employed" to the Profession feature
# Insert 'Self-Employed' at the front of the accepted Profession values.
Profession_domain = tfdv.get_domain(schema, 'Profession')
Profession_domain.value.insert(0, 'Self-Employed')
Profession_domain.value  # show the updated domain values
# [The index 0 inserts 'Self-Employed' first; if the index were 3,
# it would be placed after the third value.]
# #### Let's remove "Homemaker" from "Profession"
# Drop 'Homemaker' from the accepted Profession values.
Profession_domain = tfdv.get_domain(schema, 'Profession')
Profession_domain.value.remove('Homemaker')
Profession_domain.value  # show the updated domain values
# #### Change a feature from a float to an integer
# Update Family_Size to Int
size = tfdv.get_feature(schema, 'Family_Size')
# 2 is the INT value of the FeatureType enum in the TF Metadata schema
# proto (0=UNKNOWN, 1=BYTES, 2=INT, 3=FLOAT).
size.type=2
tfdv.display_schema(schema)
# In the next lab, you compare two datasets and check for anomalies.
# + [markdown] colab_type="text" id="b8eC59yISdGB"
# ## When to use TFDV
#
# It's easy to think of TFDV as only applying to the start of your training pipeline, as we did here, but in fact it has many uses. Here are a few more:
#
# * Validating new data for inference to make sure that we haven't suddenly started receiving bad features
# * Validating new data for inference to make sure that our model has trained on that part of the decision surface
# * Validating our data after we've transformed it and done feature engineering (probably using [TensorFlow Transform](https://www.tensorflow.org/tfx/transform/)) to make sure we haven't done something wrong
# -
# https://github.com/GoogleCloudPlatform/mlops-on-gcp/blob/master/examples/tfdv-structured-data/tfdv-covertype.ipynb
|
courses/machine_learning/deepdive2/production_ml/solutions/tfdv_basic_spending.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## 0. Pre-processing of input data to allow correct selection of field names etc.
# +
import os
import ipywidgets as widgets
import pandas as pd
import geopandas as gpd
import matplotlib
from shapely.geometry import Polygon
# %matplotlib inline
pd.set_option("display.max_columns", 101)
# +
test_data_name='test_data3'
test_data_path='../'+test_data_name+'/'
# Work inside the test-data directory so relative paths in the config
# script below resolve correctly.
os.chdir(test_data_path)
# %run -i "m2l_config.py"
# +
# Build the bounding box as a tuple, as a comma-separated string, and as
# a Polygon/GeoDataFrame for plotting the extent.
bbox=(minx,miny,maxx,maxy)
bbox2=str(minx)+","+str(miny)+","+str(maxx)+","+str(maxy)
# NOTE(review): the last latitude is maxy, so the 5th vertex repeats the
# 4th; shapely closes the ring automatically, but [..., miny] was likely
# intended — verify.
lat_point_list = [miny, miny, maxy, maxy, maxy]
lon_point_list = [minx, maxx, maxx, minx, minx]
bbox_geom = Polygon(zip(lon_point_list, lat_point_list))
polygon = gpd.GeoDataFrame(index=[0], crs=dst_crs, geometry=[bbox_geom])
# Clip each layer to the bounding box while reading.
geology = gpd.read_file(geology_file,bbox=bbox)
lines=gpd.read_file(fault_file,bbox=bbox)
structures = gpd.read_file(structure_file,bbox=bbox)
# Plot geology as the base layer, then overlay structure points, faults
# and the bounding-box outline.
base=geology.plot(column=c_l['c'],figsize=(10,10),edgecolor='#000000',linewidth=0.2)
structures.plot(ax=base, color='none',edgecolor='black')
lines.plot(ax=base,cmap='rainbow',column=c_l['f'],figsize=(10,10),linewidth=0.4)
polygon.plot(ax=base, color='none',edgecolor='black')
# -
# Preview the first rows of each layer.
display(geology.head(10))
display(lines.head(10))
display(structures.head(10))
# Interactive picker: first choose a field (column) of the geology layer...
layer=geology
w=widgets.ToggleButtons(
    options=list(layer.columns.values),
    description='Field:',
    disabled=False,
    button_style='', # 'success', 'info', 'warning', 'danger' or ''
)
# +
display(w)
# ...then choose among the values of the selected field.
# NOTE(review): options are all values of the column, not unique values,
# despite the 'Unique Values' label — confirm intent.
s=widgets.ToggleButtons(
    options=list(geology[w.value].values),
    description='Unique Values:',
    disabled=False,
    button_style='', # 'success', 'info', 'warning', 'danger' or ''
)
# -
display(s)
print(s.value)
# +
from shapely.geometry import MultiPoint  # NOTE(review): MultiPoint is not used below
# Fetch one WAROX structural-point layer from the Loop WFS server and
# show its bounds.
warox_name='warox_points_3354_28350'
structure_file='http://geo.loop-gis.org/geoserver/loop/wfs?service=WFS&version=1.1.0&request=GetFeature&typeName='+warox_name+'&srs=EPSG%3A28350'
points = gpd.read_file(structure_file)
display(points.total_bounds)
# -
# Candidate WAROX layer names.  Each assignment overwrites the previous
# one, so only the LAST value ('warox_wa_28350') takes effect; the rest
# are kept as an easy way to switch layers by reordering.
warox_name='for_ham_warox_28350'
warox_name='warox_2749_28350'
warox_name='warox_points_2954_28350'
warox_name='warox_points_3154_28350'
warox_name='warox_points_3155_28350'
warox_name='warox_points_3156_28350'
warox_name='warox_points_3252_28350'
warox_name='warox_points_3253_28350'
warox_name='warox_points_3254_28350'
warox_name='warox_points_3352_28350'
warox_name='warox_points_3353_28350'
warox_name='warox_points_3354_28350'
warox_name='warox_points_3451_28350'
warox_name='warox_points_3452_28350'
warox_name='warox_points_f5011'
warox_name='warox_points_f5105_28350'
warox_name='warox_points_f5110_28350'
warox_name='warox_points_h5101_28350'
warox_name='warox_points_h5109_28350'
warox_name='warox_wa_28350'
|
notebooks/0. Pre-processing.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (GIS)
# language: python
# name: gis
# ---
# +
import csv
import io
import os
import mercantile
import numpy as np
from tqdm import tqdm
from PIL import Image
from common.colors import make_palette
# +
def main(args):
    """Soft-vote probability tiles from several slippy-map directories into masks.

    NOTE(review): this cell references `sys`, `tiles_from_slippy_map` and
    `softvote`; `sys` and `tiles_from_slippy_map` are not imported/defined
    in this cell — it appears to be pasted reference code; confirm before
    running.
    """
    if args.weights and len(args.probs) != len(args.weights):
        sys.exit("Error: number of slippy map directories and weights must be the same")
    tilesets = map(tiles_from_slippy_map, args.probs)
    for tileset in tqdm(list(zip(*tilesets)), desc="Masks", unit="tile", ascii=True):
        # One (tile, path) pair per input directory.
        tiles = [tile for tile, _ in tileset]
        paths = [path for _, path in tileset]
        # NOTE(review): len(set(...)) of a non-empty list is always >= 1,
        # so this assert can never fire — it does not actually verify sync.
        assert len(set(tiles)), "tilesets in sync"
        x, y, z = tiles[0]
        # Un-quantize the probabilities in [0,255] to floating point values in [0,1]
        anchors = np.linspace(0, 1, 256)
        def load(path):
            # Note: assumes binary case and probability sums up to one.
            # Needs to be in sync with how we store them in prediction.
            quantized = np.array(Image.open(path).convert("P"))
            # (512, 512, 1) -> (1, 512, 512)
            foreground = np.rollaxis(np.expand_dims(anchors[quantized], axis=0), axis=0)
            background = np.rollaxis(1. - foreground, axis=0)
            # (1, 512, 512) + (1, 512, 512) -> (2, 512, 512)
            return np.concatenate((background, foreground), axis=0)
        probs = [load(path) for path in paths]
        # Weighted-average soft voting over the per-directory probabilities.
        mask = softvote(probs, axis=0, weights=args.weights)
        mask = mask.astype(np.uint8)
        palette = make_palette("denim", "orange")
        out = Image.fromarray(mask, mode="P")
        out.putpalette(palette)
        os.makedirs(os.path.join(args.masks, str(z), str(x)), exist_ok=True)
        path = os.path.join(args.masks, str(z), str(x), str(y) + ".png")
        out.save(path, optimize=True)
def softvote(probs, axis=0, weights=None):
    """Turn class probabilities into class indices via weighted-average soft voting.

    Args:
        probs: array-like probabilities to average.
        axis: axis or axes along which to soft-vote.
        weights: array-like for weighting probabilities.

    Notes:
        See http://scikit-learn.org/stable/modules/ensemble.html#weighted-average-probabilities-soft-voting
    """
    averaged = np.average(probs, axis=axis, weights=weights)
    return np.argmax(averaged, axis=axis)
# -
# +
def tiles_from_directory(dir_path):
    """Yield tiles from a directory of files named "z_x_y.png" (slippy-map style).

    Args:
        dir_path: directory containing files whose names follow the
            "z_x_y.*" slippy-map convention.

    Yields:
        (mercantile.Tile, path) pairs for every well-formed file name.
    """

    def isdigit(v):
        try:
            _ = int(v)  # noqa: F841
            return True
        except ValueError:
            return False

    for name in os.listdir(dir_path):
        tile_name = os.path.splitext(name)[0].split("_")
        # BUG FIX: the original only validated the first component, so a
        # name such as "19_ab_7.png" crashed on int(); require all three
        # z, x, y components to be numeric.  Debug prints removed.
        if len(tile_name) < 3 or not all(isdigit(part) for part in tile_name[:3]):
            continue
        z = int(tile_name[0])
        x = int(tile_name[1])
        y = int(tile_name[2])
        tile = mercantile.Tile(x=x, y=y, z=z)
        path = os.path.join(dir_path, "{}_{}_{}.{}".format(z, x, y, "png"))
        yield tile, path
# -
# Sample probabilities directory; the commented calls below were used to
# sanity-check tiles_from_directory interactively.
path = '/Users/stouzani/Desktop/Unstructured_ML/Cities_projects/SF_test/prediction/diecewce/probs'
#tiles_from_directory(path)
#tilesets = map(tiles_from_directory, probs_path)
#print(list(zip(*tilesets)))
#print(list(tiles_from_directory(probs_path)))
# +
def get_masks_from_probs(probs_path, masks_path):
    """Generate class-index masks from per-tile probability files.

    Args:
        probs_path: directory with probability PNGs named "z_x_y.png"
            (slippy-map style).
        masks_path: directory where the masks will be saved.
    """
    os.makedirs(masks_path, exist_ok=True)
    for tileset in tqdm(list(tiles_from_directory(probs_path)), desc="masks", unit="tile", ascii=True):
        # tiles_from_directory yields (tile, path) pairs.
        tile, prob_path = tileset
        x, y, z = tile
        # Un-quantize the probabilities in [0,255] to floating point values in [0,1]
        anchors = np.linspace(0, 1, 256)

        def load(path):
            # Note: assumes binary case and probability sums up to one.
            quantized = np.array(Image.open(path).convert("P"))
            # (512, 512) -> (1, 512, 512)
            foreground = np.rollaxis(np.expand_dims(anchors[quantized], axis=0), axis=0)
            background = np.rollaxis(1. - foreground, axis=0)
            # (1, 512, 512) + (1, 512, 512) -> (2, 512, 512)
            return np.concatenate((background, foreground), axis=0)

        # BUG FIX: the load call was commented out in the original, leaving
        # `probs` undefined (NameError on the first iteration).
        probs = load(prob_path)
        # Pick the class with the highest probability per pixel.
        mask = np.argmax(probs, axis=0)
        mask = mask.astype(np.uint8)
        palette = make_palette("denim", "orange")
        out = Image.fromarray(mask, mode="P")
        out.putpalette(palette)
        out_path = os.path.join(masks_path, "{}_{}_{}.{}".format(z, x, y, "png"))
        out.save(out_path, optimize=True)
# -
# Generate masks from a SINGLE probabilities directory (this variant takes
# one directory, unlike the list-based variants later in the file).
probs_path = '/Users/stouzani/Desktop/Unstructured_ML/Cities_projects/SF_test/prediction/diecewce/probs/'
masks_path = '/Users/stouzani/Desktop/Unstructured_ML/Cities_projects/SF_test/prediction/diecewce/masks_probs5'
get_masks_from_probs(probs_path, masks_path)
# +
def get_masks_from_probs2(probs_path, masks_path, weights):
    """Generate masks by soft-voting probability tiles from several directories.

    Args:
        probs_path: list of directories with probability PNGs named
            "z_x_y.png" (slippy-map style).
        masks_path: directory where the masks will be saved.
        weights: array-like for weighting each directory's probabilities.
    """
    if weights and len(probs_path) != len(weights):
        sys.exit("Error: number of slippy map directories and weights must be the same")
    # Zip the per-directory generators so each iteration sees the same
    # tile from every input directory.
    tilesets = map(tiles_from_directory, probs_path)
    os.makedirs(masks_path, exist_ok=True)
    for tileset in tqdm(list(zip(*tilesets)), desc="masks", unit="tile", ascii=True):
        tiles = [tile for tile, _ in tileset]
        paths = [path for _, path in tileset]
        # BUG FIX: tilesets are in sync when every directory yields the
        # SAME tile, i.e. the set collapses to exactly ONE element.  The
        # original asserted len(set(tiles)) == 3, which fails precisely
        # when the tilesets ARE in sync.
        assert len(set(tiles)) == 1, "tilesets not in sync"
        x, y, z = tiles[0]
        # Un-quantize the probabilities in [0,255] to floating point values in [0,1]
        anchors = np.linspace(0, 1, 256)

        def load(path):
            # Note: assumes binary case and probability sums up to one.
            quantized = np.array(Image.open(path).convert("P"))
            # (512, 512) -> (1, 512, 512)
            foreground = np.rollaxis(np.expand_dims(anchors[quantized], axis=0), axis=0)
            background = np.rollaxis(1. - foreground, axis=0)
            # (1, 512, 512) + (1, 512, 512) -> (2, 512, 512)
            return np.concatenate((background, foreground), axis=0)

        probs = [load(path) for path in paths]
        mask = softvote(probs, axis=0, weights=weights)
        mask = mask.astype(np.uint8)
        palette = make_palette("denim", "orange")
        out = Image.fromarray(mask, mode="P")
        out.putpalette(palette)
        path = os.path.join(masks_path, "{}_{}_{}.{}".format(z, x, y, "png"))
        out.save(path, optimize=True)


def softvote(probs, axis=0, weights=None):
    """Weighted average soft-voting to transform class probabilities into class indices.

    Args:
        probs: array-like probabilities to average.
        axis: axis or axes along which to soft-vote.
        weights: array-like for weighting probabilities.

    Notes:
        See http://scikit-learn.org/stable/modules/ensemble.html#weighted-average-probabilities-soft-voting
    """
    return np.argmax(np.average(probs, axis=axis, weights=weights), axis=axis)
# +
# Ensemble two probability directories with 85/15 soft-vote weights.
probs_path2 = ['/Users/stouzani/Desktop/Unstructured_ML/Cities_projects/SF_test/prediction/diecewce/probs/',
               '/Users/stouzani/Desktop/Unstructured_ML/Cities_projects/SF_test/prediction/diecece/probs/']
masks_path2 = '/Users/stouzani/Desktop/Unstructured_ML/Cities_projects/SF_test/prediction/diecewce/masks_probs4'
weights = [.85,.15]
# -
len(probs_path2)
get_masks_from_probs2(probs_path2, masks_path2,weights)
# +
import csv
import io
import os
import sys
import mercantile
import numpy as np
from tqdm import tqdm
from PIL import Image
from common.colors import make_palette
def get_masks_from_probs(probs_path, masks_path, weights=None):
    """Generate class-index masks by soft-voting probability tiles.

    Args:
        probs_path: list of directories with probability PNGs named
            "z_x_y.png" (slippy-map style).
        masks_path: directory where the masks will be saved.
        weights: array-like for weighting each directory's probabilities.
    """
    if weights and len(probs_path) != len(weights):
        sys.exit("Error: number of probs directories and weights must be the same")
    os.makedirs(masks_path, exist_ok=True)
    # Zip the per-directory generators so each iteration sees the same
    # tile from every input directory.  (Debug print and dead commented
    # branches removed.)
    tilesets = map(tiles_from_directory, probs_path)
    list_tilesets = list(zip(*tilesets))
    for tileset in tqdm(list_tilesets, desc="masks", unit="tile", ascii=True):
        # Quantization anchors: map byte values [0,255] back to [0,1].
        anchors = np.linspace(0, 1, 256)
        tiles = [tile for tile, _ in tileset]
        paths = [path for _, path in tileset]
        # BUG FIX: in-sync tilesets collapse to a single unique tile; the
        # original `assert len(set(tiles))` was always true for non-empty
        # lists and therefore checked nothing.
        assert len(set(tiles)) == 1, "tilesets not in sync"
        x, y, z = tiles[0]
        probs = [probs_load(path, anchors) for path in paths]
        mask = softvote(probs, axis=0, weights=weights)
        mask = mask.astype(np.uint8)
        palette = make_palette("denim", "orange")
        out = Image.fromarray(mask, mode="P")
        out.putpalette(palette)
        path = os.path.join(masks_path, "{}_{}_{}.{}".format(z, x, y, "png"))
        out.save(path, optimize=True)
def tiles_from_directory(dir_path):
    """Yield tiles from a directory of files named "z_x_y.png" (slippy-map style).

    Args:
        dir_path: directory containing files whose names follow the
            "z_x_y.*" slippy-map convention.

    Yields:
        (mercantile.Tile, path) pairs for every well-formed file name.
    """

    def isdigit(v):
        try:
            _ = int(v)  # noqa: F841
            return True
        except ValueError:
            return False

    # Debug print(dir_path) removed.
    for name in os.listdir(dir_path):
        tile_name = os.path.splitext(name)[0].split("_")
        # BUG FIX: only the first component was validated, so a name like
        # "19_ab_7.png" crashed on int(); require all three z, x, y parts
        # to be numeric.
        if len(tile_name) < 3 or not all(isdigit(part) for part in tile_name[:3]):
            continue
        z = int(tile_name[0])
        x = int(tile_name[1])
        y = int(tile_name[2])
        tile = mercantile.Tile(x=x, y=y, z=z)
        path = os.path.join(dir_path, "{}_{}_{}.{}".format(z, x, y, "png"))
        yield tile, path
def softvote(probs, axis=0, weights=None):
    """Pick class indices by weighted averaging of class probabilities.

    Args:
        probs: array-like probabilities to average.
        axis: axis or axes along which to soft-vote.
        weights: array-like for weighting probabilities.

    Notes:
        See http://scikit-learn.org/stable/modules/ensemble.html#weighted-average-probabilities-soft-voting
    """
    mean_probs = np.average(probs, weights=weights, axis=axis)
    winners = np.argmax(mean_probs, axis=axis)
    return winners
def probs_load(path,anchors):
    """Load a quantized probability PNG as a (2, H, W) float array of
    [background, foreground] probabilities in [0, 1].

    Args:
        path: probability PNG produced by the prediction step.
        anchors: 256-entry lookup mapping byte values to [0, 1].
    """
    # Note: assumes binary case and probability sums up to one.
    quantized = np.array(Image.open(path).convert("P"))
    # (512, 512, 1) -> (1, 512, 512)
    foreground = np.rollaxis(np.expand_dims(anchors[quantized], axis=0), axis=0)
    background = np.rollaxis(1. - foreground, axis=0)
    # (1, 512, 512) + (1, 512, 512) -> (2, 512, 512)
    return np.concatenate((background, foreground), axis=0)
# -
# Path setup for the refactored variant above; the calls themselves are
# commented out here and exercised in later cells.
probs_path = ['/Users/stouzani/Desktop/Unstructured_ML/Cities_projects/SF_test/prediction/diecewce/probs/']
masks_path = '/Users/stouzani/Desktop/Unstructured_ML/Cities_projects/SF_test/prediction/diecewce/masks_probs5'
# +
#get_masks_from_probs(probs_path, masks_path)
# +
probs_path2 = ['/Users/stouzani/Desktop/Unstructured_ML/Cities_projects/SF_test/prediction/diecewce/probs/',
               '/Users/stouzani/Desktop/Unstructured_ML/Cities_projects/SF_test/prediction/diecece/probs/']
masks_path2 = '/Users/stouzani/Desktop/Unstructured_ML/Cities_projects/SF_test/prediction/diecewce/masks_probs6'
weights = [.85,.15]
# +
#get_masks_from_probs(probs_path2, masks_path2,weights)
# -
def get_masks_from_probs2(probs_path, masks_path, weights=None, num_workers=4):
    """Threaded mask generation by soft-voting probability tiles.

    Args:
        probs_path: list of directories with probability PNGs named
            "z_x_y.png" (slippy-map style).
        masks_path: directory where the masks will be saved.
        weights: array-like for weighting each directory's probabilities.
        num_workers: size of the thread pool used to process tiles.

    NOTE(review): relies on `concurrent.futures` being imported as
    `futures` (done in a later notebook cell) before this is called.
    """
    if weights and len(probs_path) != len(weights):
        sys.exit("Error: number of probs directories and weights must be the same")
    os.makedirs(masks_path, exist_ok=True)
    tilesets = map(tiles_from_directory, probs_path)
    list_tilesets = list(zip(*tilesets))
    with futures.ThreadPoolExecutor(num_workers) as executor:
        def worker(tileset):
            # Quantization anchors: map byte values [0,255] back to [0,1].
            anchors = np.linspace(0, 1, 256)
            try:
                tiles = [tile for tile, _ in tileset]
                paths = [path for _, path in tileset]
            except OSError:
                # BUG FIX: the original returned `tiles` here, which may be
                # unbound if the first comprehension raised.
                return tileset, False
            # BUG FIX: in-sync tilesets collapse to ONE unique tile; the
            # original `assert len(set(tiles))` was always true, so
            # out-of-sync tilesets were processed silently.  Route the
            # mismatch through the (tiles, False) warning path instead.
            if len(set(tiles)) != 1:
                return tiles, False
            x, y, z = tiles[0]
            probs = [probs_load(path, anchors) for path in paths]
            mask = softvote(probs, axis=0, weights=weights)
            mask = mask.astype(np.uint8)
            palette = make_palette("denim", "orange")
            out = Image.fromarray(mask, mode="P")
            out.putpalette(palette)
            path = os.path.join(masks_path, "{}_{}_{}.{}".format(z, x, y, "png"))
            out.save(path, optimize=True)
            return tiles, True

        for tiles, ok in executor.map(worker, list_tilesets):
            if not ok:
                print("Warning: {} tilesets not in sync".format(tiles), file=sys.stderr)
# +
# get_masks_from_probs2 above uses the name `futures` at call time, so this
# import must run before the call below.
import concurrent.futures as futures
probs_path2 = ['/Users/stouzani/Desktop/Unstructured_ML/Cities_projects/SF_test/prediction/diecewce/probs/',
               '/Users/stouzani/Desktop/Unstructured_ML/Cities_projects/SF_test/prediction/diecece/probs/']
masks_path2 = '/Users/stouzani/Desktop/Unstructured_ML/Cities_projects/SF_test/prediction/diecewce/masks_probs7'
weights = [.85,.15]
get_masks_from_probs2(probs_path2, masks_path2,weights, num_workers=4)
# -
# # Test treshold on probs
def probs_load(path, anchors, threshold=None):
    """Load a quantized probability PNG as [background, foreground] planes.

    Args:
        path: probability PNG produced by the prediction step.
        anchors: 256-entry lookup mapping byte values to [0, 1].
        threshold: if given, foreground probabilities below it are zeroed
            (their background probability becomes 1 accordingly).

    Returns:
        (2, H, W) float array: [background, foreground].
    """
    # Note: assumes binary case and probability sums up to one.
    quantized = np.array(Image.open(path).convert("P"))
    # (H, W) -> (1, H, W)
    foreground = np.rollaxis(np.expand_dims(anchors[quantized], axis=0), axis=0)
    if threshold:
        # BUG FIX: removed the stray debug `print("Hello World")`.
        foreground[foreground < threshold] = 0
    background = np.rollaxis(1. - foreground, axis=0)
    # (1, H, W) + (1, H, W) -> (2, H, W)
    return np.concatenate((background, foreground), axis=0)
# Visual sanity check of the thresholded foreground channel for one tile.
anchors = np.linspace(0, 1, 256)
path_probs = '/Users/stouzani/Desktop/Unstructured_ML/Cities_projects/SF_test/prediction/diecewce/probs/19_83727_202641.png'
probs = probs_load(path_probs,anchors,threshold=0.85)
probs.shape
from matplotlib import pyplot as plt
plt.rcParams['figure.figsize'] = [10, 10]
# Channel 1 is the foreground probability plane.
plt.imshow(probs[1,:,:], interpolation='nearest')
plt.colorbar()
plt.show()
# +
import csv
import io
import os
import sys
import mercantile
import concurrent.futures as futures
import numpy as np
from tqdm import tqdm
from PIL import Image
from common.colors import make_palette
def get_masks_from_probs(probs_path, pred_masks_path, weights=None, probs_threshold=None, num_workers=4):
    """Threaded mask generation by soft-voting probability tiles.

    Args:
        probs_path: list of directories with probability PNGs named
            "z_x_y.png" (slippy-map style).
        pred_masks_path: directory where the masks will be saved.
        weights: array-like for weighting each directory's probabilities.
        probs_threshold: optional foreground cut-off passed to probs_load.
        num_workers: size of the thread pool used to process tiles.
    """
    if weights and len(probs_path) != len(weights):
        sys.exit("Error: number of probs directories and weights must be the same")
    os.makedirs(pred_masks_path, exist_ok=True)
    tilesets = map(tiles_from_directory, probs_path)
    list_tilesets = list(zip(*tilesets))
    with futures.ThreadPoolExecutor(num_workers) as executor:
        def worker(tileset):
            # Quantization anchors: map byte values [0,255] back to [0,1].
            anchors = np.linspace(0, 1, 256)
            try:
                tiles = [tile for tile, _ in tileset]
                paths = [path for _, path in tileset]
            except OSError:
                # BUG FIX: the original returned `tiles` here, which may be
                # unbound if the first comprehension raised.
                return tileset, False
            # BUG FIX: in-sync tilesets collapse to ONE unique tile; the
            # original `assert len(set(tiles))` was always true, so
            # out-of-sync tilesets were processed silently.  Route the
            # mismatch through the (tiles, False) warning path instead.
            if len(set(tiles)) != 1:
                return tiles, False
            x, y, z = tiles[0]
            probs = [probs_load(path, anchors, probs_threshold) for path in paths]
            mask = softvote(probs, axis=0, weights=weights)
            mask = mask.astype(np.uint8)
            palette = make_palette("denim", "orange")
            out = Image.fromarray(mask, mode="P")
            out.putpalette(palette)
            path = os.path.join(pred_masks_path, "{}_{}_{}.{}".format(z, x, y, "png"))
            out.save(path, optimize=True)
            return tiles, True

        for tiles, ok in executor.map(worker, list_tilesets):
            if not ok:
                print("Warning: {} tilesets not in sync".format(tiles), file=sys.stderr)
def tiles_from_directory(dir_path):
    """Yield tiles from a directory of files named "z_x_y.png" (slippy-map style).

    Args:
        dir_path: directory containing files whose names follow the
            "z_x_y.*" slippy-map convention.

    Yields:
        (mercantile.Tile, path) pairs for every well-formed file name.
    """

    def isdigit(v):
        try:
            _ = int(v)  # noqa: F841
            return True
        except ValueError:
            return False

    for name in os.listdir(dir_path):
        tile_name = os.path.splitext(name)[0].split("_")
        # BUG FIX: only the first component was validated, so a name like
        # "19_ab_7.png" crashed on int(); require all three z, x, y parts
        # to be numeric.
        if len(tile_name) < 3 or not all(isdigit(part) for part in tile_name[:3]):
            continue
        z = int(tile_name[0])
        x = int(tile_name[1])
        y = int(tile_name[2])
        tile = mercantile.Tile(x=x, y=y, z=z)
        path = os.path.join(dir_path, "{}_{}_{}.{}".format(z, x, y, "png"))
        yield tile, path
def softvote(probs, axis=0, weights=None):
    """Reduce class probabilities to class indices by soft voting.

    The probabilities are first combined by a (optionally weighted)
    average, then the index of the largest averaged probability along
    `axis` is returned.

    Args:
        probs: array-like probabilities to average.
        axis: axis or axes along which to soft-vote.
        weights: array-like for weighting probabilities.

    Notes:
        See http://scikit-learn.org/stable/modules/ensemble.html#weighted-average-probabilities-soft-voting
    """
    combined = np.average(probs, weights=weights, axis=axis)
    return np.argmax(combined, axis=axis)
def probs_load(path,anchors, threshold = None):
    """Load a quantized probability PNG as [background, foreground] planes.

    Args:
        path: probability PNG produced by the prediction step.
        anchors: 256-entry lookup mapping byte values to [0, 1].
        threshold: if given, foreground probabilities below it are zeroed
            (their background probability becomes 1 accordingly).

    Returns:
        (2, H, W) float array: [background, foreground].
    """
    # Note: assumes binary case and probability sums up to one.
    quantized = np.array(Image.open(path).convert("P"))
    # (512, 512, 1) -> (1, 512, 512)
    foreground = np.rollaxis(np.expand_dims(anchors[quantized], axis=0), axis=0)
    if threshold:
        # Suppress weak foreground detections below the cut-off.
        foreground[foreground < threshold] = 0
    background = np.rollaxis(1. - foreground, axis=0)
    # (1, 512, 512) + (1, 512, 512) -> (2, 512, 512)
    return np.concatenate((background, foreground), axis=0)
# -
# Final run: single directory, 0.85 foreground threshold, 4 worker threads.
probs_path = ['/Users/stouzani/Desktop/Unstructured_ML/Cities_projects/SF_test/prediction/diecewce/probs/']
masks_path = '/Users/stouzani/Desktop/Unstructured_ML/Cities_projects/SF_test/prediction/diecewce/masks_probs8'
get_masks_from_probs(probs_path, masks_path, probs_threshold=0.85, num_workers=4)
|
.ipynb_checkpoints/mask_from_probs-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="7nyLNypHwp-S"
#
# *LSTM Basics*
#
# https://youtu.be/gjb68a4XsqE
#
# https://youtu.be/ycwthhdx8ws
#
# - Simple RNNs are not always enough when working with text data.
# - Longer sequences, such as a paragraph, often are difficult to handle, as the simple RNN structure loses information about previous inputs fairly quickly.
# - Long Short-Term Memory models, or LSTMs, help resolve this by keeping a “cell state” across time. These include a “forget gate”, where the cell can choose whether to keep or forget certain words to carry forward in the sequence.
# - Another interesting aspect of LSTMs is that they can be bidirectional, meaning that information can be passed both forward (later in the text sequence) and backward (earlier in the text sequence).
# https://video.udacity-data.com/topher/2020/March/5e6fbd24_lstm-forget-gates/lstm-forget-gates.png
# + id="L62G7LTwNzoD"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras.preprocessing.sequence import pad_sequences
# + colab={"base_uri": "https://localhost:8080/"} id="nCOtiRJZbxCH" outputId="a8a4b832-ad6d-488f-8dcf-fa2ebee2ca8d"
# !wget --no-check-certificate \
# https://drive.google.com/uc?id=13ySLC_ue6Umt9RJYSeM2t-V0kCv-4C-P -O /tmp/sentiment.csv
# + colab={"base_uri": "https://localhost:8080/", "height": 195} id="XuqER_KMD-xS" outputId="228c00d9-4b5d-43bf-83f1-095e4ec573c8"
# Load the sentiment CSV downloaded above and pull out the raw text and
# integer sentiment labels as Python lists.
dataset = pd.read_csv('/tmp/sentiment.csv')
dataset.head()
# + id="v1BS-mULI2Vz"
# Extract out sentences and labels
sentences = dataset['text'].tolist()
labels = dataset['sentiment'].tolist()
# + colab={"base_uri": "https://localhost:8080/"} id="Tbsx1T2CXPNO" outputId="42919452-c343-43aa-f882-a883e1080287"
# Print some example sentences and labels
# Show the first two (sentence, label) pairs as a sanity check.
for x in range(2):
    print(sentences[x])
    print(labels[x])
    print("\n")
# + colab={"base_uri": "https://localhost:8080/"} id="6NaicNCcLYyf" outputId="2d326c0d-d136-4775-d1c9-be3189c3286c"
# making a bag of words here with numbers assigned
import tensorflow_datasets as tfds
# Build a subword tokenizer over the corpus with a target vocabulary of
# 1000 and subwords of at most 5 characters.
vocab_size = 1000
tokenizer = tfds.deprecated.text.SubwordTextEncoder.build_from_corpus(sentences,
                                                                      vocab_size,
                                                                      max_subword_length=5)
# How big is the vocab size?
print("Vocab size is ", tokenizer.vocab_size)
# + colab={"base_uri": "https://localhost:8080/"} id="xvRVoeIVLevh" outputId="3a36015e-53ad-4488-8123-283a12f0a060"
# Check that the tokenizer works appropriately
# Round-trip one sentence through the tokenizer to verify encode/decode.
num = 5
print(sentences[num])
encoded = tokenizer.encode(sentences[num])
print(encoded) # shows the code of the words
# + colab={"base_uri": "https://localhost:8080/"} id="G_vacTCifklV" outputId="65b67829-47c6-4a5b-b21e-da66797c0228"
# Separately print out each subword, decoded
for i in encoded:
    print(tokenizer.decode([i])) # now decode the number to text
# + id="lkseMhxjL09F"
# For all sentences, replace sentence data with encoded subwords
# Replace each raw string IN PLACE with its list of subword ids; after
# this loop `sentences` no longer contains text.
for i, sentence in enumerate(sentences):
    sentences[i] = tokenizer.encode(sentence)
# + colab={"base_uri": "https://localhost:8080/"} id="y21yRuzmL43U" outputId="5a8e9a49-1444-440d-da8a-c1d75e5f012e"
# Check the sentences are appropriately replaced
print(sentences[5])
# + id="50-hTsogLSL-"
# Final pre-processing
max_length = 50
trunc_type='post'
padding_type='post'
# Pad all sequences
sequences_padded = pad_sequences(sentences, maxlen=max_length,
                                 padding=padding_type, truncating=trunc_type)
# Separate out the sentences and labels into training and test sets
# NOTE(review): the 80/20 split is sequential (no shuffling) — fine only
# if the rows are not ordered by label or time.
training_size = int(len(sentences) * 0.8)
training_sequences = sequences_padded[0:training_size]
testing_sequences = sequences_padded[training_size:]
training_labels = labels[0:training_size]
testing_labels = labels[training_size:]
# Make labels into numpy arrays for use with the network later
training_labels_final = np.array(training_labels)
testing_labels_final = np.array(testing_labels)
# + colab={"base_uri": "https://localhost:8080/"} id="c_nyQeI0RCCv" outputId="9459f1e8-ee80-4dbb-aa20-323ebdd2dbeb"
# Create the model using an Embedding
# 16-dimensional embedding, mean-pooled over the sequence, followed by a
# small dense head with a sigmoid output for binary sentiment.
embedding_dim = 16
model = tf.keras.Sequential([
    tf.keras.layers.Input(shape=(max_length,)),
    tf.keras.layers.Embedding(vocab_size, embedding_dim), # vocab size x embedding dimensions
    # tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),
    tf.keras.layers.GlobalAveragePooling1D(),
    tf.keras.layers.Dense(6, activation='relu'),
    tf.keras.layers.Dense(1, activation='sigmoid')
])
model.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])
model.summary()
# + colab={"base_uri": "https://localhost:8080/"} id="oBKyVYvxRQ_9" outputId="77428a5e-0aa1-4eb9-bb1b-09a25d8b7c9f"
# Train for 30 epochs, validating on the held-out 20% each epoch.
num_epochs = 30
history = model.fit(training_sequences,
                    training_labels_final,
                    epochs=num_epochs,
                    validation_data=(testing_sequences, testing_labels_final))
# + colab={"base_uri": "https://localhost:8080/", "height": 541} id="jzBM1PpJAYfD" outputId="8c3845cc-db39-4b07-f569-28ecebdef5ce"
def plot_graphs(history, string):
    """Plot a training metric and its validation counterpart over epochs."""
    train_values = history.history[string]
    val_values = history.history['val_' + string]
    plt.plot(train_values)
    plt.plot(val_values)
    plt.xlabel("Epochs")
    plt.ylabel(string)
    plt.legend([string, 'val_' + string])
    plt.show()
# Visualize train vs. validation accuracy and loss curves.
plot_graphs(history, "accuracy")
plot_graphs(history, "loss")
# + id="aPNOYiiaha2y"
# Define a function to take a series of reviews
# and predict whether each one is a positive or negative review
# max_length = 100 # previously defined
def predict_review(model, new_sentences, maxlen=max_length, show_padded_sequence=True):
    """Encode, pad and classify a list of review strings.

    Args:
        model: a compiled Keras model whose input is a padded token sequence.
        new_sentences: iterable of raw review strings.
        maxlen: padding/truncation length for the encoded reviews.
        show_padded_sequence: when True, also print each padded sequence.

    Prints, for each review: (optionally) its padded sequence, the raw text,
    and the model's score — the closer to 1, the more positive the review.
    """
    # Encode each raw review with the subword tokenizer.
    new_sequences = [tokenizer.encode(review) for review in new_sentences]
    trunc_type = 'post'
    padding_type = 'post'
    # BUG FIX: the original ignored the `maxlen` parameter and always padded
    # with the global `max_length`; the parameter is honored now (its default
    # is max_length, so existing callers see identical behavior).
    new_reviews_padded = pad_sequences(new_sequences, maxlen=maxlen,
                                       padding=padding_type, truncating=trunc_type)
    classes = model.predict(new_reviews_padded)
    for padded, sentence, score in zip(new_reviews_padded, new_sentences, classes):
        if show_padded_sequence:
            print(padded)
        print(sentence)
        print(score)
        print("\n")
# + colab={"base_uri": "https://localhost:8080/"} id="Qg-maex27KPW" outputId="4021a90f-8144-47b1-fd4e-cfa9a0336576"
# Use the model to predict some reviews
# Sample reviews: a mix of clearly positive, clearly negative, and
# neutral/nonsense sentences to sanity-check the classifier's outputs.
fake_reviews = ["I love this phone",
                "Everything was cold",
                "Everything was hot exactly as I wanted",
                "Everything was green",
                "the host seated us immediately",
                "they gave us free chocolate cake",
                "we couldn't hear each other talk because of the shouting in the kitchen"
                ]
predict_review(model, fake_reviews)
# + id="PevUcINXK3gn"
# A function that will take the model, compile it, train it, graph the accuracy and loss, and then predict some results
def fit_model_now(model, sentences):
    """Compile and train `model` on the prepared training split.

    `sentences` is accepted for interface parity with the helpers below but
    is not used during training. Returns the Keras History object.
    """
    model.compile(loss='binary_crossentropy', optimizer='adam',
                  metrics=['accuracy'])
    model.summary()
    return model.fit(training_sequences, training_labels_final,
                     epochs=num_epochs,
                     validation_data=(testing_sequences, testing_labels_final))
def plot_results(history):
    """Plot training/validation accuracy and loss curves from a History."""
    for metric in ("accuracy", "loss"):
        plot_graphs(history, metric)
def fit_model_and_show_results(model, sentences):
    """Train `model`, plot its learning curves, then print predictions for `sentences`."""
    training_history = fit_model_now(model, sentences)
    plot_results(training_history)
    predict_review(model, sentences)
# + [markdown] id="KvqWx_b60Hl7"
# *Add a bidirectional LSTM*
# Create a new model that uses a bidirectional LSTM.
#
# Then use the function we have already defined to compile the model, train it, graph the accuracy and loss, then predict some results.
#
# The code for an LSTM layer itself is just the LSTM layer from tf.keras.layers, with the number of LSTM cells to use. However, this is typically wrapped within a Bidirectional layer to make use of passing information both forward and backward in the network, as we noted on the previous page.
#
# *A bidirectional LSTM layer with 64 nodes*
# tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64))
#
# One thing to note when using a Bidirectional layer is when you look at the model summary, if you put in 64 LSTM nodes, you will actually see a layer shape with 128 nodes (64x2).
# No Need to Flatten
#
# Unlike our more vanilla neural networks in the last lesson, you no longer need to use Flatten or GlobalAveragePooling1D after the LSTM layer - the LSTM can take the output of an Embedding layer and directly hook up to a fully-connected Dense layer with its own output.
# Doubling Up
#
# You can also feed an LSTM layer into another LSTM layer. To do so, on top of just stacking them in order when you create the model, you also need to set return_sequences to True for the earlier LSTM layer - otherwise, as noted above, the output will be ready for fully-connected layers and not be in the sequence format the LSTM layer expects.
#
# Two bidirectional LSTM layers with 64 nodes each
# tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64, return_sequences=True))
# tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64))
#
#
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="scTUsFPAG4zP" outputId="c29be2a9-e19a-4bcd-9eea-f835f6c3d11b"
# Variant 1: embedding -> single bidirectional LSTM -> dense classifier.
model_bidi_lstm = tf.keras.Sequential()
model_bidi_lstm.add(
    tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length))
model_bidi_lstm.add(
    tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(embedding_dim)))
model_bidi_lstm.add(tf.keras.layers.Dense(6, activation='relu'))
model_bidi_lstm.add(tf.keras.layers.Dense(1, activation='sigmoid'))
# Compile, train, plot curves, and predict on the sample reviews.
fit_model_and_show_results(model_bidi_lstm, fake_reviews)
# + [markdown] id="QsxKPbCnPJTj"
# Use multiple bidirectional layers
# - Now let's see if we get any improvements from adding another Bidirectional LSTM layer to the model.
# - Notice that the first Bidirectional LSTM layer returns a sequence.
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="3N6Zul47PMED" outputId="197131ae-8e59-4391-b475-794999633e29"
# Variant 2: two stacked bidirectional LSTMs. The first must return the full
# sequence so the second LSTM receives per-timestep inputs.
model_multiple_bidi_lstm = tf.keras.Sequential()
model_multiple_bidi_lstm.add(
    tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length))
model_multiple_bidi_lstm.add(
    tf.keras.layers.Bidirectional(
        tf.keras.layers.LSTM(embedding_dim, return_sequences=True)))
model_multiple_bidi_lstm.add(
    tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(embedding_dim)))
model_multiple_bidi_lstm.add(tf.keras.layers.Dense(6, activation='relu'))
model_multiple_bidi_lstm.add(tf.keras.layers.Dense(1, activation='sigmoid'))
fit_model_and_show_results(model_multiple_bidi_lstm, fake_reviews)
# + [markdown] id="ABVYYPwba8Hx"
# Compare predictions for all the models
#
# Predictions that each of the three models gives for each review:
#
# * Embeddings and a Global Average Pooling layer
# * Embeddings and a Bidirectional LSTM layer
# * Embeddings and two Bidirectional LSTM layers
#
# The results are not always what you might expect. The input dataset is fairly small, it has less than 2000 reviews. Some of the reviews are fairly short, and some of the short ones are fairly repetitive which reduces their impact on improving the model, such as these two reviews:
#
# * Bad Quality.
# * Low Quality.
#
# Feel free to add more reviews of your own, or change the reviews. The results will depend on the combination of words in the reviews, and how well they match to reviews in the training set.
#
# How do the different models handle things like "wasn't good" which contains a positive word (good) but is a poor review?
#
# + id="6XebrXt0jtOy"
# A second, harder review set: very short reviews, negations ("was not
# exactly"), and long sentences whose sentiment hinges on context.
my_reviews =["lovely", "dreadful", "stay away",
             "everything was hot exactly as I wanted",
             "everything was not exactly as I wanted",
             "they gave us free chocolate cake",
             "I've never eaten anything so spicy in my life, my throat burned for hours",
             "for a phone that is as expensive as this one I expect it to be much easier to use than this thing is",
             "we left there very full for a low price so I'd say you just can't go wrong at this place",
             "that place does not have quality meals and it isn't a good place to go for dinner",
             ]
# + colab={"base_uri": "https://localhost:8080/"} id="tRWGjkJLkY2y" outputId="38e28275-f183-4541-c26b-f1c8757b6e88"
# Print each model's predictions for the same review set, under a banner.
banner = "==================================="
models_to_compare = (
    ("Embeddings only:", model),
    ("With a single bidirectional LSTM:", model_bidi_lstm),
    ("With two bidirectional LSTMs:", model_multiple_bidi_lstm),
)
for title, candidate in models_to_compare:
    print(banner + "\n", title + "\n", banner)
    predict_review(candidate, my_reviews, show_padded_sequence=False)
|
3. NLP/AZ/Text Classification/02_LSTM/01_nlp_lstms_with_reviews_subwords_dataset.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/franklinperseuDS/Ocean_Backend_Cloud_29_10_2020/blob/main/trabalhoEstatistica.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="iqydn_S4pqsc" outputId="ba8ff738-4199-4b56-a733-53fd83f14325" colab={"base_uri": "https://localhost:8080/"}
# Pacotes necessários
import pandas as pd
import numpy as np
import statistics as st
import seaborn as se
import statsmodels.stats.proportion as smp
from scipy import stats
# + id="82F7B9BiqH-_" outputId="caf46224-d064-413c-eb94-c4aa7c41b37f" colab={"base_uri": "https://localhost:8080/"}
# Mount Google Drive so the shared-drive spreadsheets below are reachable
# under /content/drive (Colab-only; prompts for authorization).
from google.colab import drive
drive.mount('/content/drive')
# + [markdown] id="KWmtLulk4xso"
# #DataFrames
# + id="EE3KvgF1sd94"
# Load the ENADE 2016 micro-data (one row per student); semicolon-separated
# with comma as the decimal mark.
dfMicroDados = pd.read_csv('/content/drive/Shared drives/Trabalho Estátistica/microdados_enade_2016.csv',sep=';',decimal=',')
# + id="pgUTDeY226aW"
# Load the ENADE 2016 per-course concept (score) spreadsheet.
dfConceito = pd.read_excel('/content/drive/Shared drives/Trabalho Estátistica/conceito_enade2016.xlsx')
# + id="HrGTn4HeimO1" outputId="2907efcf-d4ce-4d1a-f2a2-27b99c8aabee" colab={"base_uri": "https://localhost:8080/", "height": 163}
# Inspect (rows, columns) of the micro-data.
dfMicroDados.shape
# + id="p6S8HmC9s1Rj" outputId="a40f7e3a-cf61-4d2e-d7b8-8252a0c5ba6e" colab={"base_uri": "https://localhost:8080/"}
# List the concept frame's column names.
dfConceito.columns
# + [markdown] id="sO_24gA_9gfb"
# #aqui é a questão 2
# + id="ypH7qv-86g_h"
# Rename the accented column to a query-friendly identifier.
dfConceito = dfConceito.rename(columns={'Código do Curso': 'CodCurso'})
# + id="CsLKaaxhszbI" outputId="3ad1fbb3-3ddc-42b5-aa43-02859f6321c4" colab={"base_uri": "https://localhost:8080/"}
#dfMedicina.query('CO_REGIAO_CURSO == 1')['CO_UF_CURSO'],['NT_GER']
# Count teaching modalities for course code 12 — check whether any exist.
dfConceito.query('CodCurso == 12')['Modalidade de Ensino'].value_counts() # check whether any exist
# + id="kp_LmBjhtMds"
# BUG FIX: the original read `df.loc[df['CO_GRUPO'] == 12]`, but no `df` is
# ever defined in this notebook — the micro-data frame is `dfMicroDados`.
# Keep only the Medicina rows (course group code 12).
dfMedicina = dfMicroDados.loc[dfMicroDados['CO_GRUPO'] == 12]
# + id="IqTIMs0Wtwcc" outputId="21f8705c-c346-41ec-9118-13645e5a05ed" colab={"base_uri": "https://localhost:8080/"}
# Peek at a slice of the column names.
dfMedicina.columns[30:40]
# + [markdown] id="sl7NFLvuesFE"
# CO_REGIAO_CURSO
# 1 = Região Norte (NO)
# 2 = Região Nordeste (NE)
# 3 = Região Sudeste (SE)
# 4 = Região Sul (SUL)
# 5 = Região Centro-Oeste (CO)
# + id="IuVKrVQYgxI9"
# BUG FIX: the original filtered with the undefined name `df`; the boolean
# mask must come from dfMedicina itself. Select North-region courses (code 1).
regiaoNorte = dfMedicina.loc[dfMedicina["CO_REGIAO_CURSO"] == 1]
# + id="oyc7DbONlbLX" outputId="dc8233d5-ad0f-4f29-c5b1-d07f159da7e2" colab={"base_uri": "https://localhost:8080/", "height": 439}
# Display the filtered frame.
regiaoNorte
# + id="6F_gCvA2wQeN"
# NOTE(review): the trailing ",['NT_GER']" makes this a *tuple*
# (Series of CO_UF_CURSO values, the literal list ['NT_GER']) — presumably
# [['CO_UF_CURSO', 'NT_GER']] was intended; confirm before relying on it.
regiaoNorte = dfMedicina.query('CO_REGIAO_CURSO == 1')['CO_UF_CURSO'],['NT_GER']
# + id="i6NS5C6Jv4Qn" outputId="ec618e83-b704-43ab-d9a2-b538a7be6a50" colab={"base_uri": "https://localhost:8080/", "height": 163}
# NOTE(review): regiaoNorte is a tuple at this point, so .groupby raises;
# also the plotting method is only referenced (plot.bar), never called.
regiaoNorte.groupby(['CO_UF_CURSO']).plot.bar
# + id="zddNLFNVmjLI" outputId="4be959dd-0ff7-4091-83d9-c0bd4e05a92a" colab={"base_uri": "https://localhost:8080/"}
# NOTE(review): RNG is never defined anywhere in this notebook — this cell fails.
print(RNG)
# + id="ihtM_L4pvuRI" outputId="48646984-9498-4fa0-cc5d-b012ef07b6e9" colab={"base_uri": "https://localhost:8080/", "height": 298}
# Bar chart of the first 30 general scores (valid only when regiaoNorte is a DataFrame).
regiaoNorte['NT_GER'][:30].plot.bar(color = 'gray')
# + id="_v75GpoBlX9j" outputId="c0616a0c-c88e-47f1-ec7d-3292fe0713df" colab={"base_uri": "https://localhost:8080/"}
# NOTE(review): `a` is assigned only two cells below (a = 15); in linear
# execution this cell raises NameError.
a
# + id="3ED_pkCw9AIV"
# + id="vbBs_QtJ8_2r"
a = 15
|
trabalhoEstatistica.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: PaddlePaddle 1.7.1 (Python 3.5)
# language: python
# name: py35-paddle1.2.0
# ---
# ## 解压数据集
# +
# # !cd /home/aistudio/data/data29604 && unzip -q imgs.zip
# +
# 查看当前挂载的数据集目录, 该目录下的变更重启环境后会自动还原
# View dataset directory. This directory will be recovered automatically after resetting environment.
# # !ls /home/aistudio/data
# -
# ## 读取标签,获得类别,创建空目录以便下一步移动数据
# +
# Use drivers 'p081' and 'p075' as the validation set; their images will be
# moved into the eval_set directory by the next cell.
import os
import pandas as pd
import numpy as np
import shutil
root_dir = "/home/aistudio/"
data_dir = "/home/aistudio/data/data29604/"
driver_imgs_list_csv = os.path.join(root_dir, "work/driver_imgs_list.csv")
# Create the flat destination directories (per-class subfolders left disabled).
if not os.path.exists(root_dir + "data/eval_set"):
    os.mkdir(root_dir + "data/eval_set")
# for i in range(10):
#     os.mkdir(root_dir + "data/eval_set/c%d"%i)
if not os.path.exists(root_dir + "data/train_set"):
    os.mkdir(root_dir + "data/train_set")
# for i in range(10):
#     os.mkdir(root_dir + "data/train_set/c%d"%i)
# Read the image manifest: one row per image with its driver and class name.
datafile = pd.read_csv(driver_imgs_list_csv)
drivers = datafile.subject.values
all_class_list = datafile.classname.values
# Deduplicate class names while preserving first-seen order.
# NOTE(review): the loop variable `id` shadows the builtin of the same name.
class_list = []
for id in all_class_list:
    if id not in class_list:
        class_list.append(id)
class_list = class_list[0:10]
print(class_list)
print(drivers)
# Every driver not in the validation pair becomes a training driver.
valid_subjects = ['p081','p075']
train_subjects = list(set(drivers).difference(set(valid_subjects)))
# -
# ## 将训练数据和测试数据复制到train_set and eval_set 目录内,并获取相应的标签txt文件
# +
import codecs  # codecs handles explicit text encodings for the list files
import os
import random
import shutil
from PIL import Image

all_file_dir = "/home/aistudio/data/"
eval_image_dir = "/home/aistudio/data/eval_set/"
train_image_dir = "/home/aistudio/data/train_set/"

# List files later consumed by the PaddleHub dataset reader.
train_file = codecs.open(os.path.join(all_file_dir, "train_list.txt"), 'w')
eval_file = codecs.open(os.path.join(all_file_dir, "validate_list.txt"), 'w')
label_file = codecs.open(os.path.join(all_file_dir, "label_list.txt"), 'w')

# One "<index> <label>" line per class, e.g. "0 c0".
for label in class_list:
    label_file.write("{0} {1}\n".format(label[1], label))


def _copy_subject_images(subjects, dest_image_dir, list_file):
    """Copy every image of the given driver `subjects` into `dest_image_dir`
    and append "<copied-path> <class-index>" lines to `list_file`.

    Missing source files are reported rather than raising, matching the
    original best-effort behavior.
    """
    for subject in subjects:
        rows = datafile[(datafile["subject"] == subject)]
        for _, row in rows.iterrows():
            class_subpath = row["classname"] + "/" + row["img"]
            src = os.path.join(data_dir, "train", class_subpath)
            if os.path.exists(src):
                dest = os.path.join(dest_image_dir, row["img"])
                shutil.copy(src, dest)
                # Class index is the digit in the class name, e.g. "c3" -> 3.
                final_class = int(row["classname"][1])
                list_file.write("{0} {1}\n".format(dest, final_class))
            else:
                print("cannot copy {} : {}".format(row["subject"], class_subpath))


# Validation drivers go to eval_set, all remaining drivers to train_set.
# (The two previously duplicated loops are now one helper.)
_copy_subject_images(valid_subjects, eval_image_dir, eval_file)
_copy_subject_images(train_subjects, train_image_dir, train_file)

train_file.close()
eval_file.close()
# BUG FIX: label_file was previously never closed, so label_list.txt could be
# left partially flushed.
label_file.close()
# +
import paddlehub as hub
# Log writer for storing training results (left disabled).
# log_writer = LogWriter("./log/vgg16")
# Load the pretrained VGG16 ImageNet module; alternatives kept for reference.
module= hub.Module(name="vgg16_imagenet")
# module = hub.Module(name="vgg19_imagenet")
# module = hub.Module(name="resnet_v2_50_imagenet")
# module =hub.Module(name="mobilenet_v2_imagenet")
# Fetch the module's input/output tensors and program, with weights trainable.
input_dict, output_dict, program = module.context(trainable=True)
# +
from paddlehub.dataset.base_cv_dataset import BaseCVDataset
class DriverDataset(BaseCVDataset):
    """Image-classification dataset pointing PaddleHub at the train/validation
    list files generated by the copy step above (classes c0..c9)."""
    def __init__(self):
        # Dataset root directory containing the generated list files.
        self.dataset_dir = "/home/aistudio/data"
        super(DriverDataset, self).__init__(
            base_path=self.dataset_dir,
            train_list_file="train_list.txt",
            validate_list_file="validate_list.txt",
            # test_list_file="test_list.txt",
            # predict_file="predict_list.txt",
            # label_list_file="label_list.txt",
            label_list=["c0","c1","c2","c3","c4","c5","c6","c7","c8","c9"]
            )
dataset = DriverDataset()
# +
# Fine-tuning strategy: Adam optimizer, learning rate 1e-2, regularization
# coefficient 0.1.
strategy = hub.DefaultFinetuneStrategy(
    learning_rate=1e-2,
    optimizer_name="adam",
    regularization_coeff=0.1)
# Run on GPU with data parallelism: 20 epochs, batch size 32.
config = hub.RunConfig(use_cuda=True, use_data_parallel=True, num_epoch=20, batch_size=32, strategy=strategy)
# -
# Reader that resizes/normalizes images to the pretrained module's expected
# input size and statistics.
data_reader = hub.reader.ImageClassificationReader(
    image_width=module.get_expected_image_width(),
    image_height=module.get_expected_image_height(),
    images_mean=module.get_pretrained_images_mean(),
    images_std=module.get_pretrained_images_std(),
    dataset=dataset)
# +
import logging
import os


def init_log_config():
    """Configure the root logger: INFO level, console handler, and a
    DEBUG-level file handler writing to ./logs/train.log (truncated)."""
    global logger
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    log_dir = os.path.join(os.getcwd(), 'logs')
    os.makedirs(log_dir, exist_ok=True)
    log_file = os.path.join(log_dir, 'train.log')
    console_handler = logging.StreamHandler()
    file_handler = logging.FileHandler(log_file, mode='w')
    file_handler.setLevel(logging.DEBUG)
    message_only = logging.Formatter("%(message)s")
    for handler in (console_handler, file_handler):
        handler.setFormatter(message_only)
        logger.addHandler(handler)


init_log_config()
# +
# NOTE(review): this import rebinds `logger`, replacing the root logger
# configured by init_log_config above — confirm that is intentional.
from paddlehub.common.logger import logger
from paddlehub.finetune.checkpoint import load_checkpoint, save_checkpoint
# Graph endpoints from the pretrained module: feature-map output and image input.
feature_map = output_dict["feature_map"]
feed_list = [input_dict["image"].name]
from tb_paddle import SummaryWriter
# TensorBoard-style writer for the training curves.
tb_writer = SummaryWriter("./logs/vgg19")
class MyClassifierTask(hub.ImageClassifierTask):
    """ImageClassifierTask that additionally logs loss/accuracy to tb_paddle."""
    def _build_env_start_event(self):
        # NOTE(review): overrides the base hook without calling super() —
        # confirm the parent implementation performs no required setup here.
        self.stepmy = 0
    def _log_interval_event(self, run_states):
        # Count logging intervals since task construction and emit metrics.
        self.stepmy = self.stepmy+1
        score, avg_loss, run_speed = self._calculate_metrics(run_states)
        avg_acc = score['acc']
        logger.info("{}\t{}\t{}\t{}".format(self.stepmy,avg_acc,avg_loss,run_speed))
        # Record scalars against the training step counter.
        tb_writer.add_scalar(
            tag="Loss_{}".format(self.phase),
            scalar_value=avg_loss,
            global_step=self._envs['train'].current_step)
        tb_writer.add_scalar(
            tag="Acc_{}".format(self.phase),
            scalar_value=avg_acc,
            global_step=self._envs['train'].current_step)
task = MyClassifierTask(
    data_reader=data_reader,
    feed_list=feed_list,
    feature=feature_map,
    num_classes=dataset.num_labels,
    config=config
    )
# Launch fine-tuning with periodic evaluation.
task.finetune_and_eval()
# -
#
# 查看工作区文件, 该目录下的变更将会持久保存. 请及时清理不必要的文件, 避免加载过慢.
# View personal work directory. All changes under this directory will be kept even after reset. Please clean unnecessary files in time to speed up environment loading.
# !ls /home/aistudio/work
# +
# # !cp /home/aistudio/work/ResNet50_pretrained.tar /home/aistudio/data/ResNet50_pretrained.tar
# # !cd data && tar -xf ResNet50_pretrained.tar
# -
# +
# 解压数据集
# import os
# import zipfile
# os.chdir('/home/aistudio/data/data29604')
# extracting = zipfile.ZipFile('imgs.zip')
# extracting.extractall()
# -
#
# # VisualDL
# +
# import paddle
# paddle.__version__
# +
# import numpy as np
# from PIL import Image
# from visualdl import LogWriter
# import os
# #确保路径为'/home/aistudio'
# os.chdir('/home/aistudio')
# #创建 LogWriter 对象,将图像数据存放在 `./log/train`路径下
# from visualdl import LogWriter
# log_writer = LogWriter("./log/train",sync_cycle=10)
# +
# # -*- coding: UTF-8 -*-
# """
# 训练常用视觉基础网络,用于分类任务
# 需要将训练图片,类别文件 label_list.txt 放置在同一个文件夹下
# 程序会先读取 train.txt 文件获取类别数和图片数量
# """
# from __future__ import absolute_import
# from __future__ import division
# from __future__ import print_function
# import os
# import numpy as np
# import time
# import math
# import paddle
# import paddle.fluid as fluid
# import codecs
# import logging
# from paddle.fluid.initializer import MSRA
# from paddle.fluid.initializer import Uniform
# from paddle.fluid.param_attr import ParamAttr
# from PIL import Image
# from PIL import ImageEnhance
# import paddlehub as hub
# #使用VisualDL观察Paddle和手写两种模型进行训练对比
# import paddle.fluid.dygraph as dygraph
# from visualdl import LogWriter
# train_parameters = {
# "input_size": [3, 224, 224],
# "class_dim": -1, # 分类数,会在初始化自定义 reader 的时候获得
# "image_count": -1, # 训练图片数量,会在初始化自定义 reader 的时候获得
# "label_dict": {},
# "data_dir": "data", # 训练数据存储地址
# "train_file_list": "train.txt",
# "label_file": "label_list.txt",
# "save_freeze_dir": "./freeze_model/resnet50",
# "save_persistable_dir": "./persistable_params/50",
# "continue_train": True, # 是否接着上一次保存的参数接着训练,优先级高于预训练模型
# "pretrained": False, # 是否使用预训练的模型,对于inceptionv4模型暂无预训练参数
# "pretrained_dir":"./data/ResNet50_pretrained",
# "mode": "train",
# "num_epochs": 100,
# "train_batch_size": 96,
# "mean_rgb": [127.5, 127.5, 127.5], # 常用图片的三通道均值,通常来说需要先对训练数据做统计,此处仅取中间值
# "use_gpu": True,
# "image_enhance_strategy": { # 图像增强相关策略
# "need_distort": True, # 是否启用图像颜色增强
# "need_rotate": True, # 是否需要增加随机角度
# "need_crop": True, # 是否要增加裁剪
# "need_flip": True, # 是否要增加水平随机翻转
# "hue_prob": 0.5,
# "hue_delta": 18,
# "contrast_prob": 0.5,
# "contrast_delta": 0.5,
# "saturation_prob": 0.5,
# "saturation_delta": 0.5,
# "brightness_prob": 0.5,
# "brightness_delta": 0.125
# },
# "early_stop": {
# "sample_frequency": 500,
# "successive_limit": 5,
# "good_acc1": 0.92
# },
# "rsm_strategy": {
# "learning_rate": 0.001,
# "lr_epochs": [20, 40, 60, 80, 100],
# "lr_decay": [1, 0.5, 0.25, 0.1, 0.01, 0.002]
# },
# "momentum_strategy": {
# "learning_rate": 0.002,
# "lr_epochs": [20, 40, 60, 80, 100],
# "lr_decay": [1, 0.5, 0.25, 0.1, 0.01, 0.002]
# },
# "sgd_strategy": {
# "learning_rate": 0.002,
# "lr_epochs": [20, 40, 60, 80, 100],
# "lr_decay": [1, 0.5, 0.25, 0.1, 0.01, 0.002]
# },
# "adam_strategy": {
# "learning_rate": 0.002
# }
# }
# # 定义残差神经网络(ResNet)
# class ResNet50():
# def __init__(self):
# pass
# def name(self):
# """
# 返回网络名字
# :return:
# """
# return 'ResNet50'
# def net(self, input, class_dim=1000):
# def conv_bn_layer(input, num_filters, filter_size, stride=1, groups=1, act=None, name=None):
# conv = fluid.layers.conv2d(input=input,
# num_filters=num_filters,
# filter_size=filter_size,
# stride=stride,
# padding=(filter_size - 1) // 2,
# groups=groups,
# act=None,
# param_attr=ParamAttr(name=name + "_weights"),
# bias_attr=False,
# name=name + '.conv2d.output.1')
# if name == "conv1":
# bn_name = "bn_" + name
# else:
# bn_name = "bn" + name[3:]
# return fluid.layers.batch_norm(input=conv,
# act=act,
# name=bn_name + '.output.1',
# param_attr=ParamAttr(name=bn_name + '_scale'),
# bias_attr=ParamAttr(bn_name + '_offset'),
# moving_mean_name=bn_name + '_mean',
# moving_variance_name=bn_name + '_variance', )
# def shortcut(input, ch_out, stride, name):
# ch_in = input.shape[1]
# if ch_in != ch_out or stride != 1:
# return conv_bn_layer(input, ch_out, 1, stride, name=name)
# else:
# return input
# def bottleneck_block(input, num_filters, stride, name):
# conv0 = conv_bn_layer(input=input,
# num_filters=num_filters,
# filter_size=1,
# act='relu',
# name=name + "_branch2a")
# conv1 = conv_bn_layer(input=conv0,
# num_filters=num_filters,
# filter_size=3,
# stride=stride,
# act='relu',
# name=name + "_branch2b")
# conv2 = conv_bn_layer(input=conv1,
# num_filters=num_filters * 4,
# filter_size=1,
# act=None,
# name=name + "_branch2c")
# short = shortcut(input, num_filters * 4, stride, name=name + "_branch1")
# return fluid.layers.elementwise_add(x=short, y=conv2, act='relu', name=name + ".add.output.5")
# depth = [3, 4, 6, 3]
# num_filters = [64, 128, 256, 512]
# conv = conv_bn_layer(input=input, num_filters=64, filter_size=7, stride=2, act='relu', name="conv1")
# conv = fluid.layers.pool2d(input=conv, pool_size=3, pool_stride=2, pool_padding=1, pool_type='max')
# for block in range(len(depth)):
# for i in range(depth[block]):
# conv_name = "res" + str(block + 2) + chr(97 + i)
# conv = bottleneck_block(input=conv,
# num_filters=num_filters[block],
# stride=2 if i == 0 and block != 0 else 1,
# name=conv_name)
# pool = fluid.layers.pool2d(input=conv, pool_size=7, pool_type='avg', global_pooling=True)
# # 停止梯度下降
# pool.stop_gradient = True
# # 这里再重新加载网络的分类器,大小为本项目的分类大小
# out = fluid.layers.fc(input=pool, size=class_dim, act='softmax')
# return out
# class InceptionV4():
# def __init__(self):
# pass
# def name(self):
# """
# 返回网络名字
# :return:
# """
# return 'InceptionV4'
# def net(self, input, class_dim=1000):
# x = self.inception_stem(input)
# for i in range(4):
# x = self.inceptionA(x, name=str(i + 1))
# x = self.reductionA(x)
# for i in range(7):
# x = self.inceptionB(x, name=str(i + 1))
# x = self.reductionB(x)
# for i in range(3):
# x = self.inceptionC(x, name=str(i + 1))
# pool = fluid.layers.pool2d(
# input=x, pool_size=8, pool_type='avg', global_pooling=True)
# drop = fluid.layers.dropout(x=pool, dropout_prob=0.2)
# stdv = 1.0 / math.sqrt(drop.shape[1] * 1.0)
# out = fluid.layers.fc(
# input=drop,
# size=class_dim,
# act='softmax',
# param_attr=ParamAttr(
# initializer=fluid.initializer.Uniform(-stdv, stdv),
# name="final_fc_weights"),
# bias_attr=ParamAttr(
# initializer=fluid.initializer.Uniform(-stdv, stdv),
# name="final_fc_offset"))
# return out
# def conv_bn_layer(self,
# data,
# num_filters,
# filter_size,
# stride=1,
# padding=0,
# groups=1,
# act='relu',
# name=None):
# conv = fluid.layers.conv2d(
# input=data,
# num_filters=num_filters,
# filter_size=filter_size,
# stride=stride,
# padding=padding,
# groups=groups,
# act=None,
# param_attr=ParamAttr(name=name + "_weights"),
# bias_attr=False,
# name=name)
# bn_name = name + "_bn"
# return fluid.layers.batch_norm(
# input=conv,
# act=act,
# name=bn_name,
# param_attr=ParamAttr(name=bn_name + "_scale"),
# bias_attr=ParamAttr(name=bn_name + "_offset"),
# moving_mean_name=bn_name + '_mean',
# moving_variance_name=bn_name + '_variance')
# def inception_stem(self, data, name=None):
# conv = self.conv_bn_layer(
# data, 32, 3, stride=2, act='relu', name="conv1_3x3_s2")
# conv = self.conv_bn_layer(conv, 32, 3, act='relu', name="conv2_3x3_s1")
# conv = self.conv_bn_layer(
# conv, 64, 3, padding=1, act='relu', name="conv3_3x3_s1")
# pool1 = fluid.layers.pool2d(
# input=conv, pool_size=3, pool_stride=2, pool_type='max')
# conv2 = self.conv_bn_layer(
# conv, 96, 3, stride=2, act='relu', name="inception_stem1_3x3_s2")
# concat = fluid.layers.concat([pool1, conv2], axis=1)
# conv1 = self.conv_bn_layer(
# concat, 64, 1, act='relu', name="inception_stem2_3x3_reduce")
# conv1 = self.conv_bn_layer(
# conv1, 96, 3, act='relu', name="inception_stem2_3x3")
# conv2 = self.conv_bn_layer(
# concat, 64, 1, act='relu', name="inception_stem2_1x7_reduce")
# conv2 = self.conv_bn_layer(
# conv2,
# 64, (7, 1),
# padding=(3, 0),
# act='relu',
# name="inception_stem2_1x7")
# conv2 = self.conv_bn_layer(
# conv2,
# 64, (1, 7),
# padding=(0, 3),
# act='relu',
# name="inception_stem2_7x1")
# conv2 = self.conv_bn_layer(
# conv2, 96, 3, act='relu', name="inception_stem2_3x3_2")
# concat = fluid.layers.concat([conv1, conv2], axis=1)
# conv1 = self.conv_bn_layer(
# concat, 192, 3, stride=2, act='relu', name="inception_stem3_3x3_s2")
# pool1 = fluid.layers.pool2d(
# input=concat, pool_size=3, pool_stride=2, pool_type='max')
# concat = fluid.layers.concat([conv1, pool1], axis=1)
# return concat
# def inceptionA(self, data, name=None):
# pool1 = fluid.layers.pool2d(
# input=data, pool_size=3, pool_padding=1, pool_type='avg')
# conv1 = self.conv_bn_layer(
# pool1, 96, 1, act='relu', name="inception_a" + name + "_1x1")
# conv2 = self.conv_bn_layer(
# data, 96, 1, act='relu', name="inception_a" + name + "_1x1_2")
# conv3 = self.conv_bn_layer(
# data, 64, 1, act='relu', name="inception_a" + name + "_3x3_reduce")
# conv3 = self.conv_bn_layer(
# conv3,
# 96,
# 3,
# padding=1,
# act='relu',
# name="inception_a" + name + "_3x3")
# conv4 = self.conv_bn_layer(
# data,
# 64,
# 1,
# act='relu',
# name="inception_a" + name + "_3x3_2_reduce")
# conv4 = self.conv_bn_layer(
# conv4,
# 96,
# 3,
# padding=1,
# act='relu',
# name="inception_a" + name + "_3x3_2")
# conv4 = self.conv_bn_layer(
# conv4,
# 96,
# 3,
# padding=1,
# act='relu',
# name="inception_a" + name + "_3x3_3")
# concat = fluid.layers.concat([conv1, conv2, conv3, conv4], axis=1)
# return concat
# def reductionA(self, data, name=None):
# pool1 = fluid.layers.pool2d(
# input=data, pool_size=3, pool_stride=2, pool_type='max')
# conv2 = self.conv_bn_layer(
# data, 384, 3, stride=2, act='relu', name="reduction_a_3x3")
# conv3 = self.conv_bn_layer(
# data, 192, 1, act='relu', name="reduction_a_3x3_2_reduce")
# conv3 = self.conv_bn_layer(
# conv3, 224, 3, padding=1, act='relu', name="reduction_a_3x3_2")
# conv3 = self.conv_bn_layer(
# conv3, 256, 3, stride=2, act='relu', name="reduction_a_3x3_3")
# concat = fluid.layers.concat([pool1, conv2, conv3], axis=1)
# return concat
# def inceptionB(self, data, name=None):
# pool1 = fluid.layers.pool2d(
# input=data, pool_size=3, pool_padding=1, pool_type='avg')
# conv1 = self.conv_bn_layer(
# pool1, 128, 1, act='relu', name="inception_b" + name + "_1x1")
# conv2 = self.conv_bn_layer(
# data, 384, 1, act='relu', name="inception_b" + name + "_1x1_2")
# conv3 = self.conv_bn_layer(
# data, 192, 1, act='relu', name="inception_b" + name + "_1x7_reduce")
# conv3 = self.conv_bn_layer(
# conv3,
# 224, (1, 7),
# padding=(0, 3),
# act='relu',
# name="inception_b" + name + "_1x7")
# conv3 = self.conv_bn_layer(
# conv3,
# 256, (7, 1),
# padding=(3, 0),
# act='relu',
# name="inception_b" + name + "_7x1")
# conv4 = self.conv_bn_layer(
# data,
# 192,
# 1,
# act='relu',
# name="inception_b" + name + "_7x1_2_reduce")
# conv4 = self.conv_bn_layer(
# conv4,
# 192, (1, 7),
# padding=(0, 3),
# act='relu',
# name="inception_b" + name + "_1x7_2")
# conv4 = self.conv_bn_layer(
# conv4,
# 224, (7, 1),
# padding=(3, 0),
# act='relu',
# name="inception_b" + name + "_7x1_2")
# conv4 = self.conv_bn_layer(
# conv4,
# 224, (1, 7),
# padding=(0, 3),
# act='relu',
# name="inception_b" + name + "_1x7_3")
# conv4 = self.conv_bn_layer(
# conv4,
# 256, (7, 1),
# padding=(3, 0),
# act='relu',
# name="inception_b" + name + "_7x1_3")
# concat = fluid.layers.concat([conv1, conv2, conv3, conv4], axis=1)
# return concat
# def reductionB(self, data, name=None):
# pool1 = fluid.layers.pool2d(
# input=data, pool_size=3, pool_stride=2, pool_type='max')
# conv2 = self.conv_bn_layer(
# data, 192, 1, act='relu', name="reduction_b_3x3_reduce")
# conv2 = self.conv_bn_layer(
# conv2, 192, 3, stride=2, act='relu', name="reduction_b_3x3")
# conv3 = self.conv_bn_layer(
# data, 256, 1, act='relu', name="reduction_b_1x7_reduce")
# conv3 = self.conv_bn_layer(
# conv3,
# 256, (1, 7),
# padding=(0, 3),
# act='relu',
# name="reduction_b_1x7")
# conv3 = self.conv_bn_layer(
# conv3,
# 320, (7, 1),
# padding=(3, 0),
# act='relu',
# name="reduction_b_7x1")
# conv3 = self.conv_bn_layer(
# conv3, 320, 3, stride=2, act='relu', name="reduction_b_3x3_2")
# concat = fluid.layers.concat([pool1, conv2, conv3], axis=1)
# return concat
# def inceptionC(self, data, name=None):
# pool1 = fluid.layers.pool2d(
# input=data, pool_size=3, pool_padding=1, pool_type='avg')
# conv1 = self.conv_bn_layer(
# pool1, 256, 1, act='relu', name="inception_c" + name + "_1x1")
# conv2 = self.conv_bn_layer(
# data, 256, 1, act='relu', name="inception_c" + name + "_1x1_2")
# conv3 = self.conv_bn_layer(
# data, 384, 1, act='relu', name="inception_c" + name + "_1x1_3")
# conv3_1 = self.conv_bn_layer(
# conv3,
# 256, (1, 3),
# padding=(0, 1),
# act='relu',
# name="inception_c" + name + "_1x3")
# conv3_2 = self.conv_bn_layer(
# conv3,
# 256, (3, 1),
# padding=(1, 0),
# act='relu',
# name="inception_c" + name + "_3x1")
# conv4 = self.conv_bn_layer(
# data, 384, 1, act='relu', name="inception_c" + name + "_1x1_4")
# conv4 = self.conv_bn_layer(
# conv4,
# 448, (1, 3),
# padding=(0, 1),
# act='relu',
# name="inception_c" + name + "_1x3_2")
# conv4 = self.conv_bn_layer(
# conv4,
# 512, (3, 1),
# padding=(1, 0),
# act='relu',
# name="inception_c" + name + "_3x1_2")
# conv4_1 = self.conv_bn_layer(
# conv4,
# 256, (1, 3),
# padding=(0, 1),
# act='relu',
# name="inception_c" + name + "_1x3_3")
# conv4_2 = self.conv_bn_layer(
# conv4,
# 256, (3, 1),
# padding=(1, 0),
# act='relu',
# name="inception_c" + name + "_3x1_3")
# concat = fluid.layers.concat(
# [conv1, conv2, conv3_1, conv3_2, conv4_1, conv4_2], axis=1)
# return concat
# def init_log_config():
# """
# 初始化日志相关配置
# :return:
# """
# global logger
# logger = logging.getLogger()
# logger.setLevel(logging.INFO)
# log_path = os.path.join(os.getcwd(), 'logs')
# if not os.path.exists(log_path):
# os.makedirs(log_path)
# log_name = os.path.join(log_path, 'train.log')
# sh = logging.StreamHandler()
# fh = logging.FileHandler(log_name, mode='w')
# fh.setLevel(logging.DEBUG)
# formatter = logging.Formatter("%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s")
# fh.setFormatter(formatter)
# sh.setFormatter(formatter)
# logger.addHandler(sh)
# logger.addHandler(fh)
# def init_train_parameters():
# """
# 初始化训练参数,主要是初始化图片数量,类别数
# :return:
# """
# train_file_list = os.path.join(train_parameters['data_dir'], train_parameters['train_file_list'])
# label_list = os.path.join(train_parameters['data_dir'], train_parameters['label_file'])
# index = 0
# with codecs.open(label_list, encoding='utf-8') as flist:
# lines = [line.strip() for line in flist]
# for line in lines:
# parts = line.strip().split()
# train_parameters['label_dict'][parts[1]] = int(parts[0])
# index += 1
# train_parameters['class_dim'] = index
# with codecs.open(train_file_list, encoding='utf-8') as flist:
# lines = [line.strip() for line in flist]
# train_parameters['image_count'] = len(lines)
# def resize_img(img, target_size):
# """
# 强制缩放图片
# :param img:
# :param target_size:
# :return:
# """
# target_size = input_size
# img = img.resize((target_size[1], target_size[2]), Image.BILINEAR)
# return img
# def random_crop(img, scale=[0.08, 1.0], ratio=[3. / 4., 4. / 3.]):
# aspect_ratio = math.sqrt(np.random.uniform(*ratio))
# w = 1. * aspect_ratio
# h = 1. / aspect_ratio
# bound = min((float(img.size[0]) / img.size[1]) / (w**2),
# (float(img.size[1]) / img.size[0]) / (h**2))
# scale_max = min(scale[1], bound)
# scale_min = min(scale[0], bound)
# target_area = img.size[0] * img.size[1] * np.random.uniform(scale_min,
# scale_max)
# target_size = math.sqrt(target_area)
# w = int(target_size * w)
# h = int(target_size * h)
# i = np.random.randint(0, img.size[0] - w + 1)
# j = np.random.randint(0, img.size[1] - h + 1)
# img = img.crop((i, j, i + w, j + h))
# img = img.resize((train_parameters['input_size'][1], train_parameters['input_size'][2]), Image.BILINEAR)
# return img
# def rotate_image(img):
# """
# 图像增强,增加随机旋转角度
# """
# angle = np.random.randint(-14, 15)
# img = img.rotate(angle)
# return img
# def random_brightness(img):
# """
# 图像增强,亮度调整
# :param img:
# :return:
# """
# prob = np.random.uniform(0, 1)
# if prob < train_parameters['image_enhance_strategy']['brightness_prob']:
# brightness_delta = train_parameters['image_enhance_strategy']['brightness_delta']
# delta = np.random.uniform(-brightness_delta, brightness_delta) + 1
# img = ImageEnhance.Brightness(img).enhance(delta)
# return img
# def random_contrast(img):
# """
# 图像增强,对比度调整
# :param img:
# :return:
# """
# prob = np.random.uniform(0, 1)
# if prob < train_parameters['image_enhance_strategy']['contrast_prob']:
# contrast_delta = train_parameters['image_enhance_strategy']['contrast_delta']
# delta = np.random.uniform(-contrast_delta, contrast_delta) + 1
# img = ImageEnhance.Contrast(img).enhance(delta)
# return img
# def random_saturation(img):
# """
# 图像增强,饱和度调整
# :param img:
# :return:
# """
# prob = np.random.uniform(0, 1)
# if prob < train_parameters['image_enhance_strategy']['saturation_prob']:
# saturation_delta = train_parameters['image_enhance_strategy']['saturation_delta']
# delta = np.random.uniform(-saturation_delta, saturation_delta) + 1
# img = ImageEnhance.Color(img).enhance(delta)
# return img
# def random_hue(img):
# """
# 图像增强,色度调整
# :param img:
# :return:
# """
# prob = np.random.uniform(0, 1)
# if prob < train_parameters['image_enhance_strategy']['hue_prob']:
# hue_delta = train_parameters['image_enhance_strategy']['hue_delta']
# delta = np.random.uniform(-hue_delta, hue_delta)
# img_hsv = np.array(img.convert('HSV'))
# img_hsv[:, :, 0] = img_hsv[:, :, 0] + delta
# img = Image.fromarray(img_hsv, mode='HSV').convert('RGB')
# return img
# def distort_color(img):
# """
# 概率的图像增强
# :param img:
# :return:
# """
# prob = np.random.uniform(0, 1)
# # Apply different distort order
# if prob < 0.35:
# img = random_brightness(img)
# img = random_contrast(img)
# img = random_saturation(img)
# img = random_hue(img)
# elif prob < 0.7:
# img = random_brightness(img)
# img = random_saturation(img)
# img = random_hue(img)
# img = random_contrast(img)
# return img
# def custom_image_reader(file_list, data_dir, mode):
# """
# 自定义用户图片读取器,先初始化图片种类,数量
# :param file_list:
# :param data_dir:
# :param mode:
# :return:
# """
# with codecs.open(file_list) as flist:
# lines = [line.strip() for line in flist]
# def reader():
# np.random.shuffle(lines)
# for line in lines:
# if mode == 'train' or mode == 'val':
# img_path, label = line.split()
# img = Image.open(img_path)
# try:
# if img.mode != 'RGB':
# img = img.convert('RGB')
# if train_parameters['image_enhance_strategy']['need_distort'] == True:
# img = distort_color(img)
# if train_parameters['image_enhance_strategy']['need_rotate'] == True:
# img = rotate_image(img)
# if train_parameters['image_enhance_strategy']['need_crop'] == True:
# img = random_crop(img, train_parameters['input_size'])
# if train_parameters['image_enhance_strategy']['need_flip'] == True:
# mirror = int(np.random.uniform(0, 2))
# if mirror == 1:
# img = img.transpose(Image.FLIP_LEFT_RIGHT)
# # HWC--->CHW && normalized
# img = np.array(img).astype('float32')
# img -= train_parameters['mean_rgb']
# img = img.transpose((2, 0, 1)) # HWC to CHW
# img *= 0.007843 # 像素值归一化
# yield img, int(label)
# except Exception as e:
# pass # 以防某些图片读取处理出错,加异常处理
# elif mode == 'test':
# img_path = os.path.join(data_dir, line)
# img = Image.open(img_path)
# if img.mode != 'RGB':
# img = img.convert('RGB')
# img = resize_img(img, train_parameters['input_size'])
# # HWC--->CHW && normalized
# img = np.array(img).astype('float32')
# img -= train_parameters['mean_rgb']
# img = img.transpose((2, 0, 1)) # HWC to CHW
# img *= 0.007843 # 像素值归一化
# yield img
# return reader
# def optimizer_momentum_setting():
# """
# 阶梯型的学习率适合比较大规模的训练数据
# """
# learning_strategy = train_parameters['momentum_strategy']
# batch_size = train_parameters["train_batch_size"]
# iters = train_parameters["image_count"] // batch_size
# lr = learning_strategy['learning_rate']
# boundaries = [i * iters for i in learning_strategy["lr_epochs"]]
# values = [i * lr for i in learning_strategy["lr_decay"]]
# learning_rate = fluid.layers.piecewise_decay(boundaries, values)
# optimizer = fluid.optimizer.MomentumOptimizer(learning_rate=learning_rate, momentum=0.9)
# return optimizer
# def optimizer_rms_setting():
# """
# 阶梯型的学习率适合比较大规模的训练数据
# """
# batch_size = train_parameters["train_batch_size"]
# iters = train_parameters["image_count"] // batch_size
# learning_strategy = train_parameters['rsm_strategy']
# lr = learning_strategy['learning_rate']
# boundaries = [i * iters for i in learning_strategy["lr_epochs"]]
# values = [i * lr for i in learning_strategy["lr_decay"]]
# optimizer = fluid.optimizer.RMSProp(
# learning_rate=fluid.layers.piecewise_decay(boundaries, values))
# return optimizer
# def optimizer_sgd_setting():
# """
# loss下降相对较慢,但是最终效果不错,阶梯型的学习率适合比较大规模的训练数据
# """
# learning_strategy = train_parameters['sgd_strategy']
# batch_size = train_parameters["train_batch_size"]
# iters = train_parameters["image_count"] // batch_size
# lr = learning_strategy['learning_rate']
# boundaries = [i * iters for i in learning_strategy["lr_epochs"]]
# values = [i * lr for i in learning_strategy["lr_decay"]]
# learning_rate = fluid.layers.piecewise_decay(boundaries, values)
# optimizer = fluid.optimizer.SGD(learning_rate=learning_rate)
# return optimizer
# def optimizer_adam_setting():
# """
# 能够比较快速的降低 loss,但是相对后期乏力
# """
# learning_strategy = train_parameters['adam_strategy']
# learning_rate = learning_strategy['learning_rate']
# optimizer = fluid.optimizer.Adam(learning_rate=learning_rate)
# return optimizer
# def load_params(exe, program):
# if train_parameters['continue_train'] and os.path.exists(train_parameters['save_persistable_dir']):
# logger.info('load params from retrain model')
# fluid.io.load_persistables(executor=exe,
# dirname=train_parameters['save_persistable_dir'],
# main_program=program)
# elif train_parameters['pretrained'] and os.path.exists(train_parameters['pretrained_dir']):
# logger.info('load params from pretrained model')
# def if_exist(var):
# return os.path.exists(os.path.join(train_parameters['pretrained_dir'], var.name))
# fluid.io.load_vars(exe, train_parameters['pretrained_dir'], main_program=program,
# predicate=if_exist)
# def train():
# EPOCH_NUM = train_parameters["num_epochs"]
# #定义分别存储Paddle和python模型的训练信息的多维数组, 先存Paddle编写的模型的信息,后存python编写模型的信息
# train_loss_acc = np.empty([2,EPOCH_NUM], dtype = float) #用二维数组分别存储两种模型的train_loss、test_loss
# #定义VisualDL使用的变量。train和test集的loss定义为scalar类型(折线图),weight定义为histogram类型(直方图)
# # log_writter = LogWriter("./log", sync_cycle=1000) #定义日志写入器。参数为日志存储位置和写入硬盘的数据量间隔
# # with log_writter.mode("train") as logger: #变量定义在logger下操作。设置日志写入模式为“train”
# # train_loss_paddle = logger.scalar("Train Loss") #记录paddle模型train集loss
# # train_acc_paddle = logger.scalar("Train Acc") #记录paddle模型train集loss
# train_prog = fluid.Program()
# train_startup = fluid.Program()
# logger.info("create prog success")
# logger.info("train config: %s", str(train_parameters))
# logger.info("build input custom reader and data feeder")
# file_list = os.path.join(train_parameters['data_dir'], "train.txt")
# mode = train_parameters['mode']
# batch_reader = paddle.batch(custom_image_reader(file_list, train_parameters['data_dir'], mode),
# batch_size=train_parameters['train_batch_size'],
# drop_last=True)
# place = fluid.CUDAPlace(0) if train_parameters['use_gpu'] else fluid.CPUPlace()
# # 定义输入数据的占位符
# img = fluid.layers.data(name='img', shape=train_parameters['input_size'], dtype='float32')
# label = fluid.layers.data(name='label', shape=[1], dtype='int64')
# feeder = fluid.DataFeeder(feed_list=[img, label], place=place)
# # 选取不同的网络
# logger.info("build newwork")
# # model = InceptionV4()
# model = ResNet50()
# out = model.net(input=img, class_dim=train_parameters['class_dim'])
# cost = fluid.layers.cross_entropy(out, label)
# avg_cost = fluid.layers.mean(x=cost)
# acc_top1 = fluid.layers.accuracy(input=out, label=label, k=1)
# # 选取不同的优化器
# # optimizer = optimizer_rms_setting()
# # optimizer = optimizer_momentum_setting()
# # optimizer = optimizer_sgd_setting()
# optimizer = optimizer_adam_setting()
# optimizer.minimize(avg_cost)
# exe = fluid.Executor(place)
# main_program = fluid.default_main_program()
# exe.run(fluid.default_startup_program())
# train_fetch_list = [avg_cost.name, acc_top1.name, out.name]
# load_params(exe, main_program)
# # 训练循环主体
# stop_strategy = train_parameters['early_stop']
# successive_limit = stop_strategy['successive_limit']
# sample_freq = stop_strategy['sample_frequency']
# good_acc1 = stop_strategy['good_acc1']
# successive_count = 0
# stop_train = False
# total_batch_count = 0
# for pass_id in range(train_parameters["num_epochs"]):
# logger.info("current pass: %d, start read image", pass_id)
# batch_id = 0
# for step_id, data in enumerate(batch_reader()):
# t1 = time.time()
# # logger.info("data size:{0}".format(len(data)))
# loss, acc1, pred_ot = exe.run(main_program,
# feed=feeder.feed(data),
# fetch_list=train_fetch_list)
# t2 = time.time()
# batch_id += 1
# total_batch_count += 1
# period = t2 - t1
# loss = np.mean(np.array(loss))
# acc1 = np.mean(np.array(acc1))
# if batch_id % 100 == 0:
# logger.info("Pass {0}, trainbatch {1}, loss {2}, acc1 {3}, time {4}".format(pass_id, batch_id, loss, acc1,
# "%2.2f sec" % period))
# # print("Pass {0}, trainbatch {1}, loss {2}, acc1 {3}, time {4}".format(pass_id, batch_id, loss, acc1,
# # "%2.2f sec" % period))
# # 简单的提前停止策略,认为连续达到某个准确率就可以停止了
# if acc1 >= good_acc1:
# successive_count += 1
# logger.info("current acc1 {0} meets good {1}, successive count {2}".format(acc1, good_acc1, successive_count))
# fluid.io.save_inference_model(dirname=train_parameters['save_freeze_dir'],
# feeded_var_names=['img'],
# target_vars=[out],
# main_program=main_program,
# executor=exe)
# if successive_count >= successive_limit:
# logger.info("end training")
# stop_train = True
# break
# else:
# successive_count = 0
# # 通用的保存策略,减小意外停止的损失
# if total_batch_count % sample_freq == 0:
# logger.info("temp save {0} batch train result, current acc1 {1}".format(total_batch_count, acc1))
# fluid.io.save_persistables(dirname=train_parameters['save_persistable_dir'],
# main_program=main_program,
# executor=exe)
# train_loss_acc[0][pass_id] = loss
# train_loss_acc[1][pass_id] = acc1
# # #写入Paddle模型的train和test的loss以及模型的权重weight到log文件夹下的对应文件中
# # train_loss_paddle.add_record(pass_id, loss.numpy()[0])
# # train_acc_paddle.add_record(pass_id, acc1.numpy()[0])
# if stop_train:
# break
# logger.info("training till last epcho, end training")
# fluid.io.save_persistables(dirname=train_parameters['save_persistable_dir'],
# main_program=main_program,
# executor=exe)
# fluid.io.save_inference_model(dirname=train_parameters['save_freeze_dir'],
# feeded_var_names=['img'],
# target_vars=[out],
# main_program=main_program.clone(for_test=True),
# executor=exe)
# init_log_config()
# init_train_parameters()
# train()
# -
# # 模型评估
# +
# from __future__ import absolute_import
# from __future__ import division
# from __future__ import print_function
# import os
# import numpy as np
# import random
# import time
# import codecs
# import sys
# import functools
# import math
# import paddle
# import paddle.fluid as fluid
# from paddle.fluid import core
# from paddle.fluid.param_attr import ParamAttr
# from PIL import Image, ImageEnhance
# target_size = [3, 224, 224]
# mean_rgb = [127.5, 127.5, 127.5]
# data_dir = "data/"
# eval_file = "eval.txt"
# use_gpu = True
# place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
# exe = fluid.Executor(place)
# save_freeze_dir = "./freeze-model"
# [inference_program, feed_target_names, fetch_targets] = fluid.io.load_inference_model(dirname=save_freeze_dir, executor=exe)
# # print(fetch_targets)
# def crop_image(img, target_size):
# width, height = img.size
# w_start = (width - target_size[2]) / 2
# h_start = (height - target_size[1]) / 2
# w_end = w_start + target_size[2]
# h_end = h_start + target_size[1]
# img = img.crop((w_start, h_start, w_end, h_end))
# return img
# def resize_img(img, target_size):
# ret = img.resize((target_size[1], target_size[2]), Image.BILINEAR)
# return ret
# def read_image(img_path):
# img = Image.open(img_path)
# if img.mode != 'RGB':
# img = img.convert('RGB')
# img = crop_image(img, target_size)
# img = np.array(img).astype('float32')
# img -= mean_rgb
# img = img.transpose((2, 0, 1)) # HWC to CHW
# img *= 0.007843
# img = img[np.newaxis,:]
# return img
# def infer(image_path):
# tensor_img = read_image(image_path)
# label = exe.run(inference_program, feed={feed_target_names[0]: tensor_img}, fetch_list=fetch_targets)
# return np.argmax(label)
# def eval_all():
# eval_file_path = os.path.join(data_dir, eval_file)
# total_count = 0
# right_count = 0
# with codecs.open(eval_file_path, encoding='utf-8') as flist:
# lines = [line.strip() for line in flist]
# t1 = time.time()
# for line in lines:
# total_count += 1
# parts = line.strip().split()
# result = infer(parts[0])
# # print("infer result:{0} answer:{1}".format(result, parts[1]))
# if str(result) == parts[1]:
# right_count += 1
# period = time.time() - t1
# print("total eval count:{0} cost time:{1} predict accuracy:{2}".format(total_count, "%2.2f sec" % period, right_count / total_count))
# if 1:
# eval_all()
# -
#
# 请点击[此处](https://ai.baidu.com/docs#/AIStudio_Project_Notebook/a38e5576)查看本环境基本用法. <br>
# Please click [here ](https://ai.baidu.com/docs#/AIStudio_Project_Notebook/a38e5576) for more detailed instructions.
|
CNN.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + cellView="form" id="KQa9t_gadIuR"
#@title Copyright 2022 The Cirq Developers
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] id="xwec7FrkdFmi"
# # Gate Zoo
# + [markdown] id="5KZia7jmdJ3V"
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://quantumai.google/cirq/gatezoo.ipynb"><img src="https://quantumai.google/site-assets/images/buttons/quantumai_logo_1x.png" />View on QuantumAI</a>
# </td>
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/quantumlib/Cirq/blob/master/docs/gatezoo.ipynb"><img src="https://quantumai.google/site-assets/images/buttons/colab_logo_1x.png" />Run in Google Colab</a>
# </td>
# <td>
#     <a target="_blank" href="https://github.com/quantumlib/Cirq/blob/master/docs/gatezoo.ipynb"><img src="https://quantumai.google/site-assets/images/buttons/github_logo_1x.png" />View source on GitHub</a>
# </td>
# <td>
# <a href="https://storage.googleapis.com/tensorflow_docs/Cirq/docs/gatezoo.ipynb"><img src="https://quantumai.google/site-assets/images/buttons/download_icon_1x.png" />Download notebook</a>
# </td>
# </table>
# + [markdown] id="541571c2edcd"
# ## Setup
# Note: this notebook relies on unreleased Cirq features. If you want to try these features, make sure you install cirq via `pip install cirq --pre`
# + id="bd9529db1c0b"
# Ensure Cirq is available: in Colab the pip cell-magic below installs the
# pre-release build on the first run.
try:
    import cirq
except ImportError:
    print("installing cirq...")
    # !pip install --quiet --pre cirq
    # NOTE(review): cirq is not re-imported here after installation; the
    # unconditional `import cirq` in the next cell covers that — confirm.
    print("installed cirq.")
import IPython.display as ipd
import cirq
import inspect
def display_gates(*gates):
    """Render Markdown documentation for each named Cirq gate.

    For every gate name given, a horizontal rule is shown, then a heading
    ``cirq.<name>`` and the gate's (dedented) docstring.  A closing rule is
    emitted after the listing.
    """
    for name in gates:
        ipd.display(ipd.Markdown("---"))
        gate_obj = getattr(cirq, name)
        ipd.display(ipd.Markdown(f"#### cirq.{name}"))
        ipd.display(ipd.Markdown(inspect.cleandoc(gate_obj.__doc__)))
    # The original used a no-break for/else; the else-branch always ran, so a
    # plain post-loop statement is equivalent.
    ipd.display(ipd.Markdown("---"))
# + [markdown] id="1cd004cc2f3a"
# Cirq comes with many gates that are standard across quantum computing. This notebook serves as a reference sheet for these gates.
#
# ## Single Qubit Gates
#
#
# ### Gate constants
#
# Cirq defines constants which are gate instances for particular important single qubit gates.
# + id="0c3a029e2155"
display_gates("X", "Y", "Z", "H", "S", "T")
# + [markdown] id="10c855370f45"
# ### Traditional Pauli Rotation Gates
#
# Traditional single qubit rotations expressed in radians of rotation about Pauli axis are defined.
# + id="e96e1c459258"
display_gates("Rx", "Ry", "Rz")
# + [markdown] id="4bfc17ef80bb"
# ### Pauli PowGates
#
# If you think of the `cirq.Z` gate as phasing the state $|1\rangle$ by $-1$, then you might think that the square root of this gate phases the state $|1\rangle$ by $i=\sqrt{-1}$. The `XPowGate`, `YPowGate` and `ZPowGate`s all act in this manner, phasing the state corresponding to their $-1$ eigenvalue by a prescribed amount. This ends up being the same as the `Rx`, `Ry`, and `Rz` up to a global phase.
# + id="0e2ea8a0a0ae"
display_gates("XPowGate", "YPowGate", "ZPowGate")
# + [markdown] id="6631a361ac42"
# ### More Single Qubit Gates
#
# Many quantum computing implementations use qubits whose energy eigenstates for a qubit that is not interacting are the computational basis states. In these cases it is often useful to move `cirq.ZPowGate`'s through other single qubit gates, "phasing" the other gates.
# + id="b5ffeefa3c76"
display_gates("PhasedXPowGate", "PhasedXZGate", "HPowGate")
|
docs/gatezoo.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:tensorflow2] *
# language: python
# name: conda-env-tensorflow2-py
# ---
# # Analysis of Covid-19 data
# Load main libraries...
# +
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
import os
import re
import sys
import time
import yaml
import dateutil.parser
from datetime import date, datetime, timedelta
from pprint import pprint as pp
import numpy as np
import pandas as pd
from pyathena import connect
from elasticsearch import Elasticsearch
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
from mpl_toolkits.mplot3d import Axes3D
import seaborn as sns
import ipywidgets as widgets
from IPython.display import display
import matplotlib.dates as mdates
import zipfile
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
sys.path.insert(0, r'..')
# -
# Local libraries
from covid.config import DATA_FOLDER
from covid.graphs import stacked_bars, grouped_bars, multi_graph_bar
# +
# Main data sources
GLOBAL_CONF = 'global-covid-confirmed.csv'
GLOBAL_DEATHS = 'global-covid-deaths.csv'
GLOBAL_REC = 'global-covid-recovered.csv'
POPULATION_DATA = 'global-population.csv'
# Load the four source tables: three global time series (one column per
# date) and one static population table.
df_glo_conf = pd.read_csv(DATA_FOLDER.joinpath(GLOBAL_CONF))
df_glo_deaths = pd.read_csv(DATA_FOLDER.joinpath(GLOBAL_DEATHS))
df_glo_rec = pd.read_csv(DATA_FOLDER.joinpath(GLOBAL_REC))
df_population = pd.read_csv(DATA_FOLDER.joinpath(POPULATION_DATA))
print(f"Confirmed shape: {df_glo_conf.shape}")
print(f"Deaths shape: {df_glo_deaths.shape}")
print(f"Recovered shape: {df_glo_rec.shape}")
# BUG FIX: the original printed "Recovered shape" twice; this line reports
# the population table.
print(f"Population shape: {df_population.shape}")
# -
df_glo_conf.head(5)
df_population.head(5)
# Set the country as index
df_population = df_population.set_index('Country (or dependency)')
def to_numeric(x):
    """Coerce one population-table cell to a number.

    Strings like ``'1,234'`` become ``int`` and ``'1,234.5'`` become
    ``float`` (thousands separators stripped); the literal ``'N.A.'``
    becomes NaN.  Non-string values pass through unchanged.
    """
    if not isinstance(x, str):
        return x
    if x == 'N.A.':
        return np.nan
    cleaned = x.replace(',', '')
    return float(cleaned) if '.' in cleaned else int(cleaned)
# +
# Columns that hold percentage strings and are left as-is.
columns_pct = ['Yearly Change', 'Urban Pop %', 'World Share']
# NOTE(review): the double brackets make this a one-element list whose single
# item is the full list of column names, so the loop below runs once and
# `applymap` operates on the whole sub-DataFrame elementwise — verify the
# extra nesting is intentional.
columns_to_transform = [[k for k in df_population.columns if k not in columns_pct]]
for c in columns_to_transform:
    df_population[c] = df_population[c].applymap(to_numeric)
# +
transform = {
'United States': 'US',
'South Korea': 'Korea, South',
}
df_population.index = [k if k not in transform.keys() else transform[k] for k in df_population.index ]
# -
# #### Create single source of the data for the global analysis
# +
# Capture relevant columns for the analysis...
is_date = re.compile(r'[\d]{1,2}/[\d]{1,2}/[\d]{1,4}')
date_columns = [k for k in df_glo_conf.columns if is_date.match(k)]
columns2extract = ['Country/Region', *date_columns]
print(f"Columns selected: {columns2extract[:2]} ...")
# Transform string dates to datetime...
updated_indexes = [dateutil.parser.parse(k) for k in date_columns]
# +
# Select columns and transpose to date as index
df_global_confirmed = df_glo_conf.loc[:, columns2extract].groupby('Country/Region').sum().transpose()
df_global_deaths = df_glo_deaths.loc[:, columns2extract].groupby('Country/Region').sum().transpose()
df_global_recovered = df_glo_rec.loc[:, columns2extract].groupby('Country/Region').sum().transpose()
# Update indices to datetime format
df_global_confirmed.index = updated_indexes
df_global_deaths.index = updated_indexes
df_global_recovered.index = updated_indexes
# -
df_global_confirmed.tail(3)
# Pack the datasets together
global_dict = {
'confirmed': df_global_confirmed,
'deaths': df_global_deaths,
'recovered': df_global_recovered,
'population': df_population,
}
countries_list = global_dict['confirmed'].columns
def get_country_from_dict(data_dict, country='Spain'):
    """Extract the time series for one country, or the world total.

    Parameters
    ----------
    data_dict : dict
        Maps 'confirmed'/'deaths'/'recovered' to DataFrames indexed by date
        with one column per country; may also map 'population' to a
        DataFrame indexed by country exposing a 'Population (2020)' column.
    country : str
        Country column name, or 'all' to sum across every country.

    Returns
    -------
    pandas.DataFrame
        One column per section, plus '<section>_per_mill' columns whenever
        population data is present.  An unknown country yields an *empty*
        frame with the section columns.
    """
    sections = ['confirmed', 'deaths', 'recovered']
    got_population = 'population' in data_dict
    final_data = {}
    if country == 'all':
        for section in sections:
            selection = data_dict[section].sum(axis=1)
            selection.name = section
            final_data[section] = selection
        if got_population:
            # World population in millions.
            population = data_dict['population']['Population (2020)'].sum() / 1e6
            for section in sections:
                final_data[section + '_per_mill'] = final_data[section] / population
    elif country in data_dict[sections[0]].columns:
        for section in sections:
            selection = data_dict[section].loc[:, country]
            selection.name = section
            final_data[section] = selection
        if got_population:
            # Country population in millions.
            population = data_dict['population'].loc[country, 'Population (2020)'] / 1e6
            for section in sections:
                final_data[section + '_per_mill'] = final_data[section] / population
    else:
        # BUG FIX: the original built pd.DataFrame({k: None ...}) from all
        # scalar values, which raises ValueError in pandas; return an empty
        # frame with the expected columns instead.
        return pd.DataFrame(columns=sections)
    return pd.concat(final_data, axis=1)
data = get_country_from_dict(global_dict)
# Load widgets
from ipywidgets import *
def totals_plot(country, default=None, logScaleY=True, baseline=10):
    """Plot the cumulative confirmed/deaths/recovered curves for a country.

    When `default` is truthy the country is forced to 'Spain'.  Rows are
    kept only once confirmed cases exceed `baseline`.
    """
    if default:
        country = 'Spain'
    colors = ['blue', 'red', 'olive']
    df = get_country_from_dict(global_dict, country)
    df = df[df['confirmed'] > baseline]
    fig, ax = plt.subplots(figsize=(16, 8), ncols=1)
    # Table-driven version of the three original ax.plot calls.
    for column, colour, legend in (('confirmed', 'blue', 'Confirmed'),
                                   ('deaths', 'red', 'Deaths'),
                                   ('recovered', 'olive', 'Recovered')):
        ax.plot(column, data=df, marker='', color=colour, linewidth=2, label=legend)
    ax.grid(color='grey', linestyle='-', linewidth=0.5)
    ax.tick_params(axis='x', rotation=65)
    if logScaleY:
        ax.set_yscale('log')
        title = f"{country} (Log scale)"
    else:
        title = f"{country} (Linear scale)"
    ax.legend()
    ax.set_title(title, fontsize=24)
# Plot totals for selected country. (Deactivate default for other countries)
interact(totals_plot, country=countries_list, default=True, baseline=(0,100));
def totals_x_pop_plot(country, default=None, logScaleY=False, baseline=10):
    """Plot the per-million confirmed/deaths/recovered curves for a country.

    When `default` is truthy the country is forced to 'Spain'.  Rows are
    kept only once confirmed cases exceed `baseline`.
    """
    if default:
        country = 'Spain'
    columns = ['confirmed_per_mill', 'deaths_per_mill', 'recovered_per_mill']
    colors = ['blue', 'red', 'olive']
    df = get_country_from_dict(global_dict, country)
    df = df[df['confirmed'] > baseline]
    fig, ax = plt.subplots(figsize=(16, 8), ncols=1)
    for pos, col in enumerate(columns):
        ax.plot(col, data=df, marker='', color=colors[pos], linewidth=2, label=col.title())
    ax.grid(color='grey', linestyle='-', linewidth=0.5)
    ax.tick_params(axis='x', rotation=65)
    scale_label = "Log scale" if logScaleY else "Linear scale"
    if logScaleY:
        ax.set_yscale('log')
    title = f"{country} Cases Per Million ({scale_label})"
    ax.legend()
    ax.set_title(title, fontsize=24)
# Plot totals for selected country. (Deactivate default for other countries)
interact(totals_x_pop_plot, country=countries_list, default=True, baseline=(0,100), logScaleY=True);
def daily_plot(country, default=None, baseline=10):
    """Bar-plot daily new confirmed/deaths/recovered cases for a country.

    Cumulative series are differenced to daily increments; negative
    reporting corrections in the confirmed series are clamped to -10 so
    they stay visible without distorting the axis.
    """
    if default:
        country = 'Spain'
    df = get_country_from_dict(global_dict, country)
    df = df[df['confirmed'] > baseline]
    # Get the new cases per day.
    df = df.diff(1)
    # BUG FIX: the original chained indexing df['confirmed'][mask] = -10
    # raises SettingWithCopyWarning and silently fails to write under
    # pandas copy-on-write; .loc writes in place reliably.
    df.loc[df['confirmed'] < 0, 'confirmed'] = -10
    column = [('confirmed', 'blue'), ('deaths', 'red'), ('recovered', 'green')]
    fig, ax = plt.subplots(figsize=(16, 4), ncols=3)
    for idx, x in enumerate(ax):
        x.bar(df.index, df[column[idx][0]], color=column[idx][1])
        x.grid(color='grey', linestyle='-', linewidth=0.5)
        x.set_title(f"{country} {column[idx][0]}", fontsize=16)
        x.tick_params(axis='x', rotation=75)
        x.set_xlabel("Date")
interact(daily_plot, country=countries_list, default=True, baseline=(0,100));
# ## World Totals
# World-wide totals: sum every country, then split the confirmed count into
# still-active cases for the stacked-bar chart (confirmed minus deaths and
# recoveries).
df_totals = get_country_from_dict(global_dict, 'all')
df_totals_adj = df_totals.copy()
df_totals_adj['confirmed'] = df_totals_adj.apply(lambda x: x.confirmed - x.deaths - x.recovered, axis=1)
series_labels = ['deaths', 'recovered', 'confirmed']
# Column positions: 0=confirmed, 1=deaths, 2=recovered (reordered to match
# series_labels above).
data = [df_totals_adj.values[:, 1], df_totals_adj.values[:, 2], df_totals_adj.values[:, 0]]
# NOTE(review): "%Y-%d-%m" renders year-DAY-month; "%Y-%m-%d" (ISO) was
# probably intended — confirm before changing the axis labels.
category_labels = [k.strftime("%Y-%d-%m") for k in df_totals_adj.index.tolist()]
stacked_bars(data, series_labels, category_labels=category_labels, orientation='horizontal',
             show_values=False, value_format="{:.0f}", plotsize=(14, 24), reverse=True,
             colors=['tab:red', 'tab:green', 'tab:blue'],
             y_label="Quantity (cases)", rotation=75, title="Global Cases")
# Day-over-day percentage increments (first row dropped: diff of nothing).
df_totals_inc = (100 * df_totals.diff(1) / df_totals.shift(1)).round(decimals=2)[1:]
grouped_bars(df_totals_inc, ['confirmed', 'deaths', 'recovered'], colours=['blue', 'red', 'green'],
             title="Global Daily percentual increments", xlabel="Dates", ylabel="% Increment")
multi_graph_bar(df_totals_inc, ['confirmed', 'deaths', 'recovered'], colours=['blue', 'red', 'green'],
                title="Global Daily percentual increments", xlabel="Dates", ylabel="% Increment")
# ## Progression from +n cases
# +
# Set parameters
TOP_SELECTED = 20
# Retrieve countries with top number of confirmed cases:
top_countries = global_dict['confirmed'].iloc[-1, :].sort_values(ascending=False)[:TOP_SELECTED].index
# Retrieve data for those countries
df_top = {
'confirmed': df_global_confirmed.loc[:, top_countries],
'deaths': df_global_deaths.loc[:, top_countries],
'recovered': df_global_recovered.loc[:, top_countries],
}
# -
# Normalize starting point as per selected threshold
def normalize_data(df, threshold=50, margin=25):
max_day = 0
processing = {}
recompute = []
removed = []
for country in df.columns:
processing[country] = df[country][df[country] >= threshold]
if processing[country].empty:
del processing[country]
removed.append(country)
continue
if processing[country][0] >= threshold * 2:
recompute.append(country)
new_index = np.arange(len(processing[country].index))
processing[country].index = new_index
if new_index[-1] > max_day:
max_day = new_index[-1]
if recompute:
# Adjust series that may have missing values or start for a very high number
countries_within_threshold = [c for c in df.columns if c not in [*recompute, *removed]]
df_top = pd.concat(processing.values(), axis=1)
for r in recompute:
find_point = processing[r][0]
m = (df_top[countries_within_threshold] > find_point - margin) | \
(df_top[countries_within_threshold] > find_point + margin)
initial_row = m.sum(axis=1).to_numpy().nonzero()[0][0]
processing[r].index = np.arange(initial_row, len(processing[r].index) + initial_row)
return pd.concat(processing.values(), axis=1)
# #### Progression from +50 cases
# +
THRESHOLD = 50
df_normalized_50 = normalize_data(df_top['confirmed'], threshold=THRESHOLD, margin=25)
fig, ax = plt.subplots(figsize=(16, 8), ncols=1)
for c in df_normalized_50.columns:
ax.plot(c, data=df_normalized_50, marker='', linewidth=2, linestyle='dashed', label=c)
ax.grid(color='grey', linestyle='-', linewidth=0.5)
ax.tick_params(axis='x', rotation=30)
ax.set_xlabel('# days', fontsize=16)
ax.set_ylabel('# confirmed cases (linear)', fontsize=16)
ax.legend()
ax.set_title(f"Progression from first day with +{THRESHOLD} cases (Linear scale)", fontsize=20);
# -
# ## Progression from +1k cases
# +
threshold = int(1e3)
df_normalized_1k = normalize_data(df_top['confirmed'], threshold=threshold, margin=25)
fig, ax = plt.subplots(figsize=(16, 8), ncols=1)
for c in df_normalized_1k.columns:
ax.plot(c, data=df_normalized_1k, marker='', linewidth=2, linestyle='dashed', label=c)
ax.grid(color='grey', linestyle='-', linewidth=0.5)
ax.tick_params(axis='x', rotation=30)
ax.set_yscale('log')
ax.set_xlabel('# days', fontsize=16)
ax.set_ylabel('# confirmed cases (log)', fontsize=16)
ax.legend()
ax.set_title(f"Progression from first day with +{threshold} cases (Log Scale)", fontsize=20);
# -
cases_array = [10, 50, 100, 500, 1000, 5000]
modes = ['confirmed', 'deaths', 'recovered']
@interact(ncases=cases_array, mode=modes, logScaleY=True)
def country_plot(ncases, mode, logScaleY=True):
    """Interactive plot: progression of `mode` cases for the top countries,
    aligned so day 0 is each country's first day with more than `ncases`.
    """
    df_normalized = normalize_data(df_top[mode], threshold=ncases, margin=25)
    fig, ax = plt.subplots(figsize=(16, 8), ncols=1)
    for c in df_normalized.columns:
        ax.plot(c, data=df_normalized, marker='', linewidth=2, linestyle='dashed', label=c)
    ax.grid(color='grey', linestyle='-', linewidth=0.5)
    ax.tick_params(axis='x', rotation=30)
    ax.set_xlabel('# days', fontsize=16)
    # NOTE(review): the y-label always says "(log)" even when logScaleY is
    # False — consider deriving it from the scale flag.
    ax.set_ylabel('# confirmed cases (log)', fontsize=16)
    ax.legend()
    if logScaleY:
        ax.set_yscale('log')
        scale_str = 'Log scale'
    else:
        scale_str = 'Linear scale'
    ax.set_title(f"Progression: {mode} cases from 1st day with +{ncases} cases ({scale_str})", fontsize=20);
# ### Progression in Cases vs Millions of Users
# +
def apply_population(val, popul):
    """Scale one case count to cases-per-million.

    NaN values and missing populations pass through untouched, as do
    non-positive counts (e.g. negative reporting corrections).
    """
    if np.isnan(val) or (popul is None):
        return val
    if val > 0:
        return val / popul
    return val


def cases_x_million(df, global_data):
    """Return a copy of `df` with each country column scaled to
    cases per million inhabitants.

    `global_data` must be indexed by country and expose a
    'Population (2020)' column; countries without population data are
    left unscaled.
    """
    df = df.copy()
    for cou in df.columns:
        # BUG FIX: the original tested `c in global_data.index`, where `c`
        # was a stale loop variable left over from an earlier notebook
        # cell; the current country `cou` must be looked up.
        popul = global_data.loc[cou, 'Population (2020)'] / 1e6 if cou in global_data.index else None
        df[cou] = df[cou].apply(lambda x: apply_population(x, popul))
    return df
# -
cases_x_mill_array = [5, 50, 100, 500]


# Normalize starting point as per selected threshold
def normalize_data(df, threshold=50, margin=25):
    """Align every country's series so day 0 is the first day at/above `threshold`.

    Countries whose first above-threshold reading is already very high
    (>= 2 * threshold, typically because earlier data points are missing)
    are not pinned to day 0; instead they are shifted to the first day on
    which some other (well-behaved) country reported a value within
    +/- `margin` of that reading.

    Countries that never reach `threshold` are dropped from the result.
    """
    max_day = 0
    processing = {}
    recompute = []
    removed = []
    for country in df.columns:
        processing[country] = df[country][df[country] >= threshold]
        if processing[country].empty:
            # Never reached the threshold: drop this country.
            del processing[country]
            removed.append(country)
            continue
        elif processing[country].iloc[0] >= threshold * 2:
            # Starts suspiciously high -> realign against the other series below.
            # (.iloc replaces the deprecated positional `series[0]` lookup.)
            recompute.append(country)
        new_index = np.arange(len(processing[country].index))
        processing[country].index = new_index
        if new_index[-1] > max_day:
            max_day = new_index[-1]
    if recompute:
        # Adjust series that may have missing values or start from a very high number.
        countries_within_threshold = [c for c in df.columns if c not in [*recompute, *removed]]
        aligned = pd.concat(processing.values(), axis=1)
        for r in recompute:
            find_point = processing[r].iloc[0]
            # BUG FIX: the original combined two `>` comparisons with `|`,
            # which degenerates to `> find_point - margin`; the intent is a
            # band of +/- margin around find_point.
            m = (aligned[countries_within_threshold] > find_point - margin) & \
                (aligned[countries_within_threshold] < find_point + margin)
            # First day on which any well-behaved country was inside the band.
            initial_row = m.sum(axis=1).to_numpy().nonzero()[0][0]
            processing[r].index = np.arange(initial_row, len(processing[r].index) + initial_row)
    return pd.concat(processing.values(), axis=1)
@interact(ncases=cases_x_mill_array, mode=modes, logScaleY=True)
def country_plot(ncases, mode, logScaleY=True):
    """Interactive plot of per-million case progression, aligned on the first
    day each country exceeded `ncases` cases per million inhabitants."""
    per_million = cases_x_million(df_top[mode], df_population)
    aligned = normalize_data(per_million, threshold=ncases, margin=5)
    fig, ax = plt.subplots(figsize=(16, 8), ncols=1)
    for country in aligned.columns:
        ax.plot(country, data=aligned, marker='', linewidth=2,
                linestyle='dashed', label=country)
    ax.grid(color='grey', linestyle='-', linewidth=0.5)
    ax.tick_params(axis='x', rotation=30)
    ax.set_xlabel('# days', fontsize=16)
    ax.set_ylabel('# confirmed cases (log)', fontsize=16)
    ax.legend()
    scale_str = 'Log scale' if logScaleY else 'Linear scale'
    if logScaleY:
        ax.set_yscale('log')
    ax.set_title(f"Progression: {mode} cases from 1st day with +{ncases} Cases Per Million({scale_str})",
                 fontsize=20);
|
notebooks/global_covid-19_graphs.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/deepanshupant2002/zee-media-internship/blob/main/first_attempt.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/"} id="N33dy12Y5gyM" outputId="5f550cd9-0bea-4f9a-e152-fbf1b90a2560"
import moviepy.editor as mp
# Extract the audio track from the source video and save it as WAV so it can
# be chunked and transcribed in the cells below.
clip = mp.VideoFileClip("Imran Khans PTI Party Admits Pakistan Terrorists Carried out 2611 Terror Attack on India.3gpp")
clip.audio.write_audiofile(r"audio_full.wav")
# + id="P3TFz4M-6Q30" colab={"base_uri": "https://localhost:8080/"} outputId="a7944000-badc-43bd-8c28-718b55062eed"
pip install pydub
# + colab={"base_uri": "https://localhost:8080/"} id="8jnFUkA5JMLF" outputId="18e81a6e-61ba-4b55-a559-a9381eca56cc"
pip install transformers
# + colab={"base_uri": "https://localhost:8080/"} id="9zT1zf615jST" outputId="3698be99-533e-4ce1-d0ef-79f1d61ebc63"
from pydub import AudioSegment

# Load the extracted audio; pydub slices by milliseconds.
mp3_audio = AudioSegment.from_file(r"audio_full.wav", format="wav")
print(len(mp3_audio) / (1000 * 60))  # duration in minutes

# A ~12 minute recording is broken into four 3-minute (180 s) chunks.
# BUG FIX: the original seeded the list with one chunk and then appended
# four more, producing five files (the last one largely empty) instead of
# the four described in its own comment.
CHUNK_SECONDS = 180
split_audio = [
    mp3_audio[start * 1000:(start + CHUNK_SECONDS) * 1000]
    for start in range(0, 4 * CHUNK_SECONDS, CHUNK_SECONDS)
]

# Save the chunks as 1_audi_file.wav .. 4_audi_file.wav.
for count, audio_object in enumerate(split_audio, start=1):
    with open(f"{count}_audi_file.wav", 'wb') as out_f:
        audio_object.export(out_f, format='wav')
# + colab={"base_uri": "https://localhost:8080/"} id="Wz1Xwrmo6Bhx" outputId="849d109a-acfb-46f5-ffb6-28a6f636f909"
import librosa
import torch
import transformers
from transformers import Wav2Vec2ForCTC, Wav2Vec2Tokenizer, Wav2Vec2Processor

# Load tokenizer/processor and the acoustic model.
# The base model is pretrained and fine-tuned on 960 hours of Librispeech at
# 16 kHz, so every input below must also be sampled at 16 kHz.
tokenizer = Wav2Vec2Tokenizer.from_pretrained("facebook/wav2vec2-base-960h")
processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h")

# Transcribe each 3-minute chunk written by the previous cell.
collection_of_text = []
for i in range(4):
    # BUG FIX: the original loaded the hard-coded "1_audi_file.wav" on every
    # iteration, transcribing the first chunk four times. The chunks were
    # saved as 1_audi_file.wav .. 4_audi_file.wav, hence i + 1.
    speech, rate = librosa.load(f"{i + 1}_audi_file.wav", sr=16000)
    input_values = tokenizer(speech, return_tensors='pt').input_values
    # Logits are non-normalized per-frame character predictions.
    with torch.no_grad():
        logits = model(input_values).logits
    # Greedy CTC decoding: most likely token id per frame...
    predicted_ids = torch.argmax(logits, dim=-1)
    # ...then let the tokenizer collapse repeats/blanks into text.
    transcription = tokenizer.batch_decode(predicted_ids)[0]
    print(transcription)
    collection_of_text.append(transcription)

print(collection_of_text)

# Stitch the per-chunk transcripts back into one string.
final_complete_speech = "".join(collection_of_text)
print(final_complete_speech)
# + [markdown] id="K0FDeGiplDeG"
# # New Section
# + [markdown] id="5xP9u8vtlD6k"
# # New Section
# + id="uaKOivRgJYIq"
|
first_attempt.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %%
# %load_ext autoreload
# %autoreload 2
import ib_insync
print(ib_insync.__all__)
import helpers.hdbg as dbg
import helpers.hprint as pri
import core.explore as exp
import im.ib.data.extract.gateway.utils as ibutils
# %%
# Connect to the local Interactive Brokers gateway.
ib = ibutils.ib_connect(client_id=33, is_notebook=True)
# %%
# Look for ES.
# Fetch the full (non-simplified) contract details for one futures symbol,
# including expired contracts.
#symbol = "ES"
symbol = "NG"
#symbol = "CL"
contract = ib_insync.Future(symbol, includeExpired=True)
df = ibutils.get_contract_details(ib, contract, simplify_df=False)
# display() is provided by IPython at notebook runtime.
display(df)
# cds = ib.reqContractDetails(contract)
# contracts = [cd.contract for cd in cds]
# ib_insync.util.df(contracts)
# %%
df
# %%
#df.reset_index(drop=True)
import copy
# %%
def create_contracts(ib, contract, symbols):
    """Return one shallow copy of `contract` per symbol, with the `symbol`
    attribute overridden.

    `ib` is currently unused while contract qualification stays disabled.
    """
    def _clone(sym):
        dup = copy.copy(contract)
        dup.symbol = sym
        # ib.qualifyContracts(dup)
        return dup

    return [_clone(sym) for sym in symbols]
# Build one contract object per symbol using the helper defined above.
contract = ib_insync.Future(symbol, includeExpired=True)
symbols = "ES CL NG".split()
create_contracts(ib, contract, symbols)
# %%
# NOTE(review): `contract2` is never defined anywhere in this notebook, so
# this cell raises NameError as written.
contract2.symbol = "E"
# %%
import im.ib.data.extract.gateway.metadata as ibmetadata
# Write metadata for the current contract to a local CSV (reset=True discards
# any previous file contents), then read it back to verify.
file_name = "./metadata.csv"
ibmeta = ibmetadata.IbMetadata()
ibmeta.update(ib, [contract], file_name, reset=True)
# %%
ibmeta.load(file_name)
# %%
# %%
|
im/ib/data/extract/gateway/notebooks/AmpTask1086_Get_metadata_for_futures_universe.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import torch
import random
import torchvision
from torch.utils.data import Dataset, DataLoader
import torch.nn as nn
import torch.nn.functional as F
import argparse,os,time
import os
import time
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
num_gpus=4  # NOTE(review): defined but never used below
# Load the pre-augmented MNIST-style train/validation tables; each row holds
# a label ("digit") plus 784 flattened pixel columns ("0".."783").
data = pd.read_csv('train_augmented.csv', index_col=0)
val_data=pd.read_csv('val_augmented.csv', index_col=0)
y_data=data["digit"].values
y_data_val=val_data["digit"].values
y_data.shape
x_data=data.loc[:,"0":"783"].values
x_data_val=val_data.loc[:,"0":"783"].values
x_data_train=x_data
x_data_train.shape
x_data_test=x_data_val
x_data_test.shape
# Scale pixels to [0, 1] by dividing by each split's own maximum
# (presumably 255 for 8-bit images -- TODO confirm).
x_data_train=x_data_train/x_data_train.max()
x_data_test=x_data_test/x_data_test.max()
x_data_train.shape
class CustomDataset(Dataset):
def __init__(self,x_dat,y_dat):
x = x_dat
y = y_dat
self.len = x.shape[0]
y=y.astype('int')
x=x.astype('float32')
self.x_data = torch.tensor(x)
self.y_data = torch.tensor(y)
def __getitem__(self, index):
return self.x_data[index], self.y_data[index]
def __len__(self):
return self.len
batch_size=256
train_dataset = CustomDataset(x_data_train,y_data)
# drop_last=True keeps every batch exactly `batch_size` rows; the model's
# forward pass below reshapes with the global batch_size and relies on this.
# NOTE(review): num_workers=60 assumes a machine with many CPU cores.
train_loader = DataLoader(dataset=train_dataset,pin_memory=True,
                          batch_size=batch_size,
                          shuffle=True,
                          num_workers=60,drop_last=True)
test_dataset = CustomDataset(x_data_test,y_data_val)
test_loader = DataLoader(dataset=test_dataset,pin_memory=True,
                         batch_size=batch_size,
                         shuffle=True,
                         num_workers=60,drop_last=True)
# +
# https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
# +
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """3x3 convolution with padding (bias-free; always followed by a norm layer)."""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=dilation, groups=groups, bias=False, dilation=dilation)


def conv1x1(in_planes, out_planes, stride=1):
    """1x1 convolution (bias-free channel projection)."""
    return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
# -


class Bottleneck(nn.Module):
    """ResNet bottleneck residual block: 1x1 reduce -> 3x3 -> 1x1 expand (x4).

    The residual identity is added before the final ReLU; `downsample`, when
    given, projects the input so shapes match after striding/expansion.
    """

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        width = int(planes * (base_width / 64.)) * groups
        # Both self.conv2 and self.downsample layers downsample the input when stride != 1.
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Keep the (possibly projected) input for the residual connection.
        identity = x if self.downsample is None else self.downsample(x)

        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))

        out += identity
        return self.relu(out)


class ResNet(nn.Module):
    """ResNet adapted for 28x28 single-channel (MNIST-style) inputs.

    `forward` accepts flattened batches of shape (N, 784) and reshapes them
    to (N, 1, 28, 28) before the convolutional stem.
    """

    def __init__(self, block, layers, num_classes=10, zero_init_residual=False,
                 groups=1, width_per_group=64, replace_stride_with_dilation=None,
                 norm_layer=None):
        super(ResNet, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer

        self.inplanes = 64
        self.dilation = 1
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError("replace_stride_with_dilation should be None "
                             "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        # Stem takes 1 input channel (grayscale), unlike torchvision's 3.
        self.conv1 = nn.Conv2d(1, self.inplanes, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
                                       dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
                                       dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
                                       dilate=replace_stride_with_dilation[2])
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

        # Zero-initialize the last BN in each residual branch so every block
        # starts as an identity mapping; improves the model by 0.2~0.3%
        # according to https://arxiv.org/abs/1706.02677.
        # BUG FIX: the original also referenced BasicBlock, which is not
        # defined anywhere in this file, so zero_init_residual=True always
        # raised NameError while walking the modules.
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)

    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
        """Stack `blocks` residual blocks, adding a projection shortcut when needed."""
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                norm_layer(planes * block.expansion),
            )

        layers = [block(self.inplanes, planes, stride, downsample, self.groups,
                        self.base_width, previous_dilation, norm_layer)]
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups,
                                base_width=self.base_width, dilation=self.dilation,
                                norm_layer=norm_layer))

        return nn.Sequential(*layers)

    def _forward_impl(self, x):
        # BUG FIX: the original reshaped with the *global* `batch_size`
        # (view(batch_size, 1, 28, 28)), which breaks for any batch whose
        # size differs from that global (e.g. inference, or the final batch
        # without drop_last). Using -1 infers the batch dimension.
        x = x.view(-1, 1, 28, 28)
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.fc(x)
        return x

    def forward(self, x):
        return self._forward_impl(x)
# ResNet-152-style depth (layers [3, 8, 36, 3]) built from Bottleneck blocks.
model = ResNet(block=Bottleneck, layers=[3, 8, 36, 3])
model=model.cuda()  # requires a CUDA-capable GPU
criterion = nn.CrossEntropyLoss()
# Adam with L2 regularization via weight_decay.
optimizer = torch.optim.Adam(model.parameters(),weight_decay=0.001)
# +
# Training loop with per-epoch validation, patience-based early stopping and
# best-model checkpointing.
trn_loss_list = []
val_loss_list = []
total_epoch=200
model_char="res152"
model_name=""
patience=5
start_early_stop_check=0
saving_start_epoch=10  # do not checkpoint before this epoch
for epoch in range(total_epoch):
    trn_loss = 0.0
    for i, data in enumerate(train_loader, 0):
        inputs, labels = data
        if torch.cuda.is_available():
            inputs=inputs.cuda()
            labels=labels.cuda()
        # grad init
        optimizer.zero_grad()
        # forward propagation
        output= model(inputs)
        # calculate loss
        loss=criterion(output, labels)
        # back propagation
        loss.backward()
        # weight update
        optimizer.step()
        # trn_loss summary
        trn_loss += loss.item()
        # del (memory issue)
        del loss
        del output
    with torch.no_grad():
        val_loss = 0.0
        mis_match = 0
        for j, val in enumerate(test_loader):
            val_x, val_label = val
            if torch.cuda.is_available():
                val_x = val_x.cuda()
                val_label =val_label.cuda()
            val_output = model(val_x)
            v_loss = criterion(val_output, val_label)
            # NOTE(review): v_loss is accumulated without .item(), so
            # val_loss (and val_loss_list below) hold 0-dim tensors, not floats.
            val_loss += v_loss
            _, predicted=torch.max(val_output,1)
            # NOTE(review): despite its name, mis_match counts *correct*
            # predictions (== comparison), so val_acc below is the accuracy.
            mis_match+=np.count_nonzero(predicted.cpu().detach()==val_label.cpu().detach())
            del val_output
            del v_loss
            del predicted
    trn_loss_list.append(trn_loss/len(train_loader))
    val_loss_list.append(val_loss/len(test_loader))
    # drop_last=True guarantees every test batch has exactly batch_size rows,
    # so this denominator equals the number of evaluated samples.
    val_acc=mis_match/(len(test_loader)*batch_size)
    now = time.localtime()
    print ("%04d/%02d/%02d %02d:%02d:%02d" % (now.tm_year, now.tm_mon, now.tm_mday, now.tm_hour, now.tm_min, now.tm_sec))
    print("epoch: {}/{} | trn loss: {:.4f} | val loss: {:.4f} | val accuracy: {:.4f}% \n".format(
        epoch+1, total_epoch, trn_loss / len(train_loader), val_loss / len(test_loader), val_acc*100
    ))
    if epoch+1>2:
        # Arm early stopping once validation loss rises; otherwise track the
        # best (lowest) validation loss seen so far.
        if val_loss_list[-1]>val_loss_list[-2]:
            start_early_stop_check=1
        else:
            val_loss_min=val_loss_list[-1]
        if start_early_stop_check:
            # Stop when the last `patience` validation losses are strictly increasing.
            early_stop_temp=val_loss_list[-patience:]
            if all(early_stop_temp[i]<early_stop_temp[i+1] for i in range (len(early_stop_temp)-1)):
                print("Early stop!")
                break
        if epoch+1>saving_start_epoch:
            # Keep only the single best checkpoint, named after its val loss.
            # NOTE(review): if validation loss rose on every epoch so far,
            # val_loss_min is still unbound here and this raises NameError.
            if val_loss_list[-1]<val_loss_min:
                if os.path.isfile(model_name):
                    os.remove(model_name)
                val_loss_min=val_loss_list[-1]
                model_name="RESNET_"+model_char+"_{:.3f}".format(val_loss_min)
                torch.save(model, model_name)
                print("Model replaced and saved as ",model_name)
# -
# Always save the final state as well, regardless of validation loss.
fin_name="RESNET_fin"
torch.save(model, fin_name)
print("Fin model saved", fin_name)
|
4-2.RESNET.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Matplotlib
#
# Matplotlib ist ein Python-Modul zur Erzeugung grafischer Darstellungen. Es nutzt die Array-Struktur von NumPy und eignet sich daher sehr gut für die Visualisierung von sehr großen Datenmengen. In der Matplotlib-Bibliothek sind viele veschiedene 2D- und 3D-Grafiken enthalten
#
# Mehr zu Matplotlib auf der offiziellen Website: http://matplotlib.org/
# ### Download von Matplotlib
# nicht starten, da Matplotlib bereits installiert wurde und die notwendigen Rechte fehlen
# !pip3 install matplotlib
# ## Anmerkungen
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# Plot one period of a sine curve and annotate its maximum, minimum and the
# zero crossing ("Nulldurchgang").
t = np.arange(0.0, 6.4, 0.1)
y = 5*np.sin(t)
plt.plot(t,y,"ro-", linewidth=2)
plt.title("Sinus-Funktion")
plt.text(1.5, 5.2, "Maximum")
plt.text(4.5, -5.5, "Minimum")
plt.annotate("Nulldurchgang", xy=(3.2, 0), xytext=(5, 5), arrowprops={"facecolor":"b"})
plt.show()
# !pip install sympy
from sympy import *
# Solve quadratic equations symbolically, including ones with complex
# coefficients (I is sympy's imaginary unit).
x = symbols("x")
solve(x**2-4*x+13)
solve(x**2+(1*I)*x+(6+18*I), x)
# Restrict x and y to the reals and solve for the real/imaginary parts of z.
x, y = symbols("x y", real=True)
z = x + y * I
solve(z**2+(1+I)*z+(6+18*I), (x, y))
# ## Weitere Diagrammtypen
# ### Balkendiagramme
#
# x-Achsenbezeichnung als Integers setzen.
# Bar chart of quarterly revenue ("Umsatz") with integer x ticks.
quartal = np.arange(1, 5, 1) # [1, 2, 3, 4]
umsatz = [120000,154000,133000,198000]
plt.bar(quartal, umsatz, width=0.5, align="center") # by default the x axis would be treated as float
plt.xlabel('Quartal des Jahres 2020')
plt.ylabel('Umsatz')
plt.title('Umsatzentwicklung in 2020')
plt.grid(True)
plt.axis([0, 5, 0, 200000])
plt.show()
# ## Trennung der Tausender
# ### Variante A
# Lediglich innerhalb der `.py`-Datei
value = 10000.12
f'{value:,}'  # f-string grouping always uses commas, independent of locale
import locale
# American-style thousands separation.
# NOTE(review): locale names are platform dependent; 'en_US'/'de_DE' may need
# a '.UTF-8' suffix or may be unavailable on some systems.
locale.setlocale(locale.LC_ALL, 'en_US')
locale.format_string("%d", value, grouping=True)
# German-style thousands separation.
value_1 = 10000.3
locale.setlocale(locale.LC_ALL, 'de_DE')
locale.format_string("%f", value_1, grouping=True)
value_2 = 1234567
print(value_2)
|
Lektion_12/fragen_lektion_12.ipynb
|
# ##### Copyright 2020 The OR-Tools Authors.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# # bus_schedule
# <table align="left">
# <td>
# <a href="https://colab.research.google.com/github/google/or-tools/blob/master/examples/notebook/contrib/bus_schedule.ipynb"><img src="https://raw.githubusercontent.com/google/or-tools/master/tools/colab_32px.png"/>Run in Google Colab</a>
# </td>
# <td>
# <a href="https://github.com/google/or-tools/blob/master/examples/contrib/bus_schedule.py"><img src="https://raw.githubusercontent.com/google/or-tools/master/tools/github_32px.png"/>View source on GitHub</a>
# </td>
# </table>
# First, you must install [ortools](https://pypi.org/project/ortools/) package in this colab.
# !pip install ortools
# +
# Copyright 2010 <NAME> <EMAIL>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Bus scheduling in Google CP Solver.

Problem from Taha "Introduction to Operations Research", page 58.
This is a slightly more general model than Taha's.

Compare with the following models:
* MiniZinc: http://www.hakank.org/minizinc/bus_scheduling.mzn
* Comet : http://www.hakank.org/comet/bus_schedule.co
* ECLiPSe : http://www.hakank.org/eclipse/bus_schedule.ecl
* Gecode : http://www.hakank.org/gecode/bus_schedule.cpp
* Tailor/Essence' : http://www.hakank.org/tailor/bus_schedule.eprime
* SICStus: http://hakank.org/sicstus/bus_schedule.pl

This model was created by <NAME> (<EMAIL>)
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
"""
from __future__ import print_function
import sys

from ortools.constraint_solver import pywrapcp


# BUG FIX: the notebook conversion flattened the original script's main()
# into module-level code, which left `num_buses_check` undefined and put a
# bare `return` at module level (a SyntaxError). Restoring the function
# wrapper fixes both.
def main(num_buses_check=0):
  """Solve the bus-scheduling problem.

  When `num_buses_check` is 0, minimize the total number of buses and return
  the optimum; otherwise enumerate all schedules using exactly
  `num_buses_check` buses.
  """
  # Create the solver.
  solver = pywrapcp.Solver("Bus scheduling")

  # data: demand for each time slot around the clock
  time_slots = 6
  demands = [8, 10, 7, 12, 4, 4]
  max_num = sum(demands)

  # declare variables: buses starting in each time slot
  x = [solver.IntVar(0, max_num, "x%i" % i) for i in range(time_slots)]
  num_buses = solver.IntVar(0, max_num, "num_buses")

  #
  # constraints
  #
  solver.Add(num_buses == solver.Sum(x))

  # Meet the demands for this and the next time slot
  for i in range(time_slots - 1):
    solver.Add(x[i] + x[i + 1] >= demands[i])

  # The demand "around the clock" (last slot wraps to the first)
  solver.Add(x[time_slots - 1] + x[0] == demands[time_slots - 1])

  if num_buses_check > 0:
    solver.Add(num_buses == num_buses_check)

  #
  # solution and search
  #
  solution = solver.Assignment()
  solution.Add(x)
  solution.Add(num_buses)

  collector = solver.AllSolutionCollector(solution)
  cargs = [collector]

  # objective: minimize the bus count unless a fixed count is being checked
  if num_buses_check == 0:
    objective = solver.Minimize(num_buses, 1)
    cargs.extend([objective])

  solver.Solve(
      solver.Phase(x, solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_MIN_VALUE),
      cargs)
  num_solutions = collector.SolutionCount()
  num_buses_check_value = 0
  for s in range(num_solutions):
    print("x:", [collector.Value(s, x[i]) for i in range(len(x))], end=" ")
    num_buses_check_value = collector.Value(s, num_buses)
    print(" num_buses:", num_buses_check_value)
    print()

  print("num_solutions:", num_solutions)
  print("failures:", solver.Failures())
  print("branches:", solver.Branches())
  print("WallTime:", solver.WallTime())
  print()

  if num_buses_check == 0:
    return num_buses_check_value


# First find the optimal bus count, then enumerate all optimal schedules.
num_buses_check = main()
main(num_buses_check)
|
examples/notebook/contrib/bus_schedule.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
# Load the advertising dataset (TV/Radio/Newspaper spend vs Sales).
ad = pd.read_csv('advertising.csv')
ad.head()
# Standardize every column to zero mean / unit variance.
ad = (ad - ad.mean())/ad.std()
ad.head()
# Putting feature variable to X
X = ad[['TV','Radio','Newspaper']]
# Putting response variable to y
y = ad['Sales']
# Add the intercept column and fix the column order.
X['intercept'] = 1
X = X.reindex(['intercept','TV','Radio','Newspaper'], axis=1)
X.head()
import numpy as np
X = np.array(X)
y = np.array(y)
# theta needs one entry per feature column (intercept + 3 features).
# NOTE(review): gradient_descent_multi below ignores this value and starts
# from a zero vector of the right shape.
theta = np.matrix(np.array([0,0,0,0]))
alpha = 0.01
iterations = 1000
# +
import numpy as np
def compute_cost(X, y, theta):
    """Half mean squared error: J(theta) = ||X @ theta - y||^2 / (2m)."""
    residuals = np.matmul(X, theta) - y
    return np.sum(residuals ** 2) / (2 * len(y))
# -


def gradient_descent_multi(X, y, theta, alpha, iterations):
    """Run batch gradient descent; return a DataFrame of (theta, cost) per step.

    Note: the incoming `theta` argument is ignored (kept for call
    compatibility); descent always starts from the zero vector.
    """
    theta = np.zeros(X.shape[1])
    m = len(X)
    history = pd.DataFrame(columns=['Bets', 'cost'])
    for step in range(iterations):
        residuals = np.matmul(X, theta) - y
        gradient = (1 / m) * np.matmul(X.T, residuals)
        theta = theta - alpha * gradient
        history.loc[step] = [theta, compute_cost(X, y, theta)]
    return history
gradient_descent_multi(X, y, theta, alpha, iterations)
# Show the final (theta, cost) row after the last iteration.
print(gradient_descent_multi(X, y, theta, alpha, iterations).values[999])
# Plot cost vs iteration to visually confirm convergence.
gradient_descent_multi(X, y, theta, alpha, iterations).reset_index().plot.line(x='index', y=['cost'])
# +
# import LinearRegression from sklearn
from sklearn.linear_model import LinearRegression
# Representing LinearRegression as lr(Creating LinearRegression Object)
lr = LinearRegression()
#You don't need to specify an object to save the result because 'lr' will take the results of the fitted model.
lr.fit(X, y)
# -
# Compare sklearn's closed-form fit against the gradient-descent result.
print(lr.intercept_)
print(lr.coef_)
|
7. Machine Learning/1. Gradient Descent/Practice/.ipynb_checkpoints/Solution-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# Assume we use a neural network in a regression problem, which produces a set of predictions $\hat y$. Let's say that instead of the conventional MSE, we use correlation as the loss to optimize. We further assume that our signal (our $y$) is a time-series, and that it's long, so that it will be split up into several batches. Unfortunately, correlation does not work well in this situation because it's not additive over batches, unlike the MSE.
#
# Let's derive a metric that has some of the same properties as correlation, but without the problems. Our first step is to reduce our sights; rather than emulating the full set of properties of correlation, let's try to match those of a related similarity function, cosine similarity. Desirable properties of this similarity function $L(y, \hat y)$ are:
#
# 1. Invariant to scaling, i.e. for $\beta$, $\gamma$ scalar, $L(y, \hat y) = L(\beta y, \hat y) = L(y, \gamma \hat y)$
# 2. Max of 1. $L(y, y) = 1$
# 3. Equal to 0 for orthogonal vectors. $L(y, \hat y) = 0$ if $<y, \hat y> = 0$.
# 4. (not part of cosine similarity, but important for us): additive over examples.
#
# Consider the metric:
#
# $$L(y, \hat y) = 1 - \min_\alpha ||y - \alpha \hat y||^2_2 \,/\, ||y||_2^2$$
#
# Let's show each property holds:
#
# 1. Invariance to scaling with respect to the first argument is clear from the division by the L2 norm of y. The metric is also invariant to scaling in the second argument, via the free $\alpha$.
# 2. By substitution.
# 3. $||y - \alpha \hat y||^2_2$ expands to $\sum_i y_i^2 - 2 \alpha y_i \hat y_i + \alpha^2 \hat y_i^2$; the second term is zero by orthogonality, therefore the minimum lies at $\alpha = 0$, and L is 0.
# 4. We can rewrite the equation as:
#
# $$L(y, \hat y) = \max_\alpha \left( 1 - \frac{1}{||y||_2^2} \sum_i (y_i - \alpha \hat y_i)^2 \right)$$
#
# Thus, assuming we have an estimate of the optimal $\alpha$, via, for example, stochastic gradient descent, and that we've cached the value of $||y||_2$, we can estimate the contribution of a single example to the loss using only local information.
|
Block-wise cosine distance.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # The final project
#
# There is no one way to do all of this, but if you feel as though you can go further, you probably can, and you should.
#
# The project will have several deliverable stages to keep us on track, according to the [schedule](../about/schedule). _Changes to that master schedule supercede any dates below._
#
# 1. 15% **Project proposal**. Discussed [below](#initial-proposals)
# 2. 5% **Project proposal (final revision)**. Discussed [below](#final-proposals).
# 3. 20% **Project status report**. Discussed [below](#project-status-report).
# 4. 45% **Written component - report and presentation files**. Discussed [below](#report-and-presentation-files).
# 5. 15% **Presentations**. Discussed [below](#report-and-presentation-files).
#
# ## A note on ambition
#
# Ambition will be considered when grading the written component and results.
#
# The goal of the project is not to simply take a pre-cleaned dataset and run a basic analysis. You should collect data, from one or many sources, and combine them into a usable, clean dataset. Consider the depth of your analysis as you gather and clean this data.
#
# As a silly example: Are you just gathering data about the height and weight of a sample of individuals and looking at the correlation of these two variables? That level is not sufficient for the end goal of this project. You would be better off using this data to predict the gender of an individual, given height and weight entered by a user, and using the data to make these predictions.
#
# Always ask yourself if you can take your analysis one step further. If you answered yes, go for it! Use your data to extract information, gather this information, and draw conclusions.
#
# ## Initial proposals
#
# - General idea: The question are you interested in, the data you need to acquire, the variables you'll use, and the plan for how you'll analyze it (what methods you'll try and why you think they apply to your problem), considerations about how data might impact that.
# - Treat this document as if it is public facing, and a proposal for which you would like research funding. That is, the proposal document should be polished (both in visual formatting and editing) for external audiences.
# - Graded on: question viability, creativity, finance application, plan sketch, writing quality.
# - Instructions for the proposals are [here](project_prop_template).
#
# ## Final proposals
#
# - Graded on: The improvement from the prior version, how feedback was incorporated, and current status
#
# ## Project status report
#
# - General idea: You've now acquired the key data and finished most of the data cleaning.
# - Purpose: Needs to show progress and that you're on track!
# - _Ideal deliverable_: A notebook file with nice data sections describing data source(s) and how you got/cleaned the data. This section could go straight into your final report if it's polished enough.
# - **Actual deliverable** A notebook file that
# - describes (short bullet points) your data sources,
# - outlines (numbered list, broad steps, not minutia) how you acquired the data (for many groups, the downloading is in a separate file), got the data into python, and if you found any issues with the data you cleaned up (again, possibly a different file)
# - includes a bullet point list of your main observations from your EDA
# - shows your exploratory data analysis (EDA) (tables and figures and whatnot, does not need to be pretty or formatted)
#
# ## Report and Presentation Files
#
# On the due date (listed in the schedule), your repo should be cleaned and polished for publication. That means it should be cleaned of excess and random files, and that folders are sensible (data, temporary, code), the readme helps me/the TA/future visitors explore your repo easily. Your folder structure is up to you and will respond to the nature of your particular project, but I should be able to easily find
# - Your final report
# - Your presentation file
# - The code used to scrape and download data (and if you click-and-download anything, a link to the source) can be separate files, and the code used to load, clean, merge, and explore the data.
#
# ```{dropdown} **What should my final report jupyter file look like?**
#
# - Your file should look like a nice report as if built in word, except that it also contains code to produce tables and figures. _(Some students are in the habit of submitting jupyter files that have no visibile output. Don't do that here! We should be able to see the results in the file!)_
# - The code itself should be minimal and clean.
#
# The structure of any two reports can vary as is dictated by their purpose, goals, and steps. But, it should contain:
# - Start with a summary (goal and main findings in one paragraph)
# - An introduction section that motivates why your project is interesting and outlines your approach
# - A data section that describes your data sources, the steps you used to acquire and clean the data (a good idea: refer to your code file that does all this), and some of the key insights and observations from your EDA
# - An analysis section that explains and shows your steps leading to the main results and tables.
# - Take care to explain why figures and tables lead you to your conclusions.
# - Make sure to also carefully describe the methods you are using and why they are appropriate for the question to be answered.
# - Summarize and interpret your results (including visualization).
# - Provide an evaluation of your approach and discuss any limitations of the methods you used.
# - Describe any surprising discoveries that you made and future work.
# - A short 1-2 paragraph conclusion.
#
# Obvious caveats for grading: Form matters, check grammar, and cite work you build on. **Plagiarism is not acceptable.**
# ```
#
# ```{dropdown} **What should the presentation file look like?**
#
# You can present a powerpoint, a jupyter file, or [jupyter slides](https://medium.com/@mjspeck/presenting-code-using-jupyter-notebook-slides-a8a3c3b59d67) (nice!). I'll leave it up to your group to present in the manner you consider most effective for your project.
#
# Each group will have 15 minutes to present your project, so build your presentation file accordingly. Try to avoid "speed talking" to make the time work. Less is more, usually. Sadly, 15 minutes won't be enough to show everything you did, so focus on big picture details rather than on the syntax of line 89 of your code.
#
# A presentation's structure is tailored even more to its material than a report is, so what your slides show is up to you. Be creative, and have fun. Try to convey to myself and your peers why the question is interesting, describe plainly your approach and why that approach makes sense, what your main analytical findings are, and what you concluded from the exercise. Don't be afraid to "market yourselves": If you did something impressive (tons and tons of data, or an impressive scraper, or a great model), work that in (but tastefully).
#
# Obvious caveats for grading: Form matters, check grammar, and cite work you build on. **Plagiarism is not acceptable.**
#
# ```
#
# ```{dropdown} **Presentations**
#
# I'll discuss scheduling later.
#
# - You have 15 minutes
# - Everyone should contribute
# - There will be Q&A (from myself)
# - Teach your classmates and myself something! Strive for clarity and making something about it memorable.
#
# ```
|
content/assignments/project.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Static Checking via Metaclasses
# + [markdown] slideshow={"slide_type": "slide"}
# Suppose you are maintaining a domain-specific machine learning library. Users of the library's API expect that every machine learning algorithm offered by the API will have the same interface (*i.e.*, the same methods with the same signatures) regardless of its underlying implementation. You would like to allow a community of contributors to define new algorithms that can be added to the library, but you would like to reduce your own effort and that of contributors when it comes to validating that a new algorithm conforms to the API.
# + [markdown] slideshow={"slide_type": "slide"}
# Python [metaclasses](https://docs.python.org/3/reference/datamodel.html#metaclasses) are the underlying, higher-order constructs that instantiate class definitions. Understanding what metaclasses are and how they can be used gives you a significant amount of control over what happens when a new class is introduced by users. This in turn allows you to constrain users when necessary *and* to provide assistance to users that can save them time and effort.
# + [markdown] slideshow={"slide_type": "slide"}
# ## How Classes are Made
# + [markdown] slideshow={"slide_type": "slide"}
# In Python, functions, classes, objects, and values are all on an equal footing. One consequence of this is that it is possible to pass any of these entities as arguments to functions and to return any of these entities as the result of a function (this fact was discussed in [another article](https://python.supply/higher-order-functions-and-decorators) that covered Python decorators). But this also means that much of the syntax you normally use is actually just syntactic sugar for function calls.
# + [markdown] slideshow={"slide_type": "slide"}
# What happens when the Python interpreter executes a class definition such as the one below?
# + slideshow={"slide_type": "skip"}
class Document():
    """Minimal example class; instances carry an ``is_document`` flag."""
    def __init__(self):
        # Mark every instance so later cells can verify that this
        # constructor actually ran.
        self.is_document = True
# + [markdown] slideshow={"slide_type": "slide"}
# The *class* (*not* an instance or object of that class, but *the class itself*) is created and assigned to a variable that is in the scope. In the example above, that variable is `Document`.
# + slideshow={"slide_type": "skip"}
# Evaluating the bare name shows that the class object itself (not an
# instance) is what the ``class`` statement bound to ``Document``.
Document
# + [markdown] slideshow={"slide_type": "slide"}
# Python's built-in [`type`](https://docs.python.org/3/library/functions.html#type) function actually serves a number of purposes beyond determining the type of a value. Given a few additional parameters, the `type` function can be used to *define a new class*. Executing the statement in the example below is equivalent to executing the class definition for `Document` above.
# + slideshow={"slide_type": "skip"}
def __init__(self):
    # Same constructor body as the earlier ``class Document`` definition.
    self.is_document = True
# type(name, bases, namespace) builds a class directly; this statement is
# equivalent to the ``class Document`` definition shown above.
Document = type('Document', (), {'__init__': __init__})
# + [markdown] slideshow={"slide_type": "slide"}
# Now that `Document` is a class, it is possible to create objects of this class.
# -
# Instantiate the dynamically created class; the attribute set by
# ``__init__`` proves the class behaves like the statically defined one.
d = Document()
d.is_document
# + [markdown] slideshow={"slide_type": "slide"}
# ## How Metaclasses are Made
# + [markdown] slideshow={"slide_type": "slide"}
# In a manner similar to that of many programming languages that support the [object-oriented programming](https://en.wikipedia.org/wiki/Object-oriented_programming) paradigm, Python allows programmers to define *derived* classes that [inherit](https://docs.python.org/3/tutorial/classes.html#inheritance) the attributes and methods of a base class. The example below illustrates this by defining a class `Passport` that is derived from the `Document` class. Notice that the base class constructor `Document` is specified in the class definition.
# -
class Passport(Document):
    """Derived class; inherits everything (including ``__init__``) from Document."""
    pass
# + [markdown] slideshow={"slide_type": "slide"}
# The `Passport` class inherits the attributes of the `Document` class. The example below illustrates that it inherits the `__init__` method of the `Document` class.
# -
# The inherited constructor sets ``is_document`` on Passport instances too.
p = Passport()
p.is_document
# + [markdown] slideshow={"slide_type": "slide"}
# The example in which `Document` was defined using the built-in `type` function suggests that `type` can be viewed (at least as a loose analogy) as a means for creating classes. In a way, it behaves like a constructor for the "class of all possible classes". Thus, if `type` is a kind of constructor for a class, it should be possible to use it in the same context as any other class constructor. But what should this mean? What is `MetaClass` in the example below?
# -
class MetaClass(type):
    """A metaclass: deriving from ``type`` inherits its class-creating behavior."""
    pass
# + [markdown] slideshow={"slide_type": "slide"}
# Following the analogy to its logical conclusion, this must mean that `MetaClass` has inherited the capabilities of `type`. And, indeed, it has. In the example below, `MetaClass` is used to define a new class in the same way that `type` was used before.
# -
# MetaClass can be called exactly like ``type`` to create a new class;
# the resulting class works the same as before.
Document = MetaClass('Document', (), {'__init__': __init__})
d = Document()
d.is_document
# + [markdown] slideshow={"slide_type": "slide"}
# The ability to use a metaclass in place of `type` as in the above example is also supported by the more common `class` syntactic construct.
# -
class Document(metaclass=MetaClass):
    """Same Document class, but created through MetaClass via ``class`` syntax."""
    def __init__(self):
        self.is_document = True
# + [markdown] slideshow={"slide_type": "slide"}
# ## Using Metaclasses to Enforce an API
# + [markdown] slideshow={"slide_type": "slide"}
# Returning to the motivating example from the first paragraph, suppose you introduce a metaclass called `MetaAlgorithm` for machine learning algorithms that is derived from `type`. This metaclass definition can override the method [`__new__`](https://docs.python.org/3/reference/datamodel.html?highlight=__new__#object.__new__) that is normally invoked when a new class is defined using `type` (or using the equivalent `class` syntactic construct). This alternate definition of `__new__` performs some additional checks before the class is actually created. In this use case, that additional work involves validating that the class being defined (corresponding to a new machine learning algorithm) conforms to your API.
# +
from types import FunctionType
class MetaAlgorithm(type):
    """Metaclass that validates contributed algorithm classes at definition time.

    Any class created through this metaclass (other than the ``Algorithm``
    base class itself) must supply a ``contributor`` string and ``train``
    and ``classify`` functions, or a RuntimeError is raised before the
    class can even be defined.
    """
    def __new__(cls, clsname, bases, attrs):
        # The base class does not need to conform to the API.
        # See the paragraph below for an explanation of this check.
        if clsname != 'Algorithm':
            # Each required attribute: (name, expected type, error message).
            # Checked in order, so the first missing one is reported.
            required = (
                ('contributor', str, 'missing contributor'),
                ('train', FunctionType, 'missing training method'),
                ('classify', FunctionType, 'missing classification method'),
            )
            for attr_name, expected_type, message in required:
                # attrs.get() returns None for absent names, and
                # isinstance(None, ...) is False, so a single test covers
                # both "missing" and "wrong type".
                if not isinstance(attrs.get(attr_name), expected_type):
                    raise RuntimeError(message)
        return super(MetaAlgorithm, cls).__new__(cls, clsname, bases, attrs)
# + [markdown] slideshow={"slide_type": "slide"}
# Now that there is a way to define new classes, there are two ways to proceed. One approach is to require that all algorithm classes that contributors implement must include the `metaclass=MetaAlgorithm` parameter in the class definition. However, this is easy for a contributor to forget and also may require that contributors have a solid understanding of metaclasses. An alternative is to create a base class from which all contributed algorithm classes must be derived.
# -
class Algorithm(metaclass=MetaAlgorithm):
    """Base class for contributed algorithms; exempt from the metaclass API checks."""
    pass
# + [markdown] slideshow={"slide_type": "slide"}
# Using this approach, it is sufficient to export the `Algorithm` base class and to inform all contributors that their classes must be derived from this base class. The example below illustrates how a contributor might do so for a very basic algorithm.
# -
class Guess(Algorithm):
    """Toy conforming algorithm that classifies items uniformly at random."""
    contributor = "Author"
    # NOTE(review): these methods omit ``self``; the metaclass only checks
    # that they are functions, and the tutorial never calls them on an
    # instance, so this passes validation as written.
    def train(items, labels):
        pass
    def classify(item):
        import random
        return random.choice([True, False])
# + [markdown] slideshow={"slide_type": "slide"}
# As the example below illustrates, an attempt by a user to define a class that does not conform to the API results in an error.
# -
# Defining a non-conforming class fails at class-definition time:
# the metaclass raises before the class object is ever created.
try:
    class Guess(Algorithm):
        def classify(item):
            return False
except RuntimeError as error:
    print("RuntimeError:", str(error))
# + [markdown] slideshow={"slide_type": "slide"}
# To emphasize: the error above occurs when the Python interpreter tries to execute the *definition* of the class, and *not* when an object of the class is created. It would be impossible to reach the point at which the interpreter attempts to create an object of this class because the class itself can never be defined.
# + [markdown] slideshow={"slide_type": "slide"}
# Despite the fact that Python does not technically support static checking beyond ensuring that the syntax of a module is correct, it is arguably justifiable to say that what `MetaAlgorithm` does is a form of static checking. In many routine scenarios, the checks would be performed at the time that module is imported and before any other code has had a chance to run.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Further Reading
# + [markdown] slideshow={"slide_type": "slide"}
# This article reviewed how user-defined classes are defined in Python and how the mechanism for creating classes can itself be customized. The built-in [types](https://docs.python.org/3/library/types.html#module-types) library provides a number of additional methods that can assist in the dynamic creation of new types and classes. The motivating example in this article illustrated how these capabilities can be used to perform a form of [static analysis](https://en.wikipedia.org/wiki/Static_program_analysis) of user-defined classes.
# + [markdown] slideshow={"slide_type": "slide"}
# It is worth noting that the approach presented in this article is compatible with the methods for checking type annotations and unit testing functions presented in the article on [type annotations](https://python.supply/advantages-of-type-annotations/). For example, it would be straightforward to require that the training and classification method definitions include annotations specifying the exact types of the data that they can handle. It would even be possible to test the methods by generating test cases having the appropriate types. Another observation is that the motivating use case in this article can also be solved by using techniques presented in other articles, such as by applying [decorators](https://python.supply/higher-order-functions-and-decorators/) to class definitions or by performing a [static analysis](https://python.supply/analyzing-and-transforming-abstract-syntax/) of the class definition itself.
|
static-checking-via-metaclasses.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Extract barrier island metrics along transects
#
# Author: <NAME>, <EMAIL>
#
# ***
#
# Extract barrier island metrics along transects for Barrier Island Geomorphology Bayesian Network. See the project [README](https://github.com/esturdivant-usgs/BI-geomorph-extraction/blob/master/README.md) and the Methods Report (Zeigler et al., in review).
#
#
# ## Pre-requisites:
# - All the input layers (transects, shoreline, etc.) must be ready. This is performed with the notebook file prepper.ipynb.
# - The files servars.py and configmap.py may need to be updated for the current dataset.
#
# ## Notes:
# - This notebook includes interactive quality checking, which requires the user's attention. For thorough QC'ing, we recommend displaying the layers in ArcGIS, especially to confirm the integrity of values for variables such as distance to inlet (__Dist2Inlet__) and widths of the landmass (__WidthPart__, etc.).
#
#
# ***
#
# ## Import modules
import os
import sys
import pandas as pd
import numpy as np
import io
import arcpy
import matplotlib.pyplot as plt
import matplotlib
matplotlib.style.use('ggplot')
import core.functions_warcpy as fwa
import core.functions as fun
# ### Initialize variables
#
# This cell prompts you for the site, year, and project directory path. `setvars.py` retrieves the pre-determined values for that site in that year from `configmap.py`. The project directory will be used to set up your workspace. It's hidden for security – sorry! I recommend that you type the path somewhere and paste it in.
from core.setvars import *
# Change the filename variables to match your local files. They should be in an Esri file geodatabase named site+year.gdb in your project directory, which you input above and is the value of the variable `home`.
# +
# Input dataset names within the project file geodatabase; ``home`` (the
# geodatabase path) is defined by the ``from core.setvars import *`` above.
# Extended transects: NASC transects extended and sorted, ready to be the base geometry for processing
extendedTrans = os.path.join(home, 'fiis_trans_wLRR')
# Tidied transects: Extended transects without overlapping transects
extTrans_tidy = os.path.join(home, 'tidyTrans')
# Geomorphology points: positions of indicated geomorphic features
ShorelinePts = os.path.join(home, 'SLpts') # shoreline
dlPts = os.path.join(home, 'DLpts') # dune toe
dhPts = os.path.join(home, 'DHpts') # dune crest
# Inlet lines: polyline feature classes delimiting inlet position. Must intersect the full island shoreline
inletLines = os.path.join(home, 'inletLines')
# Full island shoreline: polygon that outlines the island shoreline, MHW on oceanside and MTL on bayside
barrierBoundary = os.path.join(home, 'bndpoly_2sl')
# Elevation grid: DEM of island elevation at either 5 m or 1 m resolution
elevGrid = os.path.join(home, 'DEM_5m')
# ---
# OPTIONAL - comment out each one that is not available
# ---
#
# morphdata_prefix = '14CNT01'
# Study area boundary; manually digitize if the barrier island study area does not end at an inlet.
SA_bounds = os.path.join(home, 'SA_bounds')
# Armoring lines: digitize lines of shorefront armoring to be used if dune toe points are not available.
armorLines = os.path.join(home, 'armorLines')
# Extended transects with Construction, Development, and Nourishment coding
tr_w_anthro = os.path.join(home, 'fiis_trans_wAnthro')
# Piping Plover Habitat BN raster layers
SubType = os.path.join(home, 'FI12_SubType') # substrate type
VegType = os.path.join(home, 'FI12_VegType') # vegetation type
VegDens = os.path.join(home, 'FI12_VegDen') # vegetation density
GeoSet = os.path.join(home, 'FI12_GeoSet') # geomorphic setting
# Derivatives of inputs: They will be generated during process if they are not found.
shoreline = os.path.join(home, 'ShoreBetweenInlets') # oceanside shoreline between inlets; generated from shoreline polygon, inlet lines, and SA bounds
slopeGrid = os.path.join(home, 'slope_5m') # Slope at 5 m resolution; generated from DEM
# -
# ## Transect-averaged values
# We work with the shapefile/feature class as a pandas DataFrame as much as possible to speed processing and minimize reliance on the ArcGIS GUI display.
#
# 1. Create a pandas dataframe from the transects feature class. In the process, we remove some of the unnecessary fields. The resulting dataframe is indexed by __sort_ID__ with columns corresponding to the attribute fields in the transects feature class.
# 2. Add __DD_ID__.
# 3. Join the values from the transect file that includes the three anthropologic development fields, __Construction__, __Development__, and __Nourishment__.
# +
# Copy feature class to dataframe.
trans_df = fwa.FCtoDF(extendedTrans, id_fld=tID_fld, extra_fields=extra_fields)
# DD_ID: globally unique transect ID = sort_ID offset by the site's initial value.
trans_df['DD_ID'] = trans_df[tID_fld] + sitevals['id_init_val']
# Get anthro fields and join to DF
# At notebook top level locals() is the module namespace, so this tests
# whether the optional tr_w_anthro variable was defined in the inputs cell.
if 'tr_w_anthro' in locals():
    trdf_anthro = fwa.FCtoDF(tr_w_anthro, id_fld=tID_fld, dffields=['Development', 'Nourishment','Construction'])
    trans_df = fun.join_columns(trans_df, trdf_anthro)
# Save intermediate result so later cells can reload without re-reading the FC.
trans_df.to_pickle(os.path.join(scratch_dir, 'trans_df.pkl'))
# Display
print("\nHeader of transects dataframe (rows 1-5 out of {}): ".format(len(trans_df)))
trans_df.head()
# -
# ### Get XY and Z/slope from SL, DH, DL points within 25 m of transects
# Add to each transect row the positions of the nearest pre-created beach geomorphic features (shoreline, dune toe, and dune crest).
#
# #### If needed, convert morphology points stored locally to feature classes for use.
# After which, view the new feature classes in a GIS. Isolate the points to the region of interest. Quality check them. Then copy them for use with this code, which will require setting the filenames to match those included here or changing the values included here to match the final filenames.
# Optional: convert locally stored morphology CSV to point feature classes
# (dune toe, dune crest, shoreline). Only runs if morphdata_prefix was set.
if "morphdata_prefix" in locals():
    csvpath = os.path.join(proj_dir, 'Input_Data', '{}_morphology'.format(morphdata_prefix),
                           '{}_morphology.csv'.format(morphdata_prefix))
    # Remaps the CSV fill value (999) to the feature-class fill (-99999);
    # input coordinates are WGS84 (EPSG:4326).
    dt_fc, dc_fc, sl_fc = fwa.MorphologyCSV_to_FCsByFeature(csvpath, state, proj_code,
                                                            csv_fill = 999, fc_fill = -99999, csv_epsg=4326)
    print("OUTPUT: morphology point feature classes in the scratch gdb. We recommend QC before proceeding.")
# #### Shoreline
#
# The MHW shoreline easting and northing (__SL_x__, __SL_y__) are the coordinates of the intersection of the oceanside shoreline with the transect. Each transect is assigned the foreshore slope (__Bslope__) from the nearest shoreline point within 25 m. These values are populated for each transect as follows:
# 1. get __SL_x__ and __SL_y__ at the point where the transect crosses the oceanside shoreline;
# 2. find the closest shoreline point to the intersection point (must be within 25 m) and copy the slope value from the point to the transect in the field __Bslope__.
# +
# Ensure the inlet lines feature class exists; if not, create an empty one
# so the user can digitize inlets manually before continuing.
if not arcpy.Exists(inletLines):
    # manually create lines that correspond to end of land and cross the MHW line (refer to shoreline polygon)
    arcpy.CreateFeatureclass_management(home, os.path.basename(inletLines), 'POLYLINE', spatial_reference=utmSR)
    print("OUTPUT: {}. Interrupt execution to manually create lines at each inlet.".format(inletLines))
# Build the oceanside shoreline between inlets if not already generated.
if not arcpy.Exists(shoreline):
    # Idiomatic membership test (was: ``not 'SA_bounds' in locals()``).
    if 'SA_bounds' not in locals():
        SA_bounds = ''  # empty string signals "no study-area bounds" to the helper
    shoreline = fwa.CreateShoreBetweenInlets(barrierBoundary, inletLines, shoreline, ShorelinePts, proj_code, SA_bounds)
# +
# Get the XY position where transect crosses the oceanside shoreline
# and the foreshore slope from the nearest shoreline point within the
# proximity tolerance.
sl2trans_df = fwa.add_shorelinePts2Trans(extendedTrans, ShorelinePts, shoreline,
                                         tID_fld, proximity=pt2trans_disttolerance)
# Save and print sample
sl2trans_df.to_pickle(os.path.join(scratch_dir, 'sl2trans.pkl'))
sl2trans_df.sample(5)
# -
# Export the inlet delineation and shoreline polygons to the scratch directory ultimately for publication
# Output names are prefixed with the first token of pts_name — presumably
# the site code; verify against setvars. TODO confirm.
arcpy.FeatureClassToFeatureClass_conversion(inletLines, scratch_dir, pts_name.split('_')[0] + '_inletLines.shp')
arcpy.FeatureClassToFeatureClass_conversion(barrierBoundary, scratch_dir, pts_name.split('_')[0] + '_shoreline.shp')
print('OUTPUT: Saved inletLines and shoreline shapefiles in the scratch directory.')
# +
# fun.AddGeographicCoordinates(ShorelinePts)
# Convert the shoreline points feature class to a pandas DataFrame.
slpts_df = fwa.FCtoDF(ShorelinePts)
slpts_df.head()
# Prefix used to name all output files; computed once instead of six times.
site_prefix = pts_name.split('_')[0]
# Report values and identify fields that are not part of the published schema.
xmlfile = os.path.join(scratch_dir, site_prefix + '_SLpts_eainfo.xml')
sl_extra_flds = fun.report_fc_values(slpts_df, field_defs, xmlfile)
# Delete extra fields from points feature class and dataframe (which will become CSV)
if len(sl_extra_flds) > 0:
    for fld in sl_extra_flds:
        try:
            arcpy.DeleteField_management(ShorelinePts, fld)
            print('Deleted field "{}"'.format(fld))
        # Was a bare except; keep the best-effort behavior but do not
        # swallow KeyboardInterrupt/SystemExit.
        except Exception:
            print('WARNING: Failed to delete field "{}"'.format(fld))
    # Replace any previously exported shapefile with the cleaned version.
    arcpy.Delete_management(site_prefix + '_SLpts.shp')
    arcpy.FeatureClassToFeatureClass_conversion(ShorelinePts, scratch_dir, site_prefix + '_SLpts.shp')
print("\nOUTPUT: {} in specified scratch_dir.".format(os.path.basename(site_prefix + '_SLpts.shp')))
# Drop the extra fields from the DataFrame as well and save it as CSV.
slpts_df.drop(sl_extra_flds, axis=1, inplace=True)
csv_fname = os.path.join(scratch_dir, site_prefix + '_SLpts.csv')
slpts_df.to_csv(csv_fname, na_rep=fill, index=False)
print("\nOUTPUT: {} in specified scratch_dir.".format(os.path.basename(csv_fname)))
# -
# #### Dune positions along transects
#
# __DL_x__, __DL_y__, and __DL_z__ are the easting, northing, and elevation, respectively, of the nearest dune toe point within 25 meters of the transect. __DH_x__, __DH_y__, and __DH_z__ are the easting, northing, and elevation, respectively, of the nearest dune crest point within 25 meters.
#
# __DL_snapX__, __DL_snapY__, __DH_snapX__, and __DH_snapY__ are the eastings and northings of the points "snapped" to the transect. "Snapping" finds the position along the transect nearest to the point, i.e. orthogonal to the transect. These values are used to find the beach width. The elevation values are not snapped; we use the elevation values straight from the original points.
#
# These values are populated as follows:
#
# 1. Find the nearest dune crest/toe point to the transect and proceed if the distance is less than 25 m. If there are no points within 25 m of the transect, populate the row with Null values.
# 2. Get the X, Y, and Z values of the point.
# 3. Find the position along the transect of an orthogonal line drawn to the dune point (__DL_snapX__, __DL_snapY__, __DH_snapX__, and __DH_snapY__). This is considered the 'snapped' XY position and is calculated using the arcpy geometry method.
# +
# Create dataframe for both dune crest and dune toe positions, snapping
# each nearest point (within the proximity tolerance) onto its transect.
dune2trans_df = fwa.find_ClosestPt2Trans_snap(extendedTrans, dhPts, dlPts, trans_df,
                                              tID_fld, proximity=pt2trans_disttolerance)
# Save and print sample
dune2trans_df.to_pickle(os.path.join(scratch_dir, 'dune2trans.pkl'))
dune2trans_df.sample(5)
# +
# Convert the dune toe points feature class to a pandas DataFrame.
dlpts_df = fwa.FCtoDF(dlPts)
# Prefix used to name the output files; computed once instead of repeatedly.
site_prefix = pts_name.split('_')[0]
# Report values and identify fields that are not part of the published schema.
xmlfile = os.path.join(scratch_dir, site_prefix + '_DTpts_eainfo.xml')
dl_extra_flds = fun.report_fc_values(dlpts_df, field_defs, xmlfile)
# Delete extra fields from points feature class and dataframe (which will become CSV)
for fld in dl_extra_flds:
    try:
        arcpy.DeleteField_management(dlPts, fld)
        print('Deleted field "{}"'.format(fld))
    # Was a bare except; keep the best-effort behavior but do not
    # swallow KeyboardInterrupt/SystemExit.
    except Exception:
        print('WARNING: Failed to delete field "{}"'.format(fld))
arcpy.FeatureClassToFeatureClass_conversion(dlPts, scratch_dir, site_prefix + '_DTpts.shp')
# Save CSV in scratch_dir
dlpts_df.drop(dl_extra_flds, axis=1, inplace=True)
csv_fname = os.path.join(scratch_dir, site_prefix + '_DTpts.csv')
dlpts_df.to_csv(csv_fname, na_rep=fill, index=False)
print("\nOUTPUT: {} in specified scratch_dir.\n".format(os.path.basename(csv_fname)))
# +
# Convert the dune crest points feature class to a pandas DataFrame.
dhpts_df = fwa.FCtoDF(dhPts)
# Prefix used to name the output files; computed once instead of repeatedly.
site_prefix = pts_name.split('_')[0]
# Report values and identify fields that are not part of the published schema.
xmlfile = os.path.join(scratch_dir, site_prefix + '_DCpts_eainfo.xml')
dh_extra_flds = fun.report_fc_values(dhpts_df, field_defs, xmlfile)
# Delete extra fields from points feature class and dataframe (which will become CSV)
for fld in dh_extra_flds:
    try:
        arcpy.DeleteField_management(dhPts, fld)
        print('Deleted field "{}"'.format(fld))
    # Was a bare except; keep the best-effort behavior but do not
    # swallow KeyboardInterrupt/SystemExit.
    except Exception:
        print('WARNING: Failed to delete field "{}"'.format(fld))
arcpy.FeatureClassToFeatureClass_conversion(dhPts, scratch_dir, site_prefix + '_DCpts.shp')
# Save CSV in scratch_dir
dhpts_df.drop(dh_extra_flds, axis=1, inplace=True)
csv_fname = os.path.join(scratch_dir, site_prefix + '_DCpts.csv')
dhpts_df.to_csv(csv_fname, na_rep=fill, index=False)
print("\nOUTPUT: {} in specified scratch_dir.".format(os.path.basename(csv_fname)))
# -
# #### Armoring
# __Arm_x__, __Arm_y__, and __Arm_z__ are the easting, northing, and elevation, respectively, where an artificial structure crosses the transect in the vicinity of the beach. These features are meant to supplement the dune toe data set by providing an upper limit to the beach in areas where dune toe extraction was confounded by the presence of an artificial structure. Values are populated for each transect as follows:
#
# 1. Get the positions of intersection between the digitized armoring lines and the transects (Intersect tool from the Overlay toolset);
# 2. Extract the elevation value at each intersection point from the DEM (Extract Multi Values to Points tool from Spatial Analyst);
# +
# Create elevation raster at 5-m resolution if not already
# elevGrid = fwa.ProcessDEM_2(elevGrid, utmSR)
elevGrid = os.path.join(home, 'DEM_5m')
# Armoring line: create an empty feature class for manual digitizing if none exists.
if not arcpy.Exists(armorLines):
    arcpy.CreateFeatureclass_management(home, os.path.basename(armorLines), 'POLYLINE', spatial_reference=utmSR)
    print("{} created. If shorefront armoring exists, interrupt execution to manually digitize.".format(armorLines))
# Intersect armoring lines with transects and extract elevations from the DEM.
arm2trans_df = fwa.ArmorLineToTrans_PD(extendedTrans, armorLines, sl2trans_df, tID_fld, proj_code, elevGrid)
# Save and print sample
arm2trans_df.to_pickle(os.path.join(scratch_dir, 'arm2trans.pkl'))
try:
    arm2trans_df.sample(5)
# Was a bare except; display of a sample is best-effort (e.g. fails when
# there are fewer than 5 rows), but KeyboardInterrupt should not be eaten.
except Exception:
    pass
# -
# ### Add all the positions to the trans_df
# Join the new dataframes to the transect dataframe. Before it performs the join, `join_columns_id_check()` checks the index and the ID field for potential errors such as whether they are the equal and whether there are duplicated IDs or null values in either.
# Load saved dataframes
# Reload the intermediate results pickled by the earlier cells so this
# section can run without re-executing the arcpy-heavy steps.
trans_df = pd.read_pickle(os.path.join(scratch_dir, 'trans_df.pkl'))
sl2trans_df = pd.read_pickle(os.path.join(scratch_dir, 'sl2trans.pkl'))
dune2trans_df = pd.read_pickle(os.path.join(scratch_dir, 'dune2trans.pkl'))
arm2trans_df = pd.read_pickle(os.path.join(scratch_dir, 'arm2trans.pkl'))
# +
# Join positions of shoreline, dune crest, dune toe, armoring.
# join_columns_id_check validates the ID field (duplicates, nulls,
# index/ID agreement) before each join.
trans_df = fun.join_columns_id_check(trans_df, sl2trans_df, tID_fld)
trans_df = fun.join_columns_id_check(trans_df, dune2trans_df, tID_fld)
trans_df = fun.join_columns_id_check(trans_df, arm2trans_df, tID_fld)
# Save and print sample
trans_df.to_pickle(os.path.join(scratch_dir, 'trans_df_beachmetrics.pkl'))
trans_df.sample(5)
# -
# ### Check for errors
# *Optional*
#
# Display summary stats / histograms and create feature classes. The feature classes display the locations that will be used to calculate beach width. Review the output feature classes in a GIS to validate.
# +
# QC: histograms of dune crest, dune toe, and armoring elevations.
plots = trans_df.hist(['DH_z', 'DL_z', 'Arm_z'])
# Subplot Labels
plots[0][0].set_xlabel("Elevation (m in NAVD88)")
plots[0][0].set_ylabel("Frequency")
plots[0][1].set_xlabel("Elevation (m in NAVD88)")
plots[0][1].set_ylabel("Frequency")
try:
    # Labeling the third panel is best-effort: indexing [0][2] can raise
    # depending on the subplot grid layout or if a column is absent —
    # TODO confirm expected grid shape.
    plots[0][2].set_xlabel("Elevation (m in NAVD88)")
    plots[0][2].set_ylabel("Frequency")
except Exception:  # was a bare except
    pass
plt.show()
plt.close()
# +
# Convert dataframe to feature class - shoreline points with slope
fwa.DFtoFC(sl2trans_df, os.path.join(arcpy.env.workspace, 'pts2trans_SL'),
           spatial_ref=utmSR, id_fld=tID_fld, xy=["SL_x", "SL_y"], keep_fields=['Bslope'])
print('OUTPUT: pts2trans_SL in designated scratch geodatabase.')
# Dune crests — export is best-effort; errors are printed and execution continues.
try:
    fwa.DFtoFC(dune2trans_df, os.path.join(arcpy.env.workspace, 'ptSnap2trans_DH'),
               spatial_ref=utmSR, id_fld=tID_fld, xy=["DH_snapX", "DH_snapY"], keep_fields=['DH_z'])
    print('OUTPUT: ptSnap2trans_DH in designated scratch geodatabase.')
except Exception as err:
    print(err)
    pass
# Dune toes — same best-effort handling as the dune crest export above.
try:
    fwa.DFtoFC(dune2trans_df, os.path.join(arcpy.env.workspace, 'ptSnap2trans_DL'),
               spatial_ref=utmSR, id_fld=tID_fld, xy=["DL_snapX", "DL_snapY"], keep_fields=['DL_z'])
    print('OUTPUT: ptSnap2trans_DL in designated scratch geodatabase.')
except Exception as err:
    print(err)
    pass
# -
# ### Calculate upper beach width and height
# Upper beach width (__uBW__) and upper beach height (__uBH__) are calculated based on the difference in position between two points: the position of MHW along the transect (__SL_x__, __SL_y__) and the dune toe position or equivalent (usually __DL_snapX__, __DL_snapY__). In some cases, the dune toe is not appropriate to designate the "top of beach" so beach width and height are calculated from either the position of the dune toe, the dune crest, or the base of an armoring structure. The dune crest was only considered a possibility if the dune crest elevation (__DH_zMHW__) was less than or equal to `maxDH`.
#
# They are calculated as follows:
# 1. Calculate distances from MHW to the position along the transect of the dune toe (__DistDL__), dune crest (__DistDH__), and armoring (__DistArm__).
# 2. Adjust the elevations to MHW, populating fields __DH_zmhw__, __DL_zmhw__, and __Arm_zmhw__.
# 3. Conditionally select the appropriate feature to represent "top of beach." Dune toe is prioritized. If it is not available and __DH_zmhw__ is less than or equal to maxDH, use dune crest. If neither of the dune positions satisfy the conditions and an armoring feature intersects with the transect, use the armoring position. If none of the three are possible, __uBW__ and __uBH__ will be null.
# 4. Copy the distance to shoreline and height above MHW (__Dist--__, __---zmhw__) to __uBW__ and __uBH__, respectively.
#
# Notes:
# - In some morphology datasets, missing elevation values at a point indicate that the point should not be used to measure beach width. In those cases, use the `skip_missing_z` argument to select whether or not to skip these points.
# Load saved dataframe
trans_df = pd.read_pickle(os.path.join(scratch_dir, 'trans_df_beachmetrics.pkl'))
# Calculate distances from shore to dunes, etc., and populate uBW/uBH.
# skip_missing_z=True: points lacking an elevation are not used for width.
trans_df = fwa.calc_BeachWidth_fill(extendedTrans, trans_df, maxDH, tID_fld,
                                    sitevals['MHW'], fill, skip_missing_z=True)
# ### Dist2Inlet
#
#
# Distance to nearest tidal inlet (__Dist2Inlet__) is computed as alongshore distance of each sampling transect from the nearest tidal inlet. This distance includes changes in the path of the shoreline instead of simply a Euclidean distance and reflects sediment transport pathways. It is measured using the oceanside shoreline between inlets (ShoreBetweenInlets).
#
# Note that the ShoreBetweenInlets feature class must be both 'dissolved' and 'singlepart' so that each feature represents one-and-only-one shoreline that runs the entire distance between two inlets or equivalent. If the shoreline is bounded on both sides by an inlet, measure the distance to both and assign the minimum distance of the two. If the shoreline meets only one inlet (meaning the study area ends before the island ends), use the distance to the only inlet.
#
# The process uses the cut, disjoint, and length geometry methods and properties in ArcPy data access module. The function measure_Dist2Inlet() prints a warning when the difference in Dist2Inlet between two consecutive transects is greater than 300.
# +
# Calc Dist2Inlet in new dataframe
# Alongshore distance from each transect to the nearest tidal inlet, measured
# along ShoreBetweenInlets (not Euclidean distance).
dist_df = fwa.measure_Dist2Inlet(shoreline, extendedTrans, inletLines, tID_fld)
# Join to transects
# Only the Dist2Inlet column is joined; fill replaces missing values.
trans_df = fun.join_columns_id_check(trans_df, pd.DataFrame(dist_df.Dist2Inlet), tID_fld, fill=fill)
# Save and view last 10 rows
dist_df.to_pickle(os.path.join(scratch_dir, 'dist2inlet_df.pkl'))
dist_df.tail(10)
# -
# ### Clip transects, get barrier widths
# Calculates __WidthLand__, __WidthFull__, and __WidthPart__, which measure different flavors of the cross-shore width of the barrier island. __WidthLand__ is the above-water distance between the back-barrier and seaward MHW shorelines. __WidthLand__ only includes regions of the barrier within the shoreline polygon (bndpoly_2sl) and does not extend into any of the sinuous or intervening back-barrier waterways and islands. __WidthFull__ is the total distance between the back-barrier and seaward MHW shorelines (including space occupied by waterways). __WidthPart__ is the width of only the most seaward portion of land within the shoreline.
#
# These are calculated as follows:
#
# 1. Clip the transect to the full island shoreline (Clip in the Analysis toolbox);
# 2. For __WidthLand__, get the length of the multipart line segment from "SHAPE@LENGTH" feature class attribute. When the feature is multipart, this will include only the remaining portions of the transect;
# 3. For __WidthPart__, convert the clipped transect from multipart to singlepart and get the length of the first line segment, which should be the most seaward;
# 4. For __WidthFull__, calculate the distance between the first vertex and the last vertex of the clipped transect (Feature Class to NumPy Array with explode to points, pandas groupby, numpy hypot).
# +
# Clip transects, get barrier widths
# Computes WidthLand, WidthFull, and WidthPart per transect (see markdown above).
widths_df = fwa.calc_IslandWidths(extendedTrans, barrierBoundary, tID_fld=tID_fld)
# # Save
widths_df.to_pickle(os.path.join(scratch_dir, 'widths_df.pkl'))
# Join
trans_df = fun.join_columns_id_check(trans_df, widths_df, tID_fld, fill=fill)
# Save
# '_null_prePts' = transect values before point-derived metrics are joined.
trans_df.to_pickle(os.path.join(scratch_dir, trans_name+'_null_prePts.pkl'))
trans_df.sample(5)
# -
# ## 5-m Points
# The point dataset samples the land every 5 m along each shore-normal transect.
#
# ### Split transects into points at 5-m intervals.
#
# The point dataset is created from the tidied transects (tidyTrans, created during pre-processing) as follows:
#
# 1. Clip the tidied transects (tidyTrans) to the shoreline polygon (bndpoly_2sl) , retaining only those portions of the transects that represent land.
# 2. Produce a dataframe of point positions along each transect every 5 m starting from the ocean-side shoreline. This uses the positionAlongLine geometry method accessed with a Search Cursor and saves the outputs in a new dataframe.
# 3. Create a point feature class from the dataframe.
#
# Note: Sometimes the system doesn't seem to register the new feature class (transPts_unsorted) for a while. I'm not sure how to work around that, other than just to wait.
# +
# Split the tidied transects into points every 5 m starting at the oceanside
# shoreline; returns both a dataframe and an (unsorted) point feature class.
pts_df, pts_presort = fwa.TransectsToPointsDF(extTrans_tidy, barrierBoundary, fc_out=pts_presort)
print("OUTPUT: '{}' in scratch geodatabase.".format(os.path.basename(pts_presort)))
# Save
pts_df.to_pickle(os.path.join(scratch_dir, 'pts_presort.pkl'))
# -
# ### Add Elevation and Slope to points
#
# __ptZ__ (later __ptZmhw__) and __ptSlp__ are the elevation and slope at the 5-m cell corresponding to the point.
# 1. Create the slope and DEM rasters if they don't already exist. We use the 5-m DEM to generate a slope surface (Slope tool in 3D Analyst).
# 2. Use Extract Multi Values to Points tool in Spatial Analyst.
# 3. Convert the feature class back to a dataframe.
# +
# Create slope raster from DEM
# Only generated once; skipped if the slope raster already exists.
if not arcpy.Exists(slopeGrid):
    arcpy.Slope_3d(elevGrid, slopeGrid, 'PERCENT_RISE')
    print("OUTPUT: slope file in designated home geodatabase.")
# Add elevation and slope values at points.
# Adds 'ptZ' (elevation) and 'ptSlp' (slope) fields to the point FC in place.
arcpy.sa.ExtractMultiValuesToPoints(pts_presort, [[elevGrid, 'ptZ'], [slopeGrid, 'ptSlp']])
print("OUTPUT: added slope and elevation to '{}' in designated scratch geodatabase.".format(os.path.basename(pts_presort)))
# +
# NOTE(review): 'SubType' in locals() presumes the optional plover BN layer
# variables were assigned earlier in the session; a stale variable from a
# previous run would also satisfy this check — verify when re-running.
if 'SubType' in locals():
    # Add substrate type, geomorphic setting, veg type, veg density values at points.
    arcpy.sa.ExtractMultiValuesToPoints(pts_presort, [[SubType, 'SubType'], [VegType, 'VegType'],
                                                      [VegDens, 'VegDens'], [GeoSet, 'GeoSet']])
    # Convert to dataframe
    pts_df = fwa.FCtoDF(pts_presort, xy=True, dffields=[tID_fld,'ptZ', 'ptSlp', 'SubType',
                                                        'VegType', 'VegDens', 'GeoSet'])
    # Recode fill values
    # 9999 is the raster fill value for these layers; convert to NaN for pandas.
    pts_df.replace({'GeoSet': {9999:np.nan}, 'SubType': {9999:np.nan}, 'VegType': {9999:np.nan},
                    'VegDens': {9999:np.nan}}, inplace=True)
else:
    print("Plover BN layers not specified (we only check for SubType), so we'll proceed without them. ")
    # Convert to dataframe
    pts_df = fwa.FCtoDF(pts_presort, xy=True, dffields=[tID_fld,'ptZ', 'ptSlp'])
# Save and view sample
pts_df.to_pickle(os.path.join(scratch_dir, 'pts_extractedvalues_presort.pkl'))
pts_df.sample(5)
# -
# +
# Histogram of the elevations ('ptZ') extracted to the 5-m points.
axes = pts_df.hist('ptZ')
# Label the single subplot returned by DataFrame.hist (2-D array of axes).
ax = axes[0][0]
ax.set_xlabel("Elevation (m in NAVD88)")
ax.set_ylabel("Frequency")
# Render, then release the figure.
plt.show()
plt.close()
# -
# ### Calculate distances and sort points
#
# __SplitSort__ is a unique numeric identifier of the 5-m points at the study site, sorted by order along shoreline and by distance from oceanside. __SplitSort__ values are populated by sorting the points by __sort_ID__ and __Dist_Seg__ (see below).
#
# __Dist_Seg__ is the Euclidean distance between the point and the seaward shoreline (__SL_x__, __SL_y__). __Dist_MHWbay__ is the distance between the point and the bayside shoreline and is calculated by subtracting the __Dist_Seg__ value from the __WidthPart__ value of the transect.
#
# __DistSegDH__, __DistSegDL__, and __DistSegArm__ measure the distance of each 5-m point from the dune crest and dune toe position along a particular transect. They are calculated as the Euclidean distance between the 5-m point and the given feature.
# Load saved dataframes
# Resume point and transect dataframes from the pickles written above.
pts_df = pd.read_pickle(os.path.join(scratch_dir, 'pts_extractedvalues_presort.pkl'))
trans_df = pd.read_pickle(os.path.join(scratch_dir, trans_name+'_null_prePts.pkl'))
# +
# Calculate DistSeg, Dist_MHWbay, DistSegDH, DistSegDL, DistSegArm, and sort points (SplitSort)
pts_df = fun.join_columns(pts_df, trans_df, tID_fld)
pts_df = fun.prep_points(pts_df, tID_fld, pID_fld, sitevals['MHW'], fill)
# Aggregate ptZmhw to max and mean and join to transects
pts_df, zmhw = fun.aggregate_z(pts_df, sitevals['MHW'], tID_fld, 'ptZ', fill)
trans_df = fun.join_columns(trans_df, zmhw)
# Join transect values to pts
pts_df = fun.join_columns(pts_df, trans_df, tID_fld)
# pID_fld needs to be among the columns
# If the point ID lives in the index, move it back into a regular column.
if not pID_fld in pts_df.columns:
    pts_df.reset_index(drop=False, inplace=True)
# Match field names to those in sorted_pt_flds list
# Case-insensitive reconciliation: replace each entry of sorted_pt_flds with
# the actual (possibly differently-cased) dataframe column name, so the
# column reordering step below can find every field.
for fld in pts_df.columns:
    if fld not in sorted_pt_flds:
        for i, fldi in enumerate(sorted_pt_flds):
            if fldi.lower() == fld.lower():
                sorted_pt_flds[i] = fld
                print(fld)
# Drop extra fields and sort columns
trans_df.drop(extra_fields, axis=1, inplace=True, errors='ignore')
# Case-insensitively swap each desired output field name for the actual
# dataframe column name so the reindex below matches every column.
for i, f in enumerate(sorted_pt_flds):
    for c in pts_df.columns:
        if f.lower() == c.lower():
            sorted_pt_flds[i] = c
# reindex() replaces reindex_axis(), which was deprecated in pandas 0.21 and
# removed in 0.25; column selection/ordering behavior is identical.
pts_df = pts_df.reindex(sorted_pt_flds, axis=1)
# Save dataframes
# '_null' pickles keep NaN for missing values (vs. the '_csv'/fill variants).
trans_df.to_pickle(os.path.join(scratch_dir, trans_name+'_null.pkl'))
pts_df.to_pickle(os.path.join(scratch_dir, pts_name+'_null.pkl'))
# View random rows from the points DF
pts_df.sample(5)
# -
# ### Recode the values for CSV output and model running
# +
# Recode
# Expand composite codes (e.g. 7777) into explicit value-set strings for the
# CSV/model output. NOTE(review): the specific code mappings come from the
# project's field definitions — confirm against field_defs if they change.
pts_df4csv = pts_df.replace({'SubType': {7777:'{1111, 2222}', 1000:'{1111, 3333}'},
                             'VegType': {77:'{11, 22}', 88:'{22, 33}', 99:'{33, 44}'},
                             'VegDens': {666: '{111, 222}', 777: '{222, 333}',
                                         888: '{333, 444}', 999: '{222, 333, 444}'}})
# Fill NAs
pts_df4csv.fillna(fill, inplace=True)
# Save and view sample
pts_df4csv.to_pickle(os.path.join(scratch_dir, pts_name+'_csv.pkl'))
pts_df4csv.sample(5)
# -
# ## Quality checking
# Look at extracted profiles from around the island. Enter the transect ID within the available range when prompted. Evaluate the plots for consistency among variables. Repeat various times until you can be satisfied that the variables are consistent with each other and appear to represent reality. View areas with inconsistencies in a GIS.
# +
# Transect-level QC: distributions of the main transect-averaged metrics.
desccols = ['DL_zmhw', 'DH_zmhw', 'Arm_zmhw', 'uBW', 'uBH', 'Dist2Inlet',
            'WidthPart', 'WidthLand', 'WidthFull', 'mean_Zmhw', 'max_Zmhw']
# Histograms
trans_df.hist(desccols, sharey=True, figsize=[15, 10], bins=20)
plt.show()
plt.close('all')
# +
# Point-level QC: distance and elevation/slope fields.
flds_dist = ['SplitSort', 'Dist_Seg', 'Dist_MHWbay', 'DistSegDH', 'DistSegDL', 'DistSegArm']
flds_z = ['ptZmhw', 'ptZ', 'ptSlp']
# NOTE: describe() only displays in a notebook; its return value is discarded here.
pts_df.loc[:,flds_dist+flds_z].describe()
pts_df.hist(flds_dist, sharey=True, figsize=[15, 8], layout=(2,3))
pts_df.hist(flds_z, sharey=True, figsize=[15, 4], layout=(1,3))
plt.show()
plt.close('all')
# +
# Prompt for transect identifier (sort_ID) and get all points from that transect.
# Use .iloc[0]/.iloc[-1] to obtain scalar first/last IDs: calling int() on a
# one-element Series (head(1)/tail(1)) is deprecated and removed in recent pandas.
trans_in = int(input('Transect ID ("sort_ID" {:d}-{:d}): '.format(int(pts_df[tID_fld].iloc[0]), int(pts_df[tID_fld].iloc[-1]))))
pts_set = pts_df[pts_df[tID_fld] == trans_in]
# Plot
fig = plt.figure(figsize=(13,10))
# Plot the width of the island.
ax1 = fig.add_subplot(211)
# The plotting helpers raise TypeError when the transect lacks required
# values (e.g. missing feature positions); report and continue.
try:
    fun.plot_island_profile(ax1, pts_set, sitevals['MHW'], sitevals['MTL'])
except TypeError as err:
    print('TypeError: {}'.format(err))
    pass
# Zoom in on the upper beach.
ax2 = fig.add_subplot(212)
try:
    fun.plot_beach_profile(ax2, pts_set, sitevals['MHW'], sitevals['MTL'], maxDH)
except TypeError as err:
    print('TypeError: {}'.format(err))
    pass
# Display
plt.show()
plt.close('all')
# -
# ### Report field values
# Load dataframe
pts_df4csv = pd.read_pickle(os.path.join(scratch_dir, pts_name+'_csv.pkl'))
# Write the observed field values against the field definitions to an
# entity-and-attribute XML ('_eainfo') for metadata.
xmlfile = os.path.join(scratch_dir, pts_name+'_eainfo.xml')
fun.report_fc_values(pts_df4csv, field_defs, xmlfile)
# ## Outputs
#
# ### Transect-averaged
# Output the transect-averaged metrics in the following formats:
# - transects, unpopulated except for ID values, as gdb feature class
# - transects, unpopulated except for ID values, as shapefile
# - populated transects with fill values as gdb feature class
# - populated transects with null values as gdb feature class
# - populated transects with fill values as shapefile
# - raster of beach width (__uBW__) by transect
# Load the dataframe
# Resume the final (null-valued) transect dataframe for output steps.
trans_df = pd.read_pickle(os.path.join(scratch_dir, trans_name+'_null.pkl'))
# #### Vector format
# +
# Create transect file with only ID values and geometry to publish.
trans_flds = ['TRANSECTID', 'TRANSORDER', 'DD_ID']
# Case-insensitively match the ID field names to the actual column names.
for i, f in enumerate(trans_flds):
    for c in trans_df.columns:
        if f.lower() == c.lower():
            trans_flds[i] = c
trans_4pub = fwa.JoinDFtoFC(trans_df.loc[:,trans_flds], extendedTrans, tID_fld, out_fc=sitevals['code']+'_trans')
# Export the ID-only transects as a shapefile in the scratch directory.
out_shp = arcpy.FeatureClassToFeatureClass_conversion(trans_4pub, scratch_dir, sitevals['code']+'_trans.shp')
print("OUTPUT: {} in specified scratch_dir.".format(os.path.basename(str(out_shp))))
# +
# Create transect FC with fill values - Join values from trans_df to the transect FC as a new file.
trans_fc = fwa.JoinDFtoFC(trans_df, extendedTrans, tID_fld, out_fc=trans_name+'_fill')
# Create transect FC with null values
# Same features with fill values replaced by nulls, written to the home gdb.
fwa.CopyFCandReplaceValues(trans_fc, fill, None, out_fc=trans_name+'_null', out_dir=home)
# Save final transect SHP with fill values
out_shp = arcpy.FeatureClassToFeatureClass_conversion(trans_fc, scratch_dir, trans_name+'_shp.shp')
print("OUTPUT: {} in specified scratch_dir.".format(os.path.basename(str(out_shp))))
# -
# #### Raster - beach width
# It may be necessary to close any Arc sessions you have open.
# +
# Create a template raster corresponding to the transects.
# Each cell within 50 m of a transect takes that transect's ID; built once.
if not arcpy.Exists(rst_transID):
    print("{} was not found so we will create the base raster.".format(os.path.basename(rst_transID)))
    outEucAll = arcpy.sa.EucAllocation(extTrans_tidy, maximum_distance=50, cell_size=cell_size, source_field=tID_fld)
    outEucAll.save(os.path.basename(rst_transID))
# Create raster of uBW values by joining trans_df to the template raster.
out_rst = fwa.JoinDFtoRaster(trans_df, os.path.basename(rst_transID), bw_rst, fill, tID_fld, 'uBW')
# -
# ### 5-m points
#
# Output the point metrics in the following formats:
# - tabular, in CSV
# - populated points with fill values as gdb feature class
# - populated points with null values as gdb feature class
# - populated points with fill values as shapefile
# Load the saved dataframes
# Resume both flavors of the points dataframe: '_csv' (recoded, filled) and
# '_null' (NaN-valued) for the tabular and vector outputs respectively.
pts_df4csv = pd.read_pickle(os.path.join(scratch_dir, pts_name+'_csv.pkl'))
pts_df = pd.read_pickle(os.path.join(scratch_dir, pts_name+'_null.pkl'))
# #### Tabular format
# +
# Save CSV in scratch_dir
csv_fname = os.path.join(scratch_dir, pts_name +'.csv')
pts_df4csv.to_csv(csv_fname, na_rep=fill, index=False)
# Report the output file size in megabytes.
sz_mb = os.stat(csv_fname).st_size/(1024.0 * 1024.0)
print("OUTPUT: {} [{} MB] in specified scratch_dir. ".format(os.path.basename(csv_fname), sz_mb))
# -
# #### Vector format
# +
# Convert pts_df to FC - automatically converts NaNs to fills (default fill is -99999)
pts_fc = fwa.DFtoFC_large(pts_df, out_fc=os.path.join(arcpy.env.workspace, pts_name+'_fill'),
                          spatial_ref=utmSR, df_id=pID_fld, xy=["seg_x", "seg_y"])
# Save final FCs with null values
# Same points with fill values replaced by nulls, written to the home gdb.
fwa.CopyFCandReplaceValues(pts_fc, fill, None, out_fc=pts_name+'_null', out_dir=home)
# Save final points as SHP with fill values
out_pts_shp = arcpy.FeatureClassToFeatureClass_conversion(pts_fc, scratch_dir, pts_name+'_shp.shp')
print("OUTPUT: {} in specified scratch_dir.".format(os.path.basename(str(out_pts_shp))))
|
vol1/extractor_fiis12.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Gedeon-m-gedus/Image_Processing/blob/master/Pruning_quantiztion.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="6wwgzt-5lkru" colab_type="code" colab={}
# !pip install torchsummaryX
# + id="DtmCPCbfCsJ4" colab_type="code" colab={}
import torch
from torch import nn
import numpy as np
import torch.nn.utils.prune as prune
import torchvision.models as models
import torch.nn.functional as F
from torchsummaryX import summary
from torchvision import datasets, transforms as T
import os
# + id="jG6uXrX7FnSJ" colab_type="code" colab={}
def set_seed(seed):
    """Seed the numpy and torch (CPU + all CUDA devices) RNGs and force
    deterministic cuDNN kernels for reproducible runs."""
    for seeder in (np.random.seed, torch.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)
    torch.backends.cudnn.deterministic = True
def handle_dirs(dirpath):
    """Create *dirpath* (and any missing parents) if it does not exist.

    Uses os.makedirs(..., exist_ok=True) instead of the original
    check-then-create pattern, which is race-prone (the directory could be
    created between the exists() check and makedirs()).
    """
    os.makedirs(dirpath, exist_ok=True)
set_seed(42)
# + id="omV-WZqkF5Pg" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 121} outputId="ffff82d8-d8df-4b95-bdba-dc7a6a592277"
# Colab's file access feature
# Mount Google Drive so model checkpoints persist across Colab sessions.
from google.colab import drive
drive.mount('/content/gdrive')
# + id="mkBARficK1yG" colab_type="code" colab={}
# Directory on the mounted Drive where checkpoints will be written.
save_dir = "/content/gdrive/My Drive/AMMI_project/object_detector"
handle_dirs(save_dir)
# + id="tcWThzPeDEq0" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 82, "referenced_widgets": ["39e1304e499e4f4b937aa9003c60d447", "4e0d0084ff4841529b425cc7f3ef0c28", "3f88bbd07af54a398e356000b9a8d730", "99d86efa3a6f4163976e2e2632e74e20", "819d4ad11021497faa44f5fbb10a38de", "f9ec6befd2e04e90813c5adaeb404bab", "fc8d097a38294129a841a0f69aed2f8c", "13e00ac121524196b6fee6264b32c2c6"]} outputId="deec52f8-0311-491f-af28-1a25dceb3759"
model = models.resnext50_32x4d(pretrained=True)
# + id="m--zjI54DuLB" colab_type="code" colab={}
# Trailing semicolon only suppresses notebook output; the call itself is a no-op.
model.state_dict();
# NOTE(review): "precission" is a typo in the checkpoint filename; left as-is
# because other code/notebooks may load this exact path.
PATH = save_dir + '/Resnet50_full_precission.pth'
torch.save(model.state_dict(), PATH)
# + id="yV-w1r5ND2wO" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="eaca1047-1486-4067-c8bc-30944187bbd1"
summary(model,torch.zeros([1, 3, 224, 224]))
# + id="lendqIHEEMvh" colab_type="code" colab={}
#model.named_modules
# + id="vBcLbrveEMyg" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 54} outputId="6fe53cd7-53ea-414f-be10-d6f85d2083ba"
# NOTE(review): this is an alias, not a copy — pruning new_model mutates the
# original `model` object too (its full-precision state_dict was already saved
# above, so the saved checkpoint is unaffected).
new_model = model
for name, module in new_model.named_modules():
    # prune 20% of connections in all 2D-conv layers
    if isinstance(module, torch.nn.Conv2d):
        prune.l1_unstructured(module, name='weight', amount=0.2)
    # prune 40% of connections in all linear layers
    elif isinstance(module, torch.nn.Linear):
        prune.l1_unstructured(module, name='weight', amount=0.4)
print(dict(new_model.named_buffers()).keys()) # to verify that all masks exist
# + id="Kkur6SnMEM6C" colab_type="code" colab={}
# Trailing semicolon only suppresses notebook output; the call itself is a no-op.
new_model.state_dict();
# Save the pruned state_dict (includes the weight_orig/weight_mask buffers
# added by torch.nn.utils.prune).
PATH = save_dir + '/Resnet50_pruned.pth'
torch.save(new_model.state_dict(), PATH)
# + id="83gb6LYOEM86" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="e34dfd8b-b2a1-4e92-fc70-86fe47a6f5fa"
# Display the model architecture in the notebook output.
new_model#.layer1.state_dict().keys()
# + id="DlLffLjsEIXE" colab_type="code" colab={}
|
Pruning_quantiztion.ipynb
|