code
stringlengths
38
801k
repo_path
stringlengths
6
263
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + id="LI72-IKKcVZ6"
# We will import our libraries
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline

# + id="mt3ee4dUco_I"
# Upload the dataset when running inside Google Colab.
from google.colab import files

uploaded = files.upload()

# + id="aiQrN9p3dPWH"
train = pd.read_csv('titanic_train.csv')

# + id="Yt2nQDGfdfXp"
train[:5]

# + id="vuMXPawMdhY2"
train.info()

# + [markdown] id="LG6jFLrUdnYO"
# We can already see that there are some missing values present in the dataset
# in the columns Age, Cabin, Embarked.

# + [markdown] id="IADt0C0Me3SC"
# # EDA

# + id="jo38HB1hdm2X"
# Heatmap of missing values: the Age and Cabin columns have many gaps.
plt.figure(figsize=(10, 6))
sns.heatmap(train.isna(), yticklabels=False, cbar=False, cmap='viridis')

# + id="AeyQ2O1Je5ku"
# Number of people who survived vs. who did not survive the crash.
sns.countplot(x='Survived', data=train, palette='seismic')

# + id="ZdS3SzmJfRHI"
# Same plot, split by sex as the hue.
sns.countplot(x='Survived', data=train, palette='seismic', hue='Sex')

# + [markdown] id="IWhBhzELfk73"
# * Many males could not survive, and more than half of the survivors were females.
# * We can also conclude that there were more males than females on board.

# + id="SfQ_rRQsf0Ju"
# Number of males and females on the Titanic.
sns.countplot(x='Sex', data=train, palette='seismic')

# + id="E1jUbx6xgpdH"
# Survival counts split by passenger class.
sns.set_style('whitegrid')
sns.countplot(x='Survived', data=train, hue='Pclass', palette='rainbow_r')

# + [markdown] id="XlmOtMdtheU6"
# * Many Class 3 passengers could not survive compared to Class 1 and Class 2.
# * Many Class 1 passengers survived compared to Class 2 and Class 3.
# * There were also many more Class 3 passengers on the cruise.

# + id="sMVF6GG4h01I"
# Distribution of the Age feature.
# NOTE: sns.distplot is deprecated in modern seaborn; sns.histplot is the
# drop-in replacement if this notebook is rerun on a newer version.
sns.distplot(train['Age'], kde=False, bins=30, color='blue')

# + [markdown] id="Gj8bu8KkitFo"
# * Many younger people cruised on the ship, with some skew towards children.

# + id="0yxVkwhxir9E"
# Distribution of fare: the average pricing is around 10 to 100.
sns.distplot(train['Fare'], kde=False, color='red')

# + [markdown] id="B2IEBP9CkGpH"
# # Handling Missing Values

# + id="JKRZlHVQkB6u"
sns.set_style('white')
sns.boxplot(x='Pclass', y='Age', data=train, palette='Pastel1')

# + [markdown] id="NQ7BxGAdk0uE"
# * Wealthier people of Class 1 and 2 tend to be older than the Class 3 people
#   present on the ship.

# + id="jrBH8iLXk8x6"
def impute_age(cols):
    """Fill a missing Age with a class-typical age.

    Args:
        cols: a row slice containing the 'Age' and 'Pclass' values
            (produced by ``train[['Age', 'Pclass']].apply(..., axis=1)``).

    Returns:
        The original age when present, otherwise 37 for Class 1,
        29 for Class 2, and 24 for Class 3 (the per-class medians
        observed in the boxplot above).
    """
    # FIX: use label-based access instead of cols[0]/cols[1].  Positional
    # indexing of a Series with [] is deprecated in pandas and is removed
    # in newer releases; labels are also self-documenting.
    age = cols['Age']
    pclass = cols['Pclass']
    if pd.isna(age):
        if pclass == 1:
            return 37
        elif pclass == 2:
            return 29
        return 24
    return age

# + id="B6xi81aQlU12"
train['Age'] = train[['Age', 'Pclass']].apply(impute_age, axis=1)

# + id="VJlBHVN2lgSy"
# The Age column is now completely filled.
sns.heatmap(train.isna(), yticklabels=False, cbar=False, cmap='viridis')

# + id="5VkcGt47lwZH"
# Drop the Cabin column since it has more missing values than actual data.
train.drop('Cabin', axis=1, inplace=True)

# + id="m1z6ysYrl_Ds"
train[:5]

# + id="CjGJQOLOmD5o"
train.dropna(inplace=True)

# + [markdown] id="3GUXnaJYnOTA"
# # Creating dummy variables of the categorical variables

# + id="XgBQ3vuynGeK"
sex = pd.get_dummies(train['Sex'], drop_first=True)
sex[:5]

# + id="4zSwQdMUnhlV"
embark = pd.get_dummies(train['Embarked'], drop_first=True)
embark[:5]

# + id="DGF1wk3Cnu-4"
train = pd.concat([train, sex, embark], axis=1)
train[:4]

# + id="j7hmrA0zn9mv"
train.drop(['Sex', 'Embarked', 'Name', 'Ticket'], axis=1, inplace=True)
train[:5]

# + [markdown] id="PZXk8alaoNNA"
# Now the dataset has been completely cleaned and is ready for training a model.

# + id="Yvg2SEaaoLbN"
X = train.drop('Survived', axis=1)
X[:5]

# + id="91knDmPDodMX"
y = train['Survived']
y[:5]

# + id="asZCnYBGoiQ8"
from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=101)

# + id="JxFkZMYso0iq"
from sklearn.linear_model import LogisticRegression

model = LogisticRegression()

# + id="DpCUPqz5o6Fp"
model.fit(X_train, y_train)

# + id="8S0E56nGo9wC"
predict = model.predict(X_test)
predict[:5]

# + id="Qe1Qm4bbpLet"
y_test[:5]

# + id="kln02rMjpOfN"
from sklearn.metrics import confusion_matrix, classification_report, log_loss

print(confusion_matrix(y_test, predict))
print(classification_report(y_test, predict))

# + id="8P1e1ntdp01J"
from sklearn.ensemble import RandomForestClassifier

rfc = RandomForestClassifier(n_estimators=50)

# + id="TiWOire3qBJ-"
rfc.fit(X_train, y_train)

# + id="zPMbCJfCqE15"
pred = rfc.predict(X_test)
Machine Learning/Logistic Regression/Logistic_Regression.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Collect calibration Data

import traitlets
import ipywidgets.widgets as widgets
from IPython.display import display
from jetbot import Camera, bgr8_to_jpeg
import os
import time

# Create the folder that will hold the captured calibration frames.
image_folder = "Images/"
if not os.path.exists(image_folder):
    os.makedirs(image_folder)


def update(change):
    """Write the current preview frame to disk whenever the camera value updates."""
    global frame_id, image_folder, image
    # Zero-padded, monotonically increasing file name, e.g. 0000.jpg.
    fname = str(frame_id).zfill(4) + ".jpg"
    with open(image_folder + fname, 'wb') as f:
        f.write(image.value)
    print("\rsave data " + fname, end="")
    frame_id += 1
    # Brief pause so frames are not written faster than necessary.
    time.sleep(0.05)


# Activate the camera and wire it to a preview widget.  The widget size does
# not have to match the camera resolution.
camera = Camera.instance(width=960, height=540, capture_width=1280, capture_height=720)
image = widgets.Image(format='jpeg', width=480, height=270)
camera_link = traitlets.dlink((camera, 'value'), (image, 'value'), transform=bgr8_to_jpeg)
display(image)

# Start collecting data: every camera update now triggers `update`.
frame_id = 0
camera.observe(update, names='value')

# Stop collecting: detach the observer and break the camera-to-widget link.
camera.unobserve(update, names='value')
camera_link.unlink()
lab4/program/collect_data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] deletable=false editable=false nbgrader={"checksum": "c004350cd25d3b1075afcf8b7b244cc6", "grade": false, "grade_id": "cell-2adc36b256efc420", "locked": true, "schema_version": 1, "solution": false} # # Assignment 4 - Average Reward Softmax Actor-Critic # # Welcome to your Course 3 Programming Assignment 4. In this assignment, you will implement **Average Reward Softmax Actor-Critic** in the Pendulum Swing-Up problem that you have seen earlier in the lecture. Through this assignment you will get hands-on experience in implementing actor-critic methods on a continuing task. # # **In this assignment, you will:** # 1. Implement softmax actor-critic agent on a continuing task using the average reward formulation. # 2. Understand how to parameterize the policy as a function to learn, in a discrete action environment. # 3. Understand how to (approximately) sample the gradient of this objective to update the actor. # 4. Understand how to update the critic using differential TD error. # # + [markdown] deletable=false editable=false nbgrader={"checksum": "282b307e98de110dd40a15a6cc25ec5d", "grade": false, "grade_id": "cell-99df6e3a990f9278", "locked": true, "schema_version": 1, "solution": false} # ## Pendulum Swing-Up Environment # # In this assignment, we will be using a Pendulum environment, adapted from [Santamaría et al. (1998)](http://www.incompleteideas.net/papers/SSR-98.pdf). This is also the same environment that we used in the lecture. The diagram below illustrates the environment. # # <img src="data/pendulum_env.png" alt="Drawing" style="width: 400px;"/> # # The environment consists of single pendulum that can swing 360 degrees. The pendulum is actuated by applying a torque on its pivot point. 
The goal is to get the pendulum to balance up-right from its resting position (hanging down at the bottom with no velocity) and maintain it as long as possible. The pendulum can move freely, subject only to gravity and the action applied by the agent. # # The state is 2-dimensional, which consists of the current angle $\beta \in [-\pi, \pi]$ (angle from the vertical upright position) and current angular velocity $\dot{\beta} \in (-2\pi, 2\pi)$. The angular velocity is constrained in order to avoid damaging the pendulum system. If the angular velocity reaches this limit during simulation, the pendulum is reset to the resting position. # The action is the angular acceleration, with discrete values $a \in \{-1, 0, 1\}$ applied to the pendulum. # For more details on environment dynamics you can refer to the original paper. # # The goal is to swing-up the pendulum and maintain its upright angle. Hence, the reward is the negative absolute angle from the vertical position: $R_{t} = -|\beta_{t}|$ # # Furthermore, since the goal is to reach and maintain a vertical position, there are no terminations nor episodes. Thus this problem can be formulated as a continuing task. # # Similar to the Mountain Car task, the action in this pendulum environment is not strong enough to move the pendulum directly to the desired position. The agent must learn to first move the pendulum away from its desired position and gain enough momentum to successfully swing-up the pendulum. And even after reaching the upright position the agent must learn to continually balance the pendulum in this unstable position. # + [markdown] deletable=false editable=false nbgrader={"checksum": "17075aa4f743d7ce32b468322a340a07", "grade": false, "grade_id": "cell-72dc8196386b12dd", "locked": true, "schema_version": 1, "solution": false} # ## Packages # # You will use the following packages in this assignment. # # - [numpy](www.numpy.org) : Fundamental package for scientific computing with Python. 
# - [matplotlib](http://matplotlib.org) : Library for plotting graphs in Python. # - [RL-Glue](http://www.jmlr.org/papers/v10/tanner09a.html) : Library for reinforcement learning experiments. # - [jdc](https://alexhagen.github.io/jdc/) : Jupyter magic that allows defining classes over multiple jupyter notebook cells. # - [tqdm](https://tqdm.github.io/) : A package to display progress bar when running experiments # - plot_script : custom script to plot results # - [tiles3](http://incompleteideas.net/tiles/tiles3.html) : A package that implements tile-coding. # - pendulum_env : Pendulum Swing-up Environment # # **Please do not import other libraries** — this will break the autograder. # # + deletable=false editable=false nbgrader={"checksum": "c45e0038609a4d2ab65c82e7866ac17a", "grade": false, "grade_id": "cell-df277e2f962adb8c", "locked": true, "schema_version": 1, "solution": false} # Do not modify this cell! # Import necessary libraries # DO NOT IMPORT OTHER LIBRARIES - This will break the autograder. import numpy as np import matplotlib.pyplot as plt # %matplotlib inline import os from tqdm import tqdm from rl_glue import RLGlue from pendulum_env import PendulumEnvironment from agent import BaseAgent import plot_script import tiles3 as tc # + [markdown] deletable=false editable=false nbgrader={"checksum": "eca68945ea0514012d2e6fb9e32cdb58", "grade": false, "grade_id": "cell-ab47eee3b7f7d678", "locked": true, "schema_version": 1, "solution": false} # ## Section 1: Create Tile Coding Helper Function # # In this section, we are going to build a tile coding class for our agent that will make it easier to make calls to our tile coder. # # Tile-coding is introduced in Section 9.5.4 of the textbook as a way to create features that can both provide good generalization and discrimination. We have already used it in our last programming assignment as well. 
# # Similar to the last programming assignment, we are going to make a function
# specific for tile coding for our Pendulum Swing-up environment. We will also
# use the [Tiles3 library](http://incompleteideas.net/tiles/tiles3.html).
#
# To get the tile coder working we need to:
#
# 1) create an index hash table using tc.IHT(),
# 2) scale the inputs for the tile coder based on the number of tiles and the
#    range of values each input could take,
# 3) call tc.tileswrap to get active tiles back.
#
# However, we need to make one small change to this tile coder.
# In this environment the state space contains angle, which is between
# $[-\pi, \pi]$. If we tile-code this state space in the usual way, the agent
# may think the value of states at angle $-\pi$ is very different from angle
# $\pi$ when in fact they are the same! To allow generalization between
# angle $= -\pi$ and angle $= \pi$, we need to use a **wrap tile coder**.
#
# The usage of the wrap tile coder is almost identical to the original tile
# coder, except that we also need to provide the `wrapwidth` argument for the
# dimension we want to wrap over (hence only for angle, and `None` for angular
# velocity). More details are in the Tiles3 documentation.

# + deletable=false nbgrader={"checksum": "6c16c849417bf1b801731e16f4e3a151", "grade": false, "grade_id": "cell-e4e31210465e6d0f", "locked": false, "schema_version": 1, "solution": true}
# [Graded]
class PendulumTileCoder:
    """Wrap tile coder for the 2-D Pendulum Swing-Up state (angle, ang_vel)."""

    def __init__(self, iht_size=4096, num_tilings=32, num_tiles=8):
        """Initialize the Pendulum tile coder.

        Args:
            iht_size: size of the index hash table, typically a power of 2.
            num_tilings: number of tilings.
            num_tiles: number of tiles per dimension (tiles are square, so
                width and height are the same).
        """
        self.num_tilings = num_tilings
        self.num_tiles = num_tiles
        self.iht = tc.IHT(iht_size)

    def get_tiles(self, angle, ang_vel):
        """Return the active tiles for a pendulum state.

        Args:
            angle: pendulum angle, a float in [-np.pi, np.pi].
            ang_vel: angular velocity, a float in (-2*np.pi, 2*np.pi).

        Returns:
            np.array of active tile indices.
        """
        # Scale each input so one unit corresponds to one tile width:
        # angle spans 2*pi, angular velocity spans 4*pi.
        angle_scale = self.num_tiles / (np.pi - (-np.pi))
        ang_vel_scale = self.num_tiles / (2 * np.pi - (-2 * np.pi))

        # tileswrap wraps the angle dimension (wrapwidth = num_tiles) so that
        # -pi and pi generalize to the same tiles; the angular-velocity
        # dimension is left unwrapped (False).
        active = tc.tileswrap(
            self.iht,
            self.num_tilings,
            [angle * angle_scale, ang_vel * ang_vel_scale],
            wrapwidths=[self.num_tiles, False],
        )
        return np.array(active)

# + [markdown] deletable=false editable=false nbgrader={"checksum": "4b02f0fce6904c39ace01c263ee80ead", "grade": false, "grade_id": "cell-1d990f692063303c", "locked": true, "schema_version": 1, "solution": false}
# Run the following code to verify `PendulumTilecoder`

# + deletable=false editable=false nbgrader={"checksum": "d118544172252ec03f5b282817ff263e", "grade": true, "grade_id":
# "graded_tilecoder", "locked": true, "points": 15, "schema_version": 1, "solution": false}
# Do not modify this cell!

## Test Code for PendulumTileCoder ##

# Your tile coder should also work for other num. tilings and num. tiles
test_obs = [[-np.pi, 0], [-np.pi, 0.5], [np.pi, 0], [np.pi, -0.5], [0, 1]]

pdtc = PendulumTileCoder(iht_size=4096, num_tilings=8, num_tiles=4)

# Collect the active tiles for each test observation, then print them.
result = []
for obs in test_obs:
    angle, ang_vel = obs
    tiles = pdtc.get_tiles(angle=angle, ang_vel=ang_vel)
    result.append(tiles)

for tiles in result:
    print(tiles)

# + [markdown] deletable=false editable=false nbgrader={"checksum": "f8ae6af80e2bd513ac3562ccde6bebc1", "grade": false, "grade_id": "cell-44b88917a2825241", "locked": true, "schema_version": 1, "solution": false}
# **Expected output**:
#
# [0 1 2 3 4 5 6 7]
# [0 1 2 3 4 8 6 7]
# [0 1 2 3 4 5 6 7]
# [ 9 1 2 10 4 5 6 7]
# [11 12 13 14 15 16 17 18]
#
# Note how the first and third observations (angle = -pi and angle = pi) map
# to the same tiles — this is the wrap tile coder at work.

# + [markdown] deletable=false editable=false nbgrader={"checksum": "4ef6492853db03e0ee980ea374723cb8", "grade": false, "grade_id": "cell-78613720dae0e08a", "locked": true, "schema_version": 1, "solution": false}
# ## Section 2: Create Average Reward Softmax Actor-Critic Agent
#
# Now that we implemented PendulumTileCoder let's create the agent that
# interacts with the environment. We will implement the same average reward
# Actor-Critic algorithm presented in the videos.
#
# This agent has two components: an Actor and a Critic. The Actor learns a
# parameterized policy while the Critic learns a state-value function. The
# environment has discrete actions; your Actor implementation will use a
# softmax policy with exponentiated action-preferences. The Actor learns with
# the sample-based estimate for the gradient of the average reward objective.
# The Critic learns using the average reward version of the semi-gradient
# TD(0) algorithm.
#
# In this section, you will be implementing `agent_policy`, `agent_start`,
# `agent_step`, and `agent_end`.

# + [markdown] deletable=false editable=false nbgrader={"checksum": "828614763989884f1e80f0e16218325a", "grade": false, "grade_id": "cell-3676d253ce82f3e3", "locked": true, "schema_version": 1, "solution": false}
# ## Section 2-1: Implement Helper Functions
#
# Let's first define a couple of useful helper functions.

# + [markdown] deletable=false editable=false nbgrader={"checksum": "8d96bc09e1ea682556c7f8fedc790c64", "grade": false, "grade_id": "cell-fd6ef7407bc3283d", "locked": true, "schema_version": 1, "solution": false}
# ## Section 2-1a: Compute Softmax Probability
#
# In this part you will implement `compute_softmax_prob`.
#
# This function computes softmax probability for all actions, given actor
# weights `actor_w` and active tiles `tiles`. This function will be later used
# in `agent_policy` to sample an appropriate action.
#
# First, recall how the softmax policy is represented from state-action
# preferences:
# $\large \pi(a|s, \mathbf{\theta}) \doteq \frac{e^{h(s,a,\mathbf{\theta})}}{\sum_{b}e^{h(s,b,\mathbf{\theta})}}$.
#
# The **state-action preference** is defined as
# $h(s,a, \mathbf{\theta}) \doteq \mathbf{\theta}^T \mathbf{x}_h(s,a)$.
#
# Given active tiles `tiles` for state `s`, the preference
# $\mathbf{\theta}^T \mathbf{x}_h(s,a)$ can be computed by
# `actor_w[a][tiles].sum()`.
#
# We will also use the **exp-normalize trick**, in order to avoid possible
# numerical overflow. Consider the following:
#
# $\large \pi(a|s, \mathbf{\theta}) \doteq \frac{e^{h(s,a,\mathbf{\theta})}}{\sum_{b}e^{h(s,b,\mathbf{\theta})}} = \frac{e^{h(s,a,\mathbf{\theta}) - c} e^c}{\sum_{b}e^{h(s,b,\mathbf{\theta}) - c} e^c} = \frac{e^{h(s,a,\mathbf{\theta}) - c}}{\sum_{b}e^{h(s,b,\mathbf{\theta}) - c}}$
#
# $\pi(\cdot|s, \mathbf{\theta})$ is shift-invariant: the policy remains the
# same when we subtract a constant $c \in \mathbb{R}$ from the state-action
# preferences.
#
# Normally we use $c = \max_b h(s,b, \mathbf{\theta})$, to prevent any
# overflow due to exponentiating large numbers.
# + deletable=false nbgrader={"checksum": "4540ff160f7a874ad3ee99deae10bbcb", "grade": false, "grade_id": "cell-9daa349ce740c93d", "locked": false, "schema_version": 1, "solution": true}
# [Graded]
def compute_softmax_prob(actor_w, tiles):
    """Compute softmax probabilities for all actions.

    Args:
        actor_w - np.array, actor weights with one row per action.
        tiles - np.array, active tile indices for the current state.

    Returns:
        softmax_prob - np.array of size equal to num. actions, sums to 1.
    """
    # State-action preference h(s, a) = sum of that action's weights over the
    # active tiles (linear function approximation with binary tile features).
    state_action_preferences = [
        actor_w[action][tiles].sum() for action in range(actor_w.shape[0])
    ]

    # Exp-normalize trick: subtracting the maximum preference leaves the
    # softmax unchanged but prevents overflow when exponentiating.
    c = np.max(state_action_preferences)
    numerator = np.exp(state_action_preferences - c)

    # Normalize the exponentiated preferences into a probability vector.
    denominator = np.sum(numerator)
    softmax_prob = numerator / denominator
    return softmax_prob

# + [markdown] deletable=false editable=false nbgrader={"checksum": "219d176a243b4cc8105fadc7f200c8cd", "grade": false, "grade_id": "cell-6746fb79fd66fca9", "locked": true, "schema_version": 1, "solution": false}
# Run the following code to verify `compute_softmax_prob`.
# # We will test the method by building a softmax policy from state-action preferences [-1,1,2]. # # The sampling probability should then roughly match $[\frac{e^{-1}}{e^{-1}+e^1+e^2}, \frac{e^{1}}{e^{-1}+e^1+e^2}, \frac{e^2}{e^{-1}+e^1+e^2}] \approx$ [0.0351, 0.2595, 0.7054] # + deletable=false editable=false nbgrader={"checksum": "3ff8eb422e5265e03f5b265eb23bdb58", "grade": true, "grade_id": "graded_compute_softmax_prob", "locked": true, "points": 20, "schema_version": 1, "solution": false} # Do not modify this cell! ## Test Code for compute_softmax_prob() ## # set tile-coder iht_size = 4096 num_tilings = 8 num_tiles = 8 test_tc = PendulumTileCoder(iht_size=iht_size, num_tilings=num_tilings, num_tiles=num_tiles) num_actions = 3 actions = list(range(num_actions)) actor_w = np.zeros((len(actions), iht_size)) # setting actor weights such that state-action preferences are always [-1, 1, 2] actor_w[0] = -1./num_tilings actor_w[1] = 1./num_tilings actor_w[2] = 2./num_tilings # obtain active_tiles from state state = [-np.pi, 0.] angle, ang_vel = state active_tiles = test_tc.get_tiles(angle, ang_vel) # compute softmax probability softmax_prob = compute_softmax_prob(actor_w, active_tiles) print('softmax probability: {}'.format(softmax_prob)) # + [markdown] deletable=false editable=false nbgrader={"checksum": "b93e7c56f4632a1651adf5b0bbfd75e5", "grade": false, "grade_id": "cell-77f00606b70a1d25", "locked": true, "schema_version": 1, "solution": false} # **Expected Output:** # # softmax probability: [0.03511903 0.25949646 0.70538451] # + [markdown] deletable=false editable=false nbgrader={"checksum": "a2f94be0165e918d0886453a691fea1b", "grade": false, "grade_id": "cell-eed6babe9b563391", "locked": true, "schema_version": 1, "solution": false} # ## Section 2-2: Implement Agent Methods # # Let's first define methods that initialize the agent. `agent_init()` initializes all the variables that the agent will need. 
# # Now that we have implemented helper functions, let's create an agent. In
# this part, you will implement `agent_start()` and `agent_step()`. We do not
# need to implement `agent_end()` because there is no termination in our
# continuing task.
#
# `compute_softmax_prob()` is used in `agent_policy()`, which in turn will be
# used in `agent_start()` and `agent_step()`. We have implemented
# `agent_policy()` for you.
#
# When performing updates to the Actor and Critic, recall their respective
# updates in the Actor-Critic algorithm video.
#
# We approximate $q_\pi$ in the Actor update using the one-step bootstrapped
# return ($R_{t+1} - \bar{R} + \hat{v}(S_{t+1}, \mathbf{w})$) subtracted by the
# current state-value ($\hat{v}(S_{t}, \mathbf{w})$), equivalent to TD error
# $\delta$.
#
# $\delta_t = R_{t+1} - \bar{R} + \hat{v}(S_{t+1}, \mathbf{w}) - \hat{v}(S_{t}, \mathbf{w}) \hspace{6em} (1)$
#
# **Average Reward update rule**: $\bar{R} \leftarrow \bar{R} + \alpha^{\bar{R}}\delta \hspace{4.3em} (2)$
#
# **Critic weight update rule**: $\mathbf{w} \leftarrow \mathbf{w} + \alpha^{\mathbf{w}}\delta\nabla \hat{v}(s,\mathbf{w}) \hspace{2.5em} (3)$
#
# **Actor weight update rule**: $\mathbf{\theta} \leftarrow \mathbf{\theta} + \alpha^{\mathbf{\theta}}\delta\nabla ln \pi(A|S,\mathbf{\theta}) \hspace{1.4em} (4)$
#
# Since we are using linear function approximation and parameterizing a
# softmax policy, the above update rules can be further simplified using:
#
# $\nabla \hat{v}(s,\mathbf{w}) = \mathbf{x}(s) \hspace{14.2em} (5)$
#
# $\nabla ln \pi(A|S,\mathbf{\theta}) = \mathbf{x}_h(s,a) - \sum_b \pi(b|s, \mathbf{\theta})\mathbf{x}_h(s,b) \hspace{3.3em} (6)$

# + deletable=false nbgrader={"checksum": "7477f1cdf96f2bd8bafd07abfbd201a2", "grade": false, "grade_id": "cell-a25279b09b459f5c", "locked": false, "schema_version": 1, "solution": true}
# [Graded]
class ActorCriticSoftmaxAgent(BaseAgent):
    """Average-reward softmax Actor-Critic agent with tile-coded features."""

    def __init__(self):
        # All state is configured later by agent_init(); placeholders only.
        self.rand_generator = None
        self.actor_step_size = None
        self.critic_step_size = None
        self.avg_reward_step_size = None
        self.tc = None
        self.avg_reward = None
        self.critic_w = None
        self.actor_w = None
        self.actions = None
        self.softmax_prob = None
        self.prev_tiles = None
        self.last_action = None

    def agent_init(self, agent_info={}):
        """Setup for the agent called when the experiment first starts.

        Set parameters needed to setup the semi-gradient TD(0) state
        aggregation agent.

        Assume agent_info dict contains:
        {
            "iht_size": int,
            "num_tilings": int,
            "num_tiles": int,
            "actor_step_size": float,
            "critic_step_size": float,
            "avg_reward_step_size": float,
            "num_actions": int,
            "seed": int
        }

        NOTE(review): the mutable default `agent_info={}` is only read, never
        mutated, so the shared-default pitfall does not bite here.
        """
        # Set random seed for each run.
        self.rand_generator = np.random.RandomState(agent_info.get("seed"))

        iht_size = agent_info.get("iht_size")
        num_tilings = agent_info.get("num_tilings")
        num_tiles = agent_info.get("num_tiles")

        # Initialize self.tc to the tile coder we created.
        self.tc = PendulumTileCoder(iht_size=iht_size, num_tilings=num_tilings, num_tiles=num_tiles)

        # Set step-sizes accordingly (we normally divide actor and critic
        # step-size by num. tilings — see p.217-218 of the textbook).
        self.actor_step_size = agent_info.get("actor_step_size")/num_tilings
        self.critic_step_size = agent_info.get("critic_step_size")/num_tilings
        self.avg_reward_step_size = agent_info.get("avg_reward_step_size")

        self.actions = list(range(agent_info.get("num_actions")))

        # Initial values of average reward, actor weights, and critic weights.
        # Actor weights have one row of size iht_size per action.
        self.avg_reward = 0.0
        self.actor_w = np.zeros((len(self.actions), iht_size))
        self.critic_w = np.zeros(iht_size)

        self.softmax_prob = None
        self.prev_tiles = None
        self.last_action = None

    def agent_policy(self, active_tiles):
        """Sample an action from the softmax policy.

        Args:
            active_tiles (Numpy array): active tiles returned by tile coder

        Returns:
            The action selected according to the policy
        """
        # Compute softmax probability over the actions.
        softmax_prob = compute_softmax_prob(self.actor_w, active_tiles)

        # Sample an action from the softmax probability array.
        # self.rand_generator.choice() selects an element of self.actions
        # with the given probabilities.
        chosen_action = self.rand_generator.choice(self.actions, p=softmax_prob)

        # Save softmax_prob: it is reused by the Actor update in agent_step().
        self.softmax_prob = softmax_prob

        return chosen_action

    def agent_start(self, state):
        """The first method called when the experiment starts, called after
        the environment starts.

        Args:
            state (Numpy array): the state from the environment's env_start
                function.

        Returns:
            The first action the agent takes.
        """
        angle, ang_vel = state

        # Tile-code the state and choose the first action from the policy.
        active_tiles=self.tc.get_tiles(angle, ang_vel)
        current_action=self.agent_policy(active_tiles)

        # Remember action and tiles for the first TD update in agent_step().
        self.last_action = current_action
        self.prev_tiles = np.copy(active_tiles)

        return self.last_action

    def agent_step(self, reward, state):
        """A step taken by the agent.

        Args:
            reward (float): the reward received for taking the last action
            state (Numpy array): the state from the environment's step, based
                on where the agent ended up after the last step.

        Returns:
            The action the agent is taking.
        """
        angle, ang_vel = state

        # Tile-code the new state.
        active_tiles=self.tc.get_tiles(angle, ang_vel)

        # Differential TD error, Equation (1): with binary tile features,
        # v(s) = sum of critic weights over the active tiles.
        delta=reward-self.avg_reward+np.sum(self.critic_w[active_tiles])-np.sum(self.critic_w[self.prev_tiles])

        # Average-reward update, Equation (2).
        self.avg_reward+=self.avg_reward_step_size*delta

        # Critic update, Equations (3) and (5): gradient is the tile feature
        # vector, so only the previously-active tiles change.
        self.critic_w[self.prev_tiles]+=self.critic_step_size*delta

        # Actor update, Equations (4) and (6), using self.softmax_prob saved
        # from the previous timestep.  The (1 - pi) / (0 - pi) factors are the
        # per-action components of grad ln pi for a linear softmax policy.
        for a in self.actions:
            if a == self.last_action:
                self.actor_w[a][self.prev_tiles] += self.actor_step_size * delta * (1 - self.softmax_prob[a])
            else:
                self.actor_w[a][self.prev_tiles] += self.actor_step_size * delta * (0 - self.softmax_prob[a])

        # Choose the next action (this also refreshes self.softmax_prob).
        current_action=self.agent_policy(active_tiles)

        self.prev_tiles = active_tiles
        self.last_action = current_action

        return self.last_action

    def agent_message(self, message):
        # Returns the running average reward; implicitly None for any other
        # message.
        if message == 'get avg reward':
            return self.avg_reward

# + [markdown] deletable=false editable=false nbgrader={"checksum": "0abe20eda4a3c9f6781959352dab4748", "grade": false, "grade_id": "cell-c47a537224d052ad", "locked": true, "schema_version": 1, "solution": false}
# Run the following code to verify `agent_start()`.
# Although there is randomness due to `self.rand_generator.choice()` in
# `agent_policy()`, we control the seed so your output should match the
# expected output.
# + deletable=false editable=false nbgrader={"checksum": "40a531b5ef11d53daca1ce9f8544dfb0", "grade": true, "grade_id": "graded_agent_start", "locked": true, "points": 10, "schema_version": 1, "solution": false} # Do not modify this cell! ## Test Code for agent_start()## agent_info = {"iht_size": 4096, "num_tilings": 8, "num_tiles": 8, "actor_step_size": 1e-1, "critic_step_size": 1e-0, "avg_reward_step_size": 1e-2, "num_actions": 3, "seed": 99} test_agent = ActorCriticSoftmaxAgent() test_agent.agent_init(agent_info) state = [-np.pi, 0.] test_agent.agent_start(state) print("agent active_tiles: {}".format(test_agent.prev_tiles)) print("agent selected action: {}".format(test_agent.last_action)) # + [markdown] deletable=false editable=false nbgrader={"checksum": "c7e0ca514f7c96e8e6beb2cf9304758e", "grade": false, "grade_id": "cell-4bb285c764d8ad67", "locked": true, "schema_version": 1, "solution": false} # **Expected output**: # # agent active_tiles: [0 1 2 3 4 5 6 7] # agent selected action: 2 # + [markdown] deletable=false editable=false nbgrader={"checksum": "bb016e27cf1ece334e66e895094ef089", "grade": false, "grade_id": "cell-a3d392998465216c", "locked": true, "schema_version": 1, "solution": false} # Run the following code to verify `agent_step()` # + deletable=false editable=false nbgrader={"checksum": "d62013f0d2b33e3e7ed30f86264dc84d", "grade": true, "grade_id": "graded_agent_step", "locked": true, "points": 25, "schema_version": 1, "solution": false} # Do not modify this cell! ## Test Code for agent_step() ## # Make sure agent_start() and agent_policy() are working correctly first. # agent_step() should work correctly for other arbitrary state transitions in addition to this test case. 
env_info = {"seed": 99}
agent_info = {"iht_size": 4096,
              "num_tilings": 8,
              "num_tiles": 8,
              "actor_step_size": 1e-1,
              "critic_step_size": 1e-0,
              "avg_reward_step_size": 1e-2,
              "num_actions": 3,
              "seed": 99}

# RLGlue is given the environment/agent *classes* (not instances);
# it constructs them itself inside rl_init().
test_env = PendulumEnvironment
test_agent = ActorCriticSoftmaxAgent

rl_glue = RLGlue(test_env, test_agent)
rl_glue.rl_init(agent_info, env_info)

# start env/agent
rl_glue.rl_start()

# a single transition exercises the whole agent_step() update path
rl_glue.rl_step()

print("agent next_action: {}".format(rl_glue.agent.last_action))
print("agent avg reward: {}\n".format(rl_glue.agent.avg_reward))
print("agent first 10 values of actor weights[0]: \n{}\n".format(rl_glue.agent.actor_w[0][:10]))
print("agent first 10 values of actor weights[1]: \n{}\n".format(rl_glue.agent.actor_w[1][:10]))
print("agent first 10 values of actor weights[2]: \n{}\n".format(rl_glue.agent.actor_w[2][:10]))
print("agent first 10 values of critic weights: \n{}".format(rl_glue.agent.critic_w[:10]))

# + [markdown] deletable=false editable=false nbgrader={"checksum": "9d4691943e3a97a619875655bef00a2e", "grade": false, "grade_id": "cell-feab2079de2e1fc0", "locked": true, "schema_version": 1, "solution": false}
# **Expected output**:
#
#     agent next_action: 1
#     agent avg reward: -0.03139092653589793
#
#     agent first 10 values of actor weights[0]:
#     [0.01307955 0.01307955 0.01307955 0.01307955 0.01307955 0.01307955
#      0.01307955 0.01307955 0.         0.        ]
#
#     agent first 10 values of actor weights[1]:
#     [0.01307955 0.01307955 0.01307955 0.01307955 0.01307955 0.01307955
#      0.01307955 0.01307955 0.         0.        ]
#
#     agent first 10 values of actor weights[2]:
#     [-0.02615911 -0.02615911 -0.02615911 -0.02615911 -0.02615911 -0.02615911
#      -0.02615911 -0.02615911  0.          0.        ]
#
#     agent first 10 values of critic weights:
#     [-0.39238658 -0.39238658 -0.39238658 -0.39238658 -0.39238658 -0.39238658
#      -0.39238658 -0.39238658  0.          0.        ]

# + [markdown] deletable=false editable=false nbgrader={"checksum": "9bf003af9552ea8cf02c5a3e69f91d4f", "grade": false, "grade_id": "cell-4a2937aee7e48fe0", "locked": true, "schema_version": 1, "solution": false}
# ## Section 3: Run Experiment
#
# Now that we've implemented all the components of environment and agent, let's run an experiment!
# We want to see whether our agent is successful at learning the optimal policy of balancing the pendulum upright. We will plot total return over time, as well as the exponential average of the reward over time. We also do multiple runs in order to be confident about our results.
#
# The experiment/plot code is provided in the cell below.

# + deletable=false editable=false nbgrader={"checksum": "86aa230c2ce8ef9fbd0b5022c72515f6", "grade": false, "grade_id": "cell-42b7e0b38d1ead4c", "locked": true, "schema_version": 1, "solution": false}
# Do not modify this cell!

# Define function to run experiment
def run_experiment(environment, agent, environment_parameters, agent_parameters, experiment_parameters):
    """Sweep the agent meta-parameters and, for each combination, run
    ``num_runs`` independent runs of ``max_steps`` steps each, saving the
    per-step total return and exponentially averaged reward under results/.
    """

    rl_glue = RLGlue(environment, agent)

    # sweep agent parameters
    for num_tilings in agent_parameters['num_tilings']:
        for num_tiles in agent_parameters["num_tiles"]:
            for actor_ss in agent_parameters["actor_step_size"]:
                for critic_ss in agent_parameters["critic_step_size"]:
                    for avg_reward_ss in agent_parameters["avg_reward_step_size"]:

                        env_info = {}
                        agent_info = {"num_tilings": num_tilings,
                                      "num_tiles": num_tiles,
                                      "actor_step_size": actor_ss,
                                      "critic_step_size": critic_ss,
                                      "avg_reward_step_size": avg_reward_ss,
                                      "num_actions": agent_parameters["num_actions"],
                                      "iht_size": agent_parameters["iht_size"]}

                        # results to save
                        return_per_step = np.zeros((experiment_parameters["num_runs"], experiment_parameters["max_steps"]))
                        exp_avg_reward_per_step = np.zeros((experiment_parameters["num_runs"], experiment_parameters["max_steps"]))

                        # using tqdm we visualize progress bars
                        # the run index doubles as the seed, so each run is reproducible
                        for run in tqdm(range(1, experiment_parameters["num_runs"]+1)):
                            env_info["seed"] = run
                            agent_info["seed"] = run

                            rl_glue.rl_init(agent_info, env_info)
                            rl_glue.rl_start()

                            num_steps = 0
                            total_return = 0.
                            return_arr = []  # NOTE(review): collected but never read afterwards

                            # exponential average reward without initial bias
                            exp_avg_reward = 0.0
                            exp_avg_reward_ss = 0.01
                            exp_avg_reward_normalizer = 0

                            while num_steps < experiment_parameters['max_steps']:
                                num_steps += 1
                                rl_step_result = rl_glue.rl_step()

                                reward = rl_step_result[0]
                                total_return += reward
                                return_arr.append(reward)
                                # agent's internal estimate (queried but not used below)
                                avg_reward = rl_glue.rl_agent_message("get avg reward")

                                # bias-corrected exponential average (cf. Exercise 2.7 of the textbook)
                                exp_avg_reward_normalizer = exp_avg_reward_normalizer + exp_avg_reward_ss * (1 - exp_avg_reward_normalizer)
                                ss = exp_avg_reward_ss / exp_avg_reward_normalizer
                                exp_avg_reward += ss * (reward - exp_avg_reward)

                                return_per_step[run-1][num_steps-1] = total_return
                                exp_avg_reward_per_step[run-1][num_steps-1] = exp_avg_reward

                        if not os.path.exists('results'):
                            os.makedirs('results')

                        save_name = "ActorCriticSoftmax_tilings_{}_tiledim_{}_actor_ss_{}_critic_ss_{}_avg_reward_ss_{}".format(num_tilings, num_tiles, actor_ss, critic_ss, avg_reward_ss)
                        total_return_filename = "results/{}_total_return.npy".format(save_name)
                        exp_avg_reward_filename = "results/{}_exp_avg_reward.npy".format(save_name)

                        np.save(total_return_filename, return_per_step)
                        np.save(exp_avg_reward_filename, exp_avg_reward_per_step)

# + [markdown] deletable=false editable=false nbgrader={"checksum": "569a57d760604a84cdb04d53ecfefede", "grade": false, "grade_id": "cell-bea80af13342f057", "locked": true, "schema_version": 1, "solution": false}
# ## Section 3-1: Run Experiment with 32 tilings, size 8x8
#
# We will first test our implementation using 32 tilings, of size 8x8. We saw from the earlier assignment using tile-coding that many tilings promote fine discrimination, and broad tiles allows more generalization.
# We conducted a wide sweep of meta-parameters in order to find the best meta-parameters for our Pendulum Swing-up task.
#
# We swept over the following range of meta-parameters and the best meta-parameter is boldfaced below:
#
# actor step-size: $\{\frac{2^{-6}}{32}, \frac{2^{-5}}{32}, \frac{2^{-4}}{32}, \frac{2^{-3}}{32}, \mathbf{\frac{2^{-2}}{32}}, \frac{2^{-1}}{32}, \frac{2^{0}}{32}, \frac{2^{1}}{32}\}$
#
# critic step-size: $\{\frac{2^{-4}}{32}, \frac{2^{-3}}{32}, \frac{2^{-2}}{32}, \frac{2^{-1}}{32}, \frac{2^{0}}{32}, \mathbf{\frac{2^{1}}{32}}, \frac{3}{32}, \frac{2^{2}}{32}\}$
#
# avg reward step-size: $\{2^{-11}, 2^{-10} , 2^{-9} , 2^{-8}, 2^{-7}, \mathbf{2^{-6}}, 2^{-5}, 2^{-4}, 2^{-3}, 2^{-2}\}$
#
#
# We will do 50 runs using the above best meta-parameter setting to verify your agent.
# Note that running the experiment cell below will take **_approximately 5 min_**.
#

# + deletable=false editable=false nbgrader={"checksum": "ff324b51dd0e1d7bdb47b9979f698bde", "grade": false, "grade_id": "cell-e9bf5a92d552cda5", "locked": true, "schema_version": 1, "solution": false}
# Do not modify this cell!

#### Run Experiment

# Experiment parameters
experiment_parameters = {
    "max_steps" : 20000,
    "num_runs" : 50
}

# Environment parameters
environment_parameters = {}

# Agent parameters
# Each element is an array because we will be later sweeping over multiple values
# actor and critic step-sizes are divided by num. tilings inside the agent
agent_parameters = {
    "num_tilings": [32],
    "num_tiles": [8],
    "actor_step_size": [2**(-2)],
    "critic_step_size": [2**1],
    "avg_reward_step_size": [2**(-6)],
    "num_actions": 3,
    "iht_size": 4096
}

current_env = PendulumEnvironment
current_agent = ActorCriticSoftmaxAgent

run_experiment(current_env, current_agent, environment_parameters, agent_parameters, experiment_parameters)
plot_script.plot_result(agent_parameters, 'results')

# + [markdown] deletable=false editable=false nbgrader={"checksum": "2d162faa4b5808751fcb4433bbd81b7c", "grade": false, "grade_id": "cell-7cfde5a470e987d7", "locked": true, "schema_version": 1, "solution": false}
# Run the following code to verify your experimental result.

# + deletable=false editable=false nbgrader={"checksum": "dd5614163afa480e8e49fd89e7a43c36", "grade": true, "grade_id": "graded_exp_result", "locked": true, "points": 30, "schema_version": 1, "solution": false}
# Do not modify this cell!

## Test Code for experimental result ##

# The filename is built by run_experiment() from the agent parameters above.
filename = 'ActorCriticSoftmax_tilings_32_tiledim_8_actor_ss_0.25_critic_ss_2_avg_reward_ss_0.015625_exp_avg_reward'
agent_exp_avg_reward = np.load('results/{}.npy'.format(filename))

result_med = np.median(agent_exp_avg_reward, axis=0)

# NOTE(review): this loads a pickled dict; on numpy >= 1.16.3 np.load would
# need allow_pickle=True — presumably the course environment pins an older
# numpy. Confirm before upgrading.
answer_range = np.load('correct_npy/exp_avg_reward_answer_range.npy')
upper_bound = answer_range.item()['upper-bound']
lower_bound = answer_range.item()['lower-bound']

# check if result is within answer range
all_correct = np.all(result_med <= upper_bound) and np.all(result_med >= lower_bound)

if all_correct:
    print("Your experiment results are correct!")
else:
    print("Your experiment results does not match with ours. Please check if you have implemented all methods correctly.")

# + [markdown] deletable=false editable=false nbgrader={"checksum": "44295b14742f975bfb7fcd14ec123ebb", "grade": false, "grade_id": "cell-9081e37ad214f0b6", "locked": true, "schema_version": 1, "solution": false}
# ## Section 3-2: Performance Metric and Meta-Parameter Sweeps
#
#
# ### Performance Metric
#
# To evaluate performance, we plotted both the return and exponentially weighted average reward over time.
#
# In the first plot, the return is negative because the reward is negative at every state except when the pendulum is in the upright position. As the policy improves over time, the agent accumulates less negative reward, and thus the return decreases slowly. Towards the end the slope is almost flat indicating the policy has stabilized to a good policy. When using this plot however, it can be difficult to distinguish whether it has learned an optimal policy. The near-optimal policy in this Pendulum Swing-up Environment is to maintain the pendulum in the upright position indefinitely, getting near 0 reward at each time step. We would have to examine the slope of the curve but it can be hard to compare the slope of different curves.
#
# The second plot using exponential average reward gives a better visualization. We can see that towards the end the value is near 0, indicating it is getting near 0 reward at each time step. Here, the exponentially weighted average reward shouldn't be confused with the agent's internal estimate of the average reward. To be more specific, we used an exponentially weighted average of the actual reward without initial bias (Refer to Exercise 2.7 from the textbook (p.35) to read more about removing the initial bias). If we used sample averages instead, later rewards would have decreasing impact on the average and would not be able to represent the agent's performance with respect to its current policy effectively.
# # It is easier to see whether the agent has learned a good policy in the second plot than the first plot. If the learned policy is optimal, the exponential average reward would be close to 0. # # Furthermore, how did we pick the best meta-parameter from the sweeps? A common method would be to pick the meta-parameter that results in the largest Area Under the Curve (AUC). However, this is not always what we want. We want to find a set of meta-parameters that learns a good final policy. When using AUC as the criteria, we may pick meta-parameters that allows the agent to learn fast but converge to a worse policy. In our case, we selected the meta-parameter setting that obtained the most exponential average reward over the last 5000 time steps. # # # ### Parameter Sensitivity # # In addition to finding the best meta-parameters it is also equally important to plot **parameter sensitivity curves** to understand how our algorithm behaves. # # In our simulated Pendulum problem, we can extensively test our agent with different meta-parameter configurations but it would be quite expensive to do so in real life. Parameter sensitivity curves can provide us insight into how our algorithms might behave in general. It can help us identify a good range of each meta-parameters as well as how sensitive the performance is with respect to each meta-parameter. # # Here are the sensitivity curves for the three step-sizes we swept over: # # <img src="data/sensitivity_combined.png" alt="Drawing" style="width: 1000px;"/> # # On the y-axis we use the performance measure, which is the average of the exponential average reward over the 5000 time steps, averaged over 50 different runs. On the x-axis is the meta-parameter we are testing. For the given meta-parameter, the remaining meta-parameters are chosen such that it obtains the best performance. # # The curves are quite rounded, indicating the agent performs well for these wide range of values. 
It indicates that the agent is not too sensitive to these meta-parameters. Furthermore, looking at the y-axis values we can observe that average reward step-size is particularly less sensitive than actor step-size and critic step-size. # # But how do we know that we have sufficiently covered a wide range of meta-parameters? It is important that the best value is not on the edge but in the middle of the meta-parameter sweep range in these sensitivity curves. Otherwise this may indicate that there could be better meta-parameter values that we did not sweep over. # + [markdown] deletable=false editable=false nbgrader={"checksum": "e679782b8781ed867e952ab2a8735ec1", "grade": false, "grade_id": "cell-e9c6a124eb3c37e6", "locked": true, "schema_version": 1, "solution": false} # ## Wrapping up # # ### **Congratulations!** You have successfully implemented Course 3 Programming Assignment 4. # # # You have implemented your own **Average Reward Actor-Critic with Softmax Policy** agent in the Pendulum Swing-up Environment. You implemented the environment based on information about the state/action space and transition dynamics. Furthermore, you have learned how to implement an agent in a continuing task using the average reward formulation. We parameterized the policy using softmax of action-preferences over discrete action spaces, and used Actor-Critic to learn the policy. # # # To summarize, you have learned how to: # 1. Implement softmax actor-critic agent on a continuing task using the average reward formulation. # 2. Understand how to parameterize the policy as a function to learn, in a discrete action environment. # 3. Understand how to (approximately) sample the gradient of this objective to update the actor. # 4. Understand how to update the critic using differential TD error.
03_Prediction_and_Control_with Function_Approximation/week4/C3W4_programming_assignment/C3W4_prrgramming_assignment.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import sys

# Make the bundled CrossViper sources importable before the star import below.
sys.path.insert(1, '/home/crazy/UI/ide/crossviper-master')
from crossviper import *  # provides CrossViper and `tk` (tkinter)


class Ide:
    """Open the CrossViper editor in its own Toplevel window.

    Args:
        parent: the Tk root (or any Tk widget) the new window is attached to.

    Attributes:
        window: the Toplevel hosting the editor.
        app: the CrossViper editor instance.
    """

    def __init__(self, parent):
        # 500x800 window placed at x=1800 (right side of a wide desktop).
        self.window = tk.Toplevel(parent)
        self.window.geometry("%dx%d+%d+%d" % (500, 800, 1800, 0))
        # Fix: pass real booleans. The original passed the *strings*
        # "True"/"False", silently relying on Tcl's string-to-boolean
        # coercion — fragile and unidiomatic in tkinter code.
        self.window.resizable(width=True, height=False)

        # Keep the editor reachable instead of dropping it into a dead local.
        self.app = CrossViper(master=self.window)
        self.app.master.title('Python Ide')
        self.app.master.minsize(width=400, height=800)

# +
# root = tk.Tk()
# obj = Ide(root)
# root.mainloop()
# -
Untitled7.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# # Langmuir probe data
#
# Langmuir probes are the bread and butter of plasma diagnostics. In AUG they are spread through the inner and outer divertors. Some of them tend to go MIA on some days, so always check the individual signals. The naming convention is always something like "ua1". The first "u" is for "unten" (lower), so the first letter can be either "u" or "o" (oben). The second letter can be "a" for "ausen" (outer), "i" for "innen" (inner) or "m" for "mitte" (middle, in the lower divertor roof baffle).
#
# Reading temperature and density for the probes is straightforward, as the information is stored in the `LSD` shotfile (yep, LSD, *LangmuirSondenDaten, jungs*). To get the particular info, you can compose the name of the signal by adding the prefix `te-` for temperature and `ne-` for density.
#
# Reading jsat information, however, is a bloody nightmare. Ain't nobody got time for that.
#
# It is much easier to read data from the `DIVERTOR` programme written by <NAME>, which outputs ASCII files, than to read the data yourself. There are some functions to read the data outputted by DIVERTOR.
import sys
sys.path.append('ipfnlite/')
sys.path.append('/afs/ipp/aug/ads-diags/common/python/lib/')
from getsig import getsig
import matplotlib.pyplot as plt
#plt.style.use('./Styles/darklab.mplstyle')

shotnr = 29864

# Probe 'ua4' (lower outer divertor): 'te-' = temperature, 'ne-' = density.
telfs = getsig(shotnr, 'LSD', 'te-ua4')
nelfs = getsig(shotnr, 'LSD', 'ne-ua4')

# +
# Density (top) and temperature (bottom) traces for the probe, sharing the time axis.
fig, ax = plt.subplots(nrows=2, sharex=True, dpi=100)

ax[0].plot(nelfs.time, nelfs.data*1e-19, lw=0.4)
ax[1].plot(telfs.time, telfs.data, lw=0.4)

ax[0].set_ylabel(r'$\mathrm{n_{e}\,[10^{19}\,m^{-3}]}$')
ax[1].set_ylabel('T [eV]')

ax[0].set_ylim(bottom=0)
ax[1].set_ylim(bottom=0)

ax[1].set_xlabel('time [s]')
ax[1].set_xlim(1,4)

plt.tight_layout()
plt.show()
# -

# ## Reading DIVERTOR output

from readStark import readDivData
from getsig import getsig
# NOTE(review): interp2d is imported but never used in this notebook.
from scipy.interpolate import interp2d
import matplotlib as mpl
#Special axes arrangement for colorbars
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import numpy as np
import matplotlib.pyplot as plt
#plt.style.use('./Styles/darklab.mplstyle')

# ASCII output produced by the DIVERTOR programme for shot 29864.
jsat_out = readDivData('./Files/3D_29864_jsat_out.dat')

h1 = getsig(29864, 'DCN', 'H-1')
h5 = getsig(29864, 'DCN', 'H-5')
dtot = getsig(29864, 'UVS', 'D_tot')

# +
fig = plt.figure(dpi=120)

#Initial and Final time points
tBegin = 1.0
tEnd = 3.6

#2x2 array, left side for plotting, right side for placing colorbar, hence the ratios
gs = mpl.gridspec.GridSpec(3, 2, height_ratios=[1, 1, 1], width_ratios=[5, 1])

#Top plot
ax0 = fig.add_subplot(gs[0, 0])
ax0.plot(h1.time, h1.data*1e-19, label='H-1')
ax0.plot(h5.time, h5.data*1e-19, label='H-5')
ax0.set_ylabel(r'$\mathrm{n_{e}\,[10^{19}\,m^{-3}]}$')
ax0.set_ylim(bottom=0)
ax0.legend()

#Middle plot
ax1 = fig.add_subplot(gs[1, 0], sharex=ax0)
vmax = 15
clrb = ax1.pcolormesh(jsat_out.time, jsat_out.deltas, jsat_out.data, vmax=vmax, shading='gouraud', cmap='viridis')
# Colorbar lives in its own inset axes to the right of the middle plot.
axins = inset_axes(ax1,
                   width="5%",  # width = 10% of parent_bbox width
                   height="100%",  # height : 50%
                   loc=6,
                   bbox_to_anchor=(1.05, 0., 1, 1),
                   bbox_transform=ax1.transAxes,
                   borderpad=0)
cbar = plt.colorbar(clrb, cax=axins, ticks=(np.arange(0.0, vmax+1.0, 3.0)))
cbar.set_label(r'$\mathrm{\Gamma_{D^{+}}\,[10^{22}\,e/m^{-2}]}$')

#Strike point line
ax1.axhline(0.0, color='w')

ax1.set_ylabel(r'$\mathrm{\Delta s\,[cm]}$')
ax1.set_ylim(-5,17)
ax1.set_yticks([-5,0,5,10,15])

##This is just the middle figure, but 2D-interpolated

#Bottom plot
ax2 = fig.add_subplot(gs[2, 0], sharex=ax0)
ax2.plot(dtot.time, dtot.data*1e-22, label='D fueling [1e22 e/s]')
ax2.set_ylim(bottom=0)
ax2.legend()

#Remove ticks from top and middle plot
plt.setp(ax0.get_xticklabels(), visible=False)
plt.setp(ax1.get_xticklabels(), visible=False)

ax0.set_xlim(tBegin, tEnd)
ax2.set_xlabel('time [s]')

plt.subplots_adjust(left=0.1, right=0.99, bottom=0.11, top=0.98, wspace=0.10, hspace=0.11)
#plt.tight_layout()
plt.savefig('./Figures/test.png', dpi=300, transparent=True)
plt.show()
08-Langmuir probes.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Step 0 Cluster the Malware Bazaar data set
#
# This is in the file (which has been checked in):
#	malbaz/cen_389300.csv
#
# On 2021-09-17 the CSV file provided by Malware Bazaar had 389300 lines.
# To download an updated version and cluster this data set, see malbaz/README.
# Using HAC-T with a threshold distance CDist=30, the resulting clustering had 16453 clusters.
#
# The clusters are described in the file malbaz/cen_389300.csv, which has the following columns:
#	tlsh		TLSH of the center of the cluster
#	family		The most common "signature" in the cluster
#	firstSeen	The first seen date for the cluster (earliest of all first seen dates)
#	label		The following concatenated: family, firstseen, nitems
#	radius		The radius of the cluster
#	nitems		The number of items in the cluster
#
# Below we list the 20 most frequently occurring families assigned to clusters:
#
# 3416 AgentTesla \
# 2782 NULL \
# 942 Heodo \
# 725 AveMariaRAT \
# 721 Mirai \
# 708 FormBook \
# 580 Loki \
# 558 QuakBot \
# 416 RemcosRAT \
# 382 Dridex \
# 316 NanoCore \
# 311 IcedID \
# 304 SnakeKeylogger \
# 244 TrickBot \
# 242 GuLoader \
# 231 Gozi \
# 214 RedLineStealer \
# 210 CobaltStrike \
# 197 MassLogger \
# 184 njrat
#
#
# # Step 1 Display a dendrogram for all clusters assigned to a malware family
# For our first demonstration,
# we selected FickerStealer because the dendrogram was not too large / too dense.
# Note: We provide some tools for narrowing the search / showing more meaningful dendrograms.
from pylib.tlsh_lib import *

# Filter the cluster file to a single family and show its dendrogram.
(tlist, labels) = tlsh_csvfile("malbaz/clust_389300.csv", searchColName="family", searchValueList=['FickerStealer'])
tlsh_dendrogram(tlist, labelList=labels[0])

# ## Interpretation of FickerStealer dendrogram
#
# We see a set of close clusters (distances between clusters < 110) in the months of March and April.
# We see a set of even closer clusters (distances between clusters < 60) in the months of May and August.
# There may have been a significant change in the malware family between April and May 2021.

# # Step 2 Show a dendrogram for the RaccoonStealer family
#
# Use the date filtering options (sDate and eDate)
#
# We start by generating a dendrogram for the entire RaccoonStealer malware family.

from pylib.tlsh_lib import *

(tlist, labels) = tlsh_csvfile("malbaz/clust_389300.csv", searchColName="family", searchValueList=['RaccoonStealer'])
tlsh_dendrogram(tlist, labelList=labels[0])

# ## 2.1 Use the sDate parameter to specify clusters after a date
#
# The above dendrogram was not useful.
# So we set the start date (sDate) parameter to "2021-09-01", so that we only show clusters with a firstSeen date
# which occurs on or after 2021-09-01

from pylib.tlsh_lib import *

(tlist, labels) = tlsh_csvfile("malbaz/clust_389300.csv", searchColName="family", searchValueList=['RaccoonStealer'], sDate="2021-09-01")
tlsh_dendrogram(tlist, labelList=labels[0])

# ## 2.2 Use the sDate and eDate parameters to specify clusters in a date range
#
# Here we select clusters in the first quarter of 2021.
# That is in the range: 2021-01-01 to 2021-03-31

from pylib.tlsh_lib import *

(tlist, labels) = tlsh_csvfile("malbaz/clust_389300.csv", searchColName="family", searchValueList=['RaccoonStealer'], sDate="2021-01-01", eDate="2021-03-31")
tlsh_dendrogram(tlist, labelList=labels[0])

# # Step 3 Show all the clusters surrounding a new file using simTlsh
#
# We got a file which had TLSH value
#	T14893F844FD459B2FC3D372F6E75C028D763A1FE8A7E630269934BEA023F56D12526911

from pylib.tlsh_lib import *

# simTlsh selects clusters whose center is within simThreshold of the query digest.
(tlist, labels) = tlsh_csvfile("malbaz/clust_389300.csv", simTlsh="T14893F844FD459B2FC3D372F6E75C028D763A1FE8A7E630269934BEA023F56D12526911", simThreshold=130)
tlsh_dendrogram(tlist, labelList=labels[0])

# ## Interpretation of Mirai / Gafgyt dendrogram
#
# The sample provided is called "QUERY" and it is in the 7th row of the dendrogram (near the top).
# We see that it is close to a Mirai cluster.
# We can adjust the simThreshold to only display clusters closer to our simTlsh (see below).
#
# We see that there is a large branch of Gafgyt malware clusters at the top of the diagram.
# And a large branch of Mirai malware clusters at the bottom of the diagram.
# This makes perfect sense.
# It was reported in April 2021, that Gafgyt had started re-using Mirai code.
#
# https://securityaffairs.co/wordpress/116882/cyber-crime/gafgyt-re-uses-mirai-code.html

from pylib.tlsh_lib import *

(tlist, labels) = tlsh_csvfile("malbaz/clust_389300.csv", simTlsh="T14893F844FD459B2FC3D372F6E75C028D763A1FE8A7E630269934BEA023F56D12526911", simThreshold=100)
tlsh_dendrogram(tlist, labelList=labels[0])

# Here we reduced the simThreshold to 100.
# We find that our simTlsh has a distance < 10 to the Mirai 2021-06-30 cluster
# Our sample is highly likely to be a sample of Mirai malware.

# # Step 4 Show all the clusters surrounding a specified cluster
#
# We had a SnakeKeyLogger cluster which was first seen on 2021-09-16
# This cluster has center
#	T12584BF243AFB8019F173AFBA8FE575969B6EFA633603D55D2491038A0613B81CDC153E
# (this is line 42 of malbaz/cen_389300.csv)

from pylib.tlsh_lib import *

(tlist, labels) = tlsh_csvfile("malbaz/clust_389300.csv", simTlsh="T12584BF243AFB8019F173AFBA8FE575969B6EFA633603D55D2491038A0613B81CDC153E", simThreshold=90)
tlsh_dendrogram(tlist, labelList=labels[0])

# ## Interpretation of SnakeKeylogger / AgentTesla / Formbook / Loki dendrogram
#
# We see that the SnakeKeyLogger cluster is mixed in a group of AgentTesla / Loki and Formbook clusters.
# These malware families are known to exhibit similar properties.
# They are information stealer / RATs which are typically sent attached to spam emails.
#
# "Researchers say the attackers' use of several common malware families makes attribution of this
# campaign to a particular threat group difficult."
# https://cyberintelmag.com/malware-viruses/year-long-spear-phishing-campaign-targets-energy-sector-with-agent-tesla-other-rats/
#
# https://asec.ahnlab.com/en/22074/
#
# # Step 5 Work with unlabelled clusters
#
# We generate a dendrogram which includes unlabeled clusters.
# We extract information about those clusters and show how to list the members.

# +
from pylib.tlsh_lib import *

(tlist, labels) = tlsh_csvfile("malbaz/clust_389300.csv", simTlsh="T10923013EC661113BCD05DB76E2622B7E24A64C768F6B70D871E7208A3CFE8505F42961", simThreshold=180)
tlsh_dendrogram(tlist, labelList=labels[0])
# -

# In the middle of this dendrogram (the green section) we see a group of clusters without labels.
# We now extract information about Cluster 43300

from pylib.tlsh_lib import *
mb_show_sha1("Cluster 43300")

# We also show how to get more information about Gozi 2020-11-10 (2) which is the 5th row from the bottom.
# NOTE(review): the call below queries "MyDoom" with thisDate="2021-09-03",
# which does not obviously match the Gozi example described above — confirm
# the intended label/date pair.

from pylib.tlsh_lib import *
mb_show_sha1("MyDoom", thisDate="2021-09-03")
tlshCluster/malbaz.ipynb
;; -*- coding: utf-8 -*-
;; ---
;; jupyter:
;;   jupytext:
;;     text_representation:
;;       extension: .scm
;;       format_name: light
;;       format_version: '1.5'
;;       jupytext_version: 1.14.4
;;   kernelspec:
;;     display_name: Calysto Scheme 3
;;     language: scheme
;;     name: calysto_scheme
;; ---

;; ### Exercise 2.6
;; If representing pairs as procedures wasn't mind-boggling enough, consider that,
;; in a language that can manipulate procedures, we can get by without numbers
;; (at least insofar as non-negative integers are concerned) by implementing 0
;; and the operation of adding 1 as:
;;
;; (define zero (lambda (f) (lambda (x) x)))
;; (define (add-1 n)
;;   (lambda (f) (lambda (x) (f ((n f) x)))))
;;
;; This representation is known as **Church numerals**, after its inventor
;; Alonzo Church, the logician who invented the lambda calculus.
;; Define one and two directly (not in terms of zero and add-1).
;; (Hint: use substitution to evaluate (add-1 zero).)
;; Give a direct definition of the addition procedure + (not in terms of
;; repeated application of add-1).

(define zero (lambda (f) (lambda (x) x)))
(define (add-1 n)
  (lambda (f) (lambda (x) (f ((n f) x)))))

;; +
; Church numerals are hard to grasp, so run them and
; work out the representation from the observed results.
(define (func x)
  (begin
    (display x)
    (+ x 1)
  )
)

((zero func) 0)
;(dispaly ((zero func) 0))
(newline)

(define one (add-1 zero))
((one func) 0)
;(dispaly ((one func) 0))
(newline)

(define two (add-1 one))
((two func) 0)
;(dispaly ((two func) 0))
(newline)

(define three (add-1 two))
((three func) 0)
;(dispaly ((three func) 0))
(newline)
;; -

;; From the results above, Church numerals can be read as follows:
;;
;; zero  -> applies the given procedure zero times.
;; one   -> applies the given procedure once.
;; two   -> applies the given procedure twice.
;; three -> applies the given procedure three times.
;;

;; +
(define (func x)
  (begin
    (display x)
    (+ x 1)
  )
)

; define one and two directly
(define one (lambda (f) (lambda (x) (f x))))
;(display ((one func) 0))
((one func) 0)
(newline)

(define two (lambda (f) (lambda (x) (f (f x)))))
;(display ((two func) 0))
((two func) 0)
(newline)

(define three (lambda (f) (lambda (x) (f (f (f x))))))
;(display ((three func) 0))
((three func) 0)
(newline)
;; -

; Church addition: the chosen body applies f b times, then a more times.
; The commented alternatives record earlier failed attempts.
(define (add a b)
  ;(lambda (f) (lambda (x) ((a (b (f x)))))) ; raises a runtime error
  ;(lambda (f) (a (b f))) ; this does not add the extra lambda layer for a
  ;(lambda (f) ((a f) (b f))) ; raises a runtime error
  (lambda (f) (lambda (x) ((a f) ((b f) x)))) ; printed result looks larger by 1
  ;(lambda (f) (lambda (x) ((a f) (b (f x)))))
)

;; +
((one func) 0)
(newline)
((two func) 0)
(newline)
(define three (add one two))
((three func) 0)
(newline)

;; +
; Count applications only: display a '0 per call instead of the running value.
(define (func x)
  (begin
    (display '0)
    ;(display x)
    ;(+ x 1)
    ()
  )
)

(define x (add one two))
;(display ((x func) 0))
((x func) 0)
(newline)

(define x (add two one))
;(display ((x func) 0))
((x func) 0)
(newline)

(define x (add x two))
;(display ((x func) 0))
((x func) 0)
(newline)

((zero func) 0)
(newline)
((one func) 0)
(newline)
((two func) 0)
(newline)
exercises/2.06.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.8.5 64-bit (''base'': conda)'
#     language: python
#     name: python38564bitbasecondad1742f2c15834eb4a25ed5f906de87ff
# ---

# Manual walkthrough of GaussianNoiseRegression on the ImbR test set:
# first the step-by-step internals with explicit c_perc percentages,
# then the 'balance' and 'extreme' presets end to end.
from gaussian_noise_regression import GaussianNoiseRegression
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

pd.set_option('display.max_rows',100)

data = pd.read_csv('test/ImbR.csv', index_col=0)
data

data.std().tolist()

# Case 1: explicit under-/over-sampling percentages.
gn1 = GaussianNoiseRegression(data, thr_rel=0.8, c_perc=[0.5,3])

method = gn1.getMethod()
extrType = gn1.getExtrType()
thr_rel = gn1.getThrRel()
controlPtr = gn1.getControlPtr()
c_perc_undersampling, c_perc_oversampling = gn1.getCPerc()
pert = gn1.getPert()
method, extrType, thr_rel, controlPtr, c_perc_undersampling, c_perc_oversampling, pert

# Relevance-function values for the target variable.
yPhi, ydPhi, yddPhi = gn1.calc_rel_values()
yPhi

data1 = gn1.preprocess_data(yPhi)
data1

gn1.set_feature_stds_list(data1)
feature_stds_list = gn1.get_feature_stds_list()
feature_stds_list

gn1.set_obj_interesting_set(data1)
interesting_set = gn1.get_obj_interesting_set()
interesting_set

gn1.set_obj_uninteresting_set(data1)
uninteresting_set = gn1.get_obj_uninteresting_set()
uninteresting_set

gn1.set_obj_bumps(data1)
bumps_undersampling, bumps_oversampling = gn1.get_obj_bumps()
bumps_undersampling, bumps_oversampling

resampled = gn1.process_percentage()
resampled

# Case 2: automatic 'balance' preset.
data = pd.read_csv('test/ImbR.csv', index_col=0)
data

gn2 = GaussianNoiseRegression(data, thr_rel=0.8, c_perc='balance')
method = gn2.getMethod()
extrType = gn2.getExtrType()
thr_rel = gn2.getThrRel()
controlPtr = gn2.getControlPtr()
c_perc = gn2.getCPerc()
# BUG FIX: this previously read gn1.getPert(), reporting the perturbation of
# the first model instead of gn2's own setting (the gn3 section below
# correctly queries gn3).
pert = gn2.getPert()
method, extrType, thr_rel, controlPtr, c_perc, pert

resampled = gn2.resample()
resampled

# Case 3: automatic 'extreme' preset.
data = pd.read_csv('test/ImbR.csv', index_col=0)
data

gn3 = GaussianNoiseRegression(data, thr_rel=0.8, c_perc='extreme')
method = gn3.getMethod()
extrType = gn3.getExtrType()
thr_rel = gn3.getThrRel()
controlPtr = gn3.getControlPtr()
c_perc = gn3.getCPerc()
pert = gn3.getPert()
method, extrType, thr_rel, controlPtr, c_perc, pert

resampled = gn3.resample()
resampled
archive/test_gaussian_noise.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python (ML)
#     language: python
#     name: ml
# ---

# # 64-D image manifold: images

# +
# %matplotlib inline

import sys
import numpy as np
import matplotlib
from matplotlib import pyplot as plt
from matplotlib.offsetbox import TextArea, DrawingArea, OffsetImage, AnnotationBbox
import torch

sys.path.append("../../")

from experiments.datasets import FFHQStyleGAN64DLoader
from experiments.architectures.image_transforms import create_image_transform, create_image_encoder
from experiments.architectures.vector_transforms import create_vector_transform
from manifold_flow.flows import ManifoldFlow, EncoderManifoldFlow
import plot_settings as ps
# -

ps.setup()

# ## Helper function to go from torch to numpy conventions

def trf(x):
    # (C, H, W) array of 8-bit-scale values -> (H, W, C) float image in [0, 1]
    return np.clip(np.transpose(x, [1,2,0]) / 256., 0., 1.)

# ## Load models

def load_model(
    filename,
    outerlayers=20,
    innerlayers=8,
    levels=4,
    splinebins=11,
    splinerange=10.0,
    dropout=0.0,
    actnorm=True,
    batchnorm=False,
    linlayers=2,
    linchannelfactor=2,
    lineartransform="lu"
):
    # Build a ManifoldFlow for 3x64x64 images with a 64-D latent space and
    # load its weights from ../data/models/<filename>.pt (mapped to CPU).
    # The same architecture is used for both the M-flow and PIE checkpoints.
    steps_per_level = outerlayers // levels

    # rational-quadratic spline settings shared by the coupling layers
    spline_params = {
        "apply_unconditional_transform": False,
        "min_bin_height": 0.001,
        "min_bin_width": 0.001,
        "min_derivative": 0.001,
        "num_bins": splinebins,
        "tail_bound": splinerange,
    }

    # multi-scale Glow-style transform between image space and the latent
    outer_transform = create_image_transform(
        3,
        64,
        64,
        levels=levels,
        hidden_channels=100,
        steps_per_level=steps_per_level,
        num_res_blocks=2,
        alpha=0.05,
        num_bits=8,
        preprocessing="glow",
        dropout_prob=dropout,
        multi_scale=True,
        spline_params=spline_params,
        postprocessing="partial_mlp",
        postprocessing_layers=linlayers,
        postprocessing_channel_factor=linchannelfactor,
        use_actnorm=actnorm,
        use_batchnorm=batchnorm,
    )
    # flow on the 64-D manifold coordinates
    inner_transform = create_vector_transform(
        64,
        innerlayers,
        linear_transform_type=lineartransform,
        base_transform_type="rq-coupling",
        context_features=1,
        dropout_probability=dropout,
        tail_bound=splinerange,
        num_bins=splinebins,
        use_batch_norm=batchnorm,
    )

    model = ManifoldFlow(
        data_dim=(3, 64, 64),
        latent_dim=64,
        outer_transform=outer_transform,
        inner_transform=inner_transform,
        apply_context_to_outer=False,
        pie_epsilon=0.1,
        clip_pie=None
    )

    model.load_state_dict(
        torch.load("../data/models/{}.pt".format(filename), map_location=torch.device("cpu"))
    )
    _ = model.eval()
    return model

def load_emf_model(
    filename,
    outerlayers=20,
    innerlayers=8,
    levels=4,
    splinebins=11,
    splinerange=10.0,
    dropout=0.0,
    actnorm=True,
    batchnorm=False,
    linlayers=2,
    linchannelfactor=2,
    lineartransform="lu"
):
    # Same as load_model, but builds an EncoderManifoldFlow (M_e-flow):
    # an encoder network replaces the exact inverse of the outer transform.
    steps_per_level = outerlayers // levels

    spline_params = {
        "apply_unconditional_transform": False,
        "min_bin_height": 0.001,
        "min_bin_width": 0.001,
        "min_derivative": 0.001,
        "num_bins": splinebins,
        "tail_bound": splinerange,
    }

    # encoder maps images directly to the 64-D latent
    encoder = create_image_encoder(
        3,
        64,
        64,
        latent_dim=64,
        context_features=None,
    )
    outer_transform = create_image_transform(
        3,
        64,
        64,
        levels=levels,
        hidden_channels=100,
        steps_per_level=steps_per_level,
        num_res_blocks=2,
        alpha=0.05,
        num_bits=8,
        preprocessing="glow",
        dropout_prob=dropout,
        multi_scale=True,
        spline_params=spline_params,
        postprocessing="partial_mlp",
        postprocessing_layers=linlayers,
        postprocessing_channel_factor=linchannelfactor,
        use_actnorm=actnorm,
        use_batchnorm=batchnorm,
    )
    inner_transform = create_vector_transform(
        64,
        innerlayers,
        linear_transform_type=lineartransform,
        base_transform_type="rq-coupling",
        context_features=1,
        dropout_probability=dropout,
        tail_bound=splinerange,
        num_bins=splinebins,
        use_batch_norm=batchnorm,
    )

    # NOTE(review): latent_dim=2 here, while the encoder above and load_model
    # both use 64 and the checkpoint is named "emf_64_..." — looks like it
    # should be 64; confirm against the training configuration before changing.
    model = EncoderManifoldFlow(
        data_dim=(3, 64, 64),
        latent_dim=2,
        encoder=encoder,
        outer_transform=outer_transform,
        inner_transform=inner_transform,
        apply_context_to_outer=False,
        pie_epsilon=0.1,
        clip_pie=None
    )

    model.load_state_dict(
        torch.load("../data/models/{}.pt".format(filename), map_location=torch.device("cpu"))
    )
    _ = model.eval()
    return model

mf = load_model("mf_64_gan64d_april")
emf = load_emf_model("emf_64_gan64d_april")
pie = load_model("pie_64_gan64d_april")

# ## Sample comparison

# +
n = 8

# ground truth and precomputed samples from each model variant
x_test = 0.5 + 255. * np.load("../data/samples/gan64d/x_test_prior.npy")[:n]
x_gen_af = np.load("../data/results/flow_2_gan64d_april_samples.npy")[:n]
x_gen_pie = np.load("../data/results/pie_64_gan64d_april_samples.npy")[:n]
# PIE off-manifold samples are drawn live (sample_orthogonal=True)
x_gen_pie_sample_v = pie.sample(u=None, context=torch.zeros((n,1)).to(torch.float), n=n, sample_orthogonal=True).detach().numpy()
x_gen_mf = np.load("../data/results/mf_64_gan64d_april_samples.npy")[:n]
x_gen_emf = np.load("../data/results/emf_64_gan64d_april_samples.npy")[:n]

# +
# Full-width grid: one row per model, one column per sample.
nrows = 6
ncols = 8

# xs = [x_test, x_gen_af, x_gen_pie, x_gen_mf, x_gen_emf]
# labels = ["Original", "AF", r"PIE", r"$\mathcal{M}$-flow", r"$\mathcal{M}_e$-flow"]
xs = [x_test, x_gen_af, x_gen_pie, x_gen_pie_sample_v, x_gen_mf, x_gen_emf]
labels = ["Original", "AF", r"PIE (manifold)", r"PIE (off-manifold)", r"$\mathcal{M}$-flow", r"$\mathcal{M}_e$-flow"]

fig, gs = ps.grid_width(ncols, nrows, width=ps.TEXTWIDTH, large_margin=0.04, small_margin=0.01, sep=0.005, t_space=False, b_space=False, r_space=False, l_space=True)

for i in range(ncols):
    for j, (x, label) in enumerate(zip(xs, labels)):
        ax = plt.subplot(gs[j*ncols + i])
        plt.imshow(trf(x[i]))
        plt.tick_params(axis='both', which='both', bottom=False, top=False, labelbottom=False, right=False, left=False, labelleft=False)
        if i == 0:
            plt.ylabel(label)

plt.savefig("../figures/gan64d_samples.pdf")

# +
# Compact variant (fewer columns, no off-manifold PIE row).
nrows = 5
ncols = 4

xs = [x_test, x_gen_af, x_gen_pie, x_gen_mf, x_gen_emf]
labels = ["Original", "AF", r"PIE", r"$\mathcal{M}$-flow", r"$\mathcal{M}_e$-flow"]

fig, gs = ps.grid_width(ncols, nrows, width=0.33 * ps.TEXTWIDTH, large_margin=0.06, small_margin=0.01, sep=0.005, t_space=False, b_space=False, r_space=False, l_space=True)

for i in range(ncols):
    for j, (x, label) in enumerate(zip(xs, labels)):
        ax = plt.subplot(gs[j*ncols + i])
        plt.imshow(trf(x[i]))
        plt.tick_params(axis='both', which='both', bottom=False, top=False, labelbottom=False, right=False, left=False, labelleft=False)
        if i == 0:
            plt.ylabel(label)

plt.savefig("../figures/gan64d_samples_small.pdf")
# -

# ## Test samples and projections to learned manifolds

# +
test_idx=list(range(8))
n_test = len(test_idx)

x_test = 0.5 + 255. * np.load("../data/samples/gan64d/x_test.npy")[test_idx]
x_reco_mf = np.load("../data/results/mf_64_gan64d_april_model_x_reco_test.npy")[test_idx]
x_reco_emf = np.load("../data/results/emf_64_gan64d_april_model_x_reco_test.npy")[test_idx]
x_reco_pie = np.load("../data/results/pie_64_gan64d_april_model_x_reco_test.npy")[test_idx]

# +
# Originals, reconstructions, and inverted residual maps (white = no error).
nrows = 5
ncols = 4
enhance = 1

labels = ["Original", "PIE", "Residual", "$\mathcal{M}$-flow", r"Residual"]

fig, gs = ps.grid_width(ncols, nrows, width=0.33 * ps.TEXTWIDTH, large_margin=0.06, small_margin=0.01, sep=0.005, t_space=False, b_space=False, r_space=False, l_space=True)

for i in range(ncols):
    xs = [
        trf(x_test[i]),
        trf(x_reco_pie[i]),
        1. - enhance*np.abs(trf(x_reco_pie[i]) - trf(x_test[i])),
        trf(x_reco_mf[i]),
        1. - enhance*np.abs(trf(x_reco_mf[i]) - trf(x_test[i])),
    ]
    for j, (x, label) in enumerate(zip(xs, labels)):
        ax = plt.subplot(gs[j * ncols + i])
        plt.imshow(x)
        plt.tick_params(axis='both', which='both', bottom=False, top=False, labelbottom=False, right=False, left=False, labelleft=False)
        if i == 0:
            plt.ylabel(label)

plt.savefig("../figures/gan64d_projections_small.pdf")
# -

# ## Training samples

# +
# Show 20 random images from the (non-train) dataset split.
loader = FFHQStyleGAN64DLoader()
data = loader.load_dataset(train=False, dataset_dir="../data/samples/gan64d")

fig = plt.figure(figsize=(5*3., 4*3.))

for i in range(20):
    x, _ = data[np.random.randint(len(data) - 1)]
    x_ = np.transpose(np.array(x), [1,2,0]) / 256.
    ax = plt.subplot(4, 5, i+1)
    plt.imshow(x_)
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)

plt.tight_layout()
plt.show()
# -
experiments/notebooks/gan64d_images.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Working with dates
# datetime.date.today() returns today's date
import datetime

today = datetime.date.today()
print(f'오늘 날짜는 {today} 입니다')

new_year = datetime.date(2020, 1, 1)
print(f'올해 신년 날짜는 {new_year} 입니다')

# +
# Working with times
# datetime.time() holds a time of day
import datetime

# time objects
noon = datetime.time(12, 0, 0)
print(noon)

three_pm = datetime.time(15, 0, 0)
print(three_pm)
# -

# A datetime object carries both a date and a time.

# Datetime object
import datetime

# current date and time
now = datetime.datetime.now()
print(now)

# +
import datetime

# a specific datetime
my_birthday = datetime.datetime(2000, 12, 18, 18, 28, 0)
print(my_birthday)

# +
# Iterate from a start date to an end date,
# printing each date along the way
import datetime

# the step between consecutive dates, as a timedelta
day_delta = datetime.timedelta(days=1)

# start from today's date
start_date = datetime.date.today()

# end 7 day_deltas after start_date (i.e. one week from today)
end_date = start_date + 7 * day_delta

# walk from the start date to the end date, printing each one
for i in range((end_date - start_date).days):
    print(start_date + i*day_delta)
# -

# timedelta expresses the difference between two datetimes
from datetime import datetime, timedelta

now = datetime.now()
print(now)
later = datetime(2020, 12, 18)
print(later)
delta = later - now
print(delta)

# +
from datetime import datetime, timedelta

now = datetime.now()
later = datetime(2020, 12, 18)
delta = later - now
print(type(delta))
print(type(delta.days))
print(type(delta.seconds))

# +
# Date a given number of days AFTER a given date
from datetime import date, timedelta

current_date = date.today().isoformat()
days_after = (date.today() + timedelta(days=30)).isoformat()

print(f'현재날짜 : {current_date}')
print(f'현재 날짜에 30일 이후 : {days_after}')

# +
# Date a given number of days BEFORE a given date
from datetime import date, timedelta

current_date = date.today().isoformat()
days_before = (date.today() - timedelta(days=30)).isoformat()

print(f'현재날짜 : {current_date}')
print(f'현재날짜의 30일 이전 날짜 : {days_before}')

# +
# Simple date arithmetic
import datetime

today = datetime.date.today()
print("오늘은 {}".format(today))

yesterday = today - datetime.timedelta(days=1)
print("어제는 {}".format(yesterday))

tomorrow = today + datetime.timedelta(days=1)
print("내일은 {}".format(tomorrow))

# NOTE(review): the message says "time between yesterday and today" but the
# expression is tomorrow - yesterday (2 days) — confirm which was intended.
print("어제와 오늘 사이의 시간 : {}".format(tomorrow - yesterday))

# +
# Subtracting (or adding) months from a date
import calendar
from datetime import date

def monthdelta(date, delta):
    # Return `date` shifted by `delta` months, clamping the day to the last
    # valid day of the target month (e.g. Jan 31 + 1 month -> Feb 28/29).
    # NOTE(review): the parameter `date` shadows the imported date class.
    # target month in 1..12; % 12 maps overflow correctly except for 0,
    # which the `if not m` branch fixes back to December
    m = (date.month + delta) % 12
    # carry whole years; the -1 keeps December in the same year
    y = date.year + (date.month + delta - 1) // 12
    if not m: m = 12
    # clamp the day-of-month to the target month's length
    d = min(date.day, calendar.monthrange(y, m)[1])
    return date.replace(day=d, month=m, year=y)

next_month = monthdelta(date.today(), 1)
print(next_month)

# +
# Using the third-party dateutil module for month arithmetic
import datetime
import dateutil.relativedelta

d = datetime.datetime.strptime(datetime.date.today().isoformat(), "%Y-%m-%d")
d2 = d - dateutil.relativedelta.relativedelta(months=1)
print(d2)

# +
# Converting between timezones
from datetime import datetime
from dateutil import tz

utc = tz.tzutc()
local = tz.tzlocal()

# naive UTC timestamp
utc_now = datetime.utcnow()
print(utc_now)

# attach the UTC timezone to make it aware
utc_now = utc_now.replace(tzinfo=utc)
print(utc_now)

# convert to the local timezone
local_now = utc_now.astimezone(local)
print(local_now)
# -
python/python_basic_date_and_time.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: jupyterlab
#     language: python
#     name: jupyterlab
# ---

# +
import os
import sys

sys.path.append('../')

import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import re

import src.io as sio
import src.preprocessing as spp
import src.fitting as sft
# -

# Measurement session analyzed by this notebook.
DATA_FOLDER = "20211116_FR0612-3G18_RT"
DATA_FOLDERPATH = sio.get_qudiamond_folderpath(DATA_FOLDER)
FIGURE_FOLDERPATH = sio.get_figure_folderpath(DATA_FOLDER)

AUTOCORR_SUBFOLDER = r"2021\11\20211126\Autocorrelation"
AUTOCORR_SUBFOLDERPATH = sio.get_qudi_data_path(os.path.join(DATA_FOLDER, AUTOCORR_SUBFOLDER))

# BUG FIX: a bare `df` expression stood here before `df` was ever assigned,
# which raised a NameError on a fresh kernel run. It has been removed.

# +
# Single autocorrelation trace (35 mW), time axis converted from ps to ns.
fig, ax = plt.subplots()
autocorr_file = "20211126-1305-05_35mW_autocorrelation.dat"
df = sio.read_into_df(os.path.join(AUTOCORR_SUBFOLDERPATH, autocorr_file))
# BUG FIX: the label previously used `file`, which is undefined until the loop
# in the next cell runs; slice the power tag (e.g. "35mW_") from the filename.
ax.plot(df["Time (ps)"] / 1e3, df["g2(t) norm"], label=f"{autocorr_file[17:22]}")
ax.set_xlabel("Time (ns)")
ax.set_ylabel("g2(t) norm")
sio.save_figures(folder=FIGURE_FOLDERPATH, filename="20211126-1305-05_35mW_autocorrelation")

# +
# Overlay all power-dependent autocorrelation traces in the session folder.
# %matplotlib widget
fig, ax = plt.subplots()
for file in os.listdir(AUTOCORR_SUBFOLDERPATH):
    if file.endswith("mW_autocorrelation.dat"):
        print(file)
        df = sio.read_into_df(os.path.join(AUTOCORR_SUBFOLDERPATH, file))
        ax.plot(df["Time (ps)"], df["g2(t) norm"], label=f"{file[17:22]}")
fig.legend()
# -

# Optimizer positions (x, y, z in meters) from repeated refocus runs.
optimized_positions = np.array(
    [
        [3.43827438e-05, 9.13976768e-06, 4.71517473e-06],
        [3.49163374e-05, 9.00172571e-06, 4.71517473e-06],
        [3.53106037e-05, 9.13064807e-06, 4.71517473e-06],
        [3.46186516e-05, 9.17515368e-06, 4.71517473e-06]
    ]
)

for pos in optimized_positions:
    print(pos)

# +
# Position stability per axis, in micrometers.
# %matplotlib widget
fig, (ax1, ax2, ax3) = plt.subplots(nrows=3)
ax1.plot(optimized_positions[:, 0] * 1e6, ".-", label=f"stddev = {np.std(optimized_positions[:, 0]) * 1e6:.2f} um")
ax1.legend()
ax2.plot(optimized_positions[:, 1] * 1e6, ".-", label=f"stddev = {np.std(optimized_positions[:, 1]) * 1e6:.2f} um")
ax2.legend()
# CONSISTENCY FIX: z was plotted in meters while x and y were in micrometers
# (its stddev label was already in um); scale z by 1e6 to match.
ax3.plot(optimized_positions[:, 2] * 1e6, ".-", label=f"stddev = {np.std(optimized_positions[:, 2]) * 1e6:.2f} um")
ax3.legend()

# +
# Back-of-the-envelope expected value of N trades at the given success rate.
N = 16
success_rate = 0.75
profit = 1000
loss = 2000 / 4

((N * success_rate) * profit) - (N * (1 - success_rate) * loss)
notebooks/20211116_FR0612-3G18_RT.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Before your start: # - Read the README.md file # - Comment as much as you can and use the resources in the README.md file # - Happy learning! # + # Import your libraries: # %matplotlib inline import numpy as np import pandas as pd # - # In this lab, we will explore a dataset that describes websites with different features and labels them either benign or malicious . We will use supervised learning algorithms to figure out what feature patterns malicious websites are likely to have and use our model to predict malicious websites. # # # Challenge 1 - Explore The Dataset # # Let's start by exploring the dataset. First load the data file: websites = pd.read_csv('../website.csv') # #### Explore the data from an bird's-eye view. # # You should already been very familiar with the procedures now so we won't provide the instructions step by step. Reflect on what you did in the previous labs and explore the dataset. # # Things you'll be looking for: # # * What the dataset looks like? # * What are the data types? # * Which columns contain the features of the websites? # * Which column contains the feature we will predict? What is the code standing for benign vs malicious websites? # * Do we need to transform any of the columns from categorical to ordinal values? If so what are these columns? # # Feel free to add additional cells for your explorations. Make sure to comment what you find out. # Your code here # + # Your comment here # - # #### Next, evaluate if the columns in this dataset are strongly correlated. # # In the Mushroom supervised learning lab we did recently, we mentioned we are concerned if our dataset has strongly correlated columns because if it is the case we need to choose certain ML algorithms instead of others. 
We need to evaluate this for our dataset now. # # Luckily, most of the columns in this dataset are ordinal which makes things a lot easier for us. In the next cells below, evaluate the level of collinearity of the data. # # We provide some general directions for you to consult in order to complete this step: # # 1. You will create a correlation matrix using the numeric columns in the dataset. # # 1. Create a heatmap using `seaborn` to visualize which columns have high collinearity. # # 1. Comment on which columns you might need to remove due to high collinearity. # Your code here # + # Your comment here # - # # Challenge 2 - Remove Column Collinearity. # # From the heatmap you created, you should have seen at least 3 columns that can be removed due to high collinearity. Remove these columns from the dataset. # # Note that you should remove as few columns as you can. You don't have to remove all the columns at once. But instead, try removing one column, then produce the heatmap again to determine if additional columns should be removed. As long as the dataset no longer contains columns that are correlated for over 90%, you can stop. Also, keep in mind when two columns have high collinearity, you only need to remove one of them but not both. # # In the cells below, remove as few columns as you can to eliminate the high collinearity in the dataset. Make sure to comment on your way so that the instructional team can learn about your thinking process which allows them to give feedback. At the end, print the heatmap again. # Your code here # + # Your comment here # - # Print heatmap again # # Challenge 3 - Handle Missing Values # # The next step would be handling missing values. **We start by examining the number of missing values in each column, which you will do in the next cell.** # Your code here # If you remember in the previous labs, we drop a column if the column contains a high proportion of missing values. 
After dropping those problematic columns, we drop the rows with missing values.
#
# #### In the cells below, handle the missing values from the dataset. Remember to comment the rationale of your decisions.

# Your code here

# +
# Your comment here
# -

# #### Again, examine the number of missing values in each column.
#
# If all cleaned, proceed. Otherwise, go back and do more cleaning.

# Examine missing values in each column

# # Challenge 4 - Handle `WHOIS_*` Categorical Data

# There are several categorical columns we need to handle. These columns are:
#
# * `URL`
# * `CHARSET`
# * `SERVER`
# * `WHOIS_COUNTRY`
# * `WHOIS_STATEPRO`
# * `WHOIS_REGDATE`
# * `WHOIS_UPDATED_DATE`
#
# How to handle string columns is always case by case. Let's start by working on `WHOIS_COUNTRY`. Your steps are:
#
# 1. List out the unique values of `WHOIS_COUNTRY`.
# 1. Consolidate the country values with consistent country codes. For example, the following values refer to the same country and should use a consistent country code:
#     * `CY` and `Cyprus`
#     * `US` and `us`
#     * `SE` and `se`
#     * `GB`, `United Kingdom`, and `[u'GB'; u'UK']`
#
# #### In the cells below, fix the country values as instructed above.

# Your code here

# Since we have fixed the country values, can we convert this column to ordinal now?
#
# Not yet. If you reflect on how we handled categorical columns in the previous labs, you probably remember we ended up dropping a lot of those columns because there were too many unique values. Too many unique values in a column are not desirable in machine learning because they make prediction inaccurate. But there are workarounds under certain conditions. One of the fixable conditions is:
#
# #### If a limited number of values account for the majority of data, we can retain these top values and re-label all other rare values.
#
# The `WHOIS_COUNTRY` column happens to be this case.
You can verify this by printing a bar chart of the `value_counts` in the next cell:

# Your code here

# #### After verifying, now let's keep the top 10 values of the column and re-label all other values with `OTHER`.

# Your code here

# Now since `WHOIS_COUNTRY` has been re-labelled, we don't need `WHOIS_STATEPRO` any more because the values of the states or provinces may not be relevant any more. We'll drop this column.
#
# In addition, we will also drop `WHOIS_REGDATE` and `WHOIS_UPDATED_DATE`. These are the registration and update dates of the website domains. Not of our concern.
#
# #### In the next cell, drop `['WHOIS_STATEPRO', 'WHOIS_REGDATE', 'WHOIS_UPDATED_DATE']`.

# Your code here

# # Challenge 5 - Handle Remaining Categorical Data & Convert to Ordinal
#
# Now print the `dtypes` of the data again. Besides `WHOIS_COUNTRY` which we already fixed, there should be 3 categorical columns left: `URL`, `CHARSET`, and `SERVER`.

# Your code here

# #### `URL` is easy. We'll simply drop it because it has so many unique values that there's no way for us to consolidate them.

# Your code here

# #### Print the unique value counts of `CHARSET`. You see there are only a few unique values. So we can keep it as it is.

# +
# Your code here
# -

# `SERVER` is a little more complicated. Print its unique values and think about how you can consolidate those values.
#
# #### Before you think of your own solution, don't read the instructions that come next.

# Your code here

# ![Think Hard](../think-hard.jpg)

# Your comment here

# Although there are so many unique values in the `SERVER` column, there are actually only 3 main server types: `Microsoft`, `Apache`, and `nginx`. Just check if each `SERVER` value contains any of those server types and re-label them. For `SERVER` values that don't contain any of those substrings, label with `Other`.
#
# At the end, your `SERVER` column should only contain 4 unique values: `Microsoft`, `Apache`, `nginx`, and `Other`.
# Your code here # Count `SERVER` value counts here # OK, all our categorical data are fixed now. **Let's convert them to ordinal data using Pandas' `get_dummies` function ([documentation](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.get_dummies.html)).** Make sure you drop the categorical columns by passing `drop_first=True` to `get_dummies` as we don't need them any more. **Also, assign the data with dummie values to a new variable `website_dummy`.** # Your code here # Now, inspect `website_dummy` to make sure the data and types are intended - there shouldn't be any categorical columns at this point. # Your code here # # Challenge 6 - Modeling, Prediction, and Evaluation # # We'll start off this section by splitting the data to train and test. **Name your 4 variables `X_train`, `X_test`, `y_train`, and `y_test`. Select 80% of the data for training and 20% for testing.** # + from sklearn.model_selection import train_test_split # Your code here: # - # #### For this lab, we will opt to use SVM. # # Support Vector Machines, or SVM, is an algorithm that aims to draw a line or a plane between the two groups such that they are linearly separable and the distance from the observations of each group to the line or plane is maximized. The goal of the algorithm is to find the line or plane that separates the groups. You can read more about this algorithm [here](https://en.wikipedia.org/wiki/Support_vector_machine). # # In the next cell, `svm` will be imported for you. **You will initialize the proper estimator, fit the training data, and predict the test data.** # # The `sklearn.svm` class documentation can be found [here](https://scikit-learn.org/stable/modules/classes.html#module-sklearn.svm). By reading the documentation and searching online, the question you'll need to answer is **which SVM estimator to use**? When you choose the estimator, keep the following in mind: # # * Our data are categorical, not continuous. 
# # * We have removed the correlated columns. All columns we have right now are independent. # # If your statistical knowledge is not adequate at this moment, don't worry. Just play around and make an informed guess. We'll evaluate your prediction in the next step. If the prediction is unsatisfactory you can move back to this step to modify your estimator. # + from sklearn import svm # Your code here: # - # In the following cell, we'll show you how to compute the accuracy of your prediction. The output score will show you how often your classifier is correct. If you have used the proper estimator, your accuracy score should be over 0.9. However, if your accuracy score is unsatisfactory, go back to the previous step to try another estimator until you produce a satisfactory accuracy score. # + # Computer prediction accuracy from sklearn import metrics print("Accuracy:",metrics.accuracy_score(y_test, y_pred)) # - # # Bonus Challenge - Feature Scaling # # Problem-solving in machine learning is iterative. You can improve your model prediction with various techniques (there is a sweetspot for the time you spend and the improvement you receive though). Now you've completed only one iteration of ML analysis. There are more iterations you can conduct to make improvements. In order to be able to do that, you will need deeper knowledge in statistics and master more data analysis techniques. In this bootcamp, we don't have time to achieve that advanced goal. But you will make constant efforts after the bootcamp to eventually get there. # # However, now we do want you to learn one of the advanced techniques which is called *feature scaling*. The idea of feature scaling is to standardize/normalize the range of independent variables or features of the data. This can make the outliers more apparent so that you can remove them. 
This step needs to happen during Challenge 6 after you split the training and test data, because you don't want to split the data again, which would make it impossible to compare your results with and without feature scaling. For general concepts about feature scaling, click [here](https://en.wikipedia.org/wiki/Feature_scaling). To read deeper, click [here](https://medium.com/greyatom/why-how-and-when-to-scale-your-features-4b30ab09db5e).
#
# In the next cell, attempt to improve your model prediction accuracy by means of feature scaling. A library you can utilize is `sklearn.preprocessing.RobustScaler` ([documentation](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.RobustScaler.html)). You'll use the `RobustScaler` to fit and transform your `X_train`, then transform `X_test`. You will use SVM to fit and predict your transformed data and obtain the accuracy score in the same way. Compare the accuracy score on your scaled data with the previous accuracy score. Is there an improvement?

# +
# Your code here
module-3/lab-supervised-learning/your-code/main.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- reset #Please run this code block only once, the first time you are running this notebook # !pip3 install google.cloud #To be able to retrieve data from google cloud bigquery tables from google.cloud import bigquery # + # authentication in myBinder # !./google-cloud-sdk/bin/gcloud auth application-default login # If you install google cloud sdk # #!gcloud auth application-default login # - project_id='syntheticlethality' #please replace syntheticlethality with your own projectid client = bigquery.Client(project_id) #client = bigquery.Client(credentials=credentials, project=credentials.project_id) # Retireve BRCA1 gene effect score of the first data entry for gene BRCA1 from CRISPR dataset crispr_query=""" SELECT * FROM `syntheticlethality.DepMap_public_20Q3.Achilles_gene_effect` WHERE Hugo_Symbol='BRCA1' LIMIT 10 """ brca1_gene_effect=client.query(crispr_query).result().to_dataframe() brca1_gene_effect # Retrieve the first 10 records for BRCA1 gene expression from pancancer atlas dataset pancancer_atlas_query=""" SELECT * FROM `pancancer-atlas.Filtered.EBpp_AdjustPANCAN_IlluminaHiSeq_RNASeqV2_genExp_filtered` WHERE Symbol='BRCA1' LIMIT 10 """ brca1_gene_exp=client.query(pancancer_atlas_query).result().to_dataframe() brca1_gene_exp
first_notebook.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.6 # language: python # name: python36 # --- # Copyright (c) Microsoft Corporation. All rights reserved. # # Licensed under the MIT License. # # Automated Machine Learning: Classification with Deployment # # In this example we use the scikit learn's [digit dataset](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_digits.html) to showcase how you can use AutoML for a simple classification problem and deploy it to an Azure Container Instance (ACI). # # Make sure you have executed the [configuration](../configuration.ipynb) before running this notebook. # # In this notebook you will learn how to: # 1. Create an experiment using an existing workspace. # 2. Configure AutoML using `AutoMLConfig`. # 3. Train the model using local compute. # 4. Explore the results. # 5. Register the model. # 6. Create a container image. # 7. Create an Azure Container Instance (ACI) service. # 8. Test the ACI service. # # ## Create an Experiment # # As part of the setup you have already created an Azure ML `Workspace` object. For AutoML you will need to create an `Experiment` object, which is a named object in a `Workspace` used to run experiments. 
# + import json import logging import os import random from matplotlib import pyplot as plt from matplotlib.pyplot import imshow import numpy as np import pandas as pd from sklearn import datasets import azureml.core from azureml.core.experiment import Experiment from azureml.core.workspace import Workspace from azureml.train.automl import AutoMLConfig from azureml.train.automl.run import AutoMLRun # + ws = Workspace.from_config() # choose a name for experiment experiment_name = 'automl-local-classification' # project folder project_folder = './sample_projects/automl-local-classification' experiment=Experiment(ws, experiment_name) output = {} output['SDK version'] = azureml.core.VERSION output['Subscription ID'] = ws.subscription_id output['Workspace'] = ws.name output['Resource Group'] = ws.resource_group output['Location'] = ws.location output['Project Directory'] = project_folder output['Experiment Name'] = experiment.name pd.set_option('display.max_colwidth', -1) pd.DataFrame(data=output, index=['']).T # - # ## Diagnostics # # Opt-in diagnostics for better experience, quality, and security of future releases. from azureml.telemetry import set_diagnostics_collection set_diagnostics_collection(send_diagnostics = True) # ## Configure AutoML # # Instantiate a AutoMLConfig object. This defines the settings and data used to run the experiment. # # |Property|Description| # |-|-| # |**task**|classification or regression| # |**primary_metric**|This is the metric that you want to optimize. Classification supports the following primary metrics: <br><i>accuracy</i><br><i>AUC_weighted</i><br><i>average_precision_score_weighted</i><br><i>norm_macro_recall</i><br><i>precision_score_weighted</i>| # |**iteration_timeout_minutes**|Time limit in minutes for each iteration.| # |**iterations**|Number of iterations. 
In each iteration AutoML trains a specific pipeline with the data.| # |**n_cross_validations**|Number of cross validation splits.| # |**X**|(sparse) array-like, shape = [n_samples, n_features]| # |**y**|(sparse) array-like, shape = [n_samples, ], [n_samples, n_classes]<br>Multi-class targets. An indicator matrix turns on multilabel classification. This should be an array of integers.| # |**path**|Relative path to the project folder. AutoML stores configuration files for the experiment under this folder. You can specify a new empty folder.| # + digits = datasets.load_digits() X_train = digits.data[10:,:] y_train = digits.target[10:] automl_config = AutoMLConfig(task = 'classification', name = experiment_name, debug_log = 'automl_errors.log', primary_metric = 'AUC_weighted', iteration_timeout_minutes = 20, iterations = 10, n_cross_validations = 2, verbosity = logging.INFO, X = X_train, y = y_train, path = project_folder) # - # ## Train the Models # # Call the `submit` method on the experiment object and pass the run configuration. Execution of local runs is synchronous. Depending on the data and the number of iterations this can run for a while. # In this example, we specify `show_output = True` to print currently running iterations to the console. local_run = experiment.submit(automl_config, show_output = True) # ### Retrieve the Best Model # # Below we select the best pipeline from our iterations. The `get_output` method on `automl_classifier` returns the best run and the fitted model for the last invocation. Overloads on `get_output` allow you to retrieve the best run and fitted model for *any* logged metric or for a particular *iteration*. best_run, fitted_model = local_run.get_output() # ### Register the Fitted Model for Deployment # If neither `metric` nor `iteration` are specified in the `register_model` call, the iteration with the best primary metric is registered. 
description = 'AutoML Model' tags = None model = local_run.register_model(description = description, tags = tags) local_run.model_id # This will be written to the script file later in the notebook. # ### Create Scoring Script # + # %%writefile score.py import pickle import json import numpy import azureml.train.automl from sklearn.externals import joblib from azureml.core.model import Model def init(): global model model_path = Model.get_model_path(model_name = '<<modelid>>') # this name is model.id of model that we want to deploy # deserialize the model file back into a sklearn model model = joblib.load(model_path) def run(rawdata): try: data = json.loads(rawdata)['data'] data = numpy.array(data) result = model.predict(data) except Exception as e: result = str(e) return json.dumps({"error": result}) return json.dumps({"result":result.tolist()}) # - # ### Create a YAML File for the Environment # To ensure the fit results are consistent with the training results, the SDK dependency versions need to be the same as the environment that trains the model. Details about retrieving the versions can be found in notebook [12.auto-ml-retrieve-the-training-sdk-versions](12.auto-ml-retrieve-the-training-sdk-versions.ipynb). # + experiment_name = 'automl-local-classification' experiment = Experiment(ws, experiment_name) ml_run = AutoMLRun(experiment = experiment, run_id = local_run.id) # - dependencies = ml_run.get_run_sdk_dependencies(iteration = 7) for p in ['azureml-train-automl', 'azureml-sdk', 'azureml-core']: print('{}\t{}'.format(p, dependencies[p])) # + from azureml.core.conda_dependencies import CondaDependencies myenv = CondaDependencies.create(conda_packages=['numpy','scikit-learn'], pip_packages=['azureml-sdk[automl]']) conda_env_file_name = 'myenv.yml' myenv.save_to_file('.', conda_env_file_name) # + # Substitute the actual version number in the environment file. 
# This is not strictly needed in this notebook because the model should have been generated using the current SDK version. # However, we include this in case this code is used on an experiment from a previous SDK version. with open(conda_env_file_name, 'r') as cefr: content = cefr.read() with open(conda_env_file_name, 'w') as cefw: cefw.write(content.replace(azureml.core.VERSION, dependencies['azureml-sdk'])) # Substitute the actual model id in the script file. script_file_name = 'score.py' with open(script_file_name, 'r') as cefr: content = cefr.read() with open(script_file_name, 'w') as cefw: cefw.write(content.replace('<<modelid>>', local_run.model_id)) # - # ### Create a Container Image # + from azureml.core.image import Image, ContainerImage image_config = ContainerImage.image_configuration(runtime= "python", execution_script = script_file_name, conda_file = conda_env_file_name, tags = {'area': "digits", 'type': "automl_classification"}, description = "Image for automl classification sample") image = Image.create(name = "automlsampleimage", # this is the model object models = [model], image_config = image_config, workspace = ws) image.wait_for_creation(show_output = True) if image.creation_state == 'Failed': print("Image build log at: " + image.image_build_log_uri) # - # ### Deploy the Image as a Web Service on Azure Container Instance # + from azureml.core.webservice import AciWebservice aciconfig = AciWebservice.deploy_configuration(cpu_cores = 1, memory_gb = 1, tags = {'area': "digits", 'type': "automl_classification"}, description = 'sample service for Automl Classification') # + from azureml.core.webservice import Webservice aci_service_name = 'automl-sample-01' print(aci_service_name) aci_service = Webservice.deploy_from_image(deployment_config = aciconfig, image = image, name = aci_service_name, workspace = ws) aci_service.wait_for_deployment(True) print(aci_service.state) # - # ### Delete a Web Service # + #aci_service.delete() # - # ### Get Logs from 
a Deployed Web Service # + #aci_service.get_logs() # - # ### Test a Web Service # + #Randomly select digits and test digits = datasets.load_digits() X_test = digits.data[:10, :] y_test = digits.target[:10] images = digits.images[:10] for index in np.random.choice(len(y_test), 3, replace = False): print(index) test_sample = json.dumps({'data':X_test[index:index + 1].tolist()}) predicted = aci_service.run(input_data = test_sample) label = y_test[index] predictedDict = json.loads(predicted) title = "Label value = %d Predicted value = %s " % ( label,predictedDict['result'][0]) fig = plt.figure(1, figsize = (3,3)) ax1 = fig.add_axes((0,0,.8,.8)) ax1.set_title(title) plt.imshow(images[index], cmap = plt.cm.gray_r, interpolation = 'nearest') plt.show()
how-to-use-azureml/automated-machine-learning/classification-with-deployment/auto-ml-classification-with-deployment.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Building the CASBAH Galaxy Database (v1.0) # ## SDSS # ### Targeting # I am unclear on how to sensibly extract targeting information from the # SDSS. But this may well be an issue for various analyses. # ### Extracting Galaxy data # The script build_sdss loops through the listed fields with SDSS # coverage and calls the grab_sdss_spectra script to grab photometric # and spectral data. It is currently grabbing DR12. # # Here are some defaults: # * Box of 2deg on a side # * Photometry is Petrosian ugriz # * Galaxies are cut down to 20Mpc separation (LCDM cosmology) # * z > 500km/s to cut stars # # Here is the basic procedure: # * Query photometry all objects in search box with spectra # * Query list of spectra from SDSS in search box # * This list often contain duplicates from multiple passes # * Cut on 20Mpc using redshifts, RA+DEC of obj, and RA+DEC of QSO # * Loop on sources to build table # * Take BOSS data over SDSS # * Generate a binary FITS table, including photometry, redshift and spectra # Example call from xastropy.casbah import galaxy_data as xcgd reload(xcgd) radec = (212.34957*u.deg,26.30585*u.deg) tab=xcgd.grab_sdss_spectra(radec, radius=1.*u.degree/12.) Table(tab[0:5]) # ### Open questions/issues # 1. Should we be recording other observing meta-data? # 1. How about SFR, M*, etc.? 
# ## DEIMOS # ### Targeting # # * Pull mask target info from Mask files :: parse_deimos_mask_file # * Pull other target info from SExtractor output # * Requires yaml file describing target criteria # * And the SExtractor output file # # #### Sample output of MULTI_OBS file # # MULTI_OBJ file: # # | INSTR | MASK_NAME | MASK_RA | MASK_DEC | MASK_EPOCH | MASK_PA | DATE_OBS | DISPERSER | TEXP | CONDITIONS | # # | DEIMOS | PG1407_may_early | 14:09:34.10 | 26:18:45.1 | 2000.0 | -96.1 | 23-Jul-2015 | G600 | 3600.0 | POOR_SEEING,CLOUDS | # # | DEIMOS | PG1407_may_early | 14:09:34.10 | 26:18:45.1 | 2000.0 | -96.1 | 24-Jul-2015 | G600 | 3600.0 | CLEAR | # #### Sample of target file fil='/Users/xavier/CASBAH/Galaxies/PG1407+265/PG1407+265_targets.fits' targ = Table.read(fil) # mt = np.where(targ['MASK_NAME'] != 'N/A')[0] targ[mt[0:5]] # ## Testing fil='/Users/xavier/CASBAH/Galaxies/PG1407+265/PG1407+265_targets.fits' tmp = Table.read(fil,fill_values=[('N/A','0','MASK_NAME')],format='fits')
xastropy/casbah/CASBAH_galaxy_database.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Run length encode/decode code
# https://gist.github.com/nvictus/66627b580c13068589957d6ab0919e66
# -

# Import some data
import numpy as np  # needed by rldecode below; was missing from the original imports
import pandas as pd

train_labels = pd.read_csv('stage1_train_labels.csv')

# +
# To unflatten:
# https://docs.scipy.org/doc/numpy/reference/generated/numpy.unravel_index.html
# -


def rldecode(starts, lengths, values, minlength=None):
    """
    Decode a run-length encoding of a 1D array.

    Parameters
    ----------
    starts, lengths, values : 1D array_like
        The run-length encoding: run i covers indices
        ``starts[i] .. starts[i] + lengths[i] - 1`` with value ``values[i]``.
    minlength : int, optional
        Minimum length of the output array.

    Returns
    -------
    1D float array. Positions not covered by any run are filled with NaN.
    """
    starts, lengths, values = map(np.asarray, (starts, lengths, values))
    # TODO: check validity of the RLE (equal-length arrays, non-negative
    # lengths, non-overlapping runs); see numpy.unravel_index to map the
    # flat indices back to 2D mask coordinates.
    ends = starts + lengths
    # Guard the empty encoding: ends[-1] on an empty array raises IndexError.
    n = ends[-1] if len(ends) else 0
    if minlength is not None:
        n = max(minlength, n)
    # Output is float so uncovered gaps can carry NaN.
    x = np.full(n, np.nan)
    for lo, hi, val in zip(starts, ends, values):
        x[lo:hi] = val
    return x
Archive/Post Analysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/erickaalgr/CpEN-21A-BSCpE-1-1/blob/main/Final_Exam.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="aL32TJzk4bwa" # #Final Exam # # ##CONTROL AND LOOPING STATEMENT # + [markdown] id="i6q1fOjE5dFj" # ### PROBLEM STATEMENT 1 # # Create a Python program that will produce an output of sum of 10 numbers than using FOR LOOP statement. (30 points) # + colab={"base_uri": "https://localhost:8080/"} id="h469tyhX4Hkv" outputId="69f0a603-378c-4d87-cb3b-e605f96dc2b8" sum=0 num=[4, 3, 2, 1, 0, -1, -2, -3, -4, -5] for x in num: sum= sum + x print("The sum of 10 numbers less than 5 is", sum) # + [markdown] id="ymkW7L3B6GK9" # ###PROBLEM STATEMENT 2 # # Create a Python program that will produce accept five numbers and determine the sum of first and last number among five numbers entered using WHILE LOOP statement. (35 points) # + id="D85jDAwLJv5n" colab={"base_uri": "https://localhost:8080/"} outputId="67defcf2-ddaf-4e08-d675-e31b58dcf21b" num1=int(input("First number: ")) while(num1!=0): num2=int(input('Second number: ')) num3=int(input('Third number: ')) num4=int(input('Fourth number: ')) num5=int(input('Fifth number: ')) break while(num5!=0): sum=num1+num5 print("The sum of first and last number is ",sum) num1+=1 break # + [markdown] id="y9N1p2aQ6fGm" # ###PROBLEM STATEMENT 3 # # Create a Python program to calculate student grades. 
It accepts a numerical grade as input and it will display the character grade as output based on the given scale using Nested If-Else statement.(35 points) # + colab={"base_uri": "https://localhost:8080/"} id="b9HhDsreui_6" outputId="60b3565d-c1ec-4f2f-e065-79726241c536" #This cell uses Nested If-Else statement. grade= int(input("Enter your Numerical Grade: ")) if grade>=90: print("Character Grade: A") else: if grade>=80 and grade<=89: print("Character Grade: B") else: if grade>=70 and grade<=79: print("Character Grade: C") else: if grade>=60 and grade<=69: print("Character Grade: D") else: print("Character Grade: F") # + colab={"base_uri": "https://localhost:8080/"} id="DbGtTlVN7IU9" outputId="444b7dc6-c22c-4a98-bb6f-65cf651f6b5a" #This cell uses If-Elif-Else Statement. grade= int(input("Enter your Numerical Grade: ")) if grade>=90: print("Character Grade: A") elif grade>=80 and grade<=89: print("Character Grade: B") elif grade>=70 and grade<=79: print("Character Grade: C") elif grade>=60 and grade<=69: print("Character Grade: D") else: print("Character Grade: F")
Final_Exam.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Teorema de Pitágoras # **O objetivo dessa aula é entender como funciona o teorema de Pitágoras através da programação python. # # A equação do teorema de Pitágoras é: # # a^2 + b^2 = c^2 # # em que a e b são as medidas dos dois catetos do triângulo, e c é a medida da hipotenusa. # # Um triângulo é dito retângulo quando possui um ângulo reto, ou seja, que tem medida igual a 90º. Os lados de um triângulo retângulo recebem nomes especiais: os lados que formam o ângulo de 90º são chamados de catetos, já o terceiro lado, que é oposto ao ângulo de 90º, é chamado de hipotenusa. # # Assim que pensarmos em triângulo retângulo, devemos automaticamente lembrar do mais famoso dos teoremas matemáticos: o Teorema de Pitágoras. Esse teorema está relacionado à hipotenusa, que representaremos por a, e os catetos, que representaremos por b e c. # # * Mas afinal, quando devemos usar o teorema de Pitágoras? Normalmente o teorema de Pitágoras é utilizado quando conhecemos dois lados de um triângulo retângulo e queremos encontrar o terceiro lado. # # # Com a linguagem de programação podemos fazer um programa para inserir os valores dos catetos e obter o valor da hipotenusa. # # * Faça um programa que leia o comprimento do cateto oposto e do cateto adjacente de um triângulo retângulo. Calcule e mostre o comprimento da hipotenusa. # # + print('*** Resolução para o problema do teorema de Pitágoras ***\n') print('* co = cateto oposto') print('* ca = cateto adjacente') print('* hip = hipotenusa') co = float(input('Digite o comprimento do cateto oposto: ')) ca = float(input('Digite o comprimento do cateto adjacente: ')) hip = (co**2 + ca**2) ** (1/2) # (1/2) => É a forma de calcular a raiz quadada de um número, no caso a raiz da hipotenusa. 
# :.2f é para definir quantas casas decimais quero que apareça no meu resultado. print(f'A hipotenusa vai medir {hip:.2f}')
Prova - plano de aula pitagoras EM.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import tweepy
from textblob import TextBlob
import pandas as pd
from plotly import __version__
import cufflinks as cf
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
init_notebook_mode(connected=True)
cf.go_offline()
import matplotlib.pyplot as plt
from tweepy import Stream, StreamListener
import json, re, csv

# +
consumer_key = 'xxxxxxxxxxxxxxxxxx'
consumer_key_secret = '<KEY>'
access_token = '<KEY>'
access_token_secret = '<KEY>'

auth = tweepy.OAuthHandler(consumer_key, consumer_key_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)

# +
# Running sentiment counters, accumulated across all tweets seen so far.
pos1, neg1, neu1 = 0, 0, 0
header = ['Positive', 'Negative', 'Neutral']

# Create an empty csv file which has three headers: 'Positive','Negative','Neutral'
with open('List.csv', 'w') as file:
    write = csv.DictWriter(file, fieldnames=header)
    write.writeheader()


# doc for Stream Listener to tweepy: http://docs.tweepy.org/en/latest/streaming_how_to.html
class Listener(StreamListener):
    """Stream listener that scores each tweet's sentiment with TextBlob and
    appends the running Positive/Negative/Neutral counts to List.csv."""

    def on_data(self, data):
        raw_t = json.loads(data)
        try:
            # this assumes that the data in json has a 'text' field; if not,
            # control falls through to except and a message is printed
            data = raw_t['text']
            # the four lines below clean the tweet by removing: mentions,
            # hash tags, retweet markers and links.
            # NOTE: the original pattern was '@[A-Za-z0–9]+' with an en-dash,
            # which silently failed to match digits 1-8 in handles.
            data = re.sub(r'@[A-Za-z0-9]+', '', data)  # Removing @mentions
            data = re.sub(r'#', '', data)              # Removing '#' hash tag
            data = re.sub(r'RT[\s]+', '', data)        # Removing RT
            data = re.sub(r'https?://\S+', '', data)   # Removing hyperlink
            global pos1
            global neg1
            global neu1
            pos, neg, neu = 0, 0, 0
            print(data)
            analysis = TextBlob(data)
            print(analysis.sentiment)
            # Count the tweet based on the sign of its polarity
            # (sentiment[0] is polarity in [-1, 1]).
            if analysis.sentiment[0] > 0:
                pos += 1
            elif analysis.sentiment[0] < 0:
                neg += 1
            else:
                neu += 1
            pos1 = pos1 + pos
            neg1 = neg1 + neg
            neu1 = neu1 + neu
            # write the running totals to the csv file "List.csv"
            with open('List.csv', 'a') as file:
                writer = csv.DictWriter(file, fieldnames=header)
                info = {
                    'Positive': pos1,
                    'Negative': neg1,
                    'Neutral': neu1
                }
                writer.writerow(info)
            print(data)
            return True
        except Exception:
            # Was a bare `except:`, which would also swallow
            # KeyboardInterrupt/SystemExit; keep the best-effort behaviour
            # but only for ordinary errors (e.g. a missing 'text' field).
            print('You did something wrong, take a look at the data')

    def on_error(self, status):
        print(status)


l = Listener()
# -

stream = Stream(auth, l)
stream.filter(track=['bloomberg'])
Sentiment Analysis in the real time.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Projet Corona (Python) # language: python # name: corona # --- # https://github.com/equancy/cookiecutter-data-science-project # https://trello.com/b/N8OiJE0b/bienvenue-sur-trello # https://docs.google.com/spreadsheets/d/1wQVypefm946ch4XDp37uZ-wartW4V7ILdg-qYiDXUHM/htmlview?usp=sharing&sle=true# # + active="" # # Création d'un environnement dédié au projet # conda create -n corona # # # Activation de l'environnement # conda activate corona # ==> (corona) MacBook-Pro-de-Djiby:notebooks djibybalde$ # # # Installation du kernel pour travailler avec Jupyter # conda install ipykernel # # # Ajout de l'environnement à Jupyter # python -m ipykernel install --user --name corona --display-name "Projet Corona (Python)" # # # Pour mémoire, pour revenir à l'environnement de base # conda deactivate # # # Lancement de Jupyter Lab # jupyter lab # # # ## Nouveau terminal # conda activate corona # # # Requirements to use the cookiecutter template: # pip install cookiecutter # # # To start a new project, run: # cookiecutter http://github.com/equancy/cookiecutter-data-science-project # # project_name [project_name]: djiby # repo_name [djiby]: covid19 # le nom du dossier # author_name [Equancy]: rvm # rvm, pour mignot # description [A short description of the project.]: build a project using covid19 dataset # Choose from 1, 2 [1]: 2 # Choose from 1, 2 [1]: # default # # # nouveau terminale # cd Documents # ==> (base) MacBook-Pro-de-Djiby:Documents djibybalde$ # cd covid19 # ==> (base) MacBook-Pro-de-Djiby:covid19 djibybalde$ # cd notebooks # ==> (base) MacBook-Pro-de-Djiby:notebooks djibybalde$ # jupyter lab # # ## nouveau terminale # conda activate corona # ==> (corona) MacBook-Pro-de-Djiby:notebooks djibybalde$ # pip install pandas # # ### ======= Cool! 
on veut maintenant pouser le note qu'on a fait dans GitHube ======== # (base) MacBook-Pro-de-Djiby:~ djibybalde$ conda activate corona # (corona) MacBook-Pro-de-Djiby:~ djibybalde$ cd Documents # (corona) MacBook-Pro-de-Djiby:Documents djibybalde$ cd covid19 # # (corona) MacBook-Pro-de-Djiby:covid19 djibybalde$ git init # (corona) MacBook-Pro-de-Djiby:covid19 djibybalde$ git add . # (corona) MacBook-Pro-de-Djiby:covid19 djibybalde$ git remote add origin https://github.com/djibybalde/covid19.git # covid19 dois etre créer au prealable # # (corona) MacBook-Pro-de-Djiby:covid19 djibybalde$ git commit # le 'master' et les fichier a commiter dois apparaitre # ==> # Dans un iditeur appellé VI, I(pour inrsetion-Initial commit / ajoute de commenetaire) >> ESC >> :wq # (corona) MacBook-Pro-de-Djiby:covid19 djibybalde$ git push --set-upstream origin master # # Username for 'https://github.com': <djibybalde> # Password for 'https://github.com': <keyword> # ==> To https://github.com/djibybalde/covid19.git * [new branch] master -> master # # # 13/03/2020 # matplotlib+searbord ==> statique # plotly(dash)==> dynamique, webapp # darkh ==> dynamique, webapp # altair ==> dynamique # # data corona>>src # -creer un fichier text et renome en app.py # corps du fichier > # # #les variables qui portent les doulbes undescore # _ pas visible peut etre accessible # __ pas accessible sur le site # # ## # cd documents/covid19/src # dans ternemal: python app.py # se connecter à l'internet sur <http://localhost:8050> # # # git commit -a # pour commiter tous oubien # git commit myfiel.format # pour ne commiter que le myfiel.format # # git remote remove origin # suprime le lien entre les 2 envi (local et distance) ==> # git remote add origin http://githube.com/name/myfolder # - # + active="" # # summary # To run a app: # - create a new file.txt and rename it to app.py (in the src folder) # - create a new file.txt and rename it to env.yaml (in the source folder:covid19) # - open ternimal: # - cd 
documents/covid19/src # - python app.py # - writte the parameters on env.yaml and the contains of the application on app.py # -
notebooks/description.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

class Node:
    """A single node of a singly linked list."""

    def __init__(self, data):
        self.data = data
        self.next = None  # make None as the default value for next.


def count_nodes(head):
    """Return the number of nodes in the list starting at *head*.

    Generalized from the original: *head* may now be ``None`` (an empty
    list), in which case 0 is returned instead of raising AttributeError.
    Behaviour for non-empty lists is unchanged.
    """
    count = 0
    current = head
    while current is not None:
        count += 1
        current = current.next
    return count


# +
nodeA = Node(6)
nodeB = Node(3)
nodeC = Node(4)
nodeD = Node(2)
nodeE = Node(1)

nodeA.next = nodeB
nodeB.next = nodeC
nodeC.next = nodeD
nodeD.next = nodeE

print("This linked list's length is: (should print 5)")
print(count_nodes(nodeA))
Linked+List.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: ventilator-prediction # language: python # name: ventilator-prediction # --- # + colab={"base_uri": "https://localhost:8080/"} id="L9kAJB3BCtBt" outputId="3508a337-278d-4a50-eb33-4f3627a5cbed" from google.colab import drive drive.mount('/content/drive') # + id="Q4ZM-Gu_DSX9" import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns resource_path = "/content/drive/MyDrive/Ventilator-Pressure-Prediction/" local_resource_path = "../models/" data_path = "../input/" running_enviroment = "local" # %matplotlib inline # + colab={"base_uri": "https://localhost:8080/"} id="SpkoQOzOLWe5" outputId="0b19cb26-3e30-49a3-cef3-95533e1d7510" # !cat /proc/cpuinfo # + id="D66QlQFhD6zJ" if running_enviroment == "colab": data_df = pd.read_csv(resource_path + 'train.csv') else: data_df = pd.read_csv(data_path + 'train.csv') # + colab={"base_uri": "https://localhost:8080/"} id="5NoyHYFosSyp" outputId="15e9fe90-95d2-44ef-f6b6-72f0950ff19c" data_df.shape # + colab={"base_uri": "https://localhost:8080/"} id="00OUx7SnacqZ" outputId="06397ebf-02e0-4f75-8e5c-1aac45d0a835" # To convert string data into numeric which was numeric in nature data_df = data_df.apply(pd.to_numeric, errors='ignore') data_df.info() # + colab={"base_uri": "https://localhost:8080/", "height": 204} id="ABuRifL55UGS" outputId="966e8406-9e05-4790-f802-17038a12e4d7" data_df.head() # + colab={"base_uri": "https://localhost:8080/", "height": 687} id="XDbDTDTFU0Tw" outputId="aae2677a-6604-40c5-d1b4-d81d8544d8cf" figure = plt.figure(figsize=(22, 8)) ax = figure.gca() data_df.hist(ax = ax) # + colab={"base_uri": "https://localhost:8080/"} id="ozR0xiF37kLv" outputId="475e6fe0-7c3d-40bd-d270-6eff9f515239" # Get the categorical columns for transforming set(data_df.columns) - set(data_df._get_numeric_data().columns) # + 
colab={"base_uri": "https://localhost:8080/", "height": 1000} id="1XGo2VxsWaop" outputId="6a063eab-0513-4f6a-b8a8-5c4ab30598f0" train_columns = list(data_df.columns) train_columns.remove("pressure") for column in train_columns: plt.figure(figsize=(8,4)) sns.boxplot(column, data=data_df) # + colab={"base_uri": "https://localhost:8080/", "height": 595} id="KgJ4BE7qmB0y" outputId="7852fcf2-5ef2-4317-f611-640815bcd562" # Treat outlier from sklearn import impute def treat_outlier_and_null(columns): for column_name in columns: column = data_df[column_name] iqr = np.quantile(column, 0.75) - np.quantile(column, 0.25) upper_bound = np.quantile(column, 0.75) + 1.25*iqr lower_bound = np.quantile(column, 0.25) - 1.25*iqr is_outlier_found = column[(column>upper_bound) | (column<lower_bound)] if list(is_outlier_found): column[(column>upper_bound) | (column<lower_bound)] = np.nan # Method2: static_imputation if data_df[column_name].dtypes == 'O': data_df[column_name] = data_df[column_name].fillna(data_df[column_name].mode().iloc[0]) else: data_df[column_name] = data_df[column_name].fillna(data_df[column_name].median()) # Check the null values in the data print(data_df.isnull().sum()) treat_outlier_and_null(train_columns) print(data_df.isnull().sum()) # Method1: Fill in the null values with KNN # knn_imputer = impute.KNNImputer(n_neighbors=5) # filled_data_df = knn_imputer.fit_transform(data_df.drop(["id", "breath_id", "time_step"], axis=1)) # data_df.loc[:, ['R', 'C', 'u_in', 'u_out', 'pressure']] = filled_data_df data_df.head() # + id="bTs1Nvujl_FD" # Adding some features based on the provided data def generate_features(df): # create an aggregate dictionary aggs = {} aggs['C'] = ['max', 'min', 'mean'] aggs['R'] = ['max', 'min', 'mean'] aggs['u_in'] = ['max', 'min', 'mean', 'std', 'var'] # we group by breath_id and calculate the aggregates agg_df = df.groupby('breath_id').agg(aggs) agg_df = agg_df.reset_index() new_agg_columns = [] for agg_columns in list(agg_df.columns): 
column_name = agg_columns[0] + "_" + agg_columns[1] new_agg_columns.append(column_name.strip("_")) agg_df.columns = new_agg_columns return agg_df agg_df = generate_features(data_df) data_df = pd.merge(data_df, agg_df, how='left', on = 'breath_id') # + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="nT7VoirE5T1H" outputId="b1b2da6a-1886-4af1-d800-f57ba75de72b" plt.figure(figsize=(22, 18)) sns.heatmap(data_df.corr(method='spearman'), annot=True, cmap='viridis') # + colab={"base_uri": "https://localhost:8080/", "height": 238} id="_c1rJdSq5Tcz" outputId="9768adc0-7e3f-48e0-ba3b-a71a37895c0b" from sklearn.feature_selection import VarianceThreshold # Perform feature selection def feature_selection(dataset, corr_threshold, var_threshold): var_thresh = VarianceThreshold(threshold=var_threshold) transformed_data = var_thresh.fit_transform(dataset) var_columns = var_thresh.variances_ > var_threshold var_removed_columns = [not column for column in var_columns] removed_columns = set(dataset.iloc[:, var_removed_columns].columns) print(f"variance removed column: {removed_columns}") corr_matrix = dataset.corr(method='spearman') for i in range(len(corr_matrix.columns)): for j in range(i): if abs(corr_matrix.iloc[i, j]) > corr_threshold: colname = corr_matrix.columns[i] removed_columns.add(colname) print(f"variance and correlation removed column: {removed_columns}") return removed_columns removed_columns = feature_selection(data_df.drop(["pressure"], axis=1), 0.85, 0.1) removed_columns.add("id") data_df.drop(list(removed_columns), axis=1, inplace=True) data_df.head() # + id="2w-jG8Hd5Rez" from sklearn.model_selection import train_test_split data_df = data_df.sample(frac=0.01) print(data_df.shape) X = data_df.drop(["pressure"], axis=1) y = data_df["pressure"] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.10, random_state=42) # + id="6NSNEZVa5RXd" from sklearn.preprocessing import StandardScaler import joblib scaler = StandardScaler() 
# Scale the features: fit on the training split only (avoids test-set
# leakage), then apply the same fitted scaler to both splits.
scaler.fit(X_train)
scaled_X_train = scaler.transform(X_train)
scaled_X_test = scaler.transform(X_test)

# All artifacts go to the Drive path on Colab, otherwise to the local path.
# (`running_enviroment` (sic), `resource_path` and `local_resource_path`
# are defined earlier in the notebook.)
save_dir = resource_path if running_enviroment == "colab" else local_resource_path
joblib.dump(scaler, save_dir + 'ventilator-scaler-lower.pkl')

# + colab={"base_uri": "https://localhost:8080/"} id="StQ9DXle5RQ1"
# The original notebook copy-pasted the same RandomForest search cell four
# times (random_forest_model1..4) and the same SVM search cell four times
# (svm_model1..4). That duplication is folded into one helper; each call is
# still an independent randomized search, exactly like the original cells.
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
from sklearn import model_selection
from sklearn.metrics import mean_absolute_error, r2_score

# Search space for the random forest.
# NOTE: 1.0 in "min_samples_split" is a float, so sklearn reads it as a
# *fraction* of the samples (100%), not the integer 1.
RF_PARAM_GRID = {
    "n_estimators": [120, 300, 500, 800, 1200],
    "max_depth": [5, 8, 15, 25, 30, None],
    "min_samples_split": [1.0, 2, 5, 10, 15, 100],
    "min_samples_leaf": [1, 2, 5, 10],
    "max_features": ["log2", "sqrt", None],
}

# Search space for the SVM.
SVM_PARAM_GRID = {
    "C": [0.001, 0.01, 0.1, 1, 10, 100],
    "gamma": ["scale", "auto"],
}


def run_search(estimator, param_grid, artifact_name, scoring=None):
    """Run one RandomizedSearchCV over `param_grid` on the scaled data.

    Saves the fitted search object to ``save_dir + artifact_name``, prints
    the test-set MAE and R^2, and returns ``(search, test_predictions)``.
    ``scoring=None`` keeps the estimator's default scorer — used for the
    SVM runs, matching the original cells, which set no ``scoring``.
    """
    search = model_selection.RandomizedSearchCV(
        estimator=estimator,
        param_distributions=param_grid,
        n_iter=2,
        scoring=scoring,
        verbose=100,
        n_jobs=1,
        cv=4,
    )
    search.fit(scaled_X_train, y_train)
    preds = search.predict(scaled_X_test)
    joblib.dump(search, save_dir + artifact_name)
    print(mean_absolute_error(y_test, preds))
    print(r2_score(y_test, preds))
    return search, preds


# 1st model: RandomForest — four independent search runs
# (was: four identical copy-pasted cells).
for run in range(1, 5):
    model, y_pred = run_search(
        RandomForestRegressor(n_jobs=-1),
        RF_PARAM_GRID,
        'random_forest_model{}.joblib'.format(run),
        scoring="neg_mean_absolute_error",
    )

# 2nd model: SVM — four independent search runs
# (was: four identical copy-pasted cells).
for run in range(1, 5):
    svm_model, y_pred = run_search(
        SVR(), SVM_PARAM_GRID, 'svm_model{}.joblib'.format(run))
# -

# ## Retraining with lower data

# + id="zOEc5iYr5Plm"
# Final model: a plain RandomForest fitted with the best parameters found
# by the searches above, refit on the (reduced) training data.
params = {'n_estimators': 300, 'min_samples_split': 10, 'min_samples_leaf': 1,
          'max_features': 'log2', 'max_depth': 30}
model = RandomForestRegressor(n_jobs=-1, **params)
model.fit(scaled_X_train, y_train)
y_pred = model.predict(scaled_X_test)
# NOTE(review): unlike the searches above, this artifact was always written
# to the local path in the original notebook; kept as-is — confirm intended.
joblib.dump(model, local_resource_path + 'random_forest_model_lower_data.joblib')

# + id="bxmGwPXS5PI5"
print(mean_absolute_error(y_test, y_pred))
print(r2_score(y_test, y_pred))

# + id="nAZvlKvT5O3U"

# + id="Sfb64Q6z5OuA"

# + id="ONN_-5895OiR"

# + id="K-3sKMJw2dYn"

# + id="EMUAS6ih2dQn"

# + id="DUdJV0sS2dFj"

# + id="2PhWdcmn2c-Y"

# + id="63_xF8W72c2a"

# + id="g8_vwDXp2cty"

# + id="diFNz9ga2clK"

# + id="zredgQPz2cdX"

# + id="jJrBx3bs2cS_"

# + id="z1xL1Vd72cIS"

# + id="2NycmuWs2b9I"

# + id="k_nvQqDc2byF"

# + id="hdoASAwn2bnE"

# + id="htzBsnKPacFY"

# + id="uJsICPvZr3_h"
# Quick sanity checks on the raw frames.
train_df.dtypes

# + id="5aFTWPiZFUIM"
test_df = pd.read_csv('/content/drive/MyDrive/Ventilator-Pressure-Prediction/test.csv')

# + id="Cbj35sr3p66P"
test_df.shape

# + id="RbPFEKEUbCh6"
train_df["breath_id"].nunique()

# + id="imwoMxR2Fcew"

# + id="69gk1D3MvRL_"

# + id="ZvPLREjnvRYT"

# + id="QK1fMR-evRcS"

# + id="Y_dBCTOWvRf9"

# + id="Ld42ip8MvRjX"

# + id="su-VfDPhvRnD"

# + id="zNKZGyu0vRpl"

# + id="tbouYzTVvRsa"

# + id="v1UpZZX2vRvI"

# + id="4nALgHDLvRyf"
notebooks/Ventilator_Pressure_train.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] id="XA9scnGrLCn2"
# ![JohnSnowLabs](https://nlp.johnsnowlabs.com/assets/images/logo.png)
#
# [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/jupyter/annotation/german/pretrained-german-models.ipynb)
#
# ## 0. Colab Setup

# + id="Dm-qYk1nH9Qx"
# Colab-only environment setup: install Java 8 (required by Spark),
# point JAVA_HOME at it, then install pinned pyspark / spark-nlp versions.
import os

# Install java
# ! apt-get install -y openjdk-8-jdk-headless -qq > /dev/null
os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64"
os.environ["PATH"] = os.environ["JAVA_HOME"] + "/bin:" + os.environ["PATH"]
# ! java -version

# Install pyspark
# ! pip install --ignore-installed -q pyspark==2.4.4

# Install Spark NLP
# ! pip install --ignore-installed -q spark-nlp==2.5.0

# + [markdown] id="gTvXsYwYGrXA"
# ### German models specs
#
# | Feature | Description|
# |:----------|:----------|
# | **Lemma** | Trained by **Lemmatizer** annotator on **lemmatization-lists** by `<NAME>`|
# | **POS** | Trained by **PerceptronApproach** annotator on the [Universal Dependencies](https://universaldependencies.org/treebanks/de_hdt/index.html)|
# | **NER** | Trained by **NerDLApproach** annotator with **Char CNNs - BiLSTM - CRF** and **GloVe Embeddings** on the **WikiNER** corpus and supports the identification of `PER`, `LOC`, `ORG` and `MISC` entities |

# + id="QGc8b0-yGrXC"
# Spark NLP imports (star imports follow the upstream workshop notebooks).
import sparknlp
from sparknlp.annotator import *
from sparknlp.base import *
from sparknlp.pretrained import PretrainedPipeline
from pyspark.sql.types import StringType

# + id="SY5sbRNUGrXI"
# Start (or attach to) a Spark session with Spark NLP loaded.
spark = sparknlp.start()

print("Spark NLP version: ", sparknlp.version())
print("Apache Spark version: ", spark.version)

# + id="fNfAQEa2GrXP"
# Two German sample sentences wrapped in a single-column DataFrame ("text"),
# the input format expected by every pipeline below.
dfTest = spark.createDataFrame([
    "Die Anfänge der EU gehen auf die 1950er-Jahre zurück, als zunächst sechs Staaten die Europäische Wirtschaftsgemeinschaft (EWG) gründeten.",
    "Angela[1] <NAME> (* 17. Juli 1954 in Hamburg als Angela Dorothea Kasner) ist eine deutsche Politikerin (CDU)."
], StringType()).toDF("text")

# + [markdown] id="DgmHq9mYGrXV"
# ### Pretrained Pipelines in German
# #### explain_document_md (glove_6B_300)

# + id="iYU-OmoJGrXW"
# Downloads the medium explain-document pipeline for German on first use.
pipeline_exdo_md = PretrainedPipeline('explain_document_md', 'de')

# + id="2wfeCpX7GrXa"
pipeline_exdo_md.transform(dfTest).show(2, truncate=10)

# + id="0_M6Ks9lGrXe"
# Inspect the lemma / part-of-speech / entity annotations separately.
pipeline_exdo_md.transform(dfTest).select("lemma.result").show(2, truncate=70)
pipeline_exdo_md.transform(dfTest).select("pos.result").show(2, truncate=70)
pipeline_exdo_md.transform(dfTest).select("entities.result").show(2, truncate=70)

# + [markdown] id="CFFhOJauGrXl"
# #### explain_document_lg (glove_840B_300)

# + id="GwTxKeJFGrXm"
# Large variant — same interface, bigger GloVe embeddings.
pipeline_exdo_lg = PretrainedPipeline('explain_document_lg', 'de')

# + id="f5ac6jDzGrXq"
pipeline_exdo_lg.transform(dfTest).show(2, truncate=10)

# + id="pLgPQOCqGrXw"
pipeline_exdo_lg.transform(dfTest).select("lemma.result").show(2, truncate=70)
pipeline_exdo_lg.transform(dfTest).select("pos.result").show(2, truncate=70)
pipeline_exdo_lg.transform(dfTest).select("entities.result").show(2, truncate=70)

# + [markdown] id="xISxmUMhGrX2"
# #### entity_recognizer_md (glove_6B_300)

# + id="--rX-7QNGrX3"
# NER-only pipeline (medium embeddings).
pipeline_entre_md = PretrainedPipeline('entity_recognizer_md', 'de')

# + id="wvWLZAsAGrX8"
pipeline_entre_md.transform(dfTest).show(2, truncate=10)

# + id="M3vlrh_vGrYC"
pipeline_entre_md.transform(dfTest).select("token.result").show(2, truncate=70)
pipeline_entre_md.transform(dfTest).select("ner.result").show(2, truncate=70)
pipeline_entre_md.transform(dfTest).select("entities.result").show(2, truncate=70)

# + [markdown] id="0QdmUQdSGrYI"
# #### entity_recognizer_lg (glove_840B_300)

# + id="kjBzcacjGrYJ"
# NER-only pipeline (large embeddings).
pipeline_entre_lg = PretrainedPipeline('entity_recognizer_lg', 'de')

# + id="n10-XfpQGrYN"
pipeline_entre_lg.transform(dfTest).show(2, truncate=10)

# + id="zZqLfnw9GrYV"
pipeline_entre_lg.transform(dfTest).select("token.result").show(2, truncate=70)
pipeline_entre_lg.transform(dfTest).select("ner.result").show(2, truncate=70)
pipeline_entre_lg.transform(dfTest).select("entities.result").show(2, truncate=70)

# + [markdown] id="XKPV6SQFGrYa"
# ### Pretrained Models in German

# + id="IVfLuZ78GrYb"
# Build the same processing chain by hand from individual pretrained models:
# document -> sentence -> token -> (lemma, pos) and token+embeddings -> ner.
document = DocumentAssembler() \
    .setInputCol("text")\
    .setOutputCol("document")

sentence = SentenceDetector()\
    .setInputCols(['document'])\
    .setOutputCol('sentence')

token = Tokenizer()\
    .setInputCols(['sentence'])\
    .setOutputCol('token')

lemma = LemmatizerModel.pretrained('lemma', 'de')\
    .setInputCols(['token'])\
    .setOutputCol('lemma')

pos = PerceptronModel.pretrained('pos_ud_hdt', 'de') \
    .setInputCols(['sentence', 'token'])\
    .setOutputCol('pos')

# Multilingual ('xx') GloVe embeddings feed the German NER model below.
embeddings = WordEmbeddingsModel.pretrained('glove_6B_300', 'xx')\
    .setInputCols(['sentence', 'token'])\
    .setOutputCol('embeddings')

ner_model = NerDLModel.pretrained('wikiner_6B_300', 'de')\
    .setInputCols(['sentence', 'token', 'embeddings'])\
    .setOutputCol('ner')

prediction_pipeline = Pipeline(stages=[
    document,
    sentence,
    token,
    lemma,
    pos,
    embeddings,
    ner_model
])

# + id="Kmc2VBqhGrYf"
# fit() is a no-op training pass here (all stages are pretrained);
# transform() runs the annotation chain over the sample sentences.
prediction = prediction_pipeline.fit(dfTest).transform(dfTest)

# + id="lH4RDQIEGrYi"
prediction.select("lemma.result").show(2, truncate=70)
prediction.select("pos.result").show(2, truncate=70)
prediction.select("ner.result").show(2, truncate=70)

# + id="MCovIRpoGrYm"
jupyter/annotation/german/pretrained-german-models.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + 
# #!/usr/bin/python
# The contents of this file are in the public domain. See LICENSE_FOR_EXAMPLE_PROGRAMS.txt
#
# This example shows how to run a CNN based face detector using dlib. The
# example loads a pretrained model and uses it to find faces in images. The
# CNN model is much more accurate than the HOG based model shown in the
# face_detector.py example, but takes much more computational power to
# run, and is meant to be executed on a GPU to attain reasonable speed.
#
# You can download the pre-trained model from:
#   http://dlib.net/files/mmod_human_face_detector.dat.bz2
#
# Usage:
#   ./cnn_face_detector.py mmod_human_face_detector.dat ../examples/faces/*.jpg
#
# Install dlib with `pip install dlib` (build with
# `python setup.py install --yes USE_AVX_INSTRUCTIONS --yes DLIB_USE_CUDA`
# for AVX/CUDA acceleration; CMake required). Also requires scikit-image
# (`pip install scikit-image`) for image loading.

# +
import sys

import dlib
from skimage import io

# Usage check: need at least a model path and one image path.
if len(sys.argv) < 3:
    print(
        "Call this program like this:\n"
        "   ./cnn_face_detector.py mmod_human_face_detector.dat ../examples/faces/*.jpg\n"
        "You can get the mmod_human_face_detector.dat file from:\n"
        "    http://dlib.net/files/mmod_human_face_detector.dat.bz2")
    # sys.exit(1) instead of exit(): exit() is a site-module convenience not
    # guaranteed outside the interactive interpreter, and a usage error
    # should report a non-zero status.
    sys.exit(1)

# Load the pretrained CNN detector and open a display window.
cnn_face_detector = dlib.cnn_face_detection_model_v1(sys.argv[1])
win = dlib.image_window()

for f in sys.argv[2:]:
    print("Processing file: {}".format(f))
    img = io.imread(f)
    # The 1 in the second argument indicates that we should upsample the image
    # 1 time. This will make everything bigger and allow us to detect more
    # faces.
    dets = cnn_face_detector(img, 1)
    # The detector returns a mmod_rectangles object: a list of mmod_rectangle
    # objects, each with a `rect` (dlib.rectangle) and a `confidence` score.
    # A list of images may also be passed:
    #   dets = cnn_face_detector([image list], upsample_num, batch_size=128)
    # which returns a mmod_rectangless (list of lists).
    print("Number of faces detected: {}".format(len(dets)))
    for i, d in enumerate(dets):
        print("Detection {}: Left: {} Top: {} Right: {} Bottom: {} Confidence: {}".format(
            i, d.rect.left(), d.rect.top(), d.rect.right(), d.rect.bottom(), d.confidence))

    # Overlay the plain rectangles on the image and wait for the user.
    rects = dlib.rectangles()
    rects.extend([d.rect for d in dets])

    win.clear_overlay()
    win.set_image(img)
    win.add_overlay(rects)
    dlib.hit_enter_to_continue()
# -
face_struct/dlib_cnn_face_detector.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Chapter 3 - Regression Models
# ## Segment 3 - Logistic regression

# +
import numpy as np
import pandas as pd
import seaborn as sb
import matplotlib.pyplot as plt
import sklearn

from pandas import Series, DataFrame
from pylab import rcParams
from sklearn import preprocessing

# +
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_predict

from sklearn import metrics
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_score, recall_score
# -

# %matplotlib inline
rcParams['figure.figsize'] = 5, 4
sb.set_style('whitegrid')

# ## Logistic regression on the titanic dataset

# Build the dataset path relative to this notebook's parent directory.
import pathlib
import os
address = pathlib.Path(os.getcwd()).parent
address = pathlib.Path(os.path.join(address, 'Data/titanic-training-data.csv'))

# + tags=[]
titanic_training = pd.read_csv(address)
titanic_training.columns = ['PassengerId', 'Survived', 'Pclass', 'Name', 'Sex', 'Age', 'SibSp', 'Parch', 'Ticket', 'Fare', 'Cabin', 'Embarked']
print(titanic_training.head())

# + tags=[]
print(titanic_training.info())
# -

# ##### VARIABLE DESCRIPTIONS
#
# Survived - Survival (0 = No; 1 = Yes)<br>
# Pclass - Passenger Class (1 = 1st; 2 = 2nd; 3 = 3rd)<br>
# Name - Name<br>
# Sex - Sex<br>
# Age - Age<br>
# SibSp - Number of Siblings/Spouses Aboard<br>
# Parch - Number of Parents/Children Aboard<br>
# Ticket - Ticket Number<br>
# Fare - Passenger Fare (British pound)<br>
# Cabin - Cabin<br>
# Embarked - Port of Embarkation (C = Cherbourg, France; Q = Queenstown, UK; S = Southampton - Cobh, Ireland)

# ### Checking that your target variable is binary

sb.countplot(x='Survived', data=titanic_training, palette='hls')

# ### Checking for missing values

titanic_training.isnull().sum()

titanic_training.describe()

# ### Taking care of missing values
# ##### Dropping missing values
# Keep only variables plausibly predictive of survival (Survived, Pclass,
# Sex, Age, SibSp, Parch, Fare, Embarked). Name, Ticket and PassengerId are
# irrelevant, and Cabin is almost entirely missing, so all are dropped.

titanic_data = titanic_training.drop(['Name', 'Ticket', 'Cabin'], axis=1)
titanic_data.head()

# ### Imputing missing values

sb.boxplot(x='Parch', y='Age', data=titanic_data, palette='hls')

Parch_groups = titanic_data.groupby(titanic_data['Parch'])
Parch_groups.mean()


def age_approx(cols):
    """Impute a missing Age from the mean age of the passenger's Parch group.

    `cols` is a row Series with the 'Age' and 'Parch' fields. The constants
    are the (rounded) per-Parch mean ages computed in Parch_groups above;
    Parch values without a group mean fall back to 30.
    """
    # FIX: the original used positional indexing (cols[0], cols[1]), which is
    # deprecated/removed for label-indexed Series in modern pandas. Access by
    # label instead.
    Age = cols['Age']
    Parch = cols['Parch']
    if pd.isnull(Age):
        if Parch == 0:
            return 32
        elif Parch == 1:
            return 24
        elif Parch == 2:
            return 17
        elif Parch == 3:
            return 33
        elif Parch == 4:
            return 45
        else:
            return 30
    else:
        return Age


titanic_data['Age'] = titanic_data[['Age', 'Parch']].apply(age_approx, axis=1)
titanic_data.isnull().sum()

# + tags=[]
# Remaining NaNs (the two missing Embarked values) are dropped outright.
titanic_data.dropna(inplace=True)
titanic_data.reset_index(inplace=True, drop=True)
print(titanic_data.info())
# -

# ### Converting categorical variables to a dummy indicators

from sklearn.preprocessing import LabelEncoder

label_encoder = LabelEncoder()

gender_cat = titanic_data['Sex']
gender_encoded = label_encoder.fit_transform(gender_cat)
gender_encoded[0:5]

titanic_data.head()

# 1 = male / 0 = female
gender_DF = pd.DataFrame(gender_encoded, columns=['male_gender'])
gender_DF.head()

embarked_cat = titanic_data['Embarked']
embarked_encoded = label_encoder.fit_transform(embarked_cat)
embarked_encoded[0:100]

# One-hot encode the (integer-encoded) port of embarkation into C/Q/S flags.
from sklearn.preprocessing import OneHotEncoder

binary_encoder = OneHotEncoder(categories='auto')
embarked_1hot = binary_encoder.fit_transform(embarked_encoded.reshape(-1, 1))
embarked_1hot_mat = embarked_1hot.toarray()
embarked_DF = pd.DataFrame(embarked_1hot_mat, columns=['C', 'Q', 'S'])
embarked_DF.head()

titanic_data.drop(['Sex', 'Embarked'], axis=1, inplace=True)
titanic_data.head()

titanic_dmy = pd.concat([titanic_data, gender_DF, embarked_DF], axis=1, verify_integrity=True).astype(float)
titanic_dmy[0:5]

# ### Checking for independence between features

sb.heatmap(titanic_dmy.corr())

# Fare and Pclass are strongly correlated with other features; drop them.
titanic_dmy.drop(['Fare', 'Pclass'], axis=1, inplace=True)
titanic_dmy.head()

# ### Checking that your dataset size is sufficient

# + tags=[]
titanic_dmy.info()
# -

X_train, X_test, y_train, y_test = train_test_split(titanic_dmy.drop('Survived', axis=1),
                                                    titanic_dmy['Survived'], test_size=0.2,
                                                    random_state=200)

# + tags=[]
print(X_train.shape)
print(y_train.shape)
# -

X_train[0:5]

# ### Deploying and evaluating the model

LogReg = LogisticRegression(solver='liblinear')
LogReg.fit(X_train, y_train)

y_pred = LogReg.predict(X_test)

# ## Model Evaluation
# ### Classification report without cross-validation

# + tags=[]
print(classification_report(y_test, y_pred))
# -

# ### K-fold cross-validation & confusion matrices

y_train_pred = cross_val_predict(LogReg, X_train, y_train, cv=5)
confusion_matrix(y_train, y_train_pred)

precision_score(y_train, y_train_pred)

# ### Make a test prediction

titanic_dmy[863:864]

# + tags=[]
# Feature order: PassengerId, Age, SibSp, Parch, male_gender, C, Q, S.
test_passenger = np.array([866, 40, 0, 0, 0, 0, 0, 1]).reshape(1, -1)
print(LogReg.predict(test_passenger))
print(LogReg.predict_proba(test_passenger))
Pt_2/03_10_Logistic_regression/03_10_end.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# ## Constraint-based structual learning for Bayesian Network
# * This is an **Interactive Version**, which unpacks functions for interactive use. For a compact version, please refer to **main.py** or **constraintBN.ipynb**
# * The algorithm are implemented based on [Scutari](https://arxiv.org/pdf/1406.7648.pdf)
# * The algorithm will run several iterations. Each time, it will go through four main stages:
#     * sampling & preprocessing
#     * finding Markov Blankets
#     * determining Neighbors
#     * learning arc directions
# * Attributes and Edges will be returned.
# * For interactive purpose, this file will go through one iteration step by step, and it will perform given iterations at the final stage to show the result.
#
# ### Load Data & Specifying Parameters
# * The data needs to be .csv files and we need to replace all the "," within the cell before processing
# * required **filename** and **location** of the dataset, optional parameters are **sample_size**, number of iterations (**iteration**), and **alpha** for independence tests.

# +
import numpy as np
from scipy import stats
from copy import copy
import math
import csv

# Input dataset and algorithm parameters.
filename = '500_Cities__Local_Data_for_Better_Health__2017_release.csv'
location = '../datasets/'
sample_size = 100   # records drawn per iteration
iteration = 5       # number of algorithm iterations
alpha = 0.05        # significance level for independence tests
# -

# ### Sampling & Preprocessing
# * **reformat**: it uniformly randomly selected [sample_size] records from the dataset and print out the attributes names and their indexes
# * **replace_str**: the records of a given dataset will be transformed into numbers for further computing.
# For example, if a column has [a,b,b,c,d,c], it will become [0,1,1,2,3,2]

# +
'''
from utility.py
'''
def replace_str(data, return_dic = False):
    """Encode every column of `data` (list of rows) as integer codes, in place.

    Each distinct value in a column is mapped to the order of its first
    appearance (0, 1, 2, ...). Returns the mutated `data`; when
    `return_dic` is True, also returns {column_index: {value: code}}.
    """
    value_dic = {}
    for col in range(len(data[0])):
        unique = {}
        index = 0
        for row in data:
            if row[col] not in unique:
                # First time this value is seen in the column: assign the
                # next code and remember the mapping.
                unique[row[col]] = index
                row[col] = index
                index += 1
            else:
                row[col] = unique[row[col]]
        value_dic[col] = unique
    if return_dic:
        return data, value_dic
    return data


def reformat(path, clean_path = "", size = 1000):
    """Load a csv, sample `size` rows, and integer-encode them.

    Returns (numeric_rows, fieldnames). `clean_path` is unused; kept for
    interface compatibility.
    NOTE(review): np.random.choice samples *with replacement* by default, so
    duplicate rows are possible despite the "uniformly randomly selected"
    description — confirm intended.
    NOTE(review): rows are split on "," only, so the last field keeps its
    trailing newline (harmless after encoding, but worth knowing).
    """
    with open(path) as csvfile:
        raw = csvfile.readlines()
    # First line is the header; the rest are data rows.
    fieldnames = raw[0].strip('\n').split(",")
    raw = raw[1:]
    sample = np.random.choice(len(raw), size)
    sample = sample.tolist()
    split_raw = []
    for i in sample:
        row = raw[i].split(",")
        split_raw.append(row)
    numeric_raw = replace_str(split_raw)
    return numeric_raw, fieldnames
# -

# ### Finding Markov Blankets
# * To find the markov blankets, I mainly used Grow and Shrink Algorithm from [Margaritis's Thesis](https://www.cs.cmu.edu/~dmarg/Papers/PhD-Thesis-Margaritis.pdf).
#     * Grow Phase:
#       While $\exists(Y)\in U -{X}$ such that $Y not\perp X|S $ do $S\gets S\cup{Y}$.
#     * Shrink Phase:
#       While $\exists(Y)\in S $ such that $Y \perp X|S-{Y} $ do $S\gets S-{Y}$.
# * After finding all the MB, I will perform a symmetric check (when x belongs to y's blanket, if y belongs to x's blanket) and drop out those that do not hold to reduce false postives.
from utility import *

''' from learnAlg.py: learn Markov Blanket Using GS '''
def gs(data, alpha):
    """Estimate the Markov blanket of every attribute via Grow-Shrink (GS).

    data  -- 2-D numpy int array; rows = records, columns = attributes.
    alpha -- significance level for the conditional-independence tests.
             BUGFIX: the original accepted alpha but never forwarded it to
             are_independent, so every test silently ran at the default 0.05.

    Returns a dict {attribute index: list of blanket member indices}.
    """
    # number of attributes
    n_attr = data.shape[1]
    # init empty blanket container for each attribute
    blanket = dict((a, []) for a in range(n_attr))
    for X in range(n_attr):
        # step 1: init blanket for this attribute
        S = []
        # step 2: GROWING phase -- add any Y dependent on X given current S
        for Y in range(n_attr):
            if Y == X or Y in S:
                continue
            columns = (X, Y) + tuple(S)
            if not are_independent(data[:, columns], alpha):
                S.append(Y)
        # step 3: SHRINKING phase -- drop Y if X indep of Y given S - {Y}
        # (iterate over a snapshot of the grown set, as the original did)
        for Y in list(S):
            if Y == X:
                continue
            new_S = copy(S)
            new_S.remove(Y)
            columns = (X, Y) + tuple(new_S)
            if are_independent(data[:, columns], alpha):
                S = new_S
        # save to blanket
        blanket[X] = S
    return blanket

# #### Independence Test
# * In Grow and Shrink Algorithm, we used **are_independent** and **alpha** to test the independence or conditional independence of X and Y.
# * The independence tests are developed based on Daphne's book in Chapter 18.2.2, page 789-790 -- Using chi-square to calculate the deviance and p-value, then comparing the p-value with a threshold of alpha (default: 0.05).
# * Notes:
#   * Here I used 1e-7 to avoid division by zero.
#   * If there are more than 3 columns passed, I will concatenate all the columns after the second column to be a single Z. In this way, the dimension of computing is always <= 3.
# +
''' from utility.py: '''
def are_independent(data, alpha = 0.05):
    """Decide (conditional) independence from the chi-square deviance test.

    NOTE(review): this returns True when the computed value is *below* alpha,
    and indep_test uses chi2.pdf rather than a tail probability (chi2.sf), so
    the value compared against alpha is not a textbook p-value.  The two
    conventions are used consistently by every caller in this notebook, so
    both are deliberately kept as-is here -- confirm against the original
    learnAlg.py/utility.py before changing either.
    """
    pval = indep_test(data)
    if pval < alpha:
        return True
    else:
        return False


'''
Independent tests:
@param test: perform chi-square test
For data = [X,Y]
- calculate joint prob
- calculate marginal prob
- cross product of marginal X and marginal Y
- calculate chi2 statistics
'''
def indep_test(data, test=True):
    bins = unique_bins(data)
    n_row = data.shape[0]
    if len(bins) == 2:
        # PAGE 788-789
        # frequency counts
        hist,_ = np.histogramdd(data, bins=bins[0:2])
        # joint probability distribution over X,Y
        Pxy = hist / data.shape[0]
        # marginal: axis 0: combine rows/across X; axis 1: combine cols/across Y
        Px = np.sum(Pxy, axis = 1)
        Py = np.sum(Pxy, axis = 0)
        # avoid division by zero
        Px += 1e-7
        Py += 1e-7
        # deviance using chi-square
        chi = 0
        for i in range(bins[0]):
            for j in range(bins[1]):
                # BUGFIX: the original wrote ".../ Px[i] * Py[j]", which divides
                # by Px[i] and then MULTIPLIES by Py[j].  The chi-square term
                # divides by the product Px*Py, exactly as the conditional
                # branch below already does with explicit parentheses.
                chi += n_row * math.pow(Pxy[i][j] - Px[i] * Py[j], 2) / (Px[i] * Py[j])
        dof = (bins[0] - 1) * (bins[1] - 1)
        p_val = 2*stats.chi2.pdf(chi, dof)  # 2* for one tail
        return round(p_val,4)
    else:
        # PAGE 790, condition on Z
        # CHECK FOR > 3 COLUMNS -> concatenate Z into one column
        if len(bins) > 3:
            data = data.astype('str')
            ncols = len(bins)
            for i in range(len(data)):
                data[i,2] = ''.join(data[i,2:ncols])
            data = data.astype('int')[:,0:3]
            bins = unique_bins(data)
        hist,_ = np.histogramdd(data, bins=bins)
        # joint probability distribution over X,Y,Z
        Pxyz = hist / n_row
        Pz = np.sum(Pxyz, axis = (0,1))   # P(Z)
        Pxz = np.sum(Pxyz, axis = 1)      # P(X,Z)
        Pyz = np.sum(Pxyz, axis = 0)      # P(Y,Z)
        Pxy_z = Pxyz / (Pz+1e-7)          # P(X,Y | Z) = P(X,Y,Z) / P(Z)
        Px_z = Pxz / (Pz+1e-7)            # P(X | Z) = P(X,Z) / P(Z)
        Py_z = Pyz / (Pz+1e-7)            # P(Y | Z) = P(Y,Z) / P(Z)
        Px_y_z = np.empty((Pxy_z.shape))  # P(X|Z)P(Y|Z)
        # avoid division by zero
        Pz += 1e-7
        # (M[x,y,z] - M*P(z)P(x|z)P(y|z))^2 / M * P(z)P(x|z)P(y|z)
        chi = 0
        for i in range(bins[0]):
            for j in range(bins[1]):
                for k in range(bins[2]):
                    Px_y_z[i][j][k] = Px_z[i][k]*Py_z[j][k] + 1e-7
                    chi += n_row * math.pow((Pxyz[i][j][k] - Pz[k] * Px_y_z[i][j][k]), 2) / (Pz[k] * Px_y_z[i][j][k])
        dof = (bins[0] - 1) * (bins[1] - 1) * bins[2]
        p_val = 2*stats.chi2.pdf(chi, dof)  # 2* for one tail
        return round(p_val,4)
# -

# #### Symmetric Check
# * The step is used to reduce false positives.

''' from learnAlg.py: check symmetric for mb or nb '''
def check_symmetric(mb):
    """Keep i in mb[x] only if x is also in mb[i].

    BUGFIX: the original did new_mb = dict(mb), a shallow copy whose member
    lists are SHARED with mb, and then removed items from a list while
    iterating over that very list -- which silently skips elements.  Here we
    copy each list and test membership against the untouched input instead.
    """
    new_mb = dict((x, list(mb[x])) for x in mb)
    for x in mb:
        for i in mb[x]:
            if x not in mb[i]:
                new_mb[x].remove(i)
    return new_mb

# ### Determining Neighbors
# * For each pair of attribute X and Y, where X is not the same as Y, search for a set (including empty set) on which X and Y are independent. If no such set exists, place an undirected arc between X and Y.
# * In this step, I used MB to reduce the search space. Specifically:
#   * if X in Y's MB:
#     * search all the subsets of MB(Y). once found a subset separating X and Y -> they are not neighbors
#     * if no such set found, test independence of X and Y without conditions
#     * if still not independent, add Y to X's neighbor
#   * if X not in Y's MB:
#     * given MB(Y), X and Y must be independent -> they are not neighbors
# * check symmetric again

''' from learnAlg.py: learn neighbours '''
def learnNb(data, mb, alpha):
    """Derive the undirected neighbor sets N(x) from the Markov blankets.

    data  -- 2-D numpy int array of records.
    mb    -- Markov blankets, {attr: [blanket member, ...]}.
    alpha -- significance level.  BUGFIX: now forwarded to are_independent
             (the original ignored it).

    Returns {attr: [neighbor, ...]}, symmetrized via check_symmetric.
    """
    nb = {}
    # N(x) is subset of B(x)
    for x in range(data.shape[1]):
        nb[x] = []
        for y in range(data.shape[1]):
            if x in mb[y]:
                # BUGFIX: the original wrote "space = copy(mb[y]).remove(x)";
                # list.remove returns None, so space was ALWAYS None and the
                # subset search below never executed.
                space = copy(mb[y])
                space.remove(x)
                noset = True
                if space:
                    subset = find_subset(space)
                    for s in subset.values():
                        # BUGFIX: build a flat index tuple, as gs() does --
                        # the original nested s inside (x,y,s).
                        # NOTE(review): assumes find_subset yields iterables
                        # of attribute indices; confirm against utility.py.
                        columns = (x, y) + tuple(s)
                        if are_independent(data[:, columns], alpha):
                            noset = False
                            break
                # test empty s
                columns = (x, y)
                if are_independent(data[:, columns], alpha):
                    noset = False
                if noset:
                    # place an undirected edge between x and y
                    nb[x].append(y)
                    #print "{} and {} has an edge".format(x, y)
    return check_symmetric(nb)

# ### Learning Arc Directions
# * 1) Learn V-structure. For each non-adjacent X,Y with a common neighbor S, check if given Z, X and Y are independent.
# If not, create a v structure with {X -> S <- Y}
# * 2) After learning v-structures, recursively check two rules:
#   * If X - Y and there is a directed path from X to Y, then change X-Y to X->Y
#   * If X, Y are not adjacent, check if an S exists such that X -> S, S - Y, then change S - Y to S -> Y.
# * also referenced Chap 3.3-3.4 in Daphne's book.

# +
''' from learnAlg.py: learn arc directions '''
def learnDir(data, nb, alpha):
    """Orient edges: find v-structures, then apply the two rules to a fixpoint.

    data  -- 2-D numpy int array of records.
    nb    -- undirected neighbor sets, {attr: [neighbor, ...]}.
    alpha -- significance level.  BUGFIX: now forwarded to are_independent
             (the original ignored it).

    Returns {left: [right, ...]} meaning a directed arc left -> right.
    """
    leftToRight = {}
    # find V-structure
    for x in nb.keys():
        leftToRight[x] = []
        for y in range(x+1, data.shape[1]):
            # find non-adjacent x,y
            if y in nb[x]:
                continue
            # find their common neighbor
            commonNb = list(set(nb[x]).intersection(nb[y]))
            for s in commonNb:
                # check if x and y are dependent given the common neighbour
                columns = (x,y,s)
                if not are_independent(data[:,columns], alpha):
                    if s not in leftToRight[x]:
                        leftToRight[x].append(s)
                    if y not in leftToRight.keys():
                        leftToRight[y] = []
                    if s not in leftToRight[y]:
                        leftToRight[y].append(s)
                    #print "{} -> {} <- {}".format(x, s, y)
    # recursively applying two rules until convergence
    last = {}
    while last != leftToRight:
        # BUGFIX: copy(leftToRight) is shallow, so `last` shared the very list
        # objects appended to below and the convergence test could succeed
        # after a single pass even though arcs were still being added.
        # Snapshot the lists as well.
        last = dict((k, list(v)) for k, v in leftToRight.items())
        for x in nb.keys():
            for y in nb.keys():
                # case1: adjacent
                if y in nb[x]:
                    # skip edges that are already directed
                    if y in leftToRight[x] or x in leftToRight[y]:
                        continue
                    # if existing a directed path from x to y, set x -> y
                    if hasDirectedPath(x,y,leftToRight):
                        if y not in leftToRight[x]:
                            leftToRight[x].append(y)
                            #print "{} -> {}".format(x, y)
                # case2: non-adjacent
                # if existing s that x -> s and s - y, set s -> y
                else:
                    for s in leftToRight[x]:
                        if s in nb[y]:
                            # not s <- y
                            if y not in leftToRight[s] and s not in leftToRight[y]:
                                leftToRight[s].append(y)
                                #print "{} -> {}".format(s, y)
    return leftToRight


''' recursive call to find a directed path from x to y '''
def hasDirectedPath(x, y, leftToRight, _visited = None):
    """Depth-first search for a directed path x -> ... -> y.

    BUGFIX: tracks visited nodes so cyclic arc sets no longer recurse without
    bound, and returns an explicit False instead of falling off the end (the
    original implicitly returned None).  _visited is internal bookkeeping;
    existing 3-argument callers are unaffected.
    """
    if _visited is None:
        _visited = set()
    if x in _visited:
        return False
    _visited.add(x)
    heads = leftToRight.get(x)
    if not heads:
        return False
    if y in heads:
        return True
    for i in heads:
        if hasDirectedPath(i, y, leftToRight, _visited):
            return True
    return False
# -

# ### Iterations and Count Occurrences of Each Edge
# * After all iterations are done, **edges** are returned in a form like {left node:{right node: # occurences in all iterations}} or you could use **printEdge** to print the edges exceeding a specified **threshold**.
# * Example for threshold: if edge x -> y appeared 6 times in 10 iterations, with a threshold of 0.5, since 6 > 10*0.5, the edge will be displayed by the printEdge function.

# +
def constraintBN(filename, location, sample_size = 100, iteration = 1, alpha = 0.5):
    """Run the full constraint-based structure-learning pipeline.

    filename/location -- CSV data set to learn from.
    sample_size       -- rows sampled per iteration.
    iteration         -- number of independent runs to aggregate.
    alpha             -- significance level passed down to every stage.
                         NOTE(review): this default is 0.5 while the rest of
                         the notebook uses 0.05 -- confirm which is intended.

    Returns edge counts: {left: {right: occurrences across iterations}}.
    (print statements use the call form, valid in both Python 2 and 3.)
    """
    # left -> right
    edge = {}
    print("sample_size: {}, iterations: {}, alpha: {}".format(sample_size, iteration, alpha))
    for i in range(iteration):
        print("iteration {}".format(i))
        path = location + filename
        #clean_path = location + filename.split(".")[0] + "-num.csv"
        # Reformat data and replace string, size = sample_size
        data, field = reformat(path, size = sample_size)
        if i == 0:
            printAttr(field)
        data = np.array(data, np.int32)
        #data = np.genfromtxt(clean_path, dtype='int32', delimiter=',')
        # 1. Find Markov Blankets
        mb = gs(data, alpha = alpha)
        mb = check_symmetric(mb)
        # 2. learning neighbors
        nb = learnNb(data, mb, alpha = alpha)
        nb = check_symmetric(nb)
        # 3. learning directions
        arc = learnDir(data, nb, alpha = alpha)
        # calculate occurences
        for left in arc.keys():
            right = arc[left]
            if left not in edge.keys():
                edge[left] = {}
            for r in right:
                if r not in edge[left].keys():
                    edge[left][r] = 1
                else:
                    edge[left][r] += 1
    printEdge(edge, itr = iteration)
    return edge


def printEdge(edge, itr, threshold = 0.8):
    """Print every edge observed in more than threshold*itr iterations."""
    for e in edge:
        right = edge[e]
        for r in right:
            if edge[e][r] > threshold*itr:
                print("{} -> {} ({})".format(e, r, edge[e][r]))
# -

edges = constraintBN(filename, location, sample_size = sample_size, iteration = iteration, alpha = alpha)

# ### Limitations
# #### Limited Representation
# * BN can only be used to represent causal relationship and fail to represent correlations.
# * Fail to represent distributions like {A $\perp$ C | B, D} and {B $\perp$ D | A, C}
#
# #### Complexity
learnBN/Interactive_Constraint_Based_BN.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import numpy as np import matplotlib.pyplot as plt from sklearn import svm, datasets def make_meshgrid(x, y, h=.02): x_min, x_max = x.min() - 1, x.max() + 1 y_min, y_max = y.min() - 1, y.max() + 1 xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) return xx, yy def plot_contours(ax, clf, xx, yy, **params): """Plot the decision boundaries for a classifier. Parameters ---------- ax: matplotlib axes object clf: a classifier xx: meshgrid ndarray yy: meshgrid ndarray params: dictionary of params to pass to contourf, optional """ Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) out = ax.contourf(xx, yy, Z, **params) return out iris = datasets.load_iris() X = iris.data[:, :2] y = iris.target # we create an instance of SVM and fit out data. We do not scale our # data since we want to plot the support vectors C = 1.0 # SVM regularization parameter models = (svm.SVC(kernel='linear', C=C), svm.LinearSVC(C=C), svm.SVC(kernel='rbf', gamma=0.7, C=C), svm.SVC(kernel='poly', degree=3, C=C)) models = (clf.fit(X, y) for clf in models) # title for the plots titles = ('SVC with linear kernel', 'LinearSVC (linear kernel)', 'SVC with RBF kernel', 'SVC with polynomial (degree 3) kernel') fig, sub = plt.subplots(2, 2) plt.subplots_adjust(wspace=0.4, hspace=0.4) X0, X1 = X[:, 0], X[:, 1] xx, yy = make_meshgrid(X0, X1) for clf, title, ax in zip(models, titles, sub.flatten()): plot_contours(ax, clf, xx, yy, cmap=plt.cm.coolwarm, alpha=0.8) ax.scatter(X0, X1, c=y, cmap=plt.cm.coolwarm, s=20, edgecolors='k') ax.set_xlim(xx.min(), xx.max()) ax.set_ylim(yy.min(), yy.max()) ax.set_xlabel('Sepal length') ax.set_ylabel('Sepal width') ax.set_xticks(()) ax.set_yticks(()) ax.set_title(title) plt.show() # -
Old Jupyter Notebooks/SVM.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# # Implement a Graph Class!
#
# That's it!
#
# Best of luck and reference the video lecture for any questions!
#
# You have two fully worked out implementations in the lectures, so make sure to refer to them!
#
# Good Luck!
code/algorithms/course_udemy_1/Graphs/Graph Interview Questions/Implement a Graph.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

# Training CSVs.  NOTE(review): the file names suggest 10 time steps x
# 10 frames per sample and different capture scenarios -- confirm.
DATASET_0_1 = "../CSV/data_10time_10frame_train_0_1.csv"
DATASET_0_2 = "../CSV/data_10time_10frame_train_0_2.csv"
DATASET_1_2FACE = "../CSV/data_10time_10frame_train_1_2face.csv"
DATASET_1_book = "../CSV/data_10time_10frame_train_1_book.csv"

dataset_0_1 = pd.read_csv(DATASET_0_1, header=None)
dataset_0_2 = pd.read_csv(DATASET_0_2, header=None)
dataset_1_2face = pd.read_csv(DATASET_1_2FACE, header=None)
dataset_1_book = pd.read_csv(DATASET_1_book, header=None)

dataset_0_1.head()

# Stack all four training sets into one frame (rows concatenated).
dataset = [dataset_0_1, dataset_0_2, dataset_1_2face, dataset_1_book]
dataset = pd.concat(dataset)

dataset.shape

dataset.corr()

# Columns 0-54 are features; column 55 is the target.
X = dataset.iloc[:, 0:55].values
Y = dataset.iloc[:, 55].values

X

Y

from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=0)

# Degree-4 polynomial feature expansion with a linear fit on top.
# NOTE(review): the model is fitted on the FULL X/Y, not on the train split
# created above -- confirm whether that is intended.
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, Y)

pol_reg.predict(poly_reg.fit_transform([X[120]]))

import pickle
model = 'finalized_model.sav'
# Use a context manager so the file handle is closed after pickling
# (the original passed a bare open(...) and leaked the handle).
with open(model, 'wb') as f:
    pickle.dump(pol_reg, f)

a = "[1, 2, 3]"

[list(a) ]

len(X[1])

X[1]

# Logistic regression on the same polynomial features.
# BUGFIX: the original instantiated LogistiLogistiRegression(), which is not
# a defined name anywhere; the import below provides LogisticRegression.
from sklearn.linear_model import LogisticRegression
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
logis_pol_reg = LogisticRegression()
logis_pol_reg.fit(X_poly, Y)

arr = logis_pol_reg.predict_proba(poly_reg.fit_transform([X[100]]))
arr[0][1]

model = 'finalized_logistist_model.sav'
with open(model, 'wb') as f:
    pickle.dump(logis_pol_reg, f)
Model/handling_model.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Compute diversity indices

# %run "Header.ipynb"
# NOTE(review): Header.ipynb (run above) is expected to supply get_p2mincov,
# SEQS, seq2name and the matplotlib module -- those names are used below but
# never defined in this notebook.

import math
import json
import pysam
import pysamstats
import skbio
import pileup
from matplotlib import pyplot

# ## Parameters of the diversity index: $p$ and $m$
#
# See paper for description.

# +
m = 5

# The possible values of p we'll try. These are NOT p-values so i'm not gonna call them that lmao
percentages = [50, 25, 10, 5, 2, 1, 0.5, 0.25, 0.15]

# No edges with length < this will be included in the diversity index
MIN_SEQ_LEN = 1e6
# -

# ## Compute diversity index for varying values of $p$
#
# Note that not all genomes that pass the filter for one value of $p$ will pass the filter for other values of $p$. This is expected.

# This should include all edges' alignments now, not just the three selected genomes!
bf = pysam.AlignmentFile("../main-workflow/output/fully-filtered-and-sorted-aln.bam", "rb")

# ### Get a mapping of sequence (edge) name to sequence length

# +
# Compute mapping of edge name to length -- need to know this for some of the computations below.
#
# We don't use skbio.DNA.read() for this since that seems to only return a single sequence from the
# FASTA file at once (it's configurable with the seq_num parameter, but I don't want to accidentally
# make this a "Schlemiel the Painter's" algorithm by first iterating to seq 1, then starting over and
# iterating to seq 2, and so on). Easiest to just handle this parsing from scratch.
seq2len = {}
seqname = None
with open("../main-workflow/output/all_edges.fasta", "r") as fastafile:
    # Assumes that sequences are not split up over multiple lines (so a FASTA file with N sequences
    # should have only 2N lines, maybe 2N + 1 if there's an extra empty newline at the bottom of the file)
    for linenum, line in enumerate(fastafile):
        if line.startswith(">"):
            # Header lines must sit on even 0-indexed line numbers.
            if linenum % 2 != 0:
                raise ValueError("something weird with > location in all_edges.fasta. Go yell at Marcus.")
            seqname = line.strip()[1:]
        else:
            if linenum % 2 != 1:
                raise ValueError("something weird with non > location in all_edges.fasta. Go yell at Marcus.")
            if seqname is None:
                raise ValueError("No sequence name set yet?")
            seqlen = len(line.strip())
            seq2len[seqname] = seqlen
            seqname = None

print(f"seq2len contains {len(seq2len):,} entries. The length of edge_1 is {seq2len['edge_1']:,} bp.")
# -

num_long_enough_seqs = 0
for seq in seq2len:
    if seq2len[seq] >= MIN_SEQ_LEN:
        num_long_enough_seqs += 1
print(f"{num_long_enough_seqs:,} / {len(seq2len):,} seqs have a length of at least {MIN_SEQ_LEN:,.0f} bp.")

# Some stuff we precompute to avoid doing this a zillion times unnecessarily
p2mincov = get_p2mincov(percentages, m)

# ### Actually compute the diversity indices

# +
# The main results of this work will be saved here.
p2seq2dividx = {p: {} for p in percentages}

num_seqs = len(seq2len)
for si, seq in enumerate(seq2len.keys(), 1):
    # Immediately ignore sequences with length < 1 Mbp.
    if seq2len[seq] < MIN_SEQ_LEN:
        #print(f"Skipping since sequence length < {MIN_SEQ_LEN:,.0f} bp.")
        continue
    else:
        pct = 100 * (si / num_seqs)
        print(f"On seq {seq} ({si:,} / {num_seqs:,}) ({pct:.2f}%)...", end=" ", flush=True)
        print(f"Length = {seq2len[seq] / 1e6:,.2f} Mbp.\n\tProgress:", end=" ", flush=True)

    # Keep track of how many positions in this sequence are sufficiently covered (the classification of a
    # position as sufficiently covered or not will depend on the value of p set).
    # We'll then use this to determine whether or not we can compute the diversity index for a sequence for
    # a given value of p.
    p2sufficientlycoveredpositionct = {p: 0 for p in percentages}

    # Numbers of identified p-mutations within just the sufficiently-covered positions for a given p for this
    # sequence.
    p2mutationct = {p: 0 for p in percentages}

    one_tenth_len = math.floor(seq2len[seq] / 10)

    # Go through each position in the sequence's pileup and look for sufficiently-covered positions and
    # p-mutations within those sufficiently-covered positions.
    #
    # See bam-to-jsons.py for a description of why these stat_variation() params are useful.
    #
    # NOTE: The pileup module is (currently) only set up for the three selected MAGs, so we need to use
    # pysamstats in order to figure this stuff out.
    for ri, rec in enumerate(
        pysamstats.stat_variation(
            bf, chrom=seq, fafile="../main-workflow/output/all_edges.fasta",
            start=0, end=seq2len[seq], truncate=True, pad=True, max_depth=1e6
        ),
        1
    ):
        # very simple progress bar for my own sake. might miss a few dots here and there
        if ri % one_tenth_len == 0:
            print("*", end="", flush=True)
        matches = rec["matches"]
        mismatches = rec["mismatches"]
        # NOTE: As with "coverage" in the coverage plots, this doesn't take into account deletions at a
        # given position. That info is available through pysamstats so we could use it here if desired.
        cov = matches + mismatches
        for p in percentages:
            # Only positions at or above the minimum coverage for this p count,
            # both toward the denominator and toward the mutation count.
            if cov >= p2mincov[p]:
                p2sufficientlycoveredpositionct[p] += 1
                ref_i = "ACGT".index(rec["ref"])
                mock_pileup = [[rec["A"], rec["C"], rec["G"], rec["T"]], ref_i, rec["deletions"]]
                if pileup.naively_call_mutation(mock_pileup, p):
                    p2mutationct[p] += 1
    print("")

    # Now that we've looked through all positions, see which value(s) of p we can compute the div index for
    # for this sequence. Do so, and save the resulting info.
    valid_p = []
    half_seq_len = 0.5 * seq2len[seq]
    for p in percentages:
        # Require at least half the sequence to be sufficiently covered.
        if p2sufficientlycoveredpositionct[p] >= half_seq_len:
            valid_p.append(p)
            dividx = p2mutationct[p] / p2sufficientlycoveredpositionct[p]
            p2seq2dividx[p][seq] = dividx
            print(f"\tDiv index for p = {p}%: ", dividx)
    print(f"\tDiversity index was defined for {len(valid_p)} / {len(percentages)} values of p: {valid_p}.")
# -

# ### Save the diversity index info to a file
#
# Since this can take a few hours to compute, this helps if we have to restart the notebook halfway through / etc.

with open("misc-output/p2seq2dividx.json", "w") as jf:
    jf.write(json.dumps(p2seq2dividx))

# ## Visualize the diversity indices
#
# As histograms sharing the same x-axes.

# Reload from disk; note JSON stringifies the p keys, hence str(p) below.
with open("misc-output/p2seq2dividx.json", "r") as jf:
    p2seq2dividx = json.load(jf)

# +
# Find the overall maximum diversity index so every histogram can share x-limits.
max_div_idx = float("-inf")
max_div_idx_p = None
max_div_idx_seq = None
for p in percentages:
    sp = str(p)
    for seq in p2seq2dividx[sp].keys():
        di = p2seq2dividx[sp][seq]
        if di > max_div_idx:
            max_div_idx = di
            max_div_idx_p = p
            max_div_idx_seq = seq

print(f"Max diversity index is at p = {max_div_idx_p}%, seq = {max_div_idx_seq}: div index is {max_div_idx}")
xlim_max = math.ceil(max_div_idx * 100)
print(f"xlim max is therefore x = {xlim_max}%.")

# +
# (Omit p = 0.15%, because it's not very interesting -- only one edge [CAMP] is sufficiently-covered enough.)
row_percentages = percentages[:-1]

fig, axes = pyplot.subplots(len(row_percentages), 1, sharey=True, gridspec_kw={"hspace": 0.98})

# matches the log-ratio plots and the cov/len summary plots
# TODO move to header notebook to avoid redundancy
seq2color = {"edge_6104": "#00cc00", "edge_1671": "#ff0000", "edge_2358": "#880088"}
seq2linestyle = {"edge_6104": "-", "edge_1671": "--", "edge_2358": ":"}

for pi, p in enumerate(row_percentages):
    # We want bins to be identical and easy to compare between plots. So we use 50 bins, total:
    # 10 bins per percentage (so the first bin is [0%, 0.1%), the next is [0.1%, 0.2%), etc.
    # up to [4.9%, 5%]).
    # (This should scale with the xlim_max thing we just set, but as of writing it's x = 5%. So 50 bins.)
    # range() doesn't support floating-point steps so we first get an integer range then convert this to a
    # range of floating-point vals.
    bins = [x / 10 for x in range((xlim_max * 10) + 1)]
    # Use linewidth & edgecolor to give the histogram bars a nice, subtle outline:
    # https://stackoverflow.com/a/42741774
    axes[pi].hist(
        [100 * v for v in p2seq2dividx[str(p)].values()],
        color="#888888", linewidth=1, edgecolor="#555555", bins=bins
    )
    axes[pi].set_ylim(0)
    axes[pi].set_xlim(-0.05, xlim_max)
    axes[pi].set_yscale("symlog")
    # Don't write the axis numbers as 10^0, 10^1, etc. -- just write as 1, 10, etc.
    # https://stackoverflow.com/a/49751075
    axes[pi].yaxis.set_major_formatter(matplotlib.ticker.ScalarFormatter())
    # Show nice tiny labels for increments of 0.1%.
    axes[pi].set_xticks(bins, minor=True)
    axes[pi].xaxis.set_minor_formatter("{x:.1f}")
    # Adjust the x-axis tick font sizes: https://stackoverflow.com/a/11386056
    axes[pi].tick_params(axis="x", which="major", labelsize=12)
    axes[pi].tick_params(axis="x", which="minor", labelsize=8)
    # Also adjust the y-axis tick font sizes; prevent the 1 and the 0 from overlapping
    axes[pi].tick_params(axis="y", which="major", labelsize=10)
    nsc = len(p2seq2dividx[str(p)])
    mincov = p2mincov[p]
    axes[pi].set_title(
        f"$p$ = {p}%\n{nsc} sufficiently-covered sequences (minimum sufficient coverage: {mincov:,.0f}x)",
        fontsize=12
    )
    if pi == len(row_percentages) - 1:
        axes[pi].set_xlabel("Diversity index (%)", fontsize="15")
    # Overlay a vertical line for each of the three selected MAGs that has a
    # defined diversity index at this p.
    for s in SEQS:
        if s in p2seq2dividx[str(p)]:
            label = seq2name[s] if pi == 0 else None
            axes[pi].axvline(
                x=p2seq2dividx[str(p)][s] * 100, color=seq2color[s], linewidth=5,
                linestyle=seq2linestyle[s], label=label
            )

# loc parameter is the lower-left corner of the legend, in "axes" coordinates. Don't ask.
# (The color stuff changes the legend's background from the gray imposed by the ggplot-esque styles we set
# to just a white background so it fits with a white background)
# The handlelength makes the legend longer, so we can see the difference between the different MAGs'
# line styles more clearly
fig.legend(loc=(0.828, 0.915), facecolor="white", edgecolor="white", handlelength=7)

fig.set_size_inches(15, 18)
st = f"Histograms of diversity indices\nConsidering only the {num_long_enough_seqs} edge sequences in the graph with length $\geq$ 1 Mbp"
fig.suptitle(st, y=0.95, x=0.515, fontsize=15)
# y-axis label spanning multiple rows of plots; could probably do this more elegantly but this works so whatevs
# the color of #777777 is intended to match the x-axis labels' colors
fig.text(
    0.07, 0.295, "Number of edge sequences with diversity indices within a given range",
    rotation="vertical", fontsize="15", color="#777777"
)
fig.savefig("figs/diversity-indices.png", bbox_inches="tight")
# -
notebooks/DiversityIndices.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="wIumsTztV1tN" # # Deep Learning Optimizer for Hydrogel Bilayer Design # # *Last Edited: 2021.04.05* # # *** # *Important Notes* # - **5 step Patterning Step limit!** # - **Fix Analysis a.output** # # *Notes* # - **min segement length[um]** = 1,000 or 800 # - max strip segments: 6 ~ 8 # - current best model: DigitClassifier_20201014_mod1 or 3.h5 # # *** # * Goal: **Build an algorithm to find designs that can actuate DNA-co-polymerizer bilayer strips into multiple digits.** # - The DNA-co-polymerizer bilayer strips are bilayer segments stacked together horizontally. # - Currently, 4 orthogonal system of actuation + 1 dummy. # + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="JXhAv77MVpZb" outputId="8ae699d8-e849-40ca-c6d4-7d9821e11eb8" # Package Importing import csv, math, os, time, copy, matplotlib, datetime, keras import tensorflow as tf import numpy as np import matplotlib.pyplot as plt from keras.datasets import mnist from keras.models import Sequential, load_model from keras.layers import Dense, Dropout, Flatten from keras.layers.convolutional import Conv2D, MaxPooling2D from keras.utils import np_utils from scipy import io as spio from scipy.ndimage import gaussian_filter from scipy.stats import bernoulli from math import log10, floor from skimage import transform, exposure print(keras.__version__) # 2.4.3 print(tf.__version__) # 2.2.0 # + colab={"base_uri": "https://localhost:8080/", "height": 265} colab_type="code" id="qufG21JQaR84" outputId="98a56bb4-60e3-4805-915a-ace41cfd8a9c" class SegmentedCurve: """ Object that defines a curve. A curve is defined with following properties: 1. 
segment lengths *list* - list of numbers specifying the length of each segments making up the curve. - unit: um 2. rocs *list* - list of numbers specifying the radius of curvature of each segment. - set it to a high number such as 1e5 for flat line. 3. ctls *list* - list of the contour length change for each segments. Functions: 1. set_points - generate the curve as a list of points. 2. rotate - rotate the curve. 3. translate - translate the curve. 4. generate_image - generate the image of the curve from the points. 5. plot - plot the curve. """ def __init__(self, segment_lengths, rocs = [], ctls = []): """ By default this curve starts at (0, 0) and tangent angle 0. Deafault ROC is 999999. """ self.segment_lengths = segment_lengths if not rocs: self.rocs = np.ones_like(segment_lengths) * 999999 else: self.rocs = rocs if not ctls: self.segment_lengths = np.array(segment_lengths) else: self.segment_lengths = np.array(segment_lengths) * ctls self.initial_point = [0, 0] self.initial_tangent_angle = 0 self.set_points() def set_points(self): """ Start generating the rest of the segment points from the origin and based one the segment length and ROC. """ s_iter = SegmentedCurveIterator(self) self.x_points = []; self.y_points = [] self.tangent_angles = [] while True: try: element = next(s_iter) self.x_points.append(element[0]) self.y_points.append(element[1]) self.tangent_angles.append(s_iter.tangent_angle) except StopIteration: break def rotate(self, angle): """ Rotate the curve by adding an angle, and regrowing the rest of the curve. """ self.initial_tangent_angle += angle self.set_points() def translate(self, coords): """ Translate the curve by shifting its origin, and regrowing the rest of the curve. """ self.initial_point += coords self.set_points() def generate_image(self, width = 28, height = 28, padding = 0, filter = None, filter_sigma = 1): """ Generates the image of the curve. **Parameters** width *int* the width of the image in pixel. 
height *int* the height of the image in pixel. As the trained images are 28 x 28 pixels images, we return an image of the same size. padding *int* - provide additional space if needed. - not sure working or not... filter *string* Can choose None or "Gaussian", used to blur the image so it resembles the digits more. filter_sigma *int or float* Sigma for the Gaussian filter. """ contour_length = np.sum(self.segment_lengths) half_dimension = 1.5 * contour_length / (2 * math.sqrt(2)) #print("Old half dimension was "+ str(half_dimension)) x_span = np.amax(self.x_points) - np.amin(self.x_points) y_span = np.amax(self.y_points) - np.amin(self.y_points) half_dimension = max([x_span, y_span]) #print("Half dimension is " + str(half_dimension)) x_center = np.median(self.x_points) y_center = np.median(self.y_points) new_x = np.array(self.x_points) - x_center new_y = np.array(self.y_points) - y_center new_x = new_x * (width / 2 + padding) // half_dimension new_y = new_y * (height / 2 + padding) // half_dimension image_pixels = np.zeros(shape = (height + 2 * padding, width + 2 * padding)) for i in range(len(new_x)): image_pixels[math.floor(new_y[i]) + math.floor(height / 2) + padding][math.floor(new_x[i]) + math.floor(width / 2) + padding] += 1 p50, p98 = np.percentile(image_pixels, (50, 98)) if p98 == 0 and p50 == 0: p50, p98 = np.percentile(image_pixels, (50, 99)) image_pixels = exposure.rescale_intensity(image_pixels, in_range = (p50, p98)) image_pixels = image_pixels / np.amax(image_pixels, axis=(0, 1)) image_pixels = transform.resize(image_pixels, (width + padding, height + padding)) if filter is "Gaussian": image_pixels = gaussian_filter(image_pixels, sigma = filter_sigma) p50, p98 = np.percentile(image_pixels, (50, 98)) if p98 == 0 and p50 == 0: p50, p98 = np.percentile(image_pixels, (50, 99)) image_pixels = exposure.rescale_intensity(image_pixels, in_range = (p50, p98)) return image_pixels def generate_image_v2(self, width = 28, height = 28, padding = 0, filter = 
"Gaussian",\ filter_sigma = .8, half_dimension = 5000): """ Generates the image of the curve. **Parameters** width *int* the width of the image in pixel. height *int* the height of the image in pixel. As the trained images are 28 x 28 pixels images, we return an image of the same size. padding *int* - provide additional space if needed. - not sure working or not... filter *string* Can choose None or "Gaussian", used to blur the image so it resembles the digits more. filter_sigma *int or float* Sigma for the Gaussian filter. """ #print("Half dimension is " + str(half_dimension)) x_center = np.median(self.x_points) y_center = np.median(self.y_points) new_x = np.array(self.x_points) - x_center new_y = np.array(self.y_points) - y_center new_x = new_x * (width / 2 + padding) // half_dimension new_y = new_y * (height / 2 + padding) // half_dimension image_pixels = np.zeros(shape = (height + 2 * padding, width + 2 * padding)) for i in range(len(new_x)): image_pixels[math.floor(new_y[i]) + math.floor(height / 2) + padding][math.floor(new_x[i]) + math.floor(width / 2) + padding] += 1 p50, p98 = np.percentile(image_pixels, (50, 98)) if p98 == 0 and p50 == 0: p50, p98 = np.percentile(image_pixels, (50, 99)) image_pixels = exposure.rescale_intensity(image_pixels, in_range = (p50, p98)) image_pixels = image_pixels / np.amax(image_pixels, axis=(0, 1)) image_pixels = transform.resize(image_pixels, (width + padding, height + padding)) if filter is "Gaussian": image_pixels = gaussian_filter(image_pixels, sigma = filter_sigma) p50, p98 = np.percentile(image_pixels, (50, 98)) if p98 == 0 and p50 == 0: p50, p98 = np.percentile(image_pixels, (50, 99)) image_pixels = exposure.rescale_intensity(image_pixels, in_range = (p50, p98)) return image_pixels def plot(self): """ Plot and show the curve. 
""" fig, ax = plt.subplots(1, 1, figsize = (5, 5)) ax.plot(self.x_points, self.y_points, linewidth = 15) plt.show() class SegmentedCurveIterator: """ This class is mainly used as the generator for the SegmentedCurve class. """ def __init__(self, segmented_curve): self.rocs = segmented_curve.rocs self.segment_starts = np.cumsum(segmented_curve.segment_lengths) - segmented_curve.segment_lengths self.curve_end = np.sum(segmented_curve.segment_lengths) self.segment_starts = np.append(self.segment_starts,[self.curve_end]) self.last_point = segmented_curve.initial_point self.tangent_angle = segmented_curve.initial_tangent_angle self.current_length = 0; self.current_segment = 0 # delta can be considered as the MESH SIZE for segments. self.delta = 20 def __next__(self): self.current_length += self.delta if self.current_length > self.curve_end: raise StopIteration() if self.current_length > self.segment_starts[self.current_segment + 1]: self.current_segment += 1 angle_change = self.delta / self.rocs[self.current_segment] self.tangent_angle += angle_change pos_change = [self.delta*math.sin(self.tangent_angle), \ self.delta*math.cos(self.tangent_angle)] self.last_point = np.add(self.last_point,pos_change) return self.last_point # Example of a curve total length = 100um + 200um + 500um. # And RoC = 1e5um, 670um, -830um for the radius of curvature. s2 = SegmentedCurve([100, 200, 300],[1e5, 670, -830]) s2.plot() # + colab={"base_uri": "https://localhost:8080/", "height": 408} colab_type="code" id="0pOc-QYScDZF" outputId="f8ea6721-0255-453c-c46c-fdaf412117a8" def get_curvature(rocs, states, top_type, bottom_type): """ Table reading function for retrieving the curvature and contour length change. """ return rocs[int(states[int(top_type)])][int(states[int(bottom_type)])][int(top_type)][int(bottom_type)] class ActuatorStrip: """ Object that defines a bilayer hydrogel actuator. An actuator strip is defined with following properties: 1. 
lengths *list* - list of numbers specifying the length of each segments making up the curve. - unit: um 2. identities *list* - the actuator system that each segment is patterned with. 3. rocs *list* - radius of curvature table for bilayer - set it to a high number such as 1e5 for flat line. 4. ctls *list( - contour length table for bilayer """ def __init__(self, lengths, identities, rocs, ctls): self.segment_lengths = lengths self.identities = identities self.rocs = rocs self.ctls = ctls self.actuator_types = len(rocs[0][0]) - 1 self.self_intersecting = [] self.output_info = {} self.selected = {} def generate_curves(self): """ Generates the points for all possible strips formed. """ self.curves = [] for i in range(pow(2, self.actuator_types)): # i is an integer -- the qth bit of i in binary encodes whether the qth actuator type is on. curvatures = []; ctlengths = []; states = [0] for k in range(self.actuator_types): # << bitwise shift operators if i & (1 << k): states.append(1) else: states.append(0) # print(states) # see how each image states is made for j in range(len(self.segment_lengths)): curvatures.append(get_curvature(self.rocs, states, self.identities[0][j], self.identities[1][j])) ctlengths.append(get_curvature(self.ctls, states, self.identities[0][j], self.identities[1][j])) s = SegmentedCurve(self.segment_lengths, curvatures, ctlengths) self.curves.append(s) def determine_self_intersection(self): """ NEED A FASTER self intersecting check, currently not in use. 
""" state_count = pow(2, self.actuator_types) for j in range(state_count): #print("Looking at self intersection " + str(j)) #self.self_intersecting.append(self.curves[j].is_self_intersecting()) self.self_intersecting.append(0) #print("Self intersecting is now " + str(self.self_intersecting)) def plot_input_design(self, save = False): fig , ax = plt.subplots(1, figsize = (5, 5)) fig_width = int(np.sum(self.segment_lengths) * 1.2); strip_width = int(fig_width/21); shift = int(fig_width*.6) cm = plt.cm.get_cmap('tab20') ax.imshow(np.ones(shape=(fig_width, fig_width)), cmap = "tab20b") for i in range(len(self.segment_lengths)): ax.add_patch(matplotlib.patches.Rectangle((fig_width/2-strip_width,strip_width+np.sum(self.segment_lengths[0:i])),strip_width,self.segment_lengths[i], color = cm.colors[self.identities[0][i]])) ax.add_patch(matplotlib.patches.Rectangle((fig_width/2,strip_width+np.sum(self.segment_lengths[0:i])),strip_width,self.segment_lengths[i], color = cm.colors[self.identities[1][i]])) ax.add_patch(matplotlib.patches.Rectangle((strip_width, shift), strip_width*3, strip_width, color = cm.colors[0])) ax.add_patch(matplotlib.patches.Rectangle((strip_width, strip_width*1.5+shift), strip_width*3, strip_width, color = cm.colors[1])) ax.add_patch(matplotlib.patches.Rectangle((strip_width, strip_width*3+shift), strip_width*3, strip_width, color = cm.colors[2])) ax.add_patch(matplotlib.patches.Rectangle((strip_width, strip_width*4.5+shift), strip_width*3, strip_width, color = cm.colors[3])) ax.add_patch(matplotlib.patches.Rectangle((strip_width, strip_width*6+shift), strip_width*3, strip_width, color = cm.colors[4])) ax.text(shift/2.8, strip_width*1+shift, "Sys0", fontsize = 12, color = "white", family = "serif", weight = "bold") ax.text(shift/2.8, strip_width*2.5+shift, "Sys1", fontsize = 12, color = "white", family = "serif", weight = "bold") ax.text(shift/2.8, strip_width*4+shift, "Sys2", fontsize = 12, color = "white", family = "serif", weight = "bold") 
ax.text(shift/2.8, strip_width*5.5+shift, "Sys3", fontsize = 12, color = "white", family = "serif", weight = "bold") ax.text(shift/2.8, strip_width*7+shift, "Sys4", fontsize = 12, color = "white", family = "serif", weight = "bold") for i in range(len(self.segment_lengths)): ax.annotate("%dum"%(self.segment_lengths[i]), xy=(fig_width/2+strip_width,strip_width*1.5+np.sum(self.segment_lengths[0:i])), xytext=(fig_width-strip_width*5, strip_width*1.5+np.sum(self.segment_lengths[0:i])),\ arrowprops = dict(arrowstyle="-|>", color="white"), fontsize = 12, color = "white", family = "serif", weight = "bold") plt.title("Input Design", fontsize = 15, family = "serif", weight = "bold") plt.axis(False) if save: plt.savefig(datetime.datetime.now().strftime("%Y%m%d_%H_%M_%S_%f") + "_input.png", dpi = 600) plt.show() def analysis(self, rotation = 20): self.generate_curves() bestImgs = np.zeros(shape = (16, 28, 28)); bctr = 0; HAS9 = 0; HAS6 = 0 state_name = ["ALL OFF", "S1 ON", "S2 ON", "S1 & S2", "S3 ON", "S1 & S3", "S2 & S3", "S1 & S2 & S3", "S4 ON", "S1 & S4", "S2 & S4", "S1 & S2 & S4", "S3 & S4", "S1 & S3 & S4", "S2 & S3 & S4", "ALL ON"] rotations = [] for curve in self.curves: possibility = np.zeros(shape = (rotation, 28, 28)); ctr = 0 for _ in range(rotation): curve.rotate(2*math.pi / rotation) img = curve.generate_image(filter = 'Gaussian') possibility[ctr]=img;ctr+=1 score = cnn_digit_model.predict(possibility.reshape(possibility.shape[0], 28, 28, 1)) info = [(i, np.argmax(score[i]), score[i][np.argmax(score[i])]) for i in range(len(score)) if np.argmax(score[i]) < 10] if info: best = np.argmax([info[i][2] for i in range(len(info))]) if info[best][1] == 9: if not HAS9: HAS9 = True bestImgs[bctr] = possibility[info[best][0]] elif not HAS6: try: best = [info[i][1] for i in range(len(info))].index(6) bestImgs[bctr] = possibility[info[best][0]] except: bestImgs[bctr] = possibility[info[best][0]] else: bestImgs[bctr] = possibility[info[best][0]] elif info[best][1] == 6: if not 
    def generate_unscaled_imgs(self, filter_sigma = .8, half_dimension = 5000):
        """Render all 16 state curves at a fixed world scale into
        ``self.unscaled_imgs`` (16 x 28 x 28).

        Unlike the auto-scaled images used for classification, every image
        here shares the same physical scale (half_dimension um per half
        image), so states can be compared at true relative size.

        **Parameters**
        filter_sigma *float* -- sigma of the Gaussian blur.
        half_dimension *int* -- half-width of the rendered area in um.
        """
        # NOTE(review): reads self.output_info[i]["rotations"], which is only
        # populated by analysis() -- call analysis() first; TODO confirm.
        # generate_image_v2 is defined on SegmentedCurve outside this view --
        # presumably the fixed-scale rendering variant; verify.
        self.generate_curves(); self.unscaled_imgs = np.zeros(shape = (16, 28, 28))
        for i in range(16):
            # Re-apply the rotation that produced the best classifier score.
            curve = self.curves[i]; curve.rotate(self.output_info[i]["rotations"])
            img = curve.generate_image_v2(filter_sigma = filter_sigma, half_dimension = half_dimension)
            self.unscaled_imgs[i] = img
    def plot_advanced_output_map(self, rotation = 20, save = False):
        """Plot the 4x4 advanced output map: the best-scoring image found by
        analysis() for each of the 16 actuation states.

        Unlike plot_output_map, this draws the rotation-searched results
        (which also allow 6 and 9 to be present simultaneously).

        **Parameters**
        rotation *int* -- rotations searched by analysis() if it must run.
        save *bool* -- write the figure to a timestamped png at 600 dpi.
        """
        # Re-run the analysis if it has not been done yet, or if a
        # non-default rotation count is requested.
        if not self.output_info or rotation != 20:
            self.analysis(rotation = rotation)
        fig = plt.figure(figsize = (6,6)); ctr = 0
        for i in range(pow(2, self.actuator_types)):
            plt.subplot(4, 4, ctr+1)
            img = self.output_info[ctr]["img"]
            plt.imshow(img)
            plt.title(self.output_info[ctr]["state"], fontsize = 10, family = "serif", weight = "bold", y = .95)
            # Overlay "<digit>: <score>" from the classifier in the corner.
            plt.text(img.shape[1]*.05, img.shape[1]*.9, "{}: {:.3f}".format(self.output_info[ctr]["dig"], self.output_info[ctr]["score"]), fontsize = 12, family = "serif", weight = "bold", color = "white")
            plt.axis(False); ctr += 1
        fig.suptitle("Output Map", fontsize = 12, family = "serif", weight = "bold", y = .95)
        if save:
            plt.savefig(datetime.datetime.now().strftime("%Y%m%d_%H_%M_%S_%f") + "_advancedoutput.png", dpi = 600)
        plt.show()
"white") plt.title("{} ({})".format(i, self.selected[i]['state']), fontsize = 10, family = "serif", weight = "bold", y = .98) else: plt.imshow(np.zeros(shape = (28, 28))) plt.title("{} (n.a.)".format(i), fontsize = 10, family = "serif", weight = "bold", y = .98) plt.axis(False) fig.suptitle("Optimized Output Map", fontsize = 15, family = "serif", weight = "bold", y = .98) if save: plt.savefig(datetime.datetime.now().strftime("%Y%m%d_%H_%M_%S_%f") + "_selectedoutput.png", dpi = 600) plt.show() def plot_input_and_all(self, rotation = 20, save = False): if not self.selected or rotation != 20: self.analysis(rotation = rotation) fig = plt.figure(figsize=(12,6)) ax = plt.subplot(1,2,1) fig_width = int(np.sum(self.segment_lengths) * 1.2); strip_width = int(fig_width/21); shift = int(fig_width*.6) cm = plt.cm.get_cmap('tab20') ax.imshow(np.ones(shape=(fig_width, fig_width)), cmap = "tab20b") for i in range(len(self.segment_lengths)): ax.add_patch(matplotlib.patches.Rectangle((fig_width/2-strip_width,strip_width+np.sum(self.segment_lengths[0:i])),strip_width,self.segment_lengths[i], color = cm.colors[self.identities[0][i]])) ax.add_patch(matplotlib.patches.Rectangle((fig_width/2,strip_width+np.sum(self.segment_lengths[0:i])),strip_width,self.segment_lengths[i], color = cm.colors[self.identities[1][i]])) ax.add_patch(matplotlib.patches.Rectangle((strip_width, shift), strip_width*3, strip_width, color = cm.colors[0])) ax.add_patch(matplotlib.patches.Rectangle((strip_width, strip_width*1.5+shift), strip_width*3, strip_width, color = cm.colors[1])) ax.add_patch(matplotlib.patches.Rectangle((strip_width, strip_width*3+shift), strip_width*3, strip_width, color = cm.colors[2])) ax.add_patch(matplotlib.patches.Rectangle((strip_width, strip_width*4.5+shift), strip_width*3, strip_width, color = cm.colors[3])) ax.add_patch(matplotlib.patches.Rectangle((strip_width, strip_width*6+shift), strip_width*3, strip_width, color = cm.colors[4])) ax.text(shift/2.8, strip_width*1+shift, "Sys0", 
fontsize = 12, color = "white", family = "serif", weight = "bold") ax.text(shift/2.8, strip_width*2.5+shift, "Sys1", fontsize = 12, color = "white", family = "serif", weight = "bold") ax.text(shift/2.8, strip_width*4+shift, "Sys2", fontsize = 12, color = "white", family = "serif", weight = "bold") ax.text(shift/2.8, strip_width*5.5+shift, "Sys3", fontsize = 12, color = "white", family = "serif", weight = "bold") ax.text(shift/2.8, strip_width*7+shift, "Sys4", fontsize = 12, color = "white", family = "serif", weight = "bold") for i in range(len(self.segment_lengths)): ax.annotate("%dum"%(self.segment_lengths[i]), xy=(fig_width/2+strip_width,strip_width*1.5+np.sum(self.segment_lengths[0:i])), xytext=(fig_width-strip_width*5, strip_width*1.5+np.sum(self.segment_lengths[0:i])),\ arrowprops = dict(arrowstyle="-|>", color="white"), fontsize = 12, color = "white", family = "serif", weight = "bold") plt.title("Input Design", fontsize = 15, family = "serif", weight = "bold") plt.axis(False) ctr = 0; shiftlist = [5,5,5,5,9,9,9,9,13,13,13,13,17,17,17,17] for i in range(pow(2, self.actuator_types)): plt.subplot(4, 8, ctr + shiftlist[ctr]) img = self.output_info[ctr]["img"] plt.imshow(img) plt.title(self.output_info[ctr]["state"], fontsize = 10, family = "serif", weight = "bold", y = .95) plt.text(img.shape[1]*.05, img.shape[1]*.9, "{}: {:.3f}".format(self.output_info[ctr]["dig"], self.output_info[ctr]["score"]), fontsize = 12, family = "serif", weight = "bold", color = "white") plt.axis(False); ctr += 1 fig.suptitle("Design Input and Output Map", fontsize = 15, family = "serif", weight = "bold", y = .95) if save: plt.savefig(datetime.datetime.now().strftime("%Y%m%d_%H_%M_%S_%f") + "_inandoutput.png", dpi = 600) plt.show() def plot_input_and_selected(self, rotation = 20, save = False): if not self.selected or rotation != 20: self.analysis(rotation = rotation) fig = plt.figure(figsize=(16,4)) ax = plt.subplot(1,4,1) fig_width = int(np.sum(self.segment_lengths) * 1.2); 
strip_width = int(fig_width/21); shift = int(fig_width*.6) cm = plt.cm.get_cmap('tab20') ax.imshow(np.ones(shape=(fig_width, fig_width)), cmap = "tab20b") for i in range(len(self.segment_lengths)): ax.add_patch(matplotlib.patches.Rectangle((fig_width/2-strip_width,strip_width+np.sum(self.segment_lengths[0:i])),strip_width,self.segment_lengths[i], color = cm.colors[self.identities[0][i]])) ax.add_patch(matplotlib.patches.Rectangle((fig_width/2,strip_width+np.sum(self.segment_lengths[0:i])),strip_width,self.segment_lengths[i], color = cm.colors[self.identities[1][i]])) ax.add_patch(matplotlib.patches.Rectangle((strip_width, shift), strip_width*3, strip_width, color = cm.colors[0])) ax.add_patch(matplotlib.patches.Rectangle((strip_width, strip_width*1.5+shift), strip_width*3, strip_width, color = cm.colors[1])) ax.add_patch(matplotlib.patches.Rectangle((strip_width, strip_width*3+shift), strip_width*3, strip_width, color = cm.colors[2])) ax.add_patch(matplotlib.patches.Rectangle((strip_width, strip_width*4.5+shift), strip_width*3, strip_width, color = cm.colors[3])) ax.add_patch(matplotlib.patches.Rectangle((strip_width, strip_width*6+shift), strip_width*3, strip_width, color = cm.colors[4])) ax.text(shift/2.8, strip_width*1+shift, "Sys0", fontsize = 11, color = "white", family = "serif", weight = "bold") ax.text(shift/2.8, strip_width*2.5+shift, "Sys1", fontsize = 11, color = "white", family = "serif", weight = "bold") ax.text(shift/2.8, strip_width*4+shift, "Sys2", fontsize = 11, color = "white", family = "serif", weight = "bold") ax.text(shift/2.8, strip_width*5.5+shift, "Sys3", fontsize = 11, color = "white", family = "serif", weight = "bold") ax.text(shift/2.8, strip_width*7+shift, "Sys4", fontsize = 11, color = "white", family = "serif", weight = "bold") for i in range(len(self.segment_lengths)): ax.annotate("%dum"%(self.segment_lengths[i]), xy=(fig_width/2+strip_width,strip_width*1.5+np.sum(self.segment_lengths[0:i])), xytext=(fig_width-strip_width*5, 
strip_width*1.5+np.sum(self.segment_lengths[0:i])),\ arrowprops = dict(arrowstyle="-|>", color="white"), fontsize = 11, color = "white", family = "serif", weight = "bold") plt.title("Input Design", fontsize = 15, family = "serif", weight = "bold") plt.axis(False) plt.subplots_adjust(wspace = 0.01, hspace = .2) shiftlist = list(range(3,8))+list(range(10,15)) for i in range(10): plt.subplot(2, 7, shiftlist[i]) if i in self.selected.keys(): img = self.selected[i]['img'] plt.imshow(img) plt.text(img.shape[1]*.05, img.shape[1]*.9, "Score: {:.3f}".format(self.selected[i]['score']), fontsize = 12, family = "serif", weight = "bold", color = "white") plt.title("{} ({})".format(i, self.selected[i]['state']), fontsize = 12, family = "serif", weight = "bold", y = .98) else: plt.imshow(np.zeros(shape = (28, 28))) plt.title("{} (n.a.)".format(i), fontsize = 12, family = "serif", weight = "bold", y = .98) plt.axis(False) fig.suptitle("Design Input and Optimized Output Map", fontsize = 16, family = "serif", weight = "bold", y = 1.05) plt.subplots_adjust(wspace = 0.01, hspace = .2) if save: plt.savefig(datetime.datetime.now().strftime("%Y%m%d_%H_%M_%S_%f") + "_selectedoutput.png", dpi = 600) plt.show() def save_imgs(self, rotation = 20, path = os.getcwd()): if not self.output_info or rotation != 20: self.analysis(rotation = rotation) for i in range(16): try: objt = self.output_info; foldername = "d{}".format(objt[i]["dig"]); filename = datetime.datetime.now().strftime("%Y%m%d_%H_%M_%S_%f") + "_output.png"; plt.imsave("/".join((path, foldername, filename)), objt[i]["img"]) except: continue # segment_lengths_ex_four_types = [938, 954, 1022, 1333, 1333] # segment_identities_ex_four_types = [[2,3,2,2,3],[1,2,4,4,3]] # a = ActuatorStrip(segment_lengths_ex_four_types, segment_identities_ex_four_types, four_t_rocs, four_t_ctls) # # a.generate_curves() # cnn_digit_model = load_model("CNN_dig_RSQ2_v1.h5") # a.plot_input_and_all() # a.plot_input_and_selected() # - # # create folders for 
digit images saving # for i in range(11): # os.mkdir("d{}".format(i)) # print("Folders Made!") # + [markdown] colab_type="text" id="ornIrJb7K_Kh" # ## Actuator System Parameters # # **Radius of Curvature** # # |S1.2 RoC [um]|1 OFF Second round|1 ON/ OTHER OFF| # |---|---|---| # |S1.2|nc |- | # |S2.2|1430|670+100=770| # |S5 |1430|625+100=725| # |S6.2|1430|590+100=690| # |No DNA|assume 5e3|assume 630+100=730| # # |S2.2 RoC [um]|BOTH OFF|2 ON/ OTHER OFF| # |---|---|---| # |S1.2|1540|670+150=820| # |S2.2|nc|-| # |S5 |1250|500+150=650| # |S6.2|1430|545+150=695| # |No DNA|assume 5e3|assume 590+150=740| # # |S5 RoC [um]|BOTH OFF|3 ON/ OTHER OFF| # |---|---|---| # |S1.2|1200|570| # |S2.2|1550|667| # |S5 |nc|-| # |S6.2|1250|830| # |No DNA|assume 5e3|assume 770| # # |S6.2 RoC [um]|BOTH OFF|4 ON/ OTHER OFF| # |---|---|---| # |S1.2|1060|500| # |S2.2|1250|667| # |S5 |2000|670| # |S6.2|nc|-| # |No DNA|assume 5e3|assume 630| # # **RoC Matrix** # # *Definition* # # - RoC bending downward is positive, bending upward (ex. shape U) negative. # - **nc**: for no curvature = 1e5, **-**: for no value, but still put in 1e8 to avoid confusing the table. # - Lacking most data for pos_pos matrix, for systems sharing similar swelling behavior (sys 1, 2, 4) when both swollen, assume RoC = nc; for system 3, that swells a lot, assume slight bending caused and value = 3500um. 
# # |top neg, bot neg|0|1|2|3|4| # |---|---|---|---|---|---| # |0|nc |5e3|5e3|5e3|5e3| # |1|-5e3|nc |nc |nc |nc | # |2|-5e3|nc |nc |nc |nc | # |3|-5e3|nc |nc |nc |nc | # |4|-5e3|nc |nc |nc |nc | # # |top pos, bot pos|0|1|2|3|4| # |---|---|---|---|---|---| # |0|-|-|-|-|-| # |1|-|nc|*assumed nc*|3500|*assumed nc*| # |2|-|*assumed nc*|nc|3500|*assumed nc*| # |3|-|-3500|-3500|nc|-3500| # |4|-|*assumed nc*|*assumed nc*|3500|nc| # # |top pos, bot neg|0|1|2|3|4| # |---|---|---|---|---|---| # |0|-|730|740|770|630| # |1|-|- |820|570|500| # |2|-|770|- |667|667| # |3|-|725|650|- |670| # |4|-|690|695|830|- | # # |top neg, bot pos|0|1|2|3|4| # |---|---|---|---|---|---| # |0|- |- |- |- |- | # |1|-730|- |-770|-725|-690| # |2|-740|-820|- |-650|-695| # |3|-770|-570|-667|- |-830| # |4|-630|-500|-667|-670|- | # + [markdown] colab_type="text" id="f4Wj4bxr_wLX" # **Delta Contour Lengths** # # ||S1.2 Contour Length| # |--- |---| # |S1.2|from monolayer (second round), 0.45| # |S2.2|0.24| # |S5 |0.18| # |S6.2|0.19| # # ||S2.2 Contour Length| # |--- |---| # |S1.2|0.16| # |S2.2|from monolayer (second round), (0.45)| # |S5 |0.2| # |S6.2|0.23| # # ||S5 Contour Length| # |--- |---| # |S1.2|0.32| # |S2.2|0.59| # |S5 |from monolayer (second round), 0.7| # |S6.2|0.32| # # ||S6.2 Contour Length| # |--- |---| # |S1.2|0.25| # |S2.2|0.5| # |S5 |0.35| # |S6.2|from monolayer (second round), 0.4| # # **Contour Matrix** # # - for pos_pos, use the value from monolayer for diagonals, and slightly uptune/downtune for paired systems. # - for pos_neg, downtune .2 for connection with no-DNAs. 
# Set up the RoC matrix.
# Lookup order (see get_curvature): four_t_rocs[top_state][bottom_state]
# selects one of the four 5x5 tables, then [top_type][bottom_type] the entry.
actuator_types = 4; nc = 1e5; na = 1e8
# nc: "no curvature" -- treated as flat (RoC = 1e5 um)
# na: "no value" placeholder (1e8) kept so the tables stay rectangular
neg_neg_rocs = [[nc, 5e3, 5e3, 5e3, 5e3],[-5e3, nc, nc, nc, nc],[-5e3, nc, nc, nc, nc],[-5e3, nc, nc, nc, nc],[-5e3, nc, nc, nc, nc]]
pos_pos_rocs = [[na, na, na, na, na],[na, nc, nc, 3500, nc],[na, nc, nc, 3500, nc],[na, -3500, -3500, nc, -3500],[na, nc, nc, 3500, nc]]
# FIX: entry [3][3] was `nc` (1e5); the documented table marks every pos_neg
# diagonal "-" (no value -> na, 1e8) like the other diagonals. The entry is
# unreachable anyway: equal types always share the same on/off state, so the
# lookup for a same-type pair never lands in the pos_neg table.
pos_neg_rocs = [[na, 730, 740, 770, 630],[na, na, 820, 570, 500],[na, 770, na, 667, 667],[na, 725, 650, na, 670],[na, 690, 695, 830, na]]
neg_pos_rocs = [[na, na, na, na, na],[-730, na, -770, -725, -690],[-740, -820, na, -650, -695],[-770, -570, -667, na, -830],[-630, -500, -667, -670, na]]
four_t_rocs = [[neg_neg_rocs, neg_pos_rocs],[pos_neg_rocs, pos_pos_rocs]]

# Set up the ContourLength matrix (multiplicative length-change factors).
na = 1  # in the contour tables the "no value" placeholder is a neutral 1
neg_neg_ctls = [[1, 1, 1, 1, 1],[1, 1, 1, 1, 1],[1, 1, 1, 1, 1],[1, 1, 1, 1, 1],[1, 1, 1, 1, 1]]
pos_pos_ctls = [[na, na, na, na, na],[na, 1.45, 1.4, 1.55, 1.42],[na, 1.4, 1.4, 1.5, 1.4],[na, 1.55, 1.5, 1.75, 1.55],[na, 1.42, 1.4, 1.55, 1.45]]
# FIX: entry [3][3] was `nc` (1e5), a leftover from the RoC tables -- a
# contour-length multiplier of 1e5 is nonsense; the documented table marks
# it "-" -> na (1), matching the other diagonals.
pos_neg_ctls = [[na, 1.22, 1.14, 1.3, 1.33],[na, na, 1.16, 1.32, 1.25],[na, 1.24, na, 1.59, 1.50],[na, 1.18, 1.20, na, 1.35],[na, 1.19, 1.23, 1.32, na]]
neg_pos_ctls = [[na, na, na, na, na],[1.22, na, 1.24, 1.18, 1.19],[1.14, 1.16, na, 1.20, 1.23],[1.3, 1.32, 1.59, na, 1.32],[1.33, 1.25, 1.50, 1.35, na]]
four_t_ctls = [[neg_neg_ctls, neg_pos_ctls],[pos_neg_ctls, pos_pos_ctls]]
def loss_on_dig_sim_var_v2(strip, rotation = 20):
    """Loss rewarding digit similarity and variety (lower is better).

    Runs strip.analysis() and reads the best classifier score per digit
    from strip.selected. Each digit's distance from a perfect score of 1 is
    log-transformed, and the sum is scaled by 5000 times the number of
    distinct digits the design can form -- so designs forming more digits
    at higher similarity get a lower (more negative) loss.

    **Parameters**
    strip -- ActuatorStrip-like object; analysis() is invoked here.
    rotation *int* -- number of rotations analysis() searches over.
    """
    strip.analysis(rotation = rotation)
    best_per_digit = strip.selected
    digit_scores = np.zeros(10)
    for digit, record in best_per_digit.items():
        digit_scores[digit] = record["score"]
    # 1.001 (not 1.0) keeps the log finite when a score reaches exactly 1,
    # which would otherwise lock the search onto a single perfect digit.
    digits_formed = np.count_nonzero(digit_scores)
    return np.sum(np.log(1.001 - digit_scores) * 5000 * digits_formed)
def strip_optimizer_gen_alg_v3(rocs, ctls, loss_function, mutation_function, save_filename,\
                               max_segments = 10, min_segment_length = 600,\
                               max_segment_length = 2000, max_fab_steps = 6,\
                               population_size = 20, generation_limit = 2500):
    """Genetic algorithm for optimizing actuator strip designs.

    1. Creates `population_size` random, fabricable initial designs.
    2. Scores every design with `loss_function` (lower is better).
    3. Keeps the best 20% of the population, kills the rest.
    4. Each survivor produces 4 mutated offspring via `mutation_function`;
       survivors compete alongside their offspring in the next generation.
    5. Repeats until `generation_limit` generations have run.

    **Parameters**
    rocs, ctls -- radius-of-curvature and contour-length lookup tables.
    loss_function -- called as loss_function(strip), returns a scalar loss.
    mutation_function -- called as mutation_function(strip, max_segments,
        min_segment_length, max_segment_length, max_fab_steps, rocs, ctls,
        num_actuator_types) and returns a new ActuatorStrip.
    save_filename *str* -- text file that receives the 10 best designs of
        every generation.
    """

    def plot_best_fives(population_loss, strip_population, SAVE = False):
        """Plot the 5 best designs of the current generation (best last)."""
        best5s = np.argsort(population_loss)[:5]
        for best_num in np.flip(best5s):
            best_strip = strip_population[best_num]
            print("Actuator segments are " + str(best_strip.identities))
            print("Segment lengths are " + str(best_strip.segment_lengths))
            print("Loss is " + str(population_loss[best_num]))
            best_strip.plot_input_and_selected(rotation = 20, save = SAVE)

    def save_best_tens(filename, mode, population_loss, strip_population, gen):
        """Append the 10 best designs of generation `gen` to `filename`."""
        with open(filename, mode) as f:
            f.write("Generation {}\n".format(gen))
            best10s = np.argsort(population_loss)[:10]
            for best_num in np.flip(best10s):
                best_strip = strip_population[best_num]
                formed = list(best_strip.selected.keys())
                f.write("Segments: {}\nIdentities: {}\nFormed: {}\n\n".format(str(best_strip.segment_lengths),str(best_strip.identities),str(formed)))

    itr = 0
    num_actuator_types = len(rocs[0][0]) - 1
    # Generate the initial population: uniform random lengths, and random
    # identities re-rolled until the design is fabricable.
    population_lengths = np.random.random(size = (population_size, max_segments)) * (max_segment_length - min_segment_length) + min_segment_length
    population_identities = np.random.randint(0, high = (num_actuator_types + 1), size = (population_size, 2, max_segments))
    for i in range(population_size):
        while fab_steps_strip_requires(population_identities[i]) > max_fab_steps:
            population_identities[i] = np.random.randint(0, high = (num_actuator_types + 1), size = (2, max_segments))
    # FIX: strips were previously built from the module-level globals
    # four_t_rocs / four_t_ctls, silently ignoring the `rocs` and `ctls`
    # arguments passed to this optimizer.
    strip_population = [ActuatorStrip(population_lengths[num], population_identities[num], rocs, ctls) for num in range(population_size)]
    for strip in strip_population:
        strip.generate_curves()
    population_loss = [loss_function(strip) for strip in strip_population]
    for strip in strip_population:
        strip.save_imgs()
    # Plot and persist the best individuals of generation 0.
    plot_best_fives(population_loss, strip_population, SAVE = False)
    save_best_tens(save_filename, "w", population_loss, strip_population, itr)
    while itr < generation_limit:
        itr += 1
        print("Evolution {}".format(itr))
        # Keep the best 20% of the population (kills the other 80%).
        survivors = np.argsort(population_loss)[:int(np.floor(population_size/5))]
        print("Survivors: " + str(survivors))
        print("Survivor loss: " + str([population_loss[i] for i in survivors]))
        # Each survivor spawns 4 mutated offspring.
        new_population = [mutation_function(strip_population[survivor_id], max_segments, min_segment_length,\
                                            max_segment_length, max_fab_steps, rocs, ctls, num_actuator_types)
                          for survivor_id in survivors for _ in range(4)]
        for strip in new_population:
            strip.generate_curves()
        # Survivors carry over and compete alongside their offspring.
        for survivor_id in survivors:
            new_population.append(strip_population[survivor_id])
        new_loss = [loss_function(strip) for strip in new_population]
        for strip in new_population:
            strip.save_imgs()
        # Only save the plots on the final generation.
        plot_best_fives(new_loss, new_population, SAVE = (itr == generation_limit))
        save_best_tens(save_filename, "a+", new_loss, new_population, itr)
        print("delta population loss: %.4f"%(np.sum(new_loss)-np.sum(population_loss)))
        population_loss = new_loss
        strip_population = new_population
# for i in range(5):
#     filename = datetime.datetime.now().strftime("%Y%m%d_%H%M")+"_seg8fabsteps5.txt".format(i)
#     strip_optimizer_gen_alg_v3(four_t_rocs, four_t_ctls,\
#         loss_on_dig_sim_var_v2, sp_mutation_maxfab_v1, filename,\
#         max_segments = 5, min_segment_length = 400,\
#         max_segment_length = 1600, max_fab_steps = 5, \
#         population_size = 20, generation_limit = 50)
# -

# # Converged Results and Useful Functions
# ***

# + colab={} colab_type="code" id="K5em068Ck_7t"
def result_visualizer(result):
    """Rebuild an ActuatorStrip from one text record written by save_best_tens.

    ``result`` is a "Segments: [...]\nIdentities: [[...], [...]]\nFormed: [...]"
    record; the segment lengths and the 2 x n identity matrix are parsed back
    out of it.  The trailing "Formed" list is ignored.
    Relies on notebook globals: np, ActuatorStrip, four_t_rocs, four_t_ctls.
    """
    # Drop up to 6 '[' and ']' pairs, then tokenize on whitespace.
    datalist = result.replace("[","",6).replace("]","",6).split()
    Segments = 0; Identities = 0; sl = [];
    for i in datalist:
        if i == 'Segments:':
            Segments = 1                 # start collecting segment lengths
        elif i == 'Identities:':
            Segments = 0; Identities = 1;
            # identities form a 2-row matrix with one column per segment
            idts = np.zeros(shape = (2, len(sl)), dtype = int)
        elif i == 'Formed:':
            Identities = 0               # stop parsing; rest of record unused
        elif Identities > len(sl):
            # tokens len(sl)+1 .. 2*len(sl) fill the second identity row
            idts[1][Identities-1-len(sl)] = i; Identities += 1
        elif Identities:
            # Identities doubles as a 1-based token counter for the first row
            idts[0][Identities-1] = i; Identities += 1
        if Segments and i != 'Segments:':
            sl.append(float(i))
    s1 = ActuatorStrip(sl, idts, four_t_rocs, four_t_ctls)
    return s1
# -

def ultimate_plotter(teststrip, digit_order, rotate_angle, score_index,\
                     test = False, save = False):
    """Plot the strip's input design (left) and all 16 actuation states (right).

    Parameters
    ----------
    teststrip : ActuatorStrip
        Strip whose curves are generated and rendered.
    digit_order : sequence of 16 ints
        Permutation of the 16 curve indices, one per grid panel.
    rotate_angle : sequence of 16 floats
        Rotation (degrees) applied to each panel's curve before rendering.
    score_index : sequence of 16 ints
        1-based rank from the top of the classifier's sorted scores to display
        (1 = best-scoring class).
    test : bool
        When True, skip drawing the input-design panel.
    save : bool
        When True, save the figure as a timestamped PNG at 600 dpi.

    Uses notebook globals: plt, np, math, matplotlib, datetime, cnn_digit_model.
    """
    teststrip.generate_curves()
    # subplot offsets that lay the 16 panels out as a 4 x 4 grid on the right
    # half of a 4 x 8 subplot grid
    shiftlist = [5,5,5,5,9,9,9,9,13,13,13,13,17,17,17,17]
    statelist = ["ALL OFF", "S1 ON", "S2 ON", "S1 & S2",
                 "S3 ON", "S1 & S3", "S2 & S3", "S1 & S2 & S3",
                 "S4 ON", "S1 & S4", "S2 & S4", "S1 & S2 & S4",
                 "S3 & S4", "S1 & S3 & S4", "S2 & S3 & S4", "ALL ON"]
    fig = plt.figure(figsize = (12, 6))
    ax = plt.subplot(1, 2, 1)
    if not test:
        # schematic of the two-sided strip: one colored rectangle per segment
        fig_width = int(np.sum(teststrip.segment_lengths) * 1.2);
        strip_width = int(fig_width/21);
        shift = int(fig_width*.6)
        cm = plt.cm.get_cmap('tab20')
        ax.imshow(np.ones(shape=(fig_width, fig_width)), cmap = "tab20b")
        for i in range(len(teststrip.segment_lengths)):
            ax.add_patch(matplotlib.patches.Rectangle((fig_width/2-strip_width,strip_width+np.sum(teststrip.segment_lengths[0:i])),strip_width,teststrip.segment_lengths[i], color = cm.colors[teststrip.identities[0][i]]))
            ax.add_patch(matplotlib.patches.Rectangle((fig_width/2,strip_width+np.sum(teststrip.segment_lengths[0:i])),strip_width,teststrip.segment_lengths[i], color = cm.colors[teststrip.identities[1][i]]))
        # legend: one swatch + label per actuator system (Sys0..Sys4)
        ax.add_patch(matplotlib.patches.Rectangle((strip_width, shift), strip_width*3, strip_width, color = cm.colors[0]))
        ax.add_patch(matplotlib.patches.Rectangle((strip_width, strip_width*1.5+shift), strip_width*3, strip_width, color = cm.colors[1]))
        ax.add_patch(matplotlib.patches.Rectangle((strip_width, strip_width*3+shift), strip_width*3, strip_width, color = cm.colors[2]))
        ax.add_patch(matplotlib.patches.Rectangle((strip_width, strip_width*4.5+shift), strip_width*3, strip_width, color = cm.colors[3]))
        ax.add_patch(matplotlib.patches.Rectangle((strip_width, strip_width*6+shift), strip_width*3, strip_width, color = cm.colors[4]))
        ax.text(shift/2.8, strip_width*1+shift, "Sys0", fontsize = 12, color = "white", family = "serif", weight = "bold")
        ax.text(shift/2.8, strip_width*2.5+shift, "Sys1", fontsize = 12, color = "white", family = "serif", weight = "bold")
        ax.text(shift/2.8, strip_width*4+shift, "Sys2", fontsize = 12, color = "white", family = "serif", weight = "bold")
        ax.text(shift/2.8, strip_width*5.5+shift, "Sys3", fontsize = 12, color = "white", family = "serif", weight = "bold")
        ax.text(shift/2.8, strip_width*7+shift, "Sys4", fontsize = 12, color = "white", family = "serif", weight = "bold")
        # annotate each segment with its length in micrometers
        for i in range(len(teststrip.segment_lengths)):
            ax.annotate("%dum"%(teststrip.segment_lengths[i]), xy=(fig_width/2+strip_width,strip_width*1.5+np.sum(teststrip.segment_lengths[0:i])), xytext=(fig_width-strip_width*5, strip_width*1.5+np.sum(teststrip.segment_lengths[0:i])),\
                arrowprops = dict(arrowstyle="-|>", color="white"), fontsize = 12, color = "white", family = "serif", weight = "bold")
        plt.title("Input Design", fontsize = 15, family = "serif", weight = "bold")
        plt.axis(False)
    ctr = 0;
    for i in range(16):
        ax = plt.subplot(4, 8, ctr + shiftlist[ctr])
        curve = teststrip.curves[digit_order[i]];
        curve.rotate(rotate_angle[i]*math.pi/180)
        img = curve.generate_image(filter = 'Gaussian')
        plt.imshow(img)
        plt.title(statelist[digit_order[i]], fontsize = 10, family = "serif", weight = "bold", y = .95)
        if i < 10:
            # highlight the first ten panels (digits 0-9) with a yellow border
            plt.plot(range(28),[0]*28, lw = 4, color = "#ffdf2b")
            plt.plot(range(28),[27]*28, lw = 4, color = "#ffdf2b")
            plt.plot([0]*28,range(28), lw = 4, color = "#ffdf2b")
            plt.plot([27]*28,range(28), lw = 4, color = "#ffdf2b")
        # classifier score: show the score_index[i]-th best class and its score
        scores = cnn_digit_model.predict(img.reshape(1,28,28,1))[0]
        plt.text(img.shape[1]*.05, img.shape[1]*.9, "{}: {:.3f}".format(np.argsort(scores)[-score_index[i]], np.sort(scores)[-score_index[i]]), fontsize = 9, family = "serif", weight = "bold", color = "white")
        plt.axis(False);
        ctr += 1
    fig.suptitle("Design Input and Output Map", fontsize = 15, family = "serif", weight = "bold", y = .95)
    if save:
        plt.savefig(datetime.datetime.now().strftime("%Y%m%d_%H_%M_%S") + "_inandoutput.png", dpi = 600)
    plt.show()

# +
import cv2

def imflatfield(I, sigma):
    """
    Python equivalent imflatfield implementation
    I format must be BGR and type of I must be uint8
    """
    # NOTE(review): docstring says BGR, but the conversions below use
    # COLOR_RGB2HSV / COLOR_HSV2RGB — confirm the intended channel order.
    A = I.astype(np.float32) / 255  # A = im2single(I);
    Ihsv = cv2.cvtColor(A, cv2.COLOR_RGB2HSV)  # Ihsv = rgb2hsv(A);
    A = Ihsv[:, :, 2]  # A = Ihsv(:,:,3);  work on the V (brightness) channel

    filterSize = int(2 * np.ceil(2 * sigma) + 1);  # filterSize = 2*ceil(2*sigma)+1;
    # shading = imgaussfilt(A, sigma, 'Padding', 'symmetric', 'FilterSize', filterSize); % Calculate shading
    shading = cv2.GaussianBlur(A, (filterSize, filterSize), sigma, borderType = cv2.BORDER_REFLECT)

    meanVal = np.mean(A)  # meanVal = mean(A(:),'omitnan')

    #% Limit minimum to 1e-6 instead of testing using isnan and isinf after division.
    shading = np.maximum(shading, 1e-6)  # shading = max(shading, 1e-6);

    # normalize brightness by the low-frequency shading estimate
    B = A * meanVal / shading  # B = A*meanVal./shading;

    #% Put processed V channel back into HSV image, convert to RGB
    Ihsv[:, :, 2] = B  # Ihsv(:,:,3) = B;
    B = cv2.cvtColor(Ihsv, cv2.COLOR_HSV2RGB)  # B = hsv2rgb(Ihsv);

    B = np.round(np.clip(B*255, 0, 255)).astype(np.uint8)  # B = im2uint8(B);
    return B

def image_flat_field(img, sigma = 30):
    """Flat-field an image, then boost local contrast with CLAHE on the L channel.

    Mirrors the MATLAB pipeline imflatfield -> rgb2lab -> adapthisteq -> lab2rgb;
    returns a uint8 image.
    """
    out2 = imflatfield(img, sigma)

    # Convert out2 to float32 before converting to LAB
    out2 = out2.astype(np.float32) / 255  # out2 = im2single(out2);
    shadow_lab = cv2.cvtColor(out2, cv2.COLOR_BGR2Lab)  # shadow_lab = rgb2lab(out2);

    max_luminosity = 100
    L = shadow_lab[:, :, 0] / max_luminosity  # L = shadow_lab(:,:,1)/max_luminosity;

    shadow_adapthisteq = shadow_lab.copy()  # shadow_adapthisteq = shadow_lab;

    # shadow_adapthisteq(:,:,1) = adapthisteq(L)*max_luminosity;
    clahe = cv2.createCLAHE(clipLimit=20, tileGridSize=(8,8))
    # CLAHE in OpenCV does not support float32 (convert to uint16 and back).
    cl1 = clahe.apply((L*(2**16-1)).astype(np.uint16))
    shadow_adapthisteq[:, :, 0] = cl1.astype(np.float32) * max_luminosity / (2**16-1)

    shadow_adapthisteq = cv2.cvtColor(shadow_adapthisteq, cv2.COLOR_Lab2BGR)  # shadow_adapthisteq = lab2rgb(shadow_adapthisteq);

    # Convert shadow_adapthisteq to uint8
    shadow_adapthisteq = np.round(np.clip(shadow_adapthisteq*255, 0, 255)).astype(np.uint8)  # B = im2uint8(B);
    return shadow_adapthisteq
# -

# ## Even Numbers

# +
# Converged even-digit design: segment identities (2 sides x 5 segments) and
# segment lengths in micrometers.
idts = [[4, 1, 4, 4, 4],[2, 2, 3, 2, 2]]
sl = [1653, 1606, 1412, 1769, 1013]
cnn_digit_model = load_model("Deep_Learning_Classifier_v3.h5")
teststrip = ActuatorStrip(sl, idts, four_t_rocs, four_t_ctls)
# -

teststrip.plot_selected_output_map()

teststrip.plot_output_map(score=False)

teststrip.generate_unscaled_imgs(filter_sigma=.25)

# +
# Composite figure: input design (left) + three rows of panels per even digit
# (experiment photos, simulated shapes, classifier evaluation).
fig = plt.figure(figsize = (18, 6))
ax = plt.subplot(1, 4, 1)
fig_width = int(np.sum(teststrip.segment_lengths) * 1.2);
strip_width = int(fig_width/21);
shift = int(fig_width*.6)
cm = plt.cm.get_cmap('tab20')
ax.imshow(np.ones(shape=(fig_width, fig_width)), cmap = "tab20b")
for i in range(len(teststrip.segment_lengths)):
    ax.add_patch(matplotlib.patches.Rectangle((fig_width/2-strip_width,strip_width+np.sum(teststrip.segment_lengths[0:i])),strip_width,teststrip.segment_lengths[i], color = cm.colors[teststrip.identities[0][i]]))
    ax.add_patch(matplotlib.patches.Rectangle((fig_width/2,strip_width+np.sum(teststrip.segment_lengths[0:i])),strip_width,teststrip.segment_lengths[i], color = cm.colors[teststrip.identities[1][i]]))
# legend swatches for actuator systems Sys0..Sys4
ax.add_patch(matplotlib.patches.Rectangle((strip_width, shift), strip_width*3, strip_width, color = cm.colors[0]))
ax.add_patch(matplotlib.patches.Rectangle((strip_width, strip_width*1.5+shift), strip_width*3, strip_width, color = cm.colors[1]))
ax.add_patch(matplotlib.patches.Rectangle((strip_width, strip_width*3+shift), strip_width*3, strip_width, color = cm.colors[2]))
ax.add_patch(matplotlib.patches.Rectangle((strip_width, strip_width*4.5+shift), strip_width*3, strip_width, color = cm.colors[3]))
ax.add_patch(matplotlib.patches.Rectangle((strip_width, strip_width*6+shift), strip_width*3, strip_width, color = cm.colors[4]))
ax.text(shift/2.8, strip_width*1+shift, "Sys0", fontsize = 12, color = "white", family = "serif", weight = "bold")
ax.text(shift/2.8, strip_width*2.5+shift, "Sys1", fontsize = 12, color = "white", family = "serif", weight = "bold")
ax.text(shift/2.8, strip_width*4+shift, "Sys2", fontsize = 12, color = "white", family = "serif", weight = "bold")
ax.text(shift/2.8, strip_width*5.5+shift, "Sys3", fontsize = 12, color = "white", family = "serif", weight = "bold")
ax.text(shift/2.8, strip_width*7+shift, "Sys4", fontsize = 12, color = "white", family = "serif", weight = "bold")
for i in range(len(teststrip.segment_lengths)):
    ax.annotate("%dum"%(teststrip.segment_lengths[i]), xy=(fig_width/2+strip_width,strip_width*1.5+np.sum(teststrip.segment_lengths[0:i])),\
        xytext=(fig_width-strip_width*5.5, strip_width*1.5+np.sum(teststrip.segment_lengths[0:i])),\
        arrowprops = dict(arrowstyle="-|>", color="white"), fontsize = 12, color = "white", family = "serif", weight = "bold")
plt.title("Even Digit Transformer\n Input Design",\
    linespacing = 1.5, fontsize = 15, family = "serif", weight = "bold")
plt.axis(False);
plt.subplots_adjust(wspace = 0.2, hspace = 0.2)
# bottom row: classifier-evaluated images for digits 0,2,4,6,8
for i in range(5):
    ax = plt.subplot(3, 8, 19 + i)
    ax.set_xticks([]); ax.set_yticks([])
    plt.imshow(teststrip.selected[int(i*2)]["img"], cmap = "gray")
    # plt.title("Digit {}".format(int(i*2)),\
    #     y = .96, fontsize = 14, family = "serif", weight = "bold")
    if i == 0:
        plt.ylabel("Model\nEvaluation", fontsize = 12, family = "serif", weight = "bold")
    plt.text(28*.3, 28*.93, "{}: {:.3f}".format(int(i*2), teststrip.selected[int(i*2)]["score"]),\
        fontsize = 12, family = "serif", weight = "bold", color = "white")
# middle row: unscaled simulated strip shapes with a 2 mm scale bar
for i in range(5):
    ax = plt.subplot(3, 8, 11 + i)
    ax.set_xticks([]); ax.set_yticks([])
    num = teststrip.selected[int(i*2)]["num"]
    plt.imshow(teststrip.unscaled_imgs[num], cmap = "gray")
    plt.title("State: {}".format(teststrip.selected[int(i*2)]["state"]),\
        y = .96, fontsize = 13, family = "serif", weight = "bold")
    if i == 0:
        plt.ylabel("Digit\nSimulation", fontsize = 12, family = "serif", weight = "bold")
    ax.add_patch(matplotlib.patches.Rectangle((20, 23), 5.6, 1, color = "white"))
    ax.text(18, 27, "2 mm", fontsize = 10, color = "white", family = "serif", weight = "bold")
    # plt.text(28*.01, 28*.93, "rotate: {:.0f}$^o$".format(360/np.pi/2*teststrip.output_info[num]["rotations"]),\
    #     fontsize = 12, family = "serif", weight = "bold", color = "white")
# top row: experiment photos, contrast-stretched between two percentiles
for i in range(5):
    ax = plt.subplot(3, 8, 3 + i)
    ax.set_xticks([]); ax.set_yticks([])
    img = plt.imread("Exp2/{}.jpg".format(int(i*2)))
    pl = 70
    p50, p98 = np.percentile(img, (pl, 98))
    if p98 == 0 and p50 == 0:
        # fall back to a higher upper percentile for very dark photos
        p50, p98 = np.percentile(img, (pl, 99))
    img = exposure.rescale_intensity(img, in_range = (p50, p98))
    plt.imshow(img)
    if i == 0:
        plt.ylabel("Experiment\nData", fontsize = 14, family = "serif", weight = "bold")
    plt.title("Digit {}".format(int(i*2)),\
        y = .96, fontsize = 14, family = "serif", weight = "bold")
# plt.savefig(datetime.datetime.now().strftime("%Y%m%d_%H_%M_%S") + "fig.png", dpi = 900)
plt.show()
# -

# ## Odd Numbers

# +
# Converged odd-digit design (same layout as the even-digit cell above).
idts = [[1, 2, 1, 3, 1],[2, 4, 2, 2, 2]]
sl = [1898, 1138, 1635, 1069, 1199]
cnn_digit_model = load_model("Deep_Learning_Classifier_v3.h5")
teststrip = ActuatorStrip(sl, idts, four_t_rocs, four_t_ctls)
# -

teststrip.plot_selected_output_map()

teststrip.plot_output_map(score=False)

teststrip.generate_unscaled_imgs(filter_sigma=.2)

# +
# Composite figure for the odd digits 1,3,5,7,9 (mirrors the even-digit cell).
fig = plt.figure(figsize = (18, 6))
ax = plt.subplot(1, 4, 1)
fig_width = int(np.sum(teststrip.segment_lengths) * 1.2);
strip_width = int(fig_width/21);
shift = int(fig_width*.6)
cm = plt.cm.get_cmap('tab20')
ax.imshow(np.ones(shape=(fig_width, fig_width)), cmap = "tab20b")
for i in range(len(teststrip.segment_lengths)):
    ax.add_patch(matplotlib.patches.Rectangle((fig_width/2-strip_width,strip_width+np.sum(teststrip.segment_lengths[0:i])),strip_width,teststrip.segment_lengths[i], color = cm.colors[teststrip.identities[0][i]]))
    ax.add_patch(matplotlib.patches.Rectangle((fig_width/2,strip_width+np.sum(teststrip.segment_lengths[0:i])),strip_width,teststrip.segment_lengths[i], color = cm.colors[teststrip.identities[1][i]]))
ax.add_patch(matplotlib.patches.Rectangle((strip_width, shift), strip_width*3, strip_width, color = cm.colors[0]))
ax.add_patch(matplotlib.patches.Rectangle((strip_width, strip_width*1.5+shift), strip_width*3, strip_width, color = cm.colors[1]))
ax.add_patch(matplotlib.patches.Rectangle((strip_width, strip_width*3+shift), strip_width*3, strip_width, color = cm.colors[2]))
ax.add_patch(matplotlib.patches.Rectangle((strip_width, strip_width*4.5+shift), strip_width*3, strip_width, color = cm.colors[3]))
ax.add_patch(matplotlib.patches.Rectangle((strip_width, strip_width*6+shift), strip_width*3, strip_width, color = cm.colors[4]))
ax.text(shift/2.8, strip_width*1+shift, "Sys0", fontsize = 12, color = "white", family = "serif", weight = "bold")
ax.text(shift/2.8, strip_width*2.5+shift, "Sys1", fontsize = 12, color = "white", family = "serif", weight = "bold")
ax.text(shift/2.8, strip_width*4+shift, "Sys2", fontsize = 12, color = "white", family = "serif", weight = "bold")
ax.text(shift/2.8, strip_width*5.5+shift, "Sys3", fontsize = 12, color = "white", family = "serif", weight = "bold")
ax.text(shift/2.8, strip_width*7+shift, "Sys4", fontsize = 12, color = "white", family = "serif", weight = "bold")
for i in range(len(teststrip.segment_lengths)):
    ax.annotate("%dum"%(teststrip.segment_lengths[i]), xy=(fig_width/2+strip_width,strip_width*1.5+np.sum(teststrip.segment_lengths[0:i])),\
        xytext=(fig_width-strip_width*5.5, strip_width*1.5+np.sum(teststrip.segment_lengths[0:i])),\
        arrowprops = dict(arrowstyle="-|>", color="white"), fontsize = 12, color = "white", family = "serif", weight = "bold")
plt.title("Odd Digit Transformer\n Input Design",\
    linespacing = 1.5, fontsize = 15, family = "serif", weight = "bold")
plt.axis(False);
plt.subplots_adjust(wspace = 0.2, hspace = 0.2)
# bottom row: classifier-evaluated images for digits 1,3,5,7,9
for i in range(5):
    ax = plt.subplot(3, 8, 19 + i)
    ax.set_xticks([]); ax.set_yticks([])
    plt.imshow(teststrip.selected[int(i*2+1)]["img"], cmap = "gray")
    # plt.title("Digit {}".format(int(i*2)),\
    #     y = .96, fontsize = 14, family = "serif", weight = "bold")
    if i == 0:
        plt.ylabel("Model\nEvaluation", fontsize = 12, family = "serif", weight = "bold")
    plt.text(28*.3, 28*.93, "{}: {:.3f}".format(int(i*2+1), teststrip.selected[int(i*2+1)]["score"]),\
        fontsize = 12, family = "serif", weight = "bold", color = "white")
# middle row: unscaled simulated strip shapes with a 2 mm scale bar
for i in range(5):
    ax = plt.subplot(3, 8, 11 + i)
    ax.set_xticks([]); ax.set_yticks([])
    num = teststrip.selected[int(i*2+1)]["num"]
    plt.imshow(teststrip.unscaled_imgs[num], cmap = "gray")
    plt.title("State: {}".format(teststrip.selected[int(i*2+1)]["state"]),\
        y = .96, fontsize = 13, family = "serif", weight = "bold")
    if i == 0:
        plt.ylabel("Digit\nSimulation", fontsize = 12, family = "serif", weight = "bold")
    ax.add_patch(matplotlib.patches.Rectangle((20, 23), 5.6, 1, color = "white"))
    ax.text(18, 27, "2 mm", fontsize = 10, color = "white", family = "serif", weight = "bold")
    # plt.text(28*.01, 28*.93, "rotate: {:.0f}$^o$".format(360/np.pi/2*teststrip.output_info[num]["rotations"]),\
    #     fontsize = 12, family = "serif", weight = "bold", color = "white")
# top row: experiment photos, contrast-stretched between two percentiles
for i in range(5):
    ax = plt.subplot(3, 8, 3 + i)
    ax.set_xticks([]); ax.set_yticks([])
    img = plt.imread("Exp3/{}.jpg".format(int(i*2+1)))
    pl = 70
    p50, p98 = np.percentile(img, (pl, 98))
    if p98 == 0 and p50 == 0:
        p50, p98 = np.percentile(img, (pl, 99))
    img = exposure.rescale_intensity(img, in_range = (p50, p98))
    plt.imshow(img)
    if i == 0:
        plt.ylabel("Experiment\nData", fontsize = 14, family = "serif", weight = "bold")
    plt.title("Digit {}".format(int(i*2+1)),\
        y = .96, fontsize = 14, family = "serif", weight = "bold")
# plt.savefig(datetime.datetime.now().strftime("%Y%m%d_%H_%M_%S") + "fig.png", dpi = 900)
plt.show()
# -

# +
# # Robustness test and local optimizer
# idts = [[2,3,4,0,3,2],[0,1,3,0,2,2]]
# sl = [1500, 1690, 1330, 1230, 1400, 1320]
# cnn_digit_model = load_model("DigitClassifier_20201014_mod3.h5")
# val = loss_on_dig_sim_var_v2(ActuatorStrip(sl, idts, four_t_rocs, four_t_ctls))
# population = 50
# sl_pop = np.random.normal(sl, scale = 200, size = (population, 6))
# lo_pop = [loss_on_dig_sim_var_v2(ActuatorStrip(i, idts, four_t_rocs, four_t_ctls)) for i in sl_pop]
# fig = plt.figure(figsize = (6, 6))
# plt.title("Loss Population", fontsize = 15, family = "serif", weight = "bold")
# plt.hist(lo_pop)
# plt.axvline(val, color = "k", ls = "--")
# plt.xticks(family = "serif", fontsize = 13, weight = 'bold')
# plt.yticks(family = "serif", fontsize = 13, weight = 'bold')
# plt.show()
# -

# ## 6 Segmenters

# +
# perfect
# Converged 6-segment design rendered with ultimate_plotter; per-panel digit
# assignment, rotation (degrees), and score rank are hand-tuned below.
idts = [[2,3,4,0,3,2],[0,1,3,0,2,2]]
sl = [1330, 1780, 1520, 1090, 1450, 1020]
cnn_digit_model = load_model("DigitClassifier_20201014_mod3.h5")
teststrip = ActuatorStrip(sl, idts, four_t_rocs, four_t_ctls)
# teststrip.plot_output_map(score = False, save = False)
#               0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15
digit_order = [12,  0,  5,  4,  6, 10,  1,  8,  3,  7, 15,  9, 13, 11, 14,  2]
rotate_angle = [ 0,  0,-30,140,190,-80, 90,180, 50,280,  0,  0,200,  0,180,180]
score_index = [ 1,  1,  1,  1,  2,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1]
ultimate_plotter(teststrip, digit_order, rotate_angle, score_index,\
    test = False, save = False)
# -
Archives/20210505_StripDesign_Optimizer_v10.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Vacancy in a Silicon Cluster - Python version # # + import sys sys.path.insert(0, '../temp') from julip import JulipCalculator, JulipOptimizer from ase.build import bulk # - # reference energy # energy per unit volume of a homogeneous silicon crystal at = bulk("Si") calc = JulipCalculator("JuLIP.Potentials.StillingerWeber()") at.set_calculator(calc) Eref = at.get_potential_energy() Eref # + # cluster with vacancy at = bulk("Si", cubic=True) * (4, 4, 4) del at[len(at)/2] at.set_calculator(calc) # energy before relaxing E0 = at.get_potential_energy() at0 = at.copy() E0 # - opt = JulipOptimizer(at) results = opt.run(fmax=1e-6) E1 = at.get_potential_energy() results # defect formation energy print "Vacancy Formation Energy without relaxing: ", E0 - (len(at)+1)*Eref/2 print "Vacancy Formation Energy with relaxing: ", E1 - (len(at)+1)*Eref/2 # geometry optimisation from ase.optimize import LBFGSLineSearch at = at0.copy() at.set_calculator(calc) opt = LBFGSLineSearch(at) opt.run(1e-3) E1 = at.get_potential_energy() # defect formation energy print "Vacancy Formation Energy without relaxing: ", E0 - (len(at)+1)*Eref/2 print "Vacancy Formation Energy with relaxing: ", E1 - (len(at)+1)*Eref/2
examples/BulkSilicon-Python.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] colab_type="text" id="view-in-github"
# <a href="https://colab.research.google.com/github/RMDircio/SHL_StudyGuides/blob/main/Sprint2_Kaggle_Challenge/DS22_Unit2_Sprint2_Study_Guide.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="je4zZu-ebOEy"
# # Use this as a study guide to fill in gaps in your knowledge about Linear Models. You will not have to define words in the Sprint Challenge.
#
# After you can do all the steps below, then look over the word banks and answer accordingly. Hope this helps :D

# + [markdown] id="q9MS7tFvbRKY"
# 1. Can I import a CSV file into a DataFrame?
# 2. Can I **engineer** a new feature for a dataset?
# 3. Can I split a DataFrame into a **feature matrix** and **target vector**?
# 4. Can I split a dataset into a **training**, **validation**, and **test set**?
# 5. Can I establish the baseline **accuracy** for a **classification** problem?
# 6. Can I combine **transformers** with a **predictor** using a **pipeline**?
# 7. Can I build a **decision tree** model and a **random forest** model?
# 8. Can I evaluate a model using common metrics like accuracy, **precision**, and **recall**?
# 9. Can I use a **grid search** or a **randomized search** to tune the **hyperparameters** of a model?
# 10. If given a confusion matrix for a model, can I calculate its precision and recall?
# 11. Can I create a confusion matrix for a model I've built?
# + [markdown] id="OunwwT_b5Bdl" # # Decision Trees # # [StatQuest Video Index](https://statquest.org/video-index/) # + colab={"base_uri": "https://localhost:8080/", "height": 321} id="viQ_XOhD-LAC" outputId="063b7ed5-1636-401f-a74e-b531aa295a95" from IPython.display import YouTubeVideo YouTubeVideo('7VeUPuFGJHk') # + colab={"base_uri": "https://localhost:8080/", "height": 321} id="gnGezqbnEklO" outputId="28d7ca96-6267-4df5-ae49-04b43666a1e8" from IPython.display import YouTubeVideo YouTubeVideo('wpNl-JwwplA') # + [markdown] id="P2PZJ0cV5Qx_" # Words to Know from Decision Trees 1 Lecture/Assignment: # <br>_Make sure to cite your sources for each word_ # # `Outliers` - <font color="darkcyan"> _Your words here_ # # <br>`Missing Values` - <font color="darkcyan"> _Your words here_ # # <br>`Decision Tree` - <font color="darkcyan"> _Your words here_ # # <br>`DecisionTreeClassifier()` - <font color="darkcyan"> _Your words here_ # # <br>`Feature Importance` - <font color="darkcyan"> _Your words here_ # # <br>`Non-Linear Relationships` - <font color="darkcyan"> _Your words here_ # + [markdown] id="fMAHfsFIAGOT" # # Random Forests # + colab={"base_uri": "https://localhost:8080/", "height": 321} id="xVVUPtepdfQe" outputId="da4f11c1-1aec-4fe9-b37c-c9aba009c49b" from IPython.display import YouTubeVideo YouTubeVideo('J4Wdy0Wc_xQ') # + colab={"base_uri": "https://localhost:8080/", "height": 321} id="EM-4fXtUdsm9" outputId="bcd2b489-c0cf-4538-9a3b-cd73244ee47c" from IPython.display import YouTubeVideo YouTubeVideo('sQ870aTKqiM') # + [markdown] id="wOIMsjvDASqB" # Words to Know from Random Forests Lecture/Assignment: # # <br>_Make sure to cite your sources for each word_ # <br> # # `RandomForestClassifier()` - <font color="darkcyan"> _Your words here_ # # <br>`Ordinal Encoding` - <font color="darkcyan"> _Your words here_ # # <br>`Overfitting` - <font color="darkcyan"> _Your words here_ # # <br>`High Cardinality` - <font color="darkcyan"> _Your words here_ # # <br>`Validation Sets` 
- <font color="darkcyan"> _Your words here_ # # <br>`Test Sets` - <font color="darkcyan"> _Your words here_ # # <br>`Accuracy Score` - <font color="darkcyan"> _Your words here_ # # <br>`Hyperparameter Tuning` - <font color="darkcyan"> _Your words here_ # # <br>If something is `Trained in Parallel`, what does that mean? - <font color="darkcyan"> _Your words here_ # # <br>How does `Categorical Encoding` affect our tree models differenly from our linear models? - <font color="darkcyan"> _Your words here_ # + [markdown] id="QZdhMPWSI4mC" # # Cross Validation # + colab={"base_uri": "https://localhost:8080/", "height": 321} id="7RKEBe2lcqU_" outputId="1c5f4f38-3649-4aab-f937-3f76f2988e44" from IPython.display import YouTubeVideo YouTubeVideo('fSytzGwwBVw') # + [markdown] id="rlTIg31qJA-h" # Words to Know from Cross Validation Lecture/Assignment: # # <br>_Make sure to cite your sources for each word_ # <br> # # `k-fold Cross Validation` - <font color="darkcyan"> _Your words here_ # # <br>`Cross_val_score` - <font color="darkcyan"> _Your words here_ # # <br>What does it mean if the `Cross Validation scores` are similar to each other? - <font color="darkcyan"> _Your words here_ # # <br>What does it mean if the `Cross Validation scores` are `NOT` similar to each other? - <font color="darkcyan"> _Your words here_ # # <br>`Overfitting` - <font color="darkcyan"> _Your words here_ # # <br>`Folds` - <font color="darkcyan"> _Your words here_ # # <br>`Iterations` - <font color="darkcyan"> _Your words here_ # # <br>What is `k`? - <font color="darkcyan"> _Your words here_ # # <br>`GridSearchCV()` - <font color="darkcyan"> _Your words here_ # # <br> What is the `param_grid` in `GridSearchCV` for? 
- <font color="darkcyan"> _Your words here_ # # <br> How do you get the `Best Accuracy Score` for `GridSearchCV`?- <font color="darkcyan"> _Your words here_ # # <br> How do you get the `Best Parameters` from `GridSearchCV`?- <font color="darkcyan"> _Your words here_ # # <br> How do you get the `Best Estimators` from `GridSearchCV`?- <font color="darkcyan"> _Your words here_ # # <br> How do you get the `Predictions` from `GridSearchCV`?- <font color="darkcyan"> _Your words here_ # # <br> `RandomSearchCV()` <font color="darkcyan"> _Your words here_ # # <br> What is `param_distrubution` in `RandomSearchCV` for?- <font color="darkcyan"> _Your words here_ # # <br> How do you get the `Best Parameters` from `RandomSearchCV`?- <font color="darkcyan"> _Your words here_ # # <br> How do you get the `Best Accuracy Score` from `RandomSearchCV`?- <font color="darkcyan"> _Your words here_ # + [markdown] id="2RRfdQUERhfj" # # Classification Metrics # + colab={"base_uri": "https://localhost:8080/", "height": 321} id="rec0xXaBc9AW" outputId="04106238-6212-40cd-fcdd-f6494e00b2bc" from IPython.display import YouTubeVideo YouTubeVideo('Kdsp6soqA7o') # + colab={"base_uri": "https://localhost:8080/", "height": 321} id="31Bb4-sNdJVy" outputId="703f4997-281b-44fb-83a1-0d18de36d6ca" from IPython.display import YouTubeVideo YouTubeVideo('4jRBRDbJemM') # + [markdown] id="_q6Q6yhiSAg9" # Words to Know from Classification Metrics Lecture/Assignment: # # <br>_Make sure to cite your sources for each word_ # <br> # # `Confusion Matrix` - <font color="darkcyan"> _Your words here_ # # <br>`Precision` and `Precision Score` - <font color="darkcyan"> _Your words here_ # # <br>`Recall` and `Recall Score` - <font color="darkcyan"> _Your words here_ # # <br>`Thresholds` - <font color="darkcyan"> _Your words here_ # # <br>`Predicted probabilities` - <font color="darkcyan"> _Your words here_ # # <br>What parameters are needed when you use `plot_confusion_matrix()`? 
- <font color="darkcyan"> _Your words here_
#
# <br>`True Positives` - <font color="darkcyan"> _Your words here_
#
# <br>`True Negatives` - <font color="darkcyan"> _Your words here_
#
# <br>`False Positives` - <font color="darkcyan"> _Your words here_
#
# <br>`False Negatives` - <font color="darkcyan"> _Your words here_
#
# <br>Can you use `Classification Metrics` on `Linear Models`? Why or why not? - <font color="darkcyan"> _Your words here_

# + [markdown] id="i9To7CrM-W20"
# # Imports to know:
#
#

# + [markdown] id="GDE2fPR8evk0"
# When would you use each import and what does the import statement look like in code?
#
# <br>`pandas` - <font color="darkcyan"> _Your words here_
#
# <br>`Matplotlib` - <font color="darkcyan"> _Your words here_
#
# <br>`Numpy` - <font color="darkcyan"> _Your words here_
#
# <br>`Scikit-Learn` - <font color="darkcyan"> _Your words here_
#
# <br>`sklearn` - <font color="darkcyan"> _Your words here_
#
# <br>`category_encoders` - <font color="darkcyan"> _Your words here_
#
# <br>`make_pipeline` - <font color="darkcyan"> _Your words here_
#
# <br>`train_test_split` - <font color="darkcyan"> _Your words here_
#
# <br>`LogisticRegression` - <font color="darkcyan"> _Your words here_
#
# <br>`OneHotEncoder` - <font color="darkcyan"> _Your words here_
#
# <br>`SimpleImputer` - <font color="darkcyan"> _Your words here_
#
# <br>`StandardScaler` - <font color="darkcyan"> _Your words here_
#
# <br>`accuracy_score` - <font color="darkcyan"> _Your words here_
#
# <br>`FunctionTransformer` - <font color="darkcyan"> _Your words here_
#
# <br>`RandomForestClassifier` - <font color="darkcyan"> _Your words here_
#
# <br>`GridSearchCV` - <font color="darkcyan"> _Your words here_
#
# <br>`RandomizedSearchCV` - <font color="darkcyan"> _Your words here_
#
# <br>`OrdinalEncoder` - <font color="darkcyan"> _Your words here_
#
# <br>`OneHotEncoder` - <font color="darkcyan"> _Your words here_
#
# <br>`cross_val_score` - <font color="darkcyan"> _Your words here_
#
#
<br>`RandomForestRegressor` - <font color="darkcyan"> _Your words here_ # # <br>`SelectKBest` - <font color="darkcyan"> _Your words here_ # # <br>`LinearRegression` - <font color="darkcyan"> _Your words here_ # # <br>`mean_absolute_error` - <font color="darkcyan"> _Your words here_ # # <br>`DecisionTreeClassifier ` - <font color="darkcyan"> _Your words here_ # -
Sprint2_Kaggle_Challenge/DS22_Unit2_Sprint2_Study_Guide.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# Deep neural network for MNIST digit classification.
#
# Pipeline: load MNIST via tensorflow_datasets, scale pixels to [0, 1],
# carve a 10% validation split out of the training set, shuffle and batch,
# train a five-hidden-layer feed-forward network, then evaluate on the
# held-out test set.

import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds

# +
# with_info=True returns a tuple containing metadata about the version,
# features and number of samples, stored in mnist_info.
# as_supervised=True loads the dataset as (input, target) 2-tuples.
mnist_dataset, mnist_info = tfds.load(name='mnist', with_info=True, as_supervised=True)

# Extract the training and testing datasets with the built references.
mnist_train, mnist_test = mnist_dataset['train'], mnist_dataset['test']

# Use 10% of the training samples for validation; cast to an integer
# tensor because .take()/.skip() below need an integer count.
num_validation_samples = 0.1 * mnist_info.splits['train'].num_examples
num_validation_samples = tf.cast(num_validation_samples, tf.int64)

# Number of test samples, also as an integer tensor.
num_test_samples = mnist_info.splits['test'].num_examples
num_test_samples = tf.cast(num_test_samples, tf.int64)


def scale(image, label):
    """Scale one MNIST image from uint8 [0, 255] to float32 [0, 1].

    The label is passed through unchanged. The input must be cast to float
    because the raw pixel values are 0..255 (256 shades of grey).
    """
    image = tf.cast(image, tf.float32)
    image /= 255.  # now in [0, 1]
    return image, label


# .map() applies the scaling transformation element-wise to both splits so
# all inputs share the same magnitude. The test split is not shuffled
# because the model is never trained on it, and it is consumed as a single
# batch to hasten evaluation.
scaled_train_and_validation_data = mnist_train.map(scale)
test_data = mnist_test.map(scale)

# The whole dataset cannot be shuffled in one go because it does not fit in
# memory, so shuffle through a fixed-size buffer instead.
BUFFER_SIZE = 10000
shuffled_train_and_validation_data = scaled_train_and_validation_data.shuffle(BUFFER_SIZE)

# Validation data = the first 10% of the shuffled training samples...
validation_data = shuffled_train_and_validation_data.take(num_validation_samples)
# ...and the training data is everything else, so skip as many samples as
# the validation dataset holds.
train_data = shuffled_train_and_validation_data.skip(num_validation_samples)

# Mini-batch size used for training; validation and test are evaluated in a
# single batch each.
BATCH_SIZE = 150
train_data = train_data.batch(BATCH_SIZE)
validation_data = validation_data.batch(num_validation_samples)
test_data = test_data.batch(num_test_samples)

# Materialise the single validation batch as (inputs, targets) tensors so
# they can be handed to model.fit() directly.
validation_inputs, validation_targets = next(iter(validation_data))

# +
input_size = 784   # 28 x 28 x 1 pixels, flattened
output_size = 10   # digits 0-9
# Use the same width for every hidden layer. Not a necessity.
hidden_layer_size = 2500

# Each observation is 28x28x1 pixels, i.e. a rank-3 tensor; the convenient
# 'Flatten' layer orders it into a (28*28*1,) = (784,) vector so the rest of
# the network is a plain feed-forward stack.
model = tf.keras.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28, 1)),             # input layer
    # Each Dense layer outputs activation(dot(input, weight) + bias).
    tf.keras.layers.Dense(hidden_layer_size, activation='relu'),  # 1st hidden layer
    tf.keras.layers.Dense(hidden_layer_size, activation='relu'),  # 2nd hidden layer
    tf.keras.layers.Dense(hidden_layer_size, activation='relu'),  # 3rd hidden layer
    tf.keras.layers.Dense(hidden_layer_size, activation='relu'),  # 4th hidden layer
    tf.keras.layers.Dense(hidden_layer_size, activation='relu'),  # 5th hidden layer
    # The final layer is no different, except it is activated with softmax
    # to produce a probability distribution over the 10 classes.
    tf.keras.layers.Dense(output_size, activation='softmax')      # output layer
])
# -

# sparse_categorical_crossentropy works directly with the integer labels
# that tfds provides, so no one-hot encoding is needed.
custom_optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
model.compile(optimizer=custom_optimizer, loss='sparse_categorical_crossentropy', metrics=['accuracy'])

NUM_EPOCHS = 10

# FIX: this training call was commented out, so model.evaluate() below
# measured a freshly-initialised (untrained) network. Train first.
model.fit(train_data, epochs=NUM_EPOCHS, validation_data=(validation_inputs, validation_targets), validation_steps=10, verbose=2)

test_loss, test_accuracy = model.evaluate(test_data)
print('Test loss: {0:.2f}. Test accuracy: {1:.2f}%'.format(test_loss, test_accuracy*100.))  # Better formatting
Deep Neural Network for MNIST Classification.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Batch feature extraction: compute log mel filter-bank features for every
# .flac fragment in a dataset directory and write each result out both as a
# .png preview image and as a compressed .npz array.

# +
import glob
import os
import time

import imageio
import numpy as np
import soundfile as sf

from constants import *
import common
# -


def generate_fb_and_mfcc(signal, sample_rate):
    # Compute log mel filter-bank features (in dB) for a mono signal.
    # Returns an array of shape (num_frames, nfilt) with nfilt = 40.
    # The MFCC/DCT step at the bottom is deliberately commented out, so
    # only the filter banks are returned (hence the "fb" in the name).

    # Pre-Emphasis: boost high frequencies, y[t] = x[t] - 0.97 * x[t-1].
    pre_emphasis = 0.97
    emphasized_signal = np.append(
        signal[0], signal[1:] - pre_emphasis * signal[:-1])

    # Framing: 25 ms windows with a 10 ms hop.
    frame_size = 0.025
    frame_stride = 0.01

    # Convert from seconds to samples
    frame_length, frame_step = (
        frame_size * sample_rate,
        frame_stride * sample_rate)
    signal_length = len(emphasized_signal)
    frame_length = int(round(frame_length))
    frame_step = int(round(frame_step))

    # Make sure that we have at least 1 frame
    num_frames = int(
        np.ceil(float(np.abs(signal_length - frame_length)) / frame_step))

    pad_signal_length = num_frames * frame_step + frame_length
    z = np.zeros((pad_signal_length - signal_length))
    # Pad Signal to make sure that all frames have equal
    # number of samples without truncating any samples
    # from the original signal
    pad_signal = np.append(emphasized_signal, z)

    # Build a (num_frames, frame_length) index matrix: row i selects the
    # samples of frame i from the padded signal.
    indices = (
        np.tile(np.arange(0, frame_length), (num_frames, 1)) +
        np.tile(
            np.arange(0, num_frames * frame_step, frame_step),
            (frame_length, 1)
        ).T
    )
    frames = pad_signal[indices.astype(np.int32, copy=False)]

    # Window: apply a Hamming window to every frame (broadcast multiply).
    frames *= np.hamming(frame_length)

    # Fourier-Transform and Power Spectrum
    NFFT = 512

    # Magnitude of the FFT
    mag_frames = np.absolute(np.fft.rfft(frames, NFFT))

    # Power Spectrum
    pow_frames = ((1.0 / NFFT) * ((mag_frames) ** 2))

    # Filter Banks: 40 triangular filters spaced evenly on the mel scale.
    nfilt = 40
    low_freq_mel = 0

    # Convert Hz to Mel
    high_freq_mel = (2595 * np.log10(1 + (sample_rate / 2) / 700))

    # Equally spaced in Mel scale
    mel_points = np.linspace(low_freq_mel, high_freq_mel, nfilt + 2)

    # Convert Mel to Hz
    hz_points = (700 * (10**(mel_points / 2595) - 1))

    # FFT bin index of each filter edge.
    # NOTE(review): 'bin' shadows the Python builtin of the same name;
    # harmless here but worth renaming on the next functional change.
    bin = np.floor((NFFT + 1) * hz_points / sample_rate)

    fbank = np.zeros((nfilt, int(np.floor(NFFT / 2 + 1))))
    for m in range(1, nfilt + 1):
        f_m_minus = int(bin[m - 1])  # left
        f_m = int(bin[m])            # center
        f_m_plus = int(bin[m + 1])   # right
        # Rising and falling slopes of the m-th triangular filter.
        for k in range(f_m_minus, f_m):
            fbank[m - 1, k] = (k - bin[m - 1]) / (bin[m] - bin[m - 1])
        for k in range(f_m, f_m_plus):
            fbank[m - 1, k] = (bin[m + 1] - k) / (bin[m + 1] - bin[m])
    filter_banks = np.dot(pow_frames, fbank.T)

    # Numerical Stability: avoid log10(0) by replacing exact zeros with eps.
    filter_banks = np.where(
        filter_banks == 0, np.finfo(float).eps, filter_banks)

    # dB
    filter_banks = 20 * np.log10(filter_banks)

    # MFCCs
    # num_ceps = 12
    # cep_lifter = 22
    # ### Keep 2-13
    # mfcc = dct(
    #     filter_banks,
    #     type=2,
    #     axis=1,
    #     norm='ortho'
    # )[:, 1 : (num_ceps + 1)]
    # (nframes, ncoeff) = mfcc.shape
    # n = np.arange(ncoeff)
    # lift = 1 + (cep_lifter / 2) * np.sin(np.pi * n / cep_lifter)
    # mfcc *= lift

    return filter_banks


# +
def process_audio(input_dir, debug=False):
    # Extract filter-bank features for every .flac file in input_dir and
    # save them next to the source file as <name>.png and <name>.fb.npz.
    # In debug mode a single hard-coded fragment is processed, timed, and
    # the run exits after the first file.
    files = []
    extensions = ['*.flac']
    for extension in extensions:
        files.extend(glob.glob(os.path.join(input_dir, extension)))

    for file in files:
        if debug:
            file = ('build/test/'
                    'de_f_63f5b79c76cf5a1a4bbd1c40f54b166e.fragment1.flac')
        start = time.time()
        print(file)

        signal, sample_rate = sf.read(file)
        assert len(signal) > 0
        assert sample_rate == 22050  # the dataset is expected at 22.05 kHz

        fb = generate_fb_and_mfcc(signal, sample_rate)
        fb = fb.astype(DATA_TYPE, copy=False)
        # DATA_TYPE / WIDTH / FB_HEIGHT come from the project-local
        # 'constants' module (star-imported above) — verify there for the
        # exact expected dtype and shape.
        assert fb.dtype == DATA_TYPE
        assert fb.shape == (WIDTH, FB_HEIGHT)

        # .npz extension is added automatically
        file_without_ext = os.path.splitext(file)[0]

        # info = np.finfo(fb.dtype) # Get the information of the incoming image type
        # fbb = fb.astype(DATA_TYPE) / info.max # normalize the data to 0 - 1
        # fbb = 255 * fbb # Now scale by 255
        # img = fbb.astype(np.uint8)

        imageio.imwrite(file_without_ext + '.png', fb)
        np.savez_compressed(file_without_ext + '.fb', data=fb)

        if debug:
            end = time.time()
            print("It took [s]: ", end - start)
            # data is casted to uint8, i.e. (0, 255)
            imageio.imwrite('fb_image.png', fb)
            # NOTE(review): bare exit() is the site-builtin, not sys.exit();
            # fine in a notebook, fragile in a frozen script.
            exit(0)


# +
if __name__ == "__main__":
    import argparse
    #
    # parser = OptionParser()
    # parser = argparse.ArgumentParser(
    #     description='Generate various features from audio samples.')
    # parser.add_argument('--debug', dest='debug', action='store_true')
    # parser.set_defaults(debug=False)
    # args = parser.parse_args()

    # The argparse block above was replaced by a hard-coded dict; flip
    # "debug" to True to process the single test fragment instead.
    args = {"description": 'Generate various features from audio samples.',
            "debug": False}
    if args.get('debug'):
        process_audio(os.path.join(common.DATASET_DIST, 'train'), debug=True)
    else:
        process_audio(os.path.join(common.DATASET_DIST, 'test'))
        process_audio(os.path.join(common.DATASET_DIST, 'train'))
# -
generate_filter_banks.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Week 12 MCD Technical Factor Test
#
# Walk-forward classification study: predict the n-day-ahead return
# direction (neg/neu/pos terciles) of MCD from quintile-ranked technical
# factors, re-training a classifier for every test date over a rolling
# history window, with and without a news-sentiment factor.

# ### **Import Libraries**

# +
# import needed libraries
import numpy as np
from numpy import loadtxt
from pandas_datareader import data, wb
import datetime
import holidays
from dateutil.relativedelta import relativedelta
import pandas as pd
pd.options.mode.chained_assignment = None
import matplotlib.pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg
from matplotlib.figure import Figure
import matplotlib.dates as mdates
import seaborn as sns
import itertools
from itertools import chain
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from sklearn.model_selection import TimeSeriesSplit
from sklearn.feature_selection import SelectFromModel
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn.feature_selection import VarianceThreshold
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
from xgboost import XGBClassifier
from xgboost import plot_importance
import warnings
warnings.filterwarnings("ignore", category=FutureWarning)
# -

# ### **Data Preprocessing**


# +
def data_preparation(path):
    # Load the factor CSV, drop identifier columns, index by date, and
    # return a float DataFrame with any NaN-containing columns removed.
    factor_df = pd.read_csv(path)

    # remove non-rank columns
    columns = factor_df.columns.drop(['End of Month', 'PERMNO', 'GVKEY', 'Point-in-time CUSIP', 'Point-in-time Ticker', 'Point-in-time Name', 'Last Known CUSIP', 'Last Known Ticker', 'Last Known Name'])
    factor_df = factor_df[columns]

    # Convert CalendarDate into datatime format
    factor_df['Date'] = pd.to_datetime(factor_df['Calendar Date'], format='%Y%m%d')
    factor_df = factor_df.drop(['Calendar Date'], axis=1)

    # create a new dataframe with 'CalendarDate' column as index
    factor_df = factor_df.set_index('Date')

    # handling missing data: '.' is the source's missing-value marker.
    # NOTE(review): pd.np was removed in pandas 1.0 — use np.nan directly
    # when this is next touched.
    factor_df[factor_df=='.'] = pd.np.nan
    factor_df = factor_df.astype(float)

    # remove columns contains nan
    factor_df = factor_df.dropna(axis=1, how='any')
    return factor_df


MCD_tech_factor_df = data_preparation('tech_factor_data/MCD_tech_inputs_daily_quintile_ranks_s2000.csv')
MCD_tech_factor_df.head()
# -

MCD_tech_factor_df.columns

# ### **Variance Test for Technical Factors**

var_thres = 0.2


def variance_feature_selection(factor_df, var_thres):
    # Keep only factors whose variance exceeds var_thres; prints the full
    # factor/variance table for inspection before returning the subset.
    selector = VarianceThreshold(var_thres)
    fit = selector.fit_transform(factor_df)
    dfvariances = pd.DataFrame(selector.variances_)
    dfcolumns = pd.DataFrame(factor_df.columns)
    #concat two dataframes for better visualization
    featureVariances = pd.concat([dfcolumns,dfvariances],axis=1)
    featureVariances.columns = ['Factor','Variance']  #naming the dataframe columns
    print(featureVariances.nlargest(34,'Variance'))  #print 10 best features
    return factor_df[factor_df.columns[selector.get_support(indices=True)]]


# ### **Technical Factor Selection**

var_MCD_tech_factor_df = variance_feature_selection(MCD_tech_factor_df, var_thres)

# +
sns.set_context('notebook', font_scale=2)


def plot_one_column(df, column_name):
    # Line plot of a single factor column against the date index.
    plt.style.use('ggplot')
    fig, gx = plt.subplots(figsize = (40,10))
    gx.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m'))
    gx.plot(df.index, df[column_name], color='blue', label=column_name)
    gx.set_xlabel('Dates',size=40, fontsize=40)
    # gx.set_ylabel('Quintile Rank', fontsize=20)
    gx.set_title(column_name, size=40)
    # other parameters
    gx.grid(True)
# -

# ### **Function to Generate real-time labels**

# +
ONE_DAY = datetime.timedelta(days=1)
HOLIDAYS_US = holidays.US()


# function to return the date of n bussiness days before the test date
def n_business_day_before(test_date, n):
    last_day = test_date
    for _ in range(n):
        last_day = last_day - ONE_DAY
        # step back further over weekends and US holidays
        while last_day.weekday() in holidays.WEEKEND or last_day in HOLIDAYS_US:
            last_day -= ONE_DAY
    return last_day


def up_down_threshold(df, train_start_date, test_date, pred_day):
    '''
    df: dataframe contains ReturnNDays column

    Returns the (down, up) return tercile boundaries computed over the
    training window [train_start_date, test_date - pred_day business days],
    so the label thresholds never peek at the test date itself.
    '''
    train_end_date = n_business_day_before(test_date, pred_day)
    dataset_mask = (df.index>=train_start_date) & (df.index<=train_end_date)
    return_list = df['Return{}Days'.format(pred_day)].loc[dataset_mask].tolist()
    sorted_return_list = sorted(return_list, key=float)
    # print(sorted_return_list)
    down_thres_index = int(len(sorted_return_list)/3 + 1)
    up_thres_index = down_thres_index * 2
    down_thres, up_thres = sorted_return_list[down_thres_index], sorted_return_list[up_thres_index]
    return down_thres, up_thres


def generate_NDayReturn(factor_df, ticker_name, start_date, end_date, pred_day):
    # Join the factor frame with Yahoo adjusted-close prices and append a
    # forward pred_day-return column ('Return{pred_day}Days').
    price_df = data.DataReader(ticker_name, 'yahoo', start_date, end_date)
    price_df = price_df[['Adj Close']]
    df = pd.merge(factor_df, price_df, on='Date')
    # obtain n day return percent in the future
    df['Return{}Days'.format(pred_day)] = df['Adj Close'].pct_change(pred_day).shift(-pred_day)
    # remove the last n days NaN returns
    df = df[:-pred_day]
    # remove 'Adj Close' columns
    columns = df.columns.drop(['Adj Close'])
    df = df[columns]
    return df


# function to generate dir labels real time based on the shifting input training window balance
def generate_labels_real_time(df, pred_day, train_start, test_date):
    # calculate training set up/down thresholds
    down_thres, up_thres = up_down_threshold(df, train_start, test_date, pred_day)

    # Obtain 'DirNDays' column based on thresholds
    df['Dir{}Days'.format(pred_day)] = 'neu'
    df.loc[df['Return{}Days'.format(pred_day)]>up_thres, 'Dir{}Days'.format(pred_day)] = 'pos'
    df.loc[df['Return{}Days'.format(pred_day)]<down_thres, 'Dir{}Days'.format(pred_day)] = 'neg'

    # remove 'Return5Days' and 'Adj Close' columns
    columns = df.columns.drop(['Return{}Days'.format(pred_day)])
    df = df[columns]

    #Creating the dependent variable class
    # 'neg': 0, 'neu': 1, 'pos': 2
    le = LabelEncoder()
    df['Dir{}Days'.format(pred_day)] = le.fit_transform(df['Dir{}Days'.format(pred_day)])
    df = df.astype(float)
    df = df.astype({'Dir{}Days'.format(pred_day): int})
    return df
# -

# ### **Function for Model Building**


# +
def model_testing(df, test_date_start, history_window, pred_day, model_name):
    # Walk-forward test: for every date from test_date_start onward, relabel
    # and retrain on the trailing history_window, predict that single date.
    # Returns (y_true, y_pred, test_dates).
    test_dates = df.loc[test_date_start:].index.tolist()
    y_true = []
    y_pred = []

    # select ml model
    if model_name == 'LR':
        model = LogisticRegression()
        # model = LogisticRegression(penalty='l1', C=10, solver='liblinear')
    elif model_name == 'NB':
        model = GaussianNB()
    elif model_name == 'RF':
        model = RandomForestClassifier(n_estimators = 8, criterion = 'entropy', random_state = 42)
    elif model_name == 'KNN':
        model = KNeighborsClassifier(n_neighbors=12)
    elif model_name == 'AdaBoost':
        model = AdaBoostClassifier(DecisionTreeClassifier(max_depth=10), n_estimators=200,algorithm="SAMME.R", learning_rate=0.1, random_state=0)

    for test_date in test_dates:
        # create training and testing set
        train_start = test_date - history_window
        # print('test_date: {t} \t train_end: {e}'.format(t=test_date, e=train_end))
        real_time_df = generate_labels_real_time(df, pred_day, train_start, test_date)
        # dataset will include test_date data also
        dataset = real_time_df.loc[train_start:test_date]
        # training set last label should be current test_date
        train_set = dataset[:-pred_day].values
        test_set = dataset.loc[test_date].values
        # last column is the direction label; everything before it is features
        X_train, y_train = train_set[:, :-1], train_set[:, -1]
        X_test, y_test = test_set[:-1].reshape(1,-1), test_set[-1]

        # training and testing
        sc = StandardScaler()
        X_train = sc.fit_transform(X_train)
        X_test = sc.transform(X_test)
        model.fit(X_train, y_train)
        y_true.append(y_test)
        y_pred.append(model.predict(X_test))
        # label_plot(real_time_df,'Dir{}Days Distribution'.format(pred_day), train_start, test_date, pred_day)

    y_true, y_pred = np.array(y_true), np.array(y_pred).transpose().flatten()
    return y_true, y_pred, test_dates
# -

# #### **Function for Model Performance Evaluation**


# +
def model_performance(y_test, y_pred, model_name):
    # One-row DataFrame of weighted precision/recall/F1 plus accuracy.
    precision = precision_score(y_test, y_pred, average='weighted')
    recall = recall_score(y_test, y_pred, average='weighted')
    accuracy = accuracy_score(y_test, y_pred)
    f1_wght = f1_score(y_test, y_pred, average='weighted')
    f1_micro = f1_score(y_test, y_pred, average='micro')
    performance_df = pd.DataFrame({'Model': model_name, 'Accuracy': accuracy, 'Precision': precision, 'Recall': recall, 'F1wght': f1_wght, 'F1micro': f1_micro}, index=[0])
    return performance_df


def error_distance_count(y_test, y_pred):
    # Map |true - predicted| label distance -> fraction of test samples.
    error_distance = np.absolute(y_test - y_pred)
    unique, counts = np.unique(error_distance, return_counts=True)
    counts = counts / len(y_test)
    error_dic = dict(zip(unique, counts))
    return error_dic


def plot_error_distance(y_test, y_pred, title):
    # Bar chart of the error-distance distribution from error_distance_count.
    #========== plot setting===========
    plt.rcParams['figure.figsize'] = (10, 5)
    sns.set_context('notebook', font_scale=1.5)
    plt.style.use('seaborn-darkgrid')
    error_dic = error_distance_count(y_test, y_pred)
    plt.xlabel('Error Distance')
    plt.ylabel('Error Rate')
    plt.title(title, loc='center')
    plt.bar(list(error_dic.keys()), error_dic.values(), color='g')
    plt.show()


def plot_confusion_matrix(cm, title='Confusion matrix', cmap=plt.cm.Blues):
    # Heatmap rendering of an sklearn confusion matrix with cell counts.
    #========== plot setting===========
    plt.rcParams['figure.figsize'] = (10, 5)
    sns.set_context('notebook', font_scale=1.5)
    plt.style.use('seaborn-white')
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(cm.shape[1])
    plt.xticks(tick_marks, rotation=45)
    ax = plt.gca()
    ax.set_xticklabels((ax.get_xticks()).astype(str))
    plt.yticks(tick_marks)
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], '.0f'), horizontalalignment="center", color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.show()


# plot predictions vs observations
def plot_pred_perform(y_true, y_pred, test_dates, title):
    #========== plot setting===========
    plt.rcParams['figure.figsize'] = (30, 10)
    sns.set_context('notebook', font_scale=3.5)
    plt.style.use('seaborn-whitegrid')
    plt.scatter(test_dates, y_true,s=50, c='r', alpha=0.8, label='True Dir', edgecolor='none')
    plt.scatter(test_dates, y_pred, s=50, c='b', alpha=0.3, label='Predict Dir', edgecolor='none')
    plt.title(title)
    plt.ylabel('Direction Label')
    # plt.xticks(rotation=45)
    plt.xlabel('Date')
    # plt.legend()
    plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
    plt.show()


# function return univariate distributions of Dir5Days
def label_plot(df, title, start_date, end_date, pred_day):
    dataset_mask = (df.index>start_date) & (df.index<end_date)
    # date_start = test_date_start - history_window
    # dataset_mask = (df.index>date_start)
    dataset = df.loc[dataset_mask]
    plt.rcParams['figure.figsize'] = (10, 5)
    sns.set_context('notebook', font_scale=1.5)
    plt.style.use('seaborn-darkgrid')
    sns.countplot(dataset['Dir{}Days'.format(pred_day)],palette='Set1')
    plt.title(title, loc='center')
    plt.tight_layout()
    plt.show()


def model_evalutation(y_true, y_pred, test_dates, model_name):
    # Full evaluation: confusion matrix, error-distance chart, metrics row,
    # prediction-vs-truth scatter. Reads the notebook-global ticker_name.
    cm = confusion_matrix(y_true, y_pred)
    plot_confusion_matrix(cm, 'Confusion Matrix for {}'.format(ticker_name))
    plot_error_distance(y_true, y_pred, 'Error Performance for {}'.format(model_name))
    performance = model_performance(y_true, y_pred, model_name)
    plot_pred_perform(y_true, y_pred, test_dates, 'Prediction Performance for {}'.format(model_name))
    print(performance)
    return performance
# -

# #### **Model Test**

for factor in MCD_tech_factor_df.columns:
    print(factor)

# +
start_date = datetime.datetime(2000, 1, 3)
end_date = datetime.datetime(2019, 2, 28)
ticker_name = "MCD"
pred_day = 5  # predict the future n days return
test_date_start = datetime.datetime(2017, 1, 10)  # testing start date
history_window = relativedelta(years=6)  # training history window
MODEL_NAME = 'LR'

# Factor subsets with and without the news-sentiment factor.
selected_MCD_tech_factors = MCD_tech_factor_df[['One-Week Daily Price Trend', 'Relative Strength Index', 'One-Month Change in Average Daily Share Turnover', 'MACD Signal']]
selected_MCD_tech_news_factors = MCD_tech_factor_df[['One-Week Daily Price Trend', 'Relative Strength Index', 'One-Month Change in Average Daily Share Turnover', 'Daily Media Sentiment Indicator', 'MACD Signal']]

MCD_tech_df = generate_NDayReturn(selected_MCD_tech_factors, ticker_name, start_date, end_date, pred_day)
MCD_tech_news_df = generate_NDayReturn(selected_MCD_tech_news_factors, ticker_name, start_date, end_date, pred_day)

y_true, y_pred_tech, test_dates = model_testing(MCD_tech_df, test_date_start, history_window, pred_day, MODEL_NAME)
y_true, y_pred_news, test_dates = model_testing(MCD_tech_news_df, test_date_start, history_window, pred_day, MODEL_NAME)

perform_without_news = model_performance(y_true, y_pred_tech, 'without news')
perform_with_news =model_performance(y_true, y_pred_news, 'with news')
result = pd.concat([perform_without_news, perform_with_news])
result

performance_with_news = model_evalutation(y_true, y_pred_news, test_dates, MODEL_NAME)
performance_without_news = model_evalutation(y_true, y_pred_tech, test_dates, MODEL_NAME)
# -

result

# ### **History Window Test for LR**

# +
start_date = datetime.datetime(2000, 1, 3)
end_date = datetime.datetime(2019, 2, 28)
ticker_name = "MCD"
pred_day = 5  # predict the future n days return
test_date_start = datetime.datetime(2017, 1, 10)  # testing start date
history_window = relativedelta(years=7)  # training history window
MODEL_NAME = 'LR'

MCD_tech_df = generate_NDayReturn(selected_MCD_tech_factors, ticker_name, start_date, end_date, pred_day)


def history_window_test(tech_df, test_date_start, model_name):
    # Sweep the rolling-history length from 1 to 15 years and plot accuracy.
    # Reads the notebook-global pred_day.
    history_windows = list(range(1, 16))
    accuracies = []
    for hw in history_windows:
        history_window = relativedelta(years=hw)  # training history window
        y_true, y_pred, test_dates =model_testing(tech_df, test_date_start, history_window, pred_day, model_name)
        accuracy = accuracy_score(y_true, y_pred)
        print('history_window: {h} year\t acc: {a:.3f}'.format(h=hw, a=accuracy))
        accuracies.append(accuracy)
    plt.rcParams['figure.figsize'] = (30, 10)
    sns.set_context('notebook', font_scale=3.0)
    plt.style.use('seaborn-darkgrid')
    plt.plot(history_windows, accuracies, linestyle='-', marker='o', markersize=12, color='blue')
    plt.title('Histroy Window Test for {}'.format(ticker_name))
    plt.ylabel('Accuracy')
    plt.xlabel('Years')
    plt.show()


history_window_test(MCD_tech_df, test_date_start, MODEL_NAME)
# -

# ### **Rolliing Window Test for Price Trends for MCD**

# +
start_date = datetime.datetime(2000, 1, 3)
end_date = datetime.datetime(2019, 2, 28)
ticker_name = "MCD"
pred_day = 5  # predict the future n days return
test_date_start = datetime.datetime(2017, 1, 10)  # testing start date
history_window = relativedelta(years=6)  # training history window
MODEL_NAME = 'LR'


def price_trend_test(tech_factor_df, test_date_start, model_name):
    # Swap each price-trend horizon into the factor set in turn and compare
    # walk-forward accuracies. Reads notebook globals (ticker_name,
    # start_date, end_date, pred_day, history_window).
    price_trend = ['One-Week Daily Price Trend', 'One-Month Daily Price Trend', 'Three-Month Daily Price Trend', 'Nine-Month Daily Price Trend']
    accuracies = []
    for pt in price_trend:
        selected_tech_factors = tech_factor_df[[pt, 'Relative Strength Index', 'One-Month Change in Average Daily Share Turnover', 'Daily Media Sentiment Indicator', 'MACD Signal']]
        tech_df = generate_NDayReturn(selected_tech_factors, ticker_name, start_date, end_date, pred_day)
        y_true, y_pred, test_dates = model_testing(tech_df, test_date_start, history_window, pred_day, model_name)
        cm = confusion_matrix(y_true, y_pred)
        plot_confusion_matrix(cm, 'Confusion Matrix for {}'.format(model_name))
        accuracy = accuracy_score(y_true, y_pred)
        print('{p} \t acc: {a:.3f}'.format(p=pt, a=accuracy))
        accuracies.append(accuracy)
    plt.rcParams['figure.figsize'] = (30, 10)
    sns.set_context('notebook', font_scale=3.0)
    plt.style.use('seaborn-darkgrid')
    plt.plot(price_trend, accuracies, linestyle='-', marker='o', markersize=12, color='blue')
    plt.title('Price Trend Test for {}'.format(ticker_name))
    plt.ylabel('Accuracy')
    # plt.xlabel('Days')
    plt.show()


price_trend_test(MCD_tech_factor_df, test_date_start, MODEL_NAME)
# -

# ### **Predict Day Test for MCD**

# +
history_window = relativedelta(years=6)  # training history window


def prediction_day_thresholds_test(selected_tech_factors, model_name):
    # Sweep the prediction horizon and compare walk-forward accuracies.
    # NOTE(review): the loop variable 'pd' shadows the pandas alias inside
    # this function body; rename on the next functional change.
    pred_days = [1, 2, 3, 4, 5, 10, 15, 20]
    accuracies = []
    for pd in pred_days:
        tech_df = generate_NDayReturn(selected_tech_factors, ticker_name, start_date, end_date, pd)
        y_true, y_pred, test_dates = model_testing(tech_df, test_date_start, history_window, pd, model_name)
        accuracy = accuracy_score(y_true, y_pred)
        print('predict day: {p} days\t acc: {a:.3f}'.format(p=pd, a=accuracy))
        cm = confusion_matrix(y_true, y_pred)
        plot_confusion_matrix(cm, 'Confusion Matrix for {n} for predict day {p}, acc = {a:.3f}'.format(n=model_name, p=pd, a=accuracy ))
        accuracies.append(accuracy)
    plt.rcParams['figure.figsize'] = (30, 10)
    sns.set_context('notebook', font_scale=3.0)
    plt.style.use('seaborn-darkgrid')
    plt.plot(pred_days, accuracies, linestyle='-', marker='o', markersize=12, color='blue')
    plt.title('Prediction Day Test for {}'.format(ticker_name))
    plt.ylabel('Accuracy')
    plt.xlabel('Prediction Day')
    plt.show()


prediction_day_thresholds_test(selected_MCD_tech_factors, MODEL_NAME)
# -
News-Based-Trading-Strategy-Project/MCD_NLPTest.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [Root]
#     language: python
#     name: Python [Root]
# ---

# Related-skills discovery: build a TF-IDF term-document matrix of skills
# over job descriptions, then compute pairwise cosine similarity between
# skills to find skills that co-occur in similar job postings.

import math
import itertools
from scipy import linalg
import numpy as np
import pandas as pd
from sklearn.metrics.pairwise import cosine_similarity
from scipy import sparse
from sklearn.metrics import pairwise_distances
from scipy.spatial.distance import cosine

# Each record has a job_description (stored as a 1-element list) and a
# list of skills.
data = pd.read_json('./data/jobs.json', orient='records')
data.job_description = data.job_description.apply(lambda t: t[0])
data

# +
# full_skills_doc = data[['job_description', 'skills']]
# full_skills_doc
# -

# # Term-Document Matrix

# +
# Flatten the per-posting skill lists into one deduplicated vocabulary.
all_skills = data.skills.tolist()
all_skills = np.asarray(all_skills)
all_skills = list(itertools.chain(*all_skills))
all_skills[:5]
# -

td_matrix = pd.DataFrame()
td_matrix['skills'] = all_skills
td_matrix.drop_duplicates(inplace=True)


def term_frequency(t, d):
    """Count the occurrences of term t in document string d."""
    return d.count(t)


# +
# Inverse document frequency per skill: log(N / (1 + df)), where df is the
# number of postings whose skill list contains the skill. The +1 guards
# against division by zero for skills absent from every list.
idf_values = {}
all_skills = td_matrix['skills'].tolist()
num_of_docs = len(data.index)
for skill in all_skills:
    _skill = skill
    contains_token = map(lambda doc: _skill in doc, data.skills.tolist())
    idf_values[skill] = math.log(float(num_of_docs) / (1 + sum(contains_token)))
# -

idf_values.get('SSRS')

# FIX: print-statement syntax was Python-2-only; the call form below is
# valid under both Python 2 and 3.
print(len(td_matrix))
print(len(data))


# +
def calc_td_matrix(i, row):
    """Fill column 'd<i>' of td_matrix with TF-IDF weights for posting i.

    Mutates the module-level td_matrix in place; skills without an IDF
    value default to 0.
    """
    doc = 'd' + str(i)  # hoisted: invariant over the inner loop
    for ix, tdrow in td_matrix.iterrows():
        td_matrix.loc[ix, doc] = idf_values.get(tdrow['skills'] ,0) * term_frequency(tdrow['skills'], row['job_description'])


for i, row in data.iterrows():
    calc_td_matrix(i, row)

# Export
td_matrix.to_csv('tmp/td_matrix.csv', index=False, encoding='utf-8')
td_matrix
# -

# +
# Cosine similarity between skill rows of the (sparse) TF-IDF matrix.
_td_matrix = td_matrix
_td_matrix = _td_matrix.set_index('skills')
skills_sparse = sparse.csr_matrix(_td_matrix)
similarities = cosine_similarity(skills_sparse)
print('pairwise dense output:\n {}\n'.format(similarities))
# -

distance_matrix = pairwise_distances(skills_sparse, metric="cosine")
distance_matrix

x = pd.DataFrame(similarities)
x.to_csv('tmp/x.csv', index=False)
x.columns = _td_matrix.index
x.set_index(_td_matrix.index, inplace=True)
x

# Keep only skills with at least one strong (>= 0.9) similarity.
x[(x >= 0.9).any(axis=1)].to_csv('./tmp/related_test.csv',encoding='utf8')

# SVD sanity check on a random complex matrix: reconstructing from
# U * diag(s) * Vh should reproduce the input.
a = np.random.randn(9, 6) + 1.j*np.random.randn(9, 6)
a.shape
U, s, Vh = linalg.svd(a, full_matrices=False)
np.dot(U, np.dot(np.diag(s), Vh))
.ipynb_checkpoints/related-skills-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.7
#     language: python
#     name: python3
# ---

# + [markdown] id="02ea1f6f090f46e5966468336006c630"
# # Accelerate Deep Learning Model training with Watson Machine Learning Accelerator
#
# + [markdown] id="9bc04e069c7c4c86ac640afdecdefbad"
# ### Notebook created by <NAME>, <NAME> in Jan 2021
#
# ### In this notebook, you will learn how to use the Watson Machine Learning Accelerator (WML-A) API and accelerate deep learning model training on GPU with Watson Machine Learning Accelerator.
#
# This notebook uses the PyTorch Resnet18 model, which performs image classification using a basic computer vision image classification example. The model will be trained both on CPU and GPU to demonstrate that training models on GPU hardware deliver faster result times.
#
# This notebook covers the following sections:
#
# 1. [Setting up required packages](#setup)<br>
#
# 2. [Configuring your environment and project details](#configure)<br>
#
# 3. [Training the model on CPU](#cpu)<br>
#
# 4. [Training the model on GPU with Watson Machine Learning Accelerator](#gpu)<br>

# + [markdown] id="7fcde3deab74495e871e4da0f83fdfac"
# <a id = "setup"></a>
# ## Step 1: Setting up required packages
#
# + [markdown] id="b4020a53992743179053ecb345e637a4"
# #### First, install torchvision which is required to train the PyTorch Resnet18 model on CPU.
# Note: You will need to create a custom environment with 16VCPU and 32GB

# + id="bd69fac3990e4ef28a38a29b0b83b614"
# ! pip install torchvision

# + id="199618d2bbe1488d8820cd8a62402e99"
import torchvision

# + [markdown] id="01c07e0d7cc44bf787719d848a58c1f3"
# #### Next, define helper methods:

# + id="975a773eb5f7406085b30b08bc3d9922"
# import tarfile
import tempfile
import os
import json
import pprint
import pandas as pd
from IPython.display import display, FileLink, clear_output
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
from matplotlib import pyplot as plt
# %pylab inline
import base64
import json
import time
import urllib
import tarfile


def query_job_status(job_id, refresh_rate=3):
    """Poll the WML-A deep learning REST API until the job leaves a
    non-terminal state (PENDING_CRD_SCHEDULER / SUBMITTED / RUNNING).

    job_id: dict with an 'id' key, as returned by the job-submission call.
    refresh_rate: seconds between polls.
    Returns the final requests.Response. Relies on the notebook globals
    dl_rest_url, commonHeaders and req defined in the configuration cell.
    """
    execURL = dl_rest_url + '/execs/' + job_id['id']
    pp = pprint.PrettyPrinter(indent=2)
    keep_running = True
    res = None
    while keep_running:
        res = req.get(execURL, headers=commonHeaders, verify=False)
        monitoring = pd.DataFrame(res.json(), index=[0])
        pd.set_option('max_colwidth', 120)
        clear_output()  # keep a single refreshing status table in the cell
        print("Refreshing every {} seconds".format(refresh_rate))
        display(monitoring)
        pp.pprint(res.json())
        if res.json()['state'] not in ['PENDING_CRD_SCHEDULER', 'SUBMITTED', 'RUNNING']:
            keep_running = False
        time.sleep(refresh_rate)
    return res


def query_executor_stdout_log(job_id):
    """Print the last 1000 lines of the job's executor stdout log."""
    execURL = dl_rest_url + '/scheduler/applications/' + job_id['id'] + '/executor/1/logs/stdout?lastlines=1000'
    # 'https://{}/platform/rest/deeplearning/v1/scheduler/applications/wmla-267/driver/logs/stderr?lastlines=10'.format(hostname)
    commonHeaders2 = {'accept': 'text/plain', 'X-Auth-Token': access_token}
    print(execURL)
    res = req.get(execURL, headers=commonHeaders2, verify=False)
    print(res.text)


def query_train_metric(job_id):
    """Print the training log (loss/accuracy metrics) for the job."""
    execURL = dl_rest_url + '/execs/' + job_id['id'] + '/log'
    commonHeaders2 = {'accept': 'text/plain', 'X-Auth-Token': access_token}
    print(execURL)
    res = req.get(execURL, headers=commonHeaders2, verify=False)
    print(res.text)


def download_trained_model(job_id):
    """Stream the job's result archive into /project_data/data_asset/<id>.zip.

    FIX: this helper previously ignored its job_id parameter and read the
    unrelated notebook global `r`; it now uses job_id['id'] like the other
    helpers, so it works for any job handle passed in.
    """
    from IPython.display import display, FileLink
    # save result file
    commonHeaders3 = {'accept': 'application/octet-stream', 'X-Auth-Token': access_token}
    execURL = dl_rest_url + '/execs/' + job_id['id'] + '/result'
    res = req.get(execURL, headers=commonHeaders3, verify=False, stream=True)
    print(execURL)
    tmpfile = '/project_data/data_asset/' + job_id['id'] + '.zip'
    print('Save model: ', tmpfile)
    # 'with' closes the file on exit; the redundant explicit close is gone.
    with open(tmpfile, 'wb') as f:
        f.write(res.content)


def make_tarfile(output_filename, source_dir):
    """Create a gzipped tar of source_dir rooted at its basename."""
    with tarfile.open(output_filename, "w:gz") as tar:
        tar.add(source_dir, arcname=os.path.basename(source_dir))


# + [markdown] id="41e72dbf695c4f258de5e6d85685c7c2"
# <a id = "configure"></a>
# ## Step 2: Configuring your environment and project details

# + [markdown] id="fe8c33b48a9a4f10aeb34eabc2e1f1d3"
# To set up your project details, provide your credentials in this cell. You must include your cluster URL, username, and password.
# + id="4fa3be661bab44b58d8e86d383a4df65" hostname='$host_name' # please enter Watson Machine Learning Accelerator host name login='$login:password' # please enter the login and password es = base64.b64encode(login.encode('utf-8')).decode("utf-8") print(es) commonHeaders={'Authorization': 'Basic '+es} req = requests.Session() auth_url = 'https://{}/auth/v1/logon'.format(hostname) print(auth_url) a=requests.get(auth_url,headers=commonHeaders, verify=False) access_token=a.json()['accessToken'] print(access_token) # + id="24d476b80c6c4f7d8abe927b3656e1de" dl_rest_url = 'https://{}/platform/rest/deeplearning/v1'.format(hostname) commonHeaders={'accept': 'application/json', 'X-Auth-Token': access_token} req = requests.Session() # + [markdown] id="de65166a99d0485aa885f938ffcec89f" # <a id = "cpu"></a> # ## Step 3: Training the model on CPU # + [markdown] id="8aea74018e6d4c26920e02a00637f4c6" # #### Prepare the model files for running on CPU: # + id="3e3692cc27324c4680410dffcec5cd73" import os DATA_DIR='/project_data/data_asset/pytorch-resnet/data' RESULT_DIR='/project_data/data_asset/pytorch-resnet/result' model_dir = f'/project_data/data_asset/pytorch-resnet/resnet' model_main = f'main.py' model_resnet = f'resnet.py' os.makedirs(DATA_DIR, exist_ok=True) os.makedirs(RESULT_DIR, exist_ok=True) os.makedirs(model_dir, exist_ok=True) # + id="a89ecd024ebf4acf9338ea0157415a61" # %%writefile {model_dir}/{model_main} # #!/usr/bin/env python # coding: utf-8 # # Image Classification Using PyTorch Resnet with Watson Machine Learning Accelerator Notebook # This asset details the process of performing a basic computer vision image classification example using the notebook functionality within Watson Machine Learning Accelerator. In this asset, you will learn how to accelerate your training with pytorch resnet model upon the cifar10 dataset. # # Please refer to [Resnet Introduction](https://arxiv.org/abs/1512.03385) for more details. 
from __future__ import print_function import argparse import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import torchvision from torchvision import datasets, transforms import torchvision.models as models #from resnet import resnet18 import time import numpy import sys import os import glob import argparse log_interval = 10 seed = 1 use_cuda = False completed_batch =0 completed_test_batch =0 criterion = nn.CrossEntropyLoss() parser = argparse.ArgumentParser(description='Tensorflow MNIST Example') parser.add_argument('--batch-size', type=int, default=32, metavar='N', help='input batch size for training (default: 128)') parser.add_argument('--epochs', type=int, default=5, metavar='N', help='number of epochs to train (default: 10)') parser.add_argument('--lr', type=float, default=0.01, metavar='LR', help='learning rate (default: 0.01)') parser.add_argument('--cuda', action='store_true', default=False, help='disables CUDA training') args = parser.parse_args() print(args) # ## Create the Resnet18 model print("Use cuda: ", use_cuda) # ## Download the Cifar10 dataset # If you set download=True, the CIFAR-10 [CIFAR-10 python version](https://www.cs.toronto.edu/~kriz/cifar.html) dataset is automatically downloaded and used by the Notebook. 
# If you want to use a different dataset or have previously downloaded a dataset, # set download=False and specify the directory that contains the dataset # An exmpale to dowload the CIFAR-10 dataset: # > mkdir ${DATA_DIR}/cifar10 # > cd ${DATA_DIR}/cifar10 # > wget https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz # > tar -zxf cifar-10-python.tar.gz DATA_DIR='/project_data/data_asset/pytorch-resnet/data' RESULT_DIR='/project_data/data_asset/pytorch-resnet/result' model_dir = f'/project_data/data_asset/pytorch-resnet/resnet' def getDatasets(): train_data_dir = DATA_DIR + '/cifar10' test_data_dir = DATA_DIR + '/cifar10' transform_train = transforms.Compose([ transforms.Resize(224), #transforms.RandomCrop(self.resolution, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), ]) transform_test = transforms.Compose([ transforms.Resize(224), transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), ]) return (torchvision.datasets.CIFAR10(root=train_data_dir, train=True, download=True, transform = transform_train), torchvision.datasets.CIFAR10(root=test_data_dir, train=False, download=True, transform = transform_test) ) torch.manual_seed(seed) device = torch.device("cuda" if use_cuda else "cpu") print ('device:', device) kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {} train_dataset, test_dataset = getDatasets() train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, **kwargs) test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=args.batch_size, shuffle=True, **kwargs) # ## Implement the customized train and test loop def train(model, device, train_loader, optimizer, epoch): global completed_batch train_loss = 0 correct = 0 total = 0 model.train() for batch_idx, (data, target) in enumerate(train_loader): data, target = data.to(device), target.to(device) 
optimizer.zero_grad() output = model(data) loss = criterion(output, target) loss.backward() optimizer.step() train_loss += loss.item() _, predicted = output.max(1) total += target.size(0) correct += predicted.eq(target).sum().item() completed_batch += 1 print ('Train - batches : {}, average loss: {:.4f}, accuracy: {}/{} ({:.0f}%)'.format( completed_batch, train_loss/(batch_idx+1), correct, total, 100.*correct/total)) def test(model, device, test_loader, epoch): global completed_test_batch global completed_batch model.eval() test_loss = 0 correct = 0 total = 0 completed_test_batch = completed_batch - len(test_loader) with torch.no_grad(): for batch_idx, (data, target) in enumerate(test_loader): data, target = data.to(device), target.to(device) output = model(data) loss = criterion(output, target) test_loss += loss.item() # sum up batch loss _, pred = output.max(1) # get the index of the max log-probability correct += pred.eq(target.view_as(pred)).sum().item() total += target.size(0) completed_test_batch += 1 test_loss /= len(test_loader.dataset) test_acc = 100. * correct / len(test_loader.dataset) # Output test info for per epoch print('Test - batches: {}, average loss: {:.4f}, accuracy: {}/{} ({:.0f}%)\n'.format( completed_batch, test_loss, correct, len(test_loader.dataset), 100. * correct / len(test_loader.dataset))) # ## Create the Resnet18 model #use_cuda = not args.no_cuda print("Use cuda: ", use_cuda) model_type = "resnet18" print("=> using pytorch build-in model '{}'".format(model_type)) model = models.resnet18() #model = models.resnet50() # Using pytorch built-in resnet18 model, the model is pre-trained on the ImageNet dataset, # which has 1000 classifications. 
To transfer it to cifar10 dataset, we can modify the last fully-connected layer output size to 10 for param in model.parameters(): param.requires_grad = True # set False if you only want to train the last layer using pretrained model # Replace the last fully-connected layer # Parameters of newly constructed modules have requires_grad=True by default model.fc = nn.Linear(512, 10) # (Optional) To use wmla pretrained resnet18 model for cifar10, load the model weight file. The pretrained model weight file can be downloaded [here](https://?). weightfile = DATA_DIR + "/checkpoint/model_epoch_final.pth" if os.path.exists(weightfile): print ("Initial weight file is " + weightfile) model.load_state_dict(torch.load(weightfile, map_location=lambda storage, loc: storage)) # ## Run the model trainings #print(model) model.to(device) optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=0, dampening=0, weight_decay=0, nesterov=False) epochs = args.epochs scheduler = optim.lr_scheduler.StepLR(optimizer, 30, 0.1, last_epoch=-1) # Output total iterations info for deep learning insights print("Total iterations: %s" % (len(train_loader) * epochs)) #print("RESULT_DIR: " + os.getenv("RESULT_DIR")) #RESULT_DIR = os.getenv("RESULT_DIR") os.makedirs(RESULT_DIR, exist_ok=True) for epoch in range(1, epochs+1): print("\nRunning epoch %s ... It might take several minutes for each epoch to run." % epoch) train(model, device, train_loader, optimizer, epoch) test(model, device, test_loader, epoch) scheduler.step() torch.save(model.state_dict(), RESULT_DIR + "/model_epoch_%d.pth"%(epoch)) torch.save(model.state_dict(), RESULT_DIR + "/model_epoch_final.pth") # + [markdown] id="4f3ad43f9efb42a68057fac608bd21c8" # ## Training results on CPU # # #### Training was run from a Cloud Pak for Data Notebook utilizing a CPU kernel. # # # In the custom environment that was created with **16vCPU** and **32GB**, it took **1560 seconds** (or approximately **26 minutes**) to complete 1 EPOCH training. 
# # + id="789d930b39b14945bf4fa4850b0d2475" import datetime starttime = datetime.datetime.now() # ! python /project_data/data_asset/pytorch-resnet/resnet/main.py --epochs 1 endtime = datetime.datetime.now() print("Training cost: ", (endtime - starttime).seconds, " seconds.") # + [markdown] id="bd858908a5714a46881266091e1442a1" # <a id = "gpu"></a> # ## Step 4: Training the model on GPU with Watson Machine Learning Accelerator # # #### Prepare the model files for running on GPU: # + id="b7cf774caa0c4902a2bd0719b5b955d0" import os model_dir = f'/project_data/data_asset/pytorch-resnet/resnet-wmla' model_main = f'main.py' os.makedirs(model_dir, exist_ok=True) # + id="64a1d4d199a9464aa0c589f9791eb66e" # %%writefile {model_dir}/{model_main} # #!/usr/bin/env python # coding: utf-8 # # Image Classification Using PyTorch Resnet with Watson Machine Learning Accelerator Notebook # This asset details the process of performing a basic computer vision image classification example using the notebook functionality within Watson Machine Learning Accelerator. In this asset, you will learn how to accelerate your training with pytorch resnet model upon the cifar10 dataset. # # Please refer to [Resnet Introduction](https://arxiv.org/abs/1512.03385) for more details. 
from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
from torchvision import datasets, transforms
import torchvision.models as models
import time
import sys
import os
import glob

# Training bookkeeping shared by the train/test loops below.
log_interval = 10
seed = 1
use_cuda = False
completed_batch = 0
completed_test_batch = 0
criterion = nn.CrossEntropyLoss()

parser = argparse.ArgumentParser(description='Tensorflow MNIST Example')
parser.add_argument('--batch-size', type=int, default=128, metavar='N',
                    help='input batch size for training (default: 128)')
parser.add_argument('--epochs', type=int, default=1, metavar='N',
                    help='number of epochs to train (default: 1)')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
                    help='learning rate (default: 0.01)')
# Bug fix: the original declared 'cuda' as a POSITIONAL argument with
# action='store_true', which is invalid argparse usage (store_true consumes
# zero tokens and only makes sense as an optional flag). Use '--cuda' like
# the CPU variant of this script does.
parser.add_argument('--cuda', action='store_true', default=True,
                    help='enables CUDA training')
args = parser.parse_args()
print(args)

# ## Create the Resnet18 model

use_cuda = args.cuda
print("Use cuda: ", use_cuda)

# ## Download the Cifar10 dataset
# If you set download=True, the CIFAR-10 [CIFAR-10 python version](https://www.cs.toronto.edu/~kriz/cifar.html) dataset is automatically downloaded and used by the Notebook.
# If you want to use a different dataset or have previously downloaded a dataset, # set download=False and specify the directory that contains the dataset # An exmpale to dowload the CIFAR-10 dataset: # > mkdir ${DATA_DIR}/cifar10 # > cd ${DATA_DIR}/cifar10 # > wget https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz # > tar -zxf cifar-10-python.tar.gz print("DATA_DIR: " + os.getenv("DATA_DIR")) DATA_DIR = os.getenv("DATA_DIR") def getDatasets(): train_data_dir = DATA_DIR + "/cifar10" test_data_dir = DATA_DIR + "/cifar10" transform_train = transforms.Compose([ transforms.Resize(224), #transforms.RandomCrop(self.resolution, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), ]) transform_test = transforms.Compose([ transforms.Resize(224), transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), ]) return (torchvision.datasets.CIFAR10(root=train_data_dir, train=True, download=True, transform = transform_train), torchvision.datasets.CIFAR10(root=test_data_dir, train=False, download=True, transform = transform_test) ) torch.manual_seed(seed) device = torch.device("cuda" if use_cuda else "cpu") kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {} train_dataset, test_dataset = getDatasets() train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, **kwargs) test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=args.batch_size, shuffle=True, **kwargs) # ## Implement the customized train and test loop def train(model, device, train_loader, optimizer, epoch): global completed_batch train_loss = 0 correct = 0 total = 0 model.train() for batch_idx, (data, target) in enumerate(train_loader): data, target = data.to(device), target.to(device) optimizer.zero_grad() output = model(data) loss = criterion(output, target) loss.backward() optimizer.step() train_loss += loss.item() 
_, predicted = output.max(1) total += target.size(0) correct += predicted.eq(target).sum().item() completed_batch += 1 print ('Train - batches : {}, average loss: {:.4f}, accuracy: {}/{} ({:.0f}%)'.format( completed_batch, train_loss/(batch_idx+1), correct, total, 100.*correct/total)) def test(model, device, test_loader, epoch): global completed_test_batch global completed_batch model.eval() test_loss = 0 correct = 0 total = 0 completed_test_batch = completed_batch - len(test_loader) with torch.no_grad(): for batch_idx, (data, target) in enumerate(test_loader): data, target = data.to(device), target.to(device) output = model(data) loss = criterion(output, target) test_loss += loss.item() # sum up batch loss _, pred = output.max(1) # get the index of the max log-probability correct += pred.eq(target.view_as(pred)).sum().item() total += target.size(0) completed_test_batch += 1 test_loss /= len(test_loader.dataset) test_acc = 100. * correct / len(test_loader.dataset) # Output test info for per epoch print('Test - batches: {}, average loss: {:.4f}, accuracy: {}/{} ({:.0f}%)\n'.format( completed_batch, test_loss, correct, len(test_loader.dataset), 100. * correct / len(test_loader.dataset))) # ## Create the Resnet18 model model_type = "resnet18" #model_type = "resnet50" print("=> using pytorch build-in model '{}'".format(model_type)) model = models.resnet18() # Using pytorch build-in resnet18 model, the model is pre-trained on the ImageNet dataset, # which has 1000 classifications. To transfer it to cifar10 dataset, we can modify the last fully-connected layer output size to 10 for param in model.parameters(): param.requires_grad = True # set False if you only want to train the last layer using pretrained model # Replace the last fully-connected layer # Parameters of newly constructed modules have requires_grad=True by default model.fc = nn.Linear(512, 10) # (Optional) To use wmla pretrained resnet18 model for cifar10, load the model weight file. 
# The pretrained model weight file can be downloaded [here](https://?).

weightfile = DATA_DIR + "/checkpoint/model_epoch_final.pth"
if os.path.exists(weightfile):
    print("Initial weight file is " + weightfile)
    # map_location keeps CPU-saved checkpoints loadable on any device.
    model.load_state_dict(torch.load(weightfile, map_location=lambda storage, loc: storage))

# ## Run the model trainings

model.to(device)
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=0, dampening=0, weight_decay=0, nesterov=False)
epochs = args.epochs
# Decay the learning rate by 10x every 30 epochs.
scheduler = optim.lr_scheduler.StepLR(optimizer, 30, 0.1, last_epoch=-1)

# Output total iterations info for deep learning insights
print("Total iterations: %s" % (len(train_loader) * epochs))

print("RESULT_DIR: " + os.getenv("RESULT_DIR"))
RESULT_DIR = os.getenv("RESULT_DIR")
os.makedirs(RESULT_DIR, exist_ok=True)
# Bug fix: the checkpoints below are written into RESULT_DIR + "/model/",
# which was never created — torch.save() would fail at the end of the first
# epoch. Create it up front (exist_ok makes this a no-op if WMLA pre-creates it).
os.makedirs(RESULT_DIR + "/model", exist_ok=True)

for epoch in range(1, epochs + 1):
    print("\nRunning epoch %s ... It might take several minutes for each epoch to run." % epoch)
    train(model, device, train_loader, optimizer, epoch)
    test(model, device, test_loader, epoch)
    scheduler.step()
    # Checkpoint after every epoch, plus a final snapshot below.
    torch.save(model.state_dict(), RESULT_DIR + "/model/model_epoch_%d.pth" % (epoch))

torch.save(model.state_dict(), RESULT_DIR + "/model/model_epoch_final.pth")

# + [markdown] id="512529be85064a589196e893c24cc1e8"
# ## Training results on GPU
#
# #### Training was run from a Cloud Pak for Data Notebook utilizing a GPU kernel.
#
#
# In the custom environment that was created with **16vCPU** and **32GB**, it took **147 seconds** (or approximately **2.5 minutes**) to complete 1 EPOCH training.
# # + id="9b428dc3dcef44bf94f7afe59e68f5d8" files = {'file': open('/project_data/data_asset/pytorch-resnet/resnet-wmla/main.py', 'rb')} args = '--exec-start PyTorch --cs-datastore-meta type=fs \ --workerDeviceNum 1 \ --model-main main.py --epochs 1' # + id="12744932929149de91df0af4d20bfd50" starttime = datetime.datetime.now() r = requests.post(dl_rest_url+'/execs?args='+args, files=files, headers=commonHeaders, verify=False) if not r.ok: print('submit job failed: code=%s, %s'%(r.status_code, r.content)) job_status = query_job_status(r.json(),refresh_rate=5) endtime = datetime.datetime.now() print("\nTraining cost: ", (endtime - starttime).seconds, " seconds.") # + [markdown] id="4cdf692a2d9740209a8484619f409a0a" # ## Training metrics and logs # # #### Retrieve and display the model training metrics: # + id="28bd037ad580460aa3545c65c19c47dc" query_train_metric(r.json()) # + [markdown] id="132ffd64d79c4b81837a565291dc67da" # #### Retrieve and display the model training logs: # + id="87c7cd6ac5d64afa8947783fb5f854e5" query_executor_stdout_log(r.json()) # + [markdown] id="2e6b41ad92f648798297104d2c3ad558" # ## Download trained model from Watson Machine Learning Accelerator # + id="d6f15e89a33948ea872a5e8499312ddd" download_trained_model(r.json()) # + id="d54fd7cf1f2549b38b72c11a1e5293ae"
dli-learning-path/tutorials-cpd-wmla/Accelerate_deep_learning_model_with_WMLA.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Piecewise exponential models and creating custom models # # This section will be easier if we recall our three mathematical "creatures" and the relationships between them. First is the survival function, $S(t)$, that represents the probability of living past some time, $t$. Next is the _always non-negative and non-decreasing_ cumulative hazard function, $H(t)$. Its relation to $S(t)$ is: # # $$ S(t) = \exp\left(-H(t)\right)$$ # # Finally, the hazard function, $h(t)$, is the derivative of the cumulative hazard: # # $$h(t) = \frac{dH(t)}{dt}$$ # # which has the immediate relation to the survival function: # # $$S(t) = \exp\left(-\int_{0}^t h(s) ds\right)$$ # # Notice that any of the three absolutely defines the other two. Some situations make it easier to define one vs the others. For example, in the Cox model, it's easist to work with the hazard, $h(t)$. In this section on parametric univariate models, it'll be easiest to work with the cumulative hazard. This is because of an asymmetry in math: derivatives are much easier to compute than integrals. So, if we define the cumulative hazard, both the hazard and survival function are much easier to reason about versus if we define the hazard and ask questions about the other two. # # First, let's revisit some simpler parametric models. # # #### The Exponential model # # Recall that the Exponential model has a constant hazard, that is: # # $$ h(t) = \frac{1}{\lambda} $$ # # which implies that the cumulative hazard, $H(t)$, has a pretty simple form: $H(t) = \frac{t}{\lambda}$. Below we fit this model to some survival data. 
# + # %matplotlib inline # %config InlineBackend.figure_format = 'retina' from matplotlib import pyplot as plt import numpy as np import pandas as pd from lifelines.datasets import load_waltons waltons = load_waltons() T, E = waltons['T'], waltons['E'] # + from lifelines import ExponentialFitter fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(10, 4)) epf = ExponentialFitter().fit(T, E) epf.plot_hazard(ax=ax[0]) epf.plot_cumulative_hazard(ax=ax[1]) ax[0].set_title("hazard"); ax[1].set_title("cumulative_hazard") epf.print_summary(3) # - # This model does a poor job of fitting to our data. If I fit a _non-parametric_ model, like the Nelson-Aalen model, to this data, the Exponential's lack of fit is very obvious. # + from lifelines import NelsonAalenFitter ax = epf.plot(figsize=(8,5)) naf = NelsonAalenFitter().fit(T, E) ax = naf.plot(ax=ax) plt.legend() # - # It should be clear that the single parameter model is just averaging the hazards over the entire time period. In reality though, the true hazard rate exhibits some complex non-linear behaviour. # # #### Piecewise Exponential models # # What if we could break out model into different time periods, and fit an exponential model to each of those? For example, we define the hazard as: # # $$ # h(t) = \begin{cases} # \lambda_0, & \text{if $t \le \tau_0$} \\ # \lambda_1 & \text{if $\tau_0 < t \le \tau_1$} \\ # \lambda_2 & \text{if $\tau_1 < t \le \tau_2$} \\ # ... # \end{cases} # $$ # # This model should be flexible enough to fit better to our dataset. # # The cumulative hazard is only slightly more complicated, but not too much and can still be defined in Python. In _lifelines_, univariate models are constructed such that one _only_ needs to define the cumulative hazard model with the parameters of interest, and all the hard work of fitting, creating confidence intervals, plotting, etc. is taken care. # # For example, _lifelines_ has implemented the `PiecewiseExponentialFitter` model. 
Internally, the code is a single function that defines the cumulative hazard. The user specifies where they believe the "breaks" are, and _lifelines_ estimates the best $\lambda_i$. # # + from lifelines import PiecewiseExponentialFitter # looking at the above plot, I think there may be breaks at t=40 and t=60. pf = PiecewiseExponentialFitter(breakpoints=[40, 60]).fit(T, E) fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(10, 4)) ax = pf.plot(ax=axs[1]) pf.plot_hazard(ax=axs[0]) ax = naf.plot(ax=ax, ci_show=False) axs[0].set_title("hazard"); axs[1].set_title("cumulative_hazard") pf.print_summary(3) # - # We can see a much better fit in this model. A quantitative measure of fit is to compare the log-likelihood between exponential model and the piecewise exponential model (higher is better). The log-likelihood went from -772 to -647, respectively. We could keep going and add more and more breakpoints, but that would end up overfitting to the data. # # #### Univarite models in _lifelines_ # # I mentioned that the `PiecewiseExponentialFitter` was implemented using only its cumulative hazard function. This is not a lie. _lifelines_ has very general semantics for univariate fitters. For example, this is how the entire `ExponentialFitter` is implemented: # # ```python # class ExponentialFitter(ParametericUnivariateFitter): # # _fitted_parameter_names = ["lambda_"] # # def _cumulative_hazard(self, params, times): # lambda_ = params[0] # return times / lambda_ # ``` # # We only need to specify the cumulative hazard function because of the 1:1:1 relationship between the cumulative hazard function and the survival function and the hazard rate. From there, _lifelines_ handles the rest. # # # #### Defining our own survival models # # # To show off the flexability of _lifelines_ univariate models, we'll create a brand new, never before seen, survival model. Looking at the Nelson-Aalen fit, the cumulative hazard looks looks like their might be an asymptote at $t=80$. 
This may correspond to an absolute upper limit of subjects' lives. Let's start with that functional form. # # $$ H_1(t; \alpha) = \frac{\alpha}{(80 - t)} $$ # # We subscript $1$ because we'll investigate other models. In a _lifelines_ univariate model, this is defined in the following code. # # **Important**: in order to compute derivatives, you must use the numpy imported from the `autograd` library. This is a thin wrapper around the original numpy. Note the `import autograd.numpy as np` below. # + from lifelines.fitters import ParametericUnivariateFitter import autograd.numpy as np class InverseTimeHazardFitter(ParametericUnivariateFitter): # we tell the model what we want the names of the unknown parameters to be _fitted_parameter_names = ['alpha_'] # this is the only function we need to define. It always takes two arguments: # params: an iterable that unpacks the parameters you'll need in the order of _fitted_parameter_names # times: a vector of times that will be passed in. def _cumulative_hazard(self, params, times): alpha = params[0] return alpha /(80 - times) # + itf = InverseTimeHazardFitter() itf.fit(T, E) itf.print_summary() ax = itf.plot(figsize=(8,5)) ax = naf.plot(ax=ax, ci_show=False) plt.legend() # - # The best fit of the model to the data is: # # $$H_1(t) = \frac{21.51}{80-t}$$ # # Our choice of 80 as an asymptote was maybe mistaken, so let's allow the asymptote to be another parameter: # # $$ H_2(t; \alpha, \beta) = \frac{\alpha}{\beta-t} $$ # # If we define the model this way, we need to add a bound to the values that $\beta$ can take. Obviously it can't be smaller than or equal to the maximum observed duration. Generally, the cumulative hazard _must be positive and non-decreasing_. Otherwise the model fit will hit convergence problems. class TwoParamInverseTimeHazardFitter(ParametericUnivariateFitter): _fitted_parameter_names = ['alpha_', 'beta_'] # Sequence of (min, max) pairs for each element in x. 
None is used to specify no bound _bounds = [(0, None), (75.0001, None)] def _cumulative_hazard(self, params, times): alpha, beta = params return alpha / (beta - times) # + two_f = TwoParamInverseTimeHazardFitter() two_f.fit(T, E) two_f.print_summary() ax = itf.plot(ci_show=False, figsize=(8,5)) ax = naf.plot(ax=ax, ci_show=False) two_f.plot(ax=ax) plt.legend() # - # From the output, we see that the value of 76.55 is the suggested asymptote, that is: # # $$H_2(t) = \frac{16.50} {76.55 - t}$$ # # The curve also appears to track against the Nelson-Aalen model better too. Let's try one additional parameter, $\gamma$, some sort of measure of decay. # # $$H_3(t; \alpha, \beta, \gamma) = \frac{\alpha}{(\beta-t)^\gamma} $$ # # + from lifelines.fitters import ParametericUnivariateFitter class ThreeParamInverseTimeHazardFitter(ParametericUnivariateFitter): _fitted_parameter_names = ['alpha_', 'beta_', 'gamma_'] _bounds = [(0, None), (75.0001, None), (0, None)] # this is the only function we need to define. It always takes two arguments: # params: an iterable that unpacks the parameters you'll need in the order of _fitted_parameter_names # times: a numpy vector of times that will be passed in by the optimizer def _cumulative_hazard(self, params, times): a, b, c = params return a / (b - times) ** c # + three_f = ThreeParamInverseTimeHazardFitter() three_f.fit(T, E) three_f.print_summary() ax = itf.plot(ci_show=False, figsize=(8,5)) ax = naf.plot(ax=ax, ci_show=False) ax = two_f.plot(ax=ax, ci_show=False) ax = three_f.plot(ax=ax) plt.legend() # - # Our new asymptote is at $t\approx 100, \text{c.i.}=(87, 112)$. The model appears to fit the early times better than the previous models as well, however our $\alpha$ parameter has more uncertainty now. Continuing to add parameters isn't advisable, as we will overfit to the data. # # Why fit parametric models anyways? Taking a step back, we are fitting parameteric models and comparing them to the non-parametric Nelson-Aalen. 
Why not just always use the Nelson-Aalen model? # # 1) Sometimes we have scientific motivations to use a parametric model. That is, using domain knowledge, we may know the system has a parametric model and we wish to fit to that model. # # 2) In a parametric model, we are borrowing information from _all_ observations to determine the best parameters. To make this more clear, imagine take a single observation and changing it's value wildly. The fitted parameters would change as well. On the other hand, imagine doing the same for a non-parametric model. In this case, only the local survival function or hazard function would change. Because parametric models can borrow information from all observations, and there are much _fewer_ unknowns than a non-parametric model, parametric models are said to be more _statistical efficient._ # # 3) Extrapolation: non-parametric models are not easily extended to values outside the observed data. On the other hand, parametric models have no problem with this. However, extrapolation outside observed values is a very dangerous activity. # + fig, axs = plt.subplots(3, figsize=(7, 8), sharex=True) new_timeline = np.arange(0, 85) three_f = ThreeParamInverseTimeHazardFitter().fit(T, E, timeline=new_timeline) three_f.plot_hazard(label='hazard', ax=axs[0]).legend() three_f.plot_cumulative_hazard(label='cumulative hazard', ax=axs[1]).legend() three_f.plot_survival_function(label='survival function', ax=axs[2]).legend() fig.subplots_adjust(hspace=0) # Hide x labels and tick labels for all but bottom plot. for ax in axs: ax.label_outer() # - # ### 3-parameter Weibull distribution # # We can easily extend the built-in Weibull model (`lifelines.WeibullFitter`) to include a new _location_ parameter: # # $$ H(t) = \left(\frac{t - \theta}{\lambda}\right)^\rho $$ # # (When $\theta = 0$, this is just the 2-parameter case again). 
In *lifelines* custom models, this looks like: # + import autograd.numpy as np from autograd.scipy.stats import norm # I'm shifting this to exaggerate the effect T = T + 10 class ThreeParameterWeibullFitter(ParametericUnivariateFitter): _fitted_parameter_names = ["lambda_", "rho_", "theta_"] _bounds = [(0, None), (0, None), (0, T.min()-0.001)] def _cumulative_hazard(self, params, times): lambda_, rho_, theta_ = params return ((times - theta_) / lambda_) ** rho_ # - tpw = ThreeParameterWeibullFitter() tpw.fit(T, E) tpw.print_summary() ax = tpw.plot_cumulative_hazard(figsize=(8,5)) ax = NelsonAalenFitter().fit(T, E).plot(ax=ax, ci_show=False) # ### Inverse Gaussian distribution # # The inverse Gaussian distribution is another popular model for survival analysis. Unlike other model, it's hazard does not asympotically converge to 0, allowing for a long tail of survival. Let's model this, using the same parameterization from [Wikipedia](https://en.wikipedia.org/wiki/Inverse_Gaussian_distribution) # + from autograd.scipy.stats import norm class InverseGaussianFitter(ParametericUnivariateFitter): _fitted_parameter_names = ['lambda_', 'mu_'] def _cumulative_density(self, params, times): mu_, lambda_ = params v = norm.cdf(np.sqrt(lambda_ / times) * (times / mu_ - 1), loc=0, scale=1) + \ np.exp(2 * lambda_ / mu_) * norm.cdf(-np.sqrt(lambda_ / times) * (times / mu_ + 1), loc=0, scale=1) return v def _cumulative_hazard(self, params, times): return -np.log(1-self._cumulative_density(params, times)) # + from lifelines.datasets import load_rossi rossi = load_rossi() igf = InverseGaussianFitter() igf.fit(rossi['week'], rossi['arrest'], timeline=np.arange(1, 500)) igf.print_summary() igf.plot_hazard() # - # ### Bounded lifetimes using the beta distribution # # Maybe your data is bounded between 0 and some (unknown) upperbound M? That is, lifetimes can't be more than M. Maybe you know M, maybe you don't. 
n = 100 T = 5 * np.random.random(n)**2 T_censor = 10 * np.random.random(n)**2 E = T < T_censor T_obs = np.minimum(T, T_censor) # + from autograd_gamma import betainc class BetaFitter(ParametericUnivariateFitter): _fitted_parameter_names = ['alpha_', 'beta_', "m_"] _bounds = [(0, None), (0, None), (T.max(), None)] def _cumulative_density(self, params, times): alpha_, beta_, m_ = params return betainc(alpha_, beta_, times / m_) def _cumulative_hazard(self, params, times): return -np.log(1-self._cumulative_density(params, times)) # - beta_fitter = BetaFitter().fit(T_obs, E) beta_fitter.plot() beta_fitter.print_summary() # ### Gompertz class GompertzFitter(ParametericUnivariateFitter): # this parameterization is slightly different than wikipedia. _fitted_parameter_names = ['nu_', 'b_'] def _cumulative_hazard(self, params, times): nu_, b_ = params return nu_ * (np.expm1(times / b_)) ggf = GompertzFitter() ggf.fit(rossi['week'], rossi['arrest'], timeline=np.arange(1, 120)) ggf.print_summary() ggf.plot_survival_function() # ## Discrete survival models # # So far we have only been investigating continous time survival models, where times can take on any positive value. If we want to consider discrete survival times (for example, over the positive integers), we need to make a small adjustment. With discrete survival models, there is a slightly more complicated relationship between the hazard and cumulative hazard. This is because there are two ways to define the cumulative hazard. # # $$H_1(t) = \sum_i^t h(t_i) $$ # # $$H_2(t) = -\log(S(t))$$ # # We also no longer have the relationship that $h(t) = \frac{d H(t)}{dt}$, since $t$ is no longer continous. Instead, depending on which verion of the cumulative hazard you choose to use (inference will be the same), we have to redefine the hazard function in *lifelines*. 
# # $$ h(t) = H_1(t) - H_1(t-1) $$ # $$ h(t) = 1 - \exp(H_2(t) - H_2(t+1)) $$ # # [Here is an example](https://stats.stackexchange.com/questions/417303/what-is-the-likelihood-for-this-process) of a discrete survival model, that may not look like a survival model at first, where we use a redefined `_hazard` function. # # Looking for more examples of what you can build? See other unique survival models in the docs on [time-lagged survival](Modelling time-lagged conversion rates.ipynb)
examples/Piecewise Exponential Models and Creating Custom Models.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Assignment 4: Populations # In our last assignment, we used systematic experimentation to figure out how and why our hillclimber was working -- paying particular attention to how we modify selection pressure, and how that affects search trajectories over fitness landscapes of varying ruggedness. # # In this assignment, we'll continue to build out the basic backbone of our evolutionary algorithm, while applying the same lens of systematic investigation and exploration around implementation details. In particular, we'll expand our single-parent/single-child hillclimber into a full population, and explore how crossover and selection manifest themselves in our algorithm. # + # imports import numpy as np import copy import matplotlib.pyplot as plt plt.style.use('seaborn') import scikits.bootstrap as bootstrap import warnings warnings.filterwarnings('ignore') # Danger, <NAME>! (not a scalable hack, and may surpress other helpful warning other than for ill-conditioned bootstrapped CI distributions) import scipy.stats # for finding statistical significance import random # - # ### Q1: Implementing Individuals within a Population # As we beging to work with populations, it will get a increasingly messy to keep track of each individual's genome and fitness seperately as they move around the population and through generational time. To help simplify this, let's implement each individual within a population as an instance of an `Individual` class. To start, this class will be quite simple and will just be an object which has attributes for both the individual's `genome` and its `fitness`. 
Since we will only be using fitness functions that depend on a single individual in this assignment, let's also implement an `eval_fitness` for each individual that will evaluate and update its stored fitness value when called. class Individual: def __init__(self, fitness_function, bit_string_length): self.genome = [] for i in range(bit_string_length): self.genome.append(random.randint(0,1)) self.fitness_function = fitness_function self.fitness = self.eval_fitness() def eval_fitness(self): self.fitness = self.fitness_function(self.genome) # ### Q2: Modifying the hillclimber # Let's take the basic hillclimber from our last assignment and turn it into a full fleged evolutionary algorithm. Again, please feel free to leverage your prior work (or our prior solution sets) and copy-and-paste liberally. # # In particular, our first version of this algorithm will have a number of parents and a number of children given as parameters (a la evolutionary strategies), two-point crossover (of randomly selected parents), and truncation selection. Please also include arguemtns to this evolutionary_algorithm function which allow you dictate whether the algorithm will use mutation (the same single bit flip we used before), crossover, or both (for use in the following question). # # To get a finer-grain look at convergence rates of these different approaches, let's also modify the output of this function to return the fitness of the top individual at each generation. def evolutionary_algorithm(fitness_function=None, total_generations=100, num_parents=10, num_children=10, bit_string_length=10, num_elements_to_mutate=1, crossover=True): """ Evolutinary Algorithm (copied from the basic hillclimber in our last assignment) parameters: fitness_function: (callable function) that return the fitness of a genome given the genome as an input parameter (e.g. 
as defined in Landscape) total_generations: (int) number of total iterations for stopping condition num_parents: (int) the number of parents we downselect to at each generation (mu) num_children: (int) the number of children (note: parents not included in this count) that we baloon to each generation (lambda) bit_string_length: (int) length of bit string genome to be evoloved num_elements_to_mutate: (int) number of alleles to modify during mutation (0 = no mutation) crossover (bool): whether to perform crossover when generating children returns: fitness_over_time: (numpy array) track record of the top fitness value at each generation """ # initialize record keeping best = {'fitness':0, 'genome': []} fitness_over_time = [] best_over_time = [] # the initialization proceedure parents = [] children = [] for i in range(num_parents): parents.append({'fitness': 0, 'genome': []}) for j in range(bit_string_length): parents[i]['genome'].append(random.randint(0,1)) # get population fitness for i in range(num_parents): parents[i]['fitness'] = fitness_function(parents[i]['genome']) # if parents[i]['fitness'] > best['fitness']: # best = parents[i].copy() for i in range(total_generations): # repeat # the modification procedure children = [] # inheretance if not crossover: children = parents.copy() # crossover if crossover: while len(children)<num_children: random.shuffle(parents) for j in range(0, num_parents, 2): # select both parents first_parent = parents[j] second_parent = parents[j+1] # randomly select a point for crossover crossover_points = random.sample(range(0,bit_string_length), 2) crossover_points = sorted(crossover_points) # switch genes between these two points between the two individuals child1 = {'fitness': 0, 'genome': parents[j]['genome'][:crossover_points[0]] + parents[j+1]['genome'][crossover_points[0]:crossover_points[1]] + parents[j]['genome'][crossover_points[1]:]} child2 = {'fitness': 0, 'genome': parents[j+1]['genome'][:crossover_points[0]] + 
parents[j]['genome'][crossover_points[0]:crossover_points[1]] + parents[j+1]['genome'][crossover_points[1]:]} # Do we only keep the best of the parents and children? child1['fitness'] = fitness_function(child1['genome']) child2['fitness'] = fitness_function(child2['genome']) family = [child1, child2, parents[j].copy(), parents[j+1].copy()] family = sorted(family, key=lambda x: x['fitness'], reverse=True) children.append(family[0]) children.append(family[1]) # Or do we just keep the children no matter what # children.append(child1) # children.append(child2) if len(children) > num_children: children = children[:num_children] # mutation if num_elements_to_mutate > 0: # loop through each child for j in range(num_children): # store the current genome for comparison current = children[j].copy() # set fitness value back to zero since this will change children[j]['fitness'] = 0 # create storage array for elements to mutate elements_mutated = [] elements_mutated = random.sample(range(0,bit_string_length), num_elements_to_mutate) # loop through the array of indices to be mutated for k in range(len(elements_mutated)): # set the bit to the opposite of what it currently is if children[j]['genome'][elements_mutated[k]] == 0: children[j]['genome'][elements_mutated[k]] = 1 else: children[j]['genome'][elements_mutated[k]] = 0 # Do we only keep the good mutations? # set the child to the best of original form vs mutated form if current['fitness'] > fitness_function(children[j]['genome']): children[j] = current.copy() # or do we keep the mutation no matter what? 
# children[j] = children[j] # the assessement procedure for j in range(num_children): if children[j]['fitness'] == 0: children[j]['fitness'] = fitness_function(children[j]['genome']) # selection procedure parents = children.copy() parents = sorted(parents, key=lambda x: x['fitness'], reverse=True) parents = parents[:num_parents] # record keeping fitness_over_time.append(parents[0]['fitness']) best_over_time.append(parents[0].copy()) if (parents[0]['fitness']) > best['fitness']: best = parents[0].copy() return fitness_over_time # ### Q3: Running Experiments # Similar to last week, let's systemtically run and plot the results. To start let's use `50` parents (mu) and `50` children (lambda). For simplicity, let's go back to the one-max problem (and normalize the fitness, using `np.mean` isntead of `np.sum` for our fitness function in case we want to make comparisons across different genome lengths -- though for now, let's start with a bit string genome of length `200`). # # Also taking pieces from your experimental comparison scripts from last week, please run this for the the case of mutation only, crossover only, and employing both mutation and crossover. Run `20` independent repitions for each condition. 
# + num_runs = 20 total_generations = 100 num_elements_to_mutate = 1 bit_string_length = 200 num_parents = 50 num_children = 50 fitness_function = np.mean experiment_results = {} experiment_results['mutation_only'] = [] experiment_results['crossover_only'] = [] experiment_results['crossover_and_mutation'] = [] for i in range(num_runs): # mutation only m_only = evolutionary_algorithm(fitness_function, total_generations, num_parents, num_children, bit_string_length, num_elements_to_mutate, False) experiment_results['mutation_only'].append(m_only) # crossover only c_only = evolutionary_algorithm(fitness_function, total_generations, num_parents, num_children, bit_string_length, 0, True) experiment_results['crossover_only'].append(c_only) # both crossover and mutation c_and_m = evolutionary_algorithm(fitness_function, total_generations, num_parents, num_children, bit_string_length, num_elements_to_mutate, True) experiment_results['crossover_and_mutation'].append(c_and_m) # - # ### Q3b: Visualization # We will also modify our plotting scripts from before to show how fitness increases over generational time across these three treatments (with boostrapped confidence intervals as before). As we also did previously, please plot the three experimental conditions run above on the same figure for ease of comparisons. 
# +
def plot_mean_and_bootstrapped_ci_over_time(input_data = None, name = "change me", x_label = "change me", y_label="change me", y_limit = None):
    """
    Plot the per-generation mean of a solution metric with a bootstrapped
    confidence band.

    parameters:
    input_data: (numpy array of shape (generations, num_repetitions)) solution metric to plot
    name: (string) name for legend and plot title
    x_label: (string) x axis label
    y_label: (string) y axis label
    y_limit: currently unused (kept for interface compatibility)

    returns:
    None
    """
    generations = input_data.shape[0]
    CIs = []
    mean_values = []
    # per generation: mean across repetitions plus a bootstrapped CI of the mean
    for i in range(generations):
        mean_values.append(np.mean(input_data[i]))
        CIs.append(bootstrap.ci(input_data[i], statfunction=np.mean))
    mean_values = np.array(mean_values)
    # (removed a leftover debug `print(CIs)` that dumped every CI to stdout)
    high = []
    low = []
    for i in range(len(CIs)):
        low.append(CIs[i][0])
        high.append(CIs[i][1])
    low = np.array(low)
    high = np.array(high)
    fig, ax = plt.subplots()
    x = range(0, generations)  # generation index on the x axis
    ax.plot(x, mean_values, label=name)
    ax.fill_between(x, high, low, color='b', alpha=.2)
    ax.set_xlabel(x_label)
    ax.set_ylabel(y_label)
    ax.legend()
    if (name) and len(name) > 0:
        ax.set_title(name)


plot_mean_and_bootstrapped_ci_over_time(input_data=np.transpose(np.array(experiment_results['mutation_only'])), name='mutation_only', y_label="Fitness", x_label='Generations')
plot_mean_and_bootstrapped_ci_over_time(input_data=np.transpose(np.array(experiment_results['crossover_only'])), name='crossover_only', y_label="Fitness", x_label='Generations')
plot_mean_and_bootstrapped_ci_over_time(input_data=np.transpose(np.array(experiment_results['crossover_and_mutation'])), name='crossover_and_mutation', y_label="Fitness", x_label='Generations')
# -

# ### Q4: Analysis of Crossover
# Is crossover effective on this problem? How does crossover compare to mutation? How do the two interact?

# **Crossover is effective depending on implementation. If we only allow positive crossover, where we keep the two best family members (between 2 parents and 2 offspring), then we get a highly effective algorithm. If, on the other hand, we keep negative or positive, we get some jumping around regarding fitness on this problem.
This is because crossover makes rather large changes compared to a rather small mutation rate. Crossover can jump much farther in the fitness landscape than mutations. Mutation only shows a positive effect if we keep only positive mutations and do not allow negative ones. The reason that this strategy with both crossover and mutations works well for this problem is due to the simplicity of the fitness landscape. Crossover and mutation can interact together by building upon one another toward a more fit solution.
# The results above are obtained by only allowing positive crossover and positive mutation. This can change drastically when allowing both positive and negative crossover and/or mutation. Crossover makes the fitness bounce around pretty drastically in this case, and mutations make the fitness stay rather close to its original value but still bouncing around on a smaller scale.**

# ### Q5: Propose and Implement a New Crossover Modification
# We've implemented one specific type of crossover (two-point crossover with two randomly chosen parents). What other variations to crossover might you consider? Describe it in the box below, and what you anticipate as the effects of it (positive or negative).

# **I propose uniform crossover only allowing positive children. Uniform crossover involves traversing over each gene in the genome and with some probability choosing the first parent's or second parent's allele for it. In this case, we will choose each parent's allele with a 50% probability. I anticipate that the effects will be similar to what we see above. My reasoning is that since we are only allowing positive children, we will have a very similar trajectory depending on how lucky/unlucky we are with our probabilities on choosing the better allele.**

# ### Q5b: Let's test it!
# Copy your evolutionary_algorithm code and modify it to include your new experimental treatment. Run and visualize this treatment as above.
Feel free to alo pull in any statistical test scripts/functions from last week, should that help you to analyze and compare this new approach. # your new evolutionary_algorithm def evolutionary_algorithm(fitness_function=None, total_generations=100, num_parents=10, num_children=10, bit_string_length=10, num_elements_to_mutate=1, crossover=True): """ Evolutinary Algorithm (copied from the basic hillclimber in our last assignment) parameters: fitness_function: (callable function) that return the fitness of a genome given the genome as an input parameter (e.g. as defined in Landscape) total_generations: (int) number of total iterations for stopping condition num_parents: (int) the number of parents we downselect to at each generation (mu) num_children: (int) the number of children (note: parents not included in this count) that we baloon to each generation (lambda) bit_string_length: (int) length of bit string genome to be evoloved num_elements_to_mutate: (int) number of alleles to modify during mutation (0 = no mutation) crossover (bool): whether to perform crossover when generating children returns: fitness_over_time: (numpy array) track record of the top fitness value at each generation """ # initialize record keeping best = {'fitness':0, 'genome': []} fitness_over_time = [] best_over_time = [] # the initialization proceedure parents = [] children = [] for i in range(num_parents): parents.append({'fitness': 0, 'genome': []}) for j in range(bit_string_length): parents[i]['genome'].append(random.randint(0,1)) # get population fitness for i in range(num_parents): parents[i]['fitness'] = fitness_function(parents[i]['genome']) # if parents[i]['fitness'] > best['fitness']: # best = parents[i].copy() for i in range(total_generations): # repeat # the modification procedure children = [] # inheretance if not crossover: children = parents.copy() # crossover if crossover: while len(children)<num_children: random.shuffle(parents) for j in range(0, num_parents, 2): # select 
both parents first_parent = parents[j] second_parent = parents[j+1] # randomly select a point for crossover crossover_points = random.sample(range(0,bit_string_length), 2) crossover_points = sorted(crossover_points) # switch the tail end of the genomes at this point between the two individuals child1 = {'fitness': 0, 'genome': []} child2 = {'fitness': 0, 'genome': []} for k in range(bit_string_length): child1['genome'].append(parents[random.randint(j,j+1)]['genome'][k]) child2['genome'].append(parents[random.randint(j,j+1)]['genome'][k]) child1['fitness'] = fitness_function(child1['genome']) child2['fitness'] = fitness_function(child2['genome']) # Do we only keep the best of the parents and children? family = [child1, child2, parents[j].copy(), parents[j+1].copy()] family = sorted(family, key=lambda x: x['fitness'], reverse=True) children.append(family[0]) children.append(family[1]) # Or do we just keep the children no matter what # children.append(child1) # children.append(child2) if len(children) > num_children: children = children[:num_children] # mutation if num_elements_to_mutate > 0: # loop through each child for j in range(num_children): # store the current genome for comparison current = children[j].copy() # set fitness value back to zero since this will change children[j]['fitness'] = 0 # create storage array for elements to mutate elements_mutated = [] elements_mutated = random.sample(range(0,bit_string_length), num_elements_to_mutate) # loop through the array of indices to be mutated for k in range(len(elements_mutated)): # set the bit to the opposite of what it currently is if children[j]['genome'][elements_mutated[k]] == 0: children[j]['genome'][elements_mutated[k]] = 1 else: children[j]['genome'][elements_mutated[k]] = 0 # Do we only keep the good mutations? 
# set the child to the best of original form vs mutated form if current['fitness'] > fitness_function(children[j]['genome']): children[j] = current.copy() # or do we keep the mutation no matter what? # children[j] = children[j] # the assessement procedure for j in range(num_children): if children[j]['fitness'] == 0: children[j]['fitness'] = fitness_function(children[j]['genome']) # selection procedure parents = children.copy() parents = sorted(parents, key=lambda x: x['fitness'], reverse=True) parents = parents[:num_parents] # record keeping fitness_over_time.append(parents[0]['fitness']) best_over_time.append(parents[0].copy()) if (parents[0]['fitness']) > best['fitness']: best = parents[0].copy() return fitness_over_time # + # experimentation # num_runs = 20 # total_generations = 100 # num_elements_to_mutate = 1 # bit_string_length = 200 # num_parents = 50 # num_children = 50 # fitness_function = np.mean # experiment_results = {} experiment_results['mutation_only'] = [] experiment_results['crossover_only'] = [] experiment_results['crossover_and_mutation'] = [] for i in range(num_runs): # mutation only m_only = evolutionary_algorithm(fitness_function, total_generations, num_parents, num_children, bit_string_length, num_elements_to_mutate, False) experiment_results['mutation_only'].append(m_only) # crossover only c_only = evolutionary_algorithm(fitness_function, total_generations, num_parents, num_children, bit_string_length, 0, True) experiment_results['crossover_only'].append(c_only) # both crossover and mutation c_and_m = evolutionary_algorithm(fitness_function, total_generations, num_parents, num_children, bit_string_length, num_elements_to_mutate, True) experiment_results['crossover_and_mutation'].append(c_and_m) # - # visualization plot_mean_and_bootstrapped_ci_over_time(input_data=np.transpose(np.array(experiment_results['mutation_only'])), name='mutation_only', y_label="Fitness", x_label='Generations') 
plot_mean_and_bootstrapped_ci_over_time(input_data=np.transpose(np.array(experiment_results['crossover_only'])), name='crossover_only', y_label="Fitness", x_label='Generations') plot_mean_and_bootstrapped_ci_over_time(input_data=np.transpose(np.array(experiment_results['crossover_and_mutation'])), name='crossover_and_mutation', y_label="Fitness", x_label='Generations') # ### Q6: Well... What happened? # Describe the effect of your approach. If it did not work out as expected, please hypotheize as to why this is the case. If it did work out well, please comment on how broadly you think this finding might apply (or in what experimental conditions you might expect to come to a different conclusion). # **This worked out better than I thought it would. Crossover alone was able to find the optimal solution after about 50 generations. This means that the uniform crossover worked very well in maximizing sum of the bits. This makes sense to me now since I am choosing the best of the family members, not just the best of the children in my crossover loop. Meaning we have a decent chance to make fairly large jumps in our fitness landscape compared to 2-point crossover. This is because potentially, uniform crossover can produce a bit string of all ones on the first try, whereas, this isn't even a possibility for something like 2-point crossover in most cases. 2-point crossover can only work with the alleles that exist within the two parents that it is looking at. It will also only change the genes between the two random points it has chosen. This makes the uniform crossover a bit more exploratory in this regard, meaning that sometimes it will find better solutions. This fitness landscape may have been an advatageous place for positive uniform crossover because of it's simplicity.** # ### Q7: Implementing Tournament Selection # Aside from crossover, including populations also gives us the opportunity to explore alternate selection mechanisms. 
As mentioned in class, tournament selection is one of my go-to methods for parent selection, so let's implement it here. The tournament should rely on input parameters such as the `tournament_size` to determine how many solutions will compete in a given tournament or `num_tournament_winners` to determine how many individuals from each tournament will be selected to move on as parents of the next generation. Touraments can be selected from the population with or without replacement (specifically I'm referring to making sure all individuals appear in at least one tournament before any individual partakes in one for a second time), and here feel free to use whichever version is simpler for you to implement and understand (which I expect will be the case with replacement). def evolutionary_algorithm(fitness_function=None, total_generations=100, num_parents=10, num_children=10, bit_string_length=10, num_elements_to_mutate=1, crossover=True, tournament_size=4, num_tournament_winners=2): """ Evolutinary Algorithm (copied from the basic hillclimber in our last assignment) parameters: fitness_funciton: (callable function) that return the fitness of a genome given the genome as an input parameter (e.g. 
as defined in Landscape) total_generations: (int) number of total iterations for stopping condition num_parents: (int) the number of parents we downselect to at each generation (mu) num_childre: (int) the number of children (note: parents not included in this count) that we baloon to each generation (lambda) bit_string_length: (int) length of bit string genome to be evoloved num_elements_to_mutate: (int) number of alleles to modify during mutation (0 = no mutation) crossover: (bool) whether to perform crossover when generating children tournament_size: (int) number of individuals competing in each tournament num_tournament_winners: (int) number of individuals selected as future parents from each tournament (must be less than tournament_size) returns: fitness_over_time: (numpy array) track record of the top fitness value at each generation """ # initialize record keeping best = {'fitness':0, 'genome': []} fitness_over_time = [] best_over_time = [] # the initialization proceedure parents = [] children = [] for i in range(num_parents): parents.append({'fitness': 0, 'genome': []}) for j in range(bit_string_length): parents[i]['genome'].append(random.randint(0,1)) # get population fitness for i in range(num_parents): parents[i]['fitness'] = fitness_function(parents[i]['genome']) # if parents[i]['fitness'] > best['fitness']: # best = parents[i].copy() for i in range(total_generations): # repeat # the modification procedure children = [] # inheretance if not crossover: children = parents.copy() # crossover if crossover: while len(children)<num_children: random.shuffle(parents) for j in range(0, num_parents, 2): # select both parents first_parent = parents[j] second_parent = parents[j+1] # randomly select a point for crossover crossover_points = random.sample(range(0,bit_string_length), 2) crossover_points = sorted(crossover_points) # switch genes between these two points between the two individuals child1 = {'fitness': 0, 'genome': 
parents[j]['genome'][:crossover_points[0]] + parents[j+1]['genome'][crossover_points[0]:crossover_points[1]] + parents[j]['genome'][crossover_points[1]:]} child2 = {'fitness': 0, 'genome': parents[j+1]['genome'][:crossover_points[0]] + parents[j]['genome'][crossover_points[0]:crossover_points[1]] + parents[j+1]['genome'][crossover_points[1]:]} child1['fitness'] = fitness_function(child1['genome']) child2['fitness'] = fitness_function(child2['genome']) # Do we only keep the best of the parents and children? family = [child1, child2, parents[j].copy(), parents[j+1].copy()] family = sorted(family, key=lambda x: x['fitness'], reverse=True) children.append(family[0]) children.append(family[1]) # Or do we just keep the children no matter what # children.append(child1) # children.append(child2) if len(children) > num_children: children = children[:num_children] # mutation if num_elements_to_mutate > 0: # loop through each child for j in range(num_children): # store the current genome for comparison current = children[j].copy() # set fitness value back to zero since this will change children[j]['fitness'] = 0 # create storage array for elements to mutate elements_mutated = [] elements_mutated = random.sample(range(0,bit_string_length), num_elements_to_mutate) # loop through the array of indices to be mutated for k in range(len(elements_mutated)): # set the bit to the opposite of what it currently is if children[j]['genome'][elements_mutated[k]] == 0: children[j]['genome'][elements_mutated[k]] = 1 else: children[j]['genome'][elements_mutated[k]] = 0 # Do we only keep the good mutations? # set the child to the best of original form vs mutated form if current['fitness'] > fitness_function(children[j]['genome']): children[j] = current.copy() # or do we keep the mutation no matter what? 
# children[j] = children[j] # the assessement procedure for j in range(num_children): if children[j]['fitness'] == 0: children[j]['fitness'] = fitness_function(children[j]['genome']) # selection procedure parents = children.copy() random.shuffle(parents) new_parents = [] # loop for the number of parents that we want while len(new_parents) < num_parents: random.shuffle(parents) for j in range(0, num_parents, tournament_size): winners = [] for k in range(0, tournament_size): if len(parents) <= j+k: winners.append(parents[random.randint(0,len(parents)-1)]) else: winners.append(parents[j+k]) winners = sorted(winners, key=lambda x: x['fitness'], reverse=True) winners = winners[:num_tournament_winners] new_parents += winners parents = sorted(new_parents, key=lambda x: x['fitness'], reverse=True)[:num_parents] # record keeping fitness_over_time.append(parents[0]['fitness']) best_over_time.append(parents[0].copy()) if (parents[0]['fitness']) > best['fitness']: best = parents[0].copy() return fitness_over_time # ### Q8: Run and Plot # We discussed in class that the number of individuals participating in a tournament affects the amount of selection pressure it produces, presumably the same is true for the number of individuals selected from that tournament. So let's play around and generate some data to try and get to the bottom of it! In particular, let's run the following four experimental conditions: `10 select 5`, `20 select 10`, `20 select 5`, `50 select 10` (where the first number is how many individuals are in a tournament, and the second number is how many are selected from that tournament). Let's run these on the full-fledged evolutionary_algorithm including both mutation and crossover (for consistency and ease of grading please the original evolutionary algorithm implementation from `Q2` rather than your new implementation in `Q5` by either rerunning the prior code block, or by coping and pasting it in a new code block below). 
As above, please visualize the resulting fitnes over time and their boostrapped confidence intervals as well. # if wanting to copy the original evolutionary_algorithm implementation here (e.g. so you can run Kernel -> Restart & Run All without having to manually rerun the block above within that) ... # + num_runs = 20 total_generations = 100 num_elements_to_mutate = 1 bit_string_length = 200 num_parents = 50 num_children = 50 fitness_function = np.mean experiment_results['10_select_5'] = [] experiment_results['20_select_10'] = [] experiment_results['50_select_10'] = [] for i in range(num_runs): # 10 select 5 ten_select_5 = evolutionary_algorithm(fitness_function, total_generations, num_parents, num_children, bit_string_length, num_elements_to_mutate, True, 10, 5) experiment_results['10_select_5'].append(ten_select_5) # 20 select 10 twenty_select_10 = evolutionary_algorithm(fitness_function, total_generations, num_parents, num_children, bit_string_length, num_elements_to_mutate, True, 20, 10) experiment_results['20_select_10'].append(twenty_select_10) # both crossover and mutation fifty_select_10 = evolutionary_algorithm(fitness_function, total_generations, num_parents, num_children, bit_string_length, num_elements_to_mutate, True, 50, 10) experiment_results['50_select_10'].append(fifty_select_10) # - # plotting plot_mean_and_bootstrapped_ci_over_time(input_data=np.transpose(np.array(experiment_results['10_select_5'])), name='10_select_5', y_label="Fitness", x_label='Generations') plot_mean_and_bootstrapped_ci_over_time(input_data=np.transpose(np.array(experiment_results['20_select_10'])), name='20_select_10', y_label="Fitness", x_label='Generations') plot_mean_and_bootstrapped_ci_over_time(input_data=np.transpose(np.array(experiment_results['50_select_10'])), name='50_select_10', y_label="Fitness", x_label='Generations') # ### Q9: Analysis # What do these results suggest matter about the values of the tournnament size and the number of winners selected? 
Is this suprising? # **The smaller tournament size and selection seems to do the best as far as accuracy goes. I would think that keeping the tournament size smaller would result in more variation as it seems to do here with the confidence intervals showing a bit more exploratory approach. this could be due to the chances that lower fitness genomes are compared to one another is higher if there aren't as many comparisons being made. Having the tournament size larger means that more items are compared to one another giving a higher chance of being compared to a better performing genome. Having the winners be a smaller fraction of the tournament increases selection pressure and exploitation as well. If we have more individuals being kept from a tournament there is a higher chance of keeping lower performing individuals that are exploring a different space in the landscape. ** # ### Q10: Future Work # Again, we've just scratched the tip of the iceberg in terms of understanding or efficiently employing populations in evolutionary algorithms. If you were to run one more experiment here (i.e. another question in this assignment) what would you test next? If you were to deeply investigate some phenomenon around populations/selection/crossover (i.e. spend 6 weeks on a course project) what broader topic might you dig into? # **I would want to play around with the fitness landscape / problem space. I think that having such a simple fitness landscape makes it difficult to understand the effects of these different selection and variation techniques. We can see some of this if we look back at the last assignment. If I was to deeply investigate some phenomenon herein for a number of weeks, I think I would want to investigate selection and/or crossover when applied to a dynamic fitness landscape. What might be the best technique(s) to use in a scenario where our solutions are coevolving along with the fitness landscape. 
Should we use a dynamic technique for the dynamic landscapes? Is there a "best" way to go about it? Does it depend on the coevolution that is happening?** # ### Congratulations, you made it to the end! # Nice work -- and hopefully you're starting to get the hang of these! # # Please save this file as a .ipynb, and also download it as a .pdf, uploading **both** to blackboard to complete this assignment. # # For your submission, please make sure that you have renamed this file (and that the resulting pdf follows suit) to replace `[netid]` with your UVM netid. This will greatly simplify our grading pipeline, and make sure that you receive credit for your work. # #### Academic Integrity Attribution # During this assignment I collaborated with: # **Just me**
assignment_4/Assignment_4_Populations_jdonova6.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] _cell_guid="d7a71f50-2d95-5715-7c97-8e4baf788bfb"
# # Analysis of the MNIST dataset
#
# In this analysis we will apply the following methods to the MNIST classification problem:
#
# 1) Random forest classification
#
# 2) Principal component analysis (PCA) + k-nearest neighbours (kNN)

# + _cell_guid="b79fc28f-94bc-43f8-8aa6-71315abc8adf"
# load the modules
import numpy as np
import pandas as pd
import seaborn as sb
sb.set_style("dark")
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.decomposition import PCA
# %pylab inline

# + _cell_guid="8bcd0340-39fa-6b3c-9b8d-3e9a18f079d1"
# We use this function in order to evaluate a classifier. It trains on a fraction of the data
# corresponding to split_ratio, and evaluates on the rest of the data
def evaluate_classifier(clf, data, target, split_ratio):
    """Fit clf on a split_ratio fraction of (data, target) and return its accuracy on the held-out rest."""
    trainX, testX, trainY, testY = train_test_split(data, target, train_size=split_ratio, random_state=0)
    clf.fit(trainX, trainY)
    return clf.score(testX, testY)

# + _cell_guid="a747b6e0-cb46-7b34-8f29-092ef30733cb"
# read in the data
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
target = train["label"]
# fix: drop("label", 1) relied on the positional `axis` argument, which was removed in pandas 2.0
train = train.drop(columns="label")

# + _cell_guid="f121719e-4dfc-1dec-f071-639c7a22a944"
# plot some of the numbers
figure(figsize(5,5))
for digit_num in range(0,64):
    subplot(8,8,digit_num+1)
    # fix: .as_matrix() was removed in pandas 1.0 -- use .to_numpy() instead
    grid_data = train.iloc[digit_num].to_numpy().reshape(28,28)  # reshape from 1d to 2d pixel array
    plt.imshow(grid_data, interpolation = "none", cmap = "bone_r")
    xticks([])
    yticks([])

# + _cell_guid="bdc541b8-abb8-3507-483c-ae9940dfb9b2"
# check performance of random forest classifier, as function of number of estimators
# here we only take 1000 data points to train
n_estimators_array = np.array([1,5,10,50,100,200,500])
n_samples = 10
n_grid = len(n_estimators_array)
score_array_mu = np.zeros(n_grid)
score_array_sigma = np.zeros(n_grid)
j = 0
for n_estimators in n_estimators_array:
    score_array = np.zeros(n_samples)
    for i in range(0, n_samples):
        clf = RandomForestClassifier(n_estimators=n_estimators, n_jobs=1, criterion="gini")
        score_array[i] = evaluate_classifier(clf, train.iloc[0:1000], target.iloc[0:1000], 0.8)
    score_array_mu[j], score_array_sigma[j] = mean(score_array), std(score_array)
    j = j + 1

# + _cell_guid="26b82ddd-10cc-47d5-0c10-bfef4b60b9f1"
# it looks like the performance saturates around 50-100 estimators
figure(figsize(7,3))
errorbar(n_estimators_array, score_array_mu, yerr=score_array_sigma, fmt='k.-')
xscale("log")
xlabel("number of estimators", size=20)
ylabel("accuracy", size=20)
xlim(0.9, 600)
grid(which="both")

# + [markdown] _cell_guid="d22b7510-3c48-5b5e-90bc-6e5d8294fb2d"
# Are there any feature that are particularly important? We can check this using clf.feature_importances:

# + _cell_guid="d4801307-c5fe-f96f-66a3-fc2db8ac7e1e"
# NOTE(review): this reuses the last clf fitted in the loop above (500 estimators, 1000 samples)
importances = clf.feature_importances_
indices = np.argsort(importances)[::-1]
print("Feature ranking:")
for f in range(0, 10):
    print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
# Plot the feature importances
figure(figsize(7,3))
plot(indices[:], importances[indices[:]], 'k.')
yscale("log")
xlabel("feature", size=20)
ylabel("importance", size=20)

# + [markdown] _cell_guid="3427c56a-7620-22dc-909a-2cf2fa821ff5"
# It looks like there are no significantly important features (i.e., pixels) in the original data. Next, let us try to decompose the data using a principal component analysis (PCA):

# + _cell_guid="ee4f8e1c-ccc3-193c-1763-e1fd1d47852b"
pca = PCA(n_components=2)
pca.fit(train)
transform = pca.transform(train)
figure(figsize(6,5))
plt.scatter(transform[:,0], transform[:,1], s=20, c=target, cmap="nipy_spectral", edgecolor="None")
plt.colorbar()
clim(0,9)
xlabel("PC1")
ylabel("PC2")

# + [markdown] _cell_guid="6c302666-9ba8-18d9-1129-cc45ec68f715"
# It is interesting to see how well PCA separates the feature space into visible clusters already for 2 components. Next, let's look at what happens if we increase the number of components in PCA. In particular, we would like to know how many components are needed to capture most of the variance in the data. For this we will use the pca.explained_variance_ratio function.

# + _cell_guid="627aeeab-eddd-1aba-d19a-537e74c453c6"
n_components_array = [1,2,3,4,5,10,20,50,100,200,500]
vr = np.zeros(len(n_components_array))
i = 0
for n_components in n_components_array:
    pca = PCA(n_components=n_components)
    pca.fit(train)
    vr[i] = sum(pca.explained_variance_ratio_)
    i = i + 1

# + _cell_guid="b737cb19-bcaa-cd24-37b8-6ddb3e3ee684"
figure(figsize(8,4))
plot(n_components_array, vr, 'k.-')
xscale("log")
ylim(9e-2, 1.1)
yticks(linspace(0.2, 1.0, 9))
xlim(0.9)
grid(which="both")
xlabel("number of PCA components", size=20)
ylabel("variance ratio", size=20)

# + [markdown] _cell_guid="695af730-5d79-7c8c-97cb-a1fb2f8068bc"
# We see that ~100 PCA components are needed to capture ~90% of the variance in the data. This seems a lot of components. Maybe the more important question is: How good is our prediction as a function of number of components? Let's look at this next. We will train a kNN classifier on the PCA output.

# + _cell_guid="cc65a69f-7073-b937-d975-88c98b5c66d2"
clf = KNeighborsClassifier()
n_components_array = [1,2,3,4,5,10,20,50,100,200,500]
score_array = np.zeros(len(n_components_array))
i = 0
for n_components in n_components_array:
    pca = PCA(n_components=n_components)
    pca.fit(train)
    # NOTE(review): PCA is fitted on the full training set but only the first
    # 1000 rows are scored, mirroring the random-forest experiment above
    transform = pca.transform(train.iloc[0:1000])
    score_array[i] = evaluate_classifier(clf, transform, target.iloc[0:1000], 0.8)
    i = i + 1

# + _cell_guid="68e04bb4-c783-4cba-cd6e-b3db3a5a279a"
figure(figsize(8,4))
plot(n_components_array, score_array, 'k.-')
xscale('log')
xlabel("number of PCA components", size=20)
ylabel("accuracy", size=20)
grid(which="both")

# + [markdown] _cell_guid="73193bdb-b039-e0d6-6db4-689799382f53"
# The accuracy seems to saturate at ~90% (roughly matching the performance of the random forest classifier) for >~20 PCA components. In fact, the accuracy even seems to drop for much larger numbers, even though a larger number of PCA components captures more of the variance in the data, as seen in the plot above. The drop in accuracy is probably due to overfitting.
#
# Finally, we will train on the whole training set and prepare a submit file for the Kaggle competition.

# + _cell_guid="ed495d33-8254-4058-ef27-dae03ccf4d30"
# PCA + kNN
pca = PCA(n_components=50)
pca.fit(train)
transform_train = pca.transform(train)
transform_test = pca.transform(test)
clf = KNeighborsClassifier()
clf.fit(transform_train, target)
results = clf.predict(transform_test)
# prepare submit file
np.savetxt('results.csv', np.c_[range(1,len(test)+1), results], delimiter=',', header = 'ImageId,Label', comments = '', fmt='%d')
# Kaggle score 0.97343

# + [markdown] _cell_guid="b2aea948-1727-e33c-040a-57e1e749ccc8"
# This gives a score on Kaggle of 0.97343 - not too bad! Using the random forest classifier:

# + _cell_guid="d441f1ae-7ba7-9001-23b4-c72533568aec"
# random forest classification
clf = RandomForestClassifier(n_estimators=100, n_jobs=1, criterion="gini")
clf.fit(train, target)
results = clf.predict(test)
# prepare submit file
np.savetxt('results.csv', np.c_[range(1,len(test)+1), results], delimiter=',', header = 'ImageId,Label', comments = '', fmt='%d')
# Kaggle score ~0.96

# + [markdown] _cell_guid="6d476c2d-e855-0ca5-c444-649ba49af635"
# This gives a slightly worse score (0.96).
#
# Any feedback on my analysis is more than welcome!
2 digit recognizer/comparing-random-forest-pca-and-knn.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Import HyperGBM `make_experiment` utility and get it's docstring from hypergbm import make_experiment # ?make_experiment
hypergbm/examples/00.get-doc.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
text = """<NAME> 1258 yılında Söğüt’te doğdu. Osman Bey 1 Ağustos 1326’da Bursa’da hayatını kaybetmiştir.1281 yılında Osman Bey 23 yaşında iken Ahi teşkilatından olan Şeyh Edebali’nin kızı Malhun Hatun ile evlendi.Bu evlilikten daha sonra Osmanlı Devleti’nin başına geçecek olan Orhan Gazi doğdu.1281 yılında Osman Beyin babası Ertuğrul Bey 90 yaşında vefat etmiştir.1326’da Osman Bey, Bursa’yı kuşattı. Fakat Osman beyin rahatsızlanması üzerine kuşatmaya Orhan Bey devam etti. Bursa alındıktan sonra başkent yapılmıştır.Osman Gazi son yıllarında yaşının ilerlemesi ve gut hastalığı yüzünden beylik idaresini oğlu olan Orhan Bey'e bırakmıştı.Osmanlı Beyliğinin ilk fethettiği ada İmralı Adasıdır. İmralı Adası 1308 yılında Osman Bey tarafından alınmıştır.İlk Osmanlı parası Osman Bey tarafından bakır olarak akçe adı ile 1324 yılında bastırılmıştır.Osmanlı Beyliğinin ilk başkenti Söğüttür.Osmanlı tarihinde ilk savaş, 1284 yılında Bizans tekfurlarıyla yapılan Ermeni Beli savaşıdır.Osman Beyin ele geçirdiği ilk kale 1285 yılında fethedilen Kolca Hisar Kalesi’dir.Osmanlı beyliğinin ilk kadısı <NAME> döneminde atanan Dursun Fakih’tir.Osman Bey 1288 yılında Karacahisarı fethetti. Osman Bey 1299 yılında Bilecik'i fethetti.<NAME>, babası Ertuğrul Gazi'den yaklaşık 4.800 kilometrekare olarak devraldığı Osmanlı toprağını oğlu Orhan Gazi'ye 16.000 kilometrekare olarak devretmiştir.<NAME>'in vefatı sonrası yerine Orhan Bey geçti."""

word = "<NAME>"


def find_word_starting_index(word, text):
    """Return the index of the first occurrence of *word* in *text*.

    Prints the index as a side effect; returns -1 when *word* is absent,
    matching str.find semantics.
    """
    start_index = text.find(word)
    print(start_index)
    return start_index
# -

x = find_word_starting_index(word, text)

# Show the text from the located word onwards, then from a fixed offset.
text[x:]

text[717:]
enelpi/py_example_det_index/find_index.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Apple stock 'Close' value prediction

import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# fix: removed `from pandas import datetime` -- it no longer exists in modern
# pandas and the name was immediately shadowed by `import datetime` below anyway
import math, time
import itertools
from sklearn import preprocessing
import datetime
from operator import itemgetter
from sklearn.metrics import mean_squared_error
from math import sqrt
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.recurrent import LSTM

# ## Stock data function configured to drop all columns except 'Open','High' and 'Close'
def get_stock_data(stock_name, normalized=0):
    """Load <stock_name>.csv and return a DataFrame with only Open, High and Close.

    `normalized` is accepted for API compatibility but currently unused.
    """
    col_names = ['Date','Open','High','Low','Close','Volume']
    # generalized: read the file named after the requested stock instead of a
    # hard-coded "apple.csv" (identical behavior for the existing 'apple' caller)
    stocks = pd.read_csv(stock_name + ".csv", header=0, names=col_names)
    df = pd.DataFrame(stocks)
    df.drop(df.columns[[0,3,5]], axis=1, inplace=True)  # drop Date, Low, Volume
    return df

# ## Loading Apple stock data for the last 3 years
stock_name = 'apple'
df = get_stock_data(stock_name, 0)
df.tail()

# Scale prices down so the relu output layer trains on small positive targets.
df['High'] = df['High'] / 1000
df['Open'] = df['Open'] / 1000
df['Close'] = df['Close'] / 1000
df.head(5)

# ## Reshape data & dividing it into data set and testing set
# ## configure to accept 3 features.
# ## Returns training data set, training labels, testing data set, testing labels
def load_data(stock, seq_len):
    """Slice `stock` into overlapping windows of length seq_len+1 and split 90/10.

    Labels are the last feature of each window's final row; inputs are the
    preceding seq_len rows, reshaped to (samples, seq_len, n_features).
    """
    amount_of_features = len(stock.columns)
    data = stock.values  # pd.DataFrame(stock)
    sequence_length = seq_len + 1
    result = []
    for index in range(len(data) - sequence_length):
        result.append(data[index: index + sequence_length])

    result = np.array(result)
    row = round(0.9 * result.shape[0])  # 90% of the windows form the training set
    train = result[:int(row), :]
    x_train = train[:, :-1]
    y_train = train[:, -1][:,-1]
    x_test = result[int(row):, :-1]
    y_test = result[int(row):, -1][:,-1]

    x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], amount_of_features))
    x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], amount_of_features))

    return [x_train, y_train, x_test, y_test]

# ## Building model functions
def _build_lstm_model(layers, optimizer):
    """Shared builder: two stacked LSTMs plus two Dense layers, compiled with `optimizer`.

    fix: build_model and build_modelrmsprop were identical copies apart from
    the optimizer string; the duplication is factored out here.
    """
    d = 0.2  # dropout rate
    model = Sequential()
    # return_sequences=True: the full output sequence is fed into the next LSTM layer
    model.add(LSTM(128, input_shape=(layers[1], layers[0]), return_sequences=True))
    model.add(Dropout(d))
    # return_sequences=False: only the final timestep output goes to the Dense head
    model.add(LSTM(64, input_shape=(layers[1], layers[0]), return_sequences=False))
    model.add(Dropout(d))
    model.add(Dense(16, kernel_initializer='uniform', activation='relu'))  # to aggregate the data
    model.add(Dense(1, kernel_initializer='uniform', activation='relu'))
    model.compile(loss='mse', optimizer=optimizer, metrics=['accuracy'])  # mean squared error loss
    return model

def build_model(layers):
    """Build the LSTM model compiled with the adam optimizer."""
    return _build_lstm_model(layers, 'adam')

def build_modelrmsprop(layers):
    """Build the identical LSTM model compiled with the rmsprop optimizer."""
    return _build_lstm_model(layers, 'rmsprop')

# ## Setting X and Y for training and testing
window = 5
X_train, y_train, X_test, y_test = load_data(df[::-1], window)  # all items in the array, reversed
print("X_train", X_train.shape)
print("y_train", y_train.shape)
print("X_test", X_test.shape)
print("y_test", y_test.shape)

# ## Loading the model sequence structure
model = build_model([3, window, 1])
modelrmsprop = build_modelrmsprop([3, window, 1])

# ## Executing the model & RMS/RMSE results
# ## root mean square error
model.fit(X_train, y_train, batch_size=512, epochs=500, validation_split=0.1, verbose=0)

modelrmsprop.fit(X_train, y_train, batch_size=512, epochs=500, validation_split=0.1, verbose=0)

# +
trainScore = model.evaluate(X_train, y_train, verbose=0)
print('Train Score: MSE = ', trainScore[0], ' RMSE = ', math.sqrt(trainScore[0]))

testScore = model.evaluate(X_test, y_test, verbose=0)
print('Test Score: MSE = ', testScore[0], ' RMSE = ', math.sqrt(testScore[0]))
# -

# # TEST THE MODEL
# Undo the /1000 scaling applied above before plotting.
p = model.predict(X_test)*1000
y_test *= 1000
pRmsprop = modelrmsprop.predict(X_test)*1000

# ## Predictions vs Real results

# +
import matplotlib.pyplot as plt2

plt2.plot(pRmsprop, color='red', label='rmsprop prediction')
plt2.plot(p, color='green', label='adam prediction')
plt2.plot(y_test, color='blue', label='Real Data')
plt2.legend(loc='upper left')
plt2.show()
# -
Original Project-Copy1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernel_info:
#     name: python3
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Load the packages and create the dataframe from the CSV in the Resources folder

# +
# Dependencies and Setup
import pandas as pd

# File to Load (Remember to Change These)
file_to_load = "Resources/purchase_data.csv"

# Read Purchasing File and store into Pandas data frame
purchase_data = pd.read_csv(file_to_load)
purchase_data.head(10)
# -

# ## Player Count
# * Display the total number of players

total = purchase_data["SN"].nunique()
print(f'Total Number of Players: {total}')

# ## Purchasing Analysis (Total)
# * Run basic calculations to obtain number of unique items, average price, etc.
#
# * Create a summary data frame to hold the results
#
# * Give the displayed data cleaner formatting
#
# * Display the summary data frame

# +
# Find the number of unique items
nuniqueitems = purchase_data["Item ID"].nunique()
# Find the Avg price
avgprc = purchase_data["Price"].mean()
# Find the total Purchase Count
numpurch = purchase_data["Purchase ID"].nunique()
# Calculate the Total Revenue
totrev = purchase_data["Price"].sum()
# Create a data frame from each table
sumdf = pd.DataFrame({"Number of Unique Items":[nuniqueitems],"Average Price":[avgprc],"Number of Purchases":[numpurch], "Total Revenue":[totrev]})
# Format the dataframe
sumdf["Average Price"] = sumdf["Average Price"].map("${:.2f}".format)
sumdf["Total Revenue"] = sumdf["Total Revenue"].map("${:.2f}".format)
# Print it below
sumdf
# -

# ## Gender Demographics
# * Percentage and Count of Male Players
#
# * Percentage and Count of Female Players
#
# * Percentage and Count of Other / Non-Disclosed

# +
# Getting the counts and percentages in tables
demo_df = purchase_data.groupby(["Gender"]).nunique()
demo_df_tot = demo_df["SN"].sum()
demo_df = demo_df["SN"]
demo_pct = (demo_df/demo_df_tot)
# Merging, formatting, ordering, display
demo_sum = pd.merge(demo_df, demo_pct, on='Gender')
demo_sum = demo_sum.rename(columns={"SN_x":"Total Count","SN_y":"Percentage of Players"})
demo_sum["Percentage of Players"] = demo_sum["Percentage of Players"].map("{:.2%}".format)
demo_sum = demo_sum.sort_values("Total Count", ascending=False)
demo_sum
# -

# ## Purchasing Analysis (Gender)
# * Run basic calculations to obtain purchase count, avg. purchase price, avg. purchase total per person etc. by gender
#
# * Create a summary data frame to hold the results
#
# * Give the displayed data cleaner formatting
#
# * Display the summary data frame

# +
# NOTE(review): the bare .mean()/.sum() calls on mixed-type groupbys below rely
# on the pre-pandas-2.0 default of silently dropping non-numeric columns --
# confirm the installed pandas version before rerunning.
# Get the demographics total purchase counts
demo_purch = purchase_data.groupby(["Gender"]).nunique()
demo_purch = demo_purch["Purchase ID"]
# Create a table for each calculation
demo_avgprc = purchase_data.groupby(["Gender"]).mean()
demo_avgprc = demo_avgprc["Price"]
demo_totpur = purchase_data.groupby(["Gender"]).sum()
demo_totpur = demo_totpur["Price"]
tot_pur_per_person = purchase_data.groupby(["SN","Gender"]).sum()
avg_tot_per_per_demo = tot_pur_per_person.groupby("Gender").mean()
avg_tot_per_per_demo = avg_tot_per_per_demo["Price"]
# Join them together for the final dataframe
demo_sum_purch = pd.merge(demo_purch, demo_avgprc, on='Gender')
demo_sum_purch = pd.merge(demo_sum_purch, demo_totpur, on='Gender')
demo_sum_purch = pd.merge(demo_sum_purch, avg_tot_per_per_demo, on='Gender')
# Format the dataframe
demo_sum_purch = demo_sum_purch.rename(columns={"Purchase ID":"Purchase Count","Price_x":"Average Purchase Price", "Price_y":"Total Purchase Value","Price":"Avg Total Purchase per Person"})
demo_sum_purch["Average Purchase Price"] = demo_sum_purch["Average Purchase Price"].map("${:.2f}".format)
demo_sum_purch["Total Purchase Value"] = demo_sum_purch["Total Purchase Value"].map("${:.2f}".format)
demo_sum_purch["Avg Total Purchase per Person"] = demo_sum_purch["Avg Total Purchase per Person"].map("${:.2f}".format)
demo_sum_purch = demo_sum_purch.sort_values("Purchase Count", ascending=False)
# Display the dataframe
demo_sum_purch
# -

# ## Age Demographics
# * Establish bins for ages
#
# * Categorize the existing players using the age bins. Hint: use pd.cut()
#
# * Calculate the numbers and percentages by age group
#
# * Create a summary data frame to hold the results
#
# * Display Age Demographics Table

# +
# Create the bins and group names
bins = [0, 9, 14, 19, 24, 29, 34, 39, 150]
grp_names = ["<10","10-14","15-19","20-24","25-29","30-34","35-39","40+"]
# Add the groups to the dataframe
purchase_data["Groups"] = pd.cut(purchase_data["Age"], bins, labels=grp_names, include_lowest=True)
# Get the total for the % calculation
tot_people = purchase_data["SN"].nunique()
# Created a percentage table
age_demo = purchase_data.groupby(["Groups"]).nunique()
age_demo = age_demo["SN"]
age_demo_pct = (age_demo/tot_people)
# Merged them together and formatted the dataframe
# (fix: dropped a stray bare `age_demo_pct` display line that had no effect mid-cell)
summary = pd.merge(age_demo, age_demo_pct, on='Groups')
summary = summary.rename(columns={"SN_x":"Total Count","SN_y":"Percentage of Players"})
summary["Percentage of Players"] = summary["Percentage of Players"].map("{:.2%}".format)
# Display the results
summary
# -

# ## Purchasing Analysis (Age)
# * Bin the purchase_data data frame by age
#
# * Run basic calculations to obtain purchase count, avg. purchase price, avg. purchase total per person etc. in the table below
#
# * Create a summary data frame to hold the results
#
# * Give the displayed data cleaner formatting
#
# * Display the summary data frame

# +
# Create the bins and group names
bins = [0, 9, 14, 19, 24, 29, 34, 39, 150]
grp_names = ["<10","10-14","15-19","20-24","25-29","30-34","35-39","40+"]
# Apply the groups to the original data frame
purchase_data["Groups"] = pd.cut(purchase_data["Age"], bins, labels=grp_names, include_lowest=True)
# Calculate the different columns by table
pur_cnt = purchase_data.groupby(["Groups"]).nunique()
pur_cnt = pur_cnt["Purchase ID"]
grp_avgprc = purchase_data.groupby(["Groups"]).mean()
grp_avgprc = grp_avgprc["Price"]
grp_totprc = purchase_data.groupby(["Groups"]).sum()
grp_totprc = grp_totprc["Price"]
tot_pur_per_pergrp = purchase_data.groupby(["SN","Groups"]).sum()
avg_tot_per_per_grp = tot_pur_per_pergrp.groupby("Groups").mean()
avg_tot_per_per_grp = avg_tot_per_per_grp["Price"]
# Merge the tables together to form the final dataframe
grp_sum_purch = pd.merge(pur_cnt, grp_avgprc, on='Groups')
grp_sum_purch = pd.merge(grp_sum_purch, grp_totprc, on='Groups')
grp_sum_purch = pd.merge(grp_sum_purch, avg_tot_per_per_grp, on='Groups')
# Format the data appropriately
grp_sum_purch = grp_sum_purch.rename(columns={"Purchase ID":"Purchase Count","Price_x":"Average Purchase Price", "Price_y":"Total Purchase Value","Price":"Avg Total Purchase per Person"})
grp_sum_purch["Average Purchase Price"] = grp_sum_purch["Average Purchase Price"].map("${:.2f}".format)
grp_sum_purch["Total Purchase Value"] = grp_sum_purch["Total Purchase Value"].map("${:.2f}".format)
grp_sum_purch["Avg Total Purchase per Person"] = grp_sum_purch["Avg Total Purchase per Person"].map("${:.2f}".format)
# Display the summary data frame
grp_sum_purch
# -

# ## Top Spenders
# * Run basic calculations to obtain the results in the table below
#
# * Create a summary data frame to hold the results
#
# * Sort the total purchase value column in descending order
#
# * Give the displayed data cleaner formatting
#
# * Display a preview of the summary data frame

# +
# Get the purchase count by SN, sorted to get the highest Purchase Count
SN_purch = purchase_data.groupby(["SN"]).nunique()
SN_purch = SN_purch.sort_values("Purchase ID", ascending=False)
SN_purch = SN_purch["Purchase ID"]
# tables for each calculation
SN_mean = purchase_data.groupby(["SN"]).mean()
SN_mean = SN_mean["Price"]
SN_tot = purchase_data.groupby(["SN"]).sum()
SN_tot = SN_tot["Price"]
# Merge the tables to the final dataframe
SN_sum_purch = pd.merge(SN_purch, SN_mean, on='SN')
SN_sum_purch = pd.merge(SN_sum_purch, SN_tot, on='SN')
# Rename and sort the table, must sort first as formatting affects the ability to use the numbers as numbers
SN_sum_purch = SN_sum_purch.rename(columns={"Purchase ID":"Purchase Count","Price_x":"Average Purchase Price", "Price_y":"Total Purchase Value"})
SN_sum_purch = SN_sum_purch.sort_values("Total Purchase Value", ascending=False)
# Format numbers
SN_sum_purch["Average Purchase Price"] = SN_sum_purch["Average Purchase Price"].map("${:.2f}".format)
SN_sum_purch["Total Purchase Value"] = SN_sum_purch["Total Purchase Value"].map("${:.2f}".format)
# Display the summary data frame, using the .head function as the data is sorted to capture the top 5 with this
SN_sum_purch.head(5)
# -

# ## Most Popular Items
# * Retrieve the Item ID, Item Name, and Item Price columns
#
# * Group by Item ID and Item Name. Perform calculations to obtain purchase count, average item price, and total purchase value
#
# * Create a summary data frame to hold the results
#
# * Sort the purchase count column in descending order
#
# * Give the displayed data cleaner formatting
#
# * Display a preview of the summary data frame

# +
# Get the Purchase count by Item ID, sorted descending to start the table
ItemID_purch = purchase_data.groupby(["Item ID"]).nunique()
ItemID_purch = ItemID_purch.sort_values("Purchase ID", ascending=False)
ItemID_purch = ItemID_purch["Purchase ID"]
# Created a table for each calculation
ItemID_name = purchase_data.groupby(["Item Name","Item ID"]).count()
ItemID_name = ItemID_name.reset_index()  # reset the index so that I can use the columns
ItemID_name = ItemID_name[["Item Name","Item ID"]]
# fix: dropped `ItemID_name.sort_values("Item ID", ascending=False)` -- its
# result was discarded, so the statement had no effect
ItemID_mean = purchase_data.groupby(["Item ID"]).mean()
ItemID_mean = ItemID_mean["Price"]
ItemID_tot = purchase_data.groupby(["Item ID"]).sum()
ItemID_tot = ItemID_tot["Price"]
# (fix: dropped a stray bare `ItemID_tot` display line that had no effect mid-cell)
# Merge the tables in one dataframe
ItemID_purch = pd.merge(ItemID_purch, ItemID_name, on='Item ID')
ItemID_sum_purch = pd.merge(ItemID_purch, ItemID_mean, on='Item ID')
ItemID_sum_purch = pd.merge(ItemID_sum_purch, ItemID_tot, on='Item ID')
# Rename the columns
ItemID_sum_purch = ItemID_sum_purch.rename(columns={"Purchase ID":"Purchase Count","Price_x":"Average Purchase Price", "Price_y":"Total Purchase Value"})
# Sort by Purchase Count descending
ItemID_sum_purch = ItemID_sum_purch.sort_values("Purchase Count", ascending=False)
# Setting the index to the Item ID and Item Name
ItemID_sum_purch.set_index(['Item ID','Item Name'], inplace=True)
# Formatting the appropriate columns
ItemID_sum_purch["Average Purchase Price"] = ItemID_sum_purch["Average Purchase Price"].map("${:.2f}".format)
ItemID_sum_purch["Total Purchase Value"] = ItemID_sum_purch["Total Purchase Value"].map("${:.2f}".format)
# Display the top 5 using head as the data is sorted appropriately
ItemID_sum_purch.head(5)
# -

# ## Most Profitable Items
# * Sort the above table by total purchase value in descending order
#
# * Optional: give the displayed data cleaner formatting
#
# * Display a preview of the data frame

# +
# Recreate the table above to not have the formatting done as I need to sort on it
# fix: dropped the unused `ItemID_purch2` assignment -- it merged the item
# names in a second time and was never read (ItemID_purch already carries them)
ItemID_sum_purch2 = pd.merge(ItemID_purch, ItemID_mean, on='Item ID')
ItemID_sum_purch2 = pd.merge(ItemID_sum_purch2, ItemID_tot, on='Item ID')
# Rename the columns
ItemID_sum_purch2 = ItemID_sum_purch2.rename(columns={"Purchase ID":"Purchase Count","Price_x":"Average Purchase Price", "Price_y":"Total Purchase Value"})
# Changed to a float so I can sort on the values
ItemID_sum_purch2['Total Purchase Value'] = ItemID_sum_purch2['Total Purchase Value'].astype('float64')
# Sort by Total Purchase Count descending
ItemID_sum_purch2 = ItemID_sum_purch2.sort_values("Total Purchase Value", ascending=False)
# Setting the index to the Item ID and Item Name
ItemID_sum_purch2.set_index(['Item ID','Item Name'], inplace=True)
# Display top 5 using head as the data is sorted correctly
ItemID_sum_purch2.head(5)
# -

# # Three Observable trends from all of this analysis
# 1. Women spend $0.40 more per person on average.
# 2. The 35-39 age group is willing to spend the most on average and average per person.
# 3. The ten years or younger age group is able to get their parents to spend the second highest amount on overall average and per person.
#
HeroesOfPymoli/.ipynb_checkpoints/HeroesOfPymoli_main-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] slideshow={"slide_type": "slide"}
# # SQLAlchemy, Sqlite, and Dates

# + [markdown] slideshow={"slide_type": "slide"}
# ## Setup
# -

import matplotlib
from matplotlib import style
style.use('fivethirtyeight')
import matplotlib.pyplot as plt
import pandas as pd

# + slideshow={"slide_type": "subslide"}
# Python SQL toolkit and Object Relational Mapper
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, inspect, func

# + slideshow={"slide_type": "subslide"}
# Connect to the pre-built sqlite database of Dow stock prices.
engine = create_engine("sqlite:///../Resources/dow.sqlite", echo=False)

# + slideshow={"slide_type": "subslide"}
# Peek at the first rows of the `dow` table.
# NOTE(review): Engine.execute was removed in SQLAlchemy 2.0 -- this notebook
# assumes a 1.x installation; confirm before upgrading.
engine.execute('SELECT * FROM dow LIMIT 5').fetchall()
# -

# List every column of the `dow` table with its SQL type.
inspector = inspect(engine)
columns = inspector.get_columns('dow')
for c in columns:
    print(c['name'], c["type"])

# + [markdown] slideshow={"slide_type": "slide"}
# ## Reflect and query dates

# + slideshow={"slide_type": "subslide"}
# Reflect Database into ORM class
# NOTE(review): prepare(engine, reflect=True) is the legacy 1.x automap call --
# confirm the installed SQLAlchemy version.
Base = automap_base()
Base.prepare(engine, reflect=True)
Dow = Base.classes.dow

# + slideshow={"slide_type": "fragment"}
session = Session(engine)

# + [markdown] slideshow={"slide_type": "subslide"}
# ## Analysis
# -

# Analyze the Average prices (open, high, low, close) for all stocks in the Month of May

# + nbgrader={"grade": false, "grade_id": "cell-a5033f5b305f26d6", "locked": false, "schema_version": 1, "solution": true}
# Query for the stock and average prices (open, high, low, close)
# for all stock in the month of May
# Sort the result by stock name
# strftime("%m", ...) extracts the two-digit month from the sqlite date string.
sel = [Dow.stock, func.avg(Dow.open_price), func.avg(Dow.high_price), func.avg(Dow.low_price), func.avg(Dow.close_price)]
may_averages = session.query(*sel).\
    filter(func.strftime("%m", Dow.date) == "05").\
    group_by(Dow.stock).\
    order_by(Dow.stock).all()
may_averages

# + nbgrader={"grade": false, "grade_id": "cell-8f14868d3f8f3f50", "locked": false, "schema_version": 1, "solution": true}
# Plot the Results in a Matplotlib bar chart
df = pd.DataFrame(may_averages, columns=['stock', 'open_avg', 'high_avg', 'low_avg', 'close_avg'])
df.set_index('stock', inplace=True)
df.plot.bar()
plt.tight_layout()
plt.show()
# -

# ### Bonus
# Calculate the high-low peak-to-peak (PTP) values for `IBM` stock after `2011-05-31`.
# * Note: high-low PTP is calculated using `high_price` - `low_price`
# * Use a DateTime.date object in the query filter
# * Use a list comprehension or numpy's ravel method to unpack the query's list of tuples into a list of PTP values.
# * Use matplotlib to plot the PTP values as a boxplot

# + nbgrader={"grade": false, "grade_id": "cell-91ca80cc05b37ea7", "locked": false, "schema_version": 1, "solution": true} slideshow={"slide_type": "fragment"}
# Design a query to calculate the PTP for stock `IBM` after May, 2011
import datetime as dt
import numpy as np

# NOTE(review): the instructions above ask for a date object, but a datetime is
# constructed here -- confirm which is intended.
date = dt.datetime(2011, 5, 31)

results = session.query(Dow.high_price - Dow.low_price).\
    filter(Dow.date > date).filter(Dow.stock == 'IBM').all()

# Flatten the list of 1-tuples returned by the query into a flat list of floats.
ptps = list(np.ravel(results))

# List Comprehension Solution
# ptps = [result[0] for result in results]

ptps

# + nbgrader={"grade": false, "grade_id": "cell-d0b673b76e86e46e", "locked": false, "schema_version": 1, "solution": true}
# Load the query into a dataframe, set the index to the date, and plot the ptps
import numpy as np  # NOTE(review): numpy is already imported in the previous cell
fig, ax = plt.subplots()
x = range(len(ptps))
ax.boxplot(ptps, patch_artist=True)
ax.set_title('IBM PTPs')
fig.tight_layout()
plt.show()
Stu_Dates.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import chess
import numpy as np

class StateBoard():
    """Wrap a python-chess Board and serialize it into a (5, 8, 8) feature tensor."""

    def __init__(self, board=None):
        # Start from the standard opening position unless a board is supplied.
        if board is None:
            self.board = chess.Board()
        else:
            self.board = board
        self.BOARD_SIZE = 64
        # Per-piece serial codes (uppercase = white, lowercase = black).
        # NOTE(review): 'q' maps to 16 while the other black pieces use 11-14
        # and 17 -- confirm whether 15 was intended here.
        self.serial_vals = {'P': 1, 'N': 2, 'B': 3, 'R': 4, 'Q': 5, 'K': 6,
                            'p': 11, 'n': 12, 'b': 13, 'r': 14, 'q': 16, 'k': 17}

    def serialize(self):
        """Encode the current position as a (5, 8, 8) numpy array.

        Planes 0-3 hold bits 3..0 of each square's serial code (rooks that
        still carry castling rights and the en-passant square get special
        codes first); plane 4 is filled with the side to move.
        """
        assert self.board.is_valid()
        state = np.zeros(self.BOARD_SIZE, dtype=np.uint8)
        for i in range(self.BOARD_SIZE):
            aPiece = self.board.piece_at(i)
            if aPiece is not None:
                state[i] = self.serial_vals[aPiece.symbol()]
        # Tag rooks whose castling rights are still available with special codes.
        if self.board.has_kingside_castling_rights(chess.WHITE):
            assert state[chess.H1] == self.serial_vals['R']
            state[chess.H1] = 28
        if self.board.has_queenside_castling_rights(chess.WHITE):
            assert state[chess.A1] == self.serial_vals['R']
            state[chess.A1] = 21
        if self.board.has_kingside_castling_rights(chess.BLACK):
            assert state[chess.H8] == self.serial_vals['r']
            state[chess.H8] = 38
        if self.board.has_queenside_castling_rights(chess.BLACK):
            assert state[chess.A8] == self.serial_vals['r']
            # fix: was `state[chess.H8] = 31`, which clobbered the kingside
            # rook's code instead of tagging the queenside rook on A8
            state[chess.A8] = 31
        # Mark the en-passant target square, if any.
        if self.board.ep_square is not None:
            assert state[self.board.ep_square] == 0
            state[self.board.ep_square] = 41
        state = state.reshape(8, 8)
        binary_state = np.zeros((5, 8, 8))
        # NOTE(review): only 4 bit-planes are stored, but the special codes
        # 21/28/31/38/41 need 5-6 bits, so their high bits are silently
        # dropped -- confirm this lossy encoding is intended.
        binary_state[0] = (state >> 3) & 1
        binary_state[1] = (state >> 2) & 1
        binary_state[2] = (state >> 1) & 1
        binary_state[3] = (state >> 0) & 1
        binary_state[4] = (self.board.turn * 1.0)  # 1.0 when white to move
        return binary_state

    def moves(self):
        """Return the legal moves of the current position as a list."""
        return list(self.board.legal_moves)

    def state_features(self):
        """Return [board FEN, side to move, castling rights, en-passant square]."""
        feature_list = [self.board.board_fen(), self.board.turn,
                        self.board.castling_rights, self.board.ep_square]
        # fix: was `return info_list`, an undefined name (NameError) -- the
        # list built above is named feature_list
        return feature_list

print(StateBoard().serialize())

print(StateBoard(board=chess.Board("rnbqkbnr/pp1ppppp/8/2p5/4P3/5N2/PPPP1PPP/RNBQKB1R b KQkq - 1 2")).serialize())
BoardState.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:batchscoringdl] # language: python # name: conda-env-batchscoringdl-py # --- # # Neural Transfer # # This code is largely based off the work by __<NAME>__ `<https://alexis-jacq.github.io>`, and is the implementation of the paper: _Image Style Transfer Using Convolutional Neural Networks_ `<https://arxiv.org/avs/1508.06576>` developed by __<NAME>__, __<NAME>__, and __<NAME>__. # Use this notebook to interactively apply style transfer onto a set of images. # + from __future__ import print_function import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from PIL import Image import matplotlib.pyplot as plt import torchvision.transforms as transforms import torchvision.models as models import torchvision.utils as util from torch.utils.data import DataLoader import copy import os # - device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # ## Define Paths to Images # + # location of style image STYLE_IMAGE = "images/style_images/sample_renior.jpg" # location of directory of content images CONTENT_IMAGE_DIR = "./images/sample_content_images" # (optional) Choose specific images in CONTENT_IMAGE_DIR. Will use ALL images in the CONTENT_IMAGE_DIR if None. 
CONTENT_IMAGE_LIST = ["sample_3.jpg"]

# location of output directory (make sure this directory exists, create it if it doesn't)
OUTPUT_DIR = "./images/sample_output_images"
# -

# Fail fast if any configured path is missing or of the wrong kind.
assert os.path.exists(STYLE_IMAGE)
assert os.path.exists(CONTENT_IMAGE_DIR)
assert os.path.exists(OUTPUT_DIR)
assert os.path.isdir(CONTENT_IMAGE_DIR)
assert os.path.isdir(OUTPUT_DIR)
for image_file in CONTENT_IMAGE_LIST:
    assert os.path.exists(os.path.join(CONTENT_IMAGE_DIR, image_file))

# ## Define Variables to Tune

# +
# Content & Style weights (style dominates by 8 orders of magnitude)
STYLE_WEIGHT = 10**8
CONTENT_WEIGHT = 10**0

# steps to create image
NUM_STEPS = 80

# image size
IMAGE_SIZE = 360
# -

# ## Define content dataset

class ContentDataset(torch.utils.data.Dataset):
    '''
    This class is used to manage the content images.
    '''

    def __init__(self, root_dir, files=None, transform=None):
        '''
        Args:
            root_dir (string): dir with all the images
            files ([string], optional): array of all the images to use in root_dir
                if not specified, use all images in root_dir
            transform (callable, optional): Optional transform to be applied
                on a sample
        '''
        self.root_dir = root_dir
        # Keep only entries that are actual files (skips sub-directories).
        self.files = [f for f in (os.listdir(root_dir) if files is None else files) \
            if os.path.isfile(os.path.join(root_dir, f))]
        self.transform = transform

    def __len__(self):
        '''
        Return total number of files in the dataset
        '''
        return len(self.files)

    def __getitem__(self, idx):
        '''
        Get the image and image name of the i-th image in the dataset
        '''
        img_name = self.files[idx]
        img = Image.open(os.path.join(self.root_dir, img_name))
        if self.transform:
            img = self.transform(img)
        return img, img_name

# ## Define classes for Style transfer

# +
class StyleLoss(nn.Module):
    # Transparent loss layer: records the MSE between the input's Gram matrix
    # and a fixed target Gram matrix, then passes the input through unchanged.

    def __init__(self, target_feature):
        super(StyleLoss, self).__init__()
        self.target = self._gram_matrix(target_feature).detach()

    def forward(self, input):
        G = self._gram_matrix(input)
        self.loss = F.mse_loss(G, self.target)
        return input

    def _gram_matrix(self, input):
        a, b, c, d = input.size()
        # a=batch size(=1)
        # b=number of feature maps
        # (c,d)=dimensions of a f. map (N=c*d)

        features = input.view(a * b, c * d)  # resize F_XL into \hat F_XL

        G = torch.mm(features, features.t())  # compute the gram product

        # we 'normalize' the values of the gram matrix
        # by dividing by the number of elements in each feature map.
        return G.div(a * b * c * d)


class ContentLoss(nn.Module):
    # Transparent loss layer: records MSE against a fixed content target and
    # passes the input through unchanged.

    def __init__(self, target,):
        super(ContentLoss, self).__init__()
        # we 'detach' the target content from the tree used
        # to dynamically compute the gradient: this is a stated value,
        # not a variable. Otherwise the forward method of the criterion
        # will throw an error.
        self.target = target.detach()

    def forward(self, input):
        self.loss = F.mse_loss(input, self.target)
        return input


# create a module to normalize input image so we can easily put it in a
# nn.Sequential
class Normalization(nn.Module):
    def __init__(self, mean, std):
        super(Normalization, self).__init__()
        # .view the mean and std to make them [C x 1 x 1] so that they can
        # directly work with image Tensor of shape [B x C x H x W].
        # B is batch size. C is number of channels. H is height and W is width.
        self.mean = torch.tensor(mean).view(-1, 1, 1)
        self.std = torch.tensor(std).view(-1, 1, 1)

    def forward(self, img):
        # normalize img
        return (img - self.mean) / self.std
# -

# ## Define Functions

# +
# desired depth layers to compute style/content losses:
def get_style_model_and_losses(cnn, normalization_mean, normalization_std,
                               style_img, content_img,
                               content_layers=['conv_4'],
                               style_layers=['conv_1', 'conv_2', 'conv_3', 'conv_4', 'conv_5']):
    # Build a truncated copy of `cnn` with ContentLoss/StyleLoss probe modules
    # inserted after the requested conv layers. Returns (model, style_losses,
    # content_losses); the loss lists alias modules inside `model`, so running
    # model(img) refreshes their .loss attributes.
    cnn = copy.deepcopy(cnn)

    # normalization module
    normalization = Normalization(normalization_mean, normalization_std).to(device)

    # just in order to have an iterable access to or list of content/syle
    # losses
    content_losses = []
    style_losses = []

    # assuming that cnn is a nn.Sequential, so we make a new nn.Sequential
    # to put in modules that are supposed to be activated sequentially
    model = nn.Sequential(normalization)

    i = 0  # increment every time we see a conv
    for layer in cnn.children():
        if isinstance(layer, nn.Conv2d):
            i += 1
            name = 'conv_{}'.format(i)
        elif isinstance(layer, nn.ReLU):
            name = 'relu_{}'.format(i)
            # The in-place version doesn't play very nicely with the ContentLoss
            # and StyleLoss we insert below. So we replace with out-of-place
            # ones here.
            layer = nn.ReLU(inplace=False)
        elif isinstance(layer, nn.MaxPool2d):
            name = 'pool_{}'.format(i)
        elif isinstance(layer, nn.BatchNorm2d):
            name = 'bn_{}'.format(i)
        else:
            raise RuntimeError('Unrecognized layer: {}'.format(layer.__class__.__name__))

        model.add_module(name, layer)

        if name in content_layers:
            # add content loss:
            target = model(content_img).detach()
            content_loss = ContentLoss(target)
            model.add_module("content_loss_{}".format(i), content_loss)
            content_losses.append(content_loss)

        if name in style_layers:
            # add style loss:
            target_feature = model(style_img).detach()
            style_loss = StyleLoss(target_feature)
            model.add_module("style_loss_{}".format(i), style_loss)
            style_losses.append(style_loss)

    # now we trim off the layers after the last content and style losses
    for i in range(len(model) - 1, -1, -1):
        if isinstance(model[i], ContentLoss) or isinstance(model[i], StyleLoss):
            break

    model = model[:(i + 1)]

    return model, style_losses, content_losses


def get_input_optimizer(input_img):
    # this line to show that input is a parameter that requires a gradient
    optimizer = optim.LBFGS([input_img.requires_grad_()], lr=1)
    return optimizer


def run_style_transfer(cnn, normalization_mean, normalization_std,
                       content_img, style_img, input_img, num_steps=300,
                       style_weight=1000000, content_weight=1,
                       tmp_dir=False, log_grain=None):
    """
    Run the style transfer.

    This function takes a optional tmp_dir parameter. If this parameter
    is specified, then intermediate results (at every `log_grain` steps)
    will be saved to the location specified in the parameter. If the
    parameter is set to False, then no intermediate results will be
    saved to disk.
    """
    print('Building the style transfer model..')
    model, style_losses, content_losses = get_style_model_and_losses(cnn,
        normalization_mean, normalization_std, style_img, content_img)
    optimizer = get_input_optimizer(input_img)

    print('Optimizing..')
    # One-element list so the closure (which L-BFGS may evaluate multiple
    # times per optimizer.step) can mutate the step counter.
    run = [0]

    # save content & style loss for each step
    content_loss = []
    style_loss = []
    total_loss = []

    while run[0] <= num_steps:

        def closure():
            # correct the values of updated input image
            input_img.data.clamp_(0, 1)

            optimizer.zero_grad()
            # Forward pass refreshes .loss on every probe module.
            model(input_img)
            style_score = 0
            content_score = 0

            for sl in style_losses:
                style_score += sl.loss
            for cl in content_losses:
                content_score += cl.loss

            style_score *= style_weight
            content_score *= content_weight

            loss = style_score + content_score
            loss.backward()

            run[0] += 1
            if log_grain:
                if run[0] % log_grain == 0:
                    print('run #{} - Style Loss : {:4f} Content Loss: {:4f}'.format(
                        run, style_score.item(), content_score.item()))

            # save tmp folder
            if tmp_dir:
                if not os.path.exists(tmp_dir):
                    os.makedirs(tmp_dir)
                util.save_image(input_img, '{0}/tmp_{1:0>4}.jpg'.format(tmp_dir, run[0]))

            # save content & style loss
            content_loss.append(content_score.item())
            style_loss.append(style_score.item())
            total_loss.append(content_score.item() + style_score.item())

            return style_score + content_score

        optimizer.step(closure)

    # a last correction...
    input_img.data.clamp_(0, 1)

    return input_img, content_loss, style_loss, total_loss
# -

# ## Main Process: Load content/style images & apply style transfer

# Setup image transformations
loader = transforms.Compose([
    transforms.Resize(IMAGE_SIZE),      # scale imported image
    transforms.CenterCrop(IMAGE_SIZE),  # crop on center
    transforms.ToTensor()])             # transform it into a torch tensor

# Setup content image loader
content_img_set = ContentDataset(root_dir=CONTENT_IMAGE_DIR, files=CONTENT_IMAGE_LIST, transform=loader)
content_img_loader = DataLoader(content_img_set, batch_size=1, shuffle=False, num_workers=1)

# Load style image
style_img = loader(Image.open(STYLE_IMAGE)).unsqueeze(0).to(device, torch.float)

# Run style transfer

# +
# %%time
# use vgg19
cnn = models.vgg19(pretrained=True).features.to(device).eval()

# VGG networks are trained on images with each channel
# normalized by mean=[0.485, 0.456, 0.406] and std=[0.229, 0.224, 0.225].
cnn_normalization_mean = torch.tensor([0.485, 0.456, 0.406]).to(device)
cnn_normalization_std = torch.tensor([0.229, 0.224, 0.225]).to(device)

# store image outputs in memory
output_imgs = []
content_imgs = []

for content_img_batch, content_filename_batch in content_img_loader:

    # load image and add image to content image array
    content_img = content_img_batch[0].unsqueeze(0).to(device, torch.float)
    content_filename = content_filename_batch[0]
    content_imgs.append(content_img)

    # use white noise image as input image:
    # input_img = torch.randn(content_img.data.size(), device=device)

    # use content image as input image:
    input_img = content_img.clone()

    # style transfer!
    output, content_loss, style_loss, total_loss = run_style_transfer(\
        cnn, cnn_normalization_mean, cnn_normalization_std,
        content_img, style_img, input_img,
        num_steps=NUM_STEPS,
        style_weight=STYLE_WEIGHT, content_weight=CONTENT_WEIGHT,
        tmp_dir=None, log_grain=None)

    # add output image to array
    output_imgs.append(output)

    # save output image
    util.save_image(output, os.path.join(OUTPUT_DIR, '{0}.jpg'.format(content_filename.split('.')[0])))
# -

# We can plot the content loss, style loss, and total loss with each step of optimization. This will give us a better sense of how many steps we need to run before the improvement is negligible.

# +
import matplotlib.pyplot as plt
import numpy as np

# NOTE(review): content_loss/style_loss/total_loss hold the losses of the LAST
# image processed by the loop above — confirm that is the intended plot.
fig = plt.figure(figsize=(12, 8))

ax1 = fig.add_subplot(2,1,1)
color = 'tab:red'
ax1.set_xlabel('num_steps')
ax1.set_ylabel('content_loss', color=color)
ax1.plot(content_loss, color=color)
ax1.tick_params(axis='y', labelcolor=color)

# Second y-axis on the same subplot for the style loss (different scale).
ax2 = ax1.twinx()
color = 'tab:blue'
ax2.set_ylabel('style_loss', color=color)
ax2.plot(style_loss, color=color)
ax2.tick_params(axis='y', labelcolor=color)

ax3 = fig.add_subplot(2,1,2)
color='tab:green'
ax3.set_ylabel('total_loss', color=color)
ax3.set_xlabel('num_steps', color=color)
ax3.plot(total_loss, color=color)
ax3.tick_params(axis='y', labelcolor=color)

fig.tight_layout()
plt.show()
# -

# ## View the images! (Used to test and tune style transfer parameters)

# +
unloader = transforms.ToPILImage()  # reconvert into PIL image
plt.ion()  # interactive on

def imshow(tensor, title="", ax=None):
    # Render an image tensor with matplotlib, drawing on `ax` when given.
    image = tensor.cpu().clone()  # we clone the tensor to not do changes on it
    image = image.squeeze(0)  # remove the fake batch dimension
    image = unloader(image)
    plt.title(title)
    if ax:
        ax.imshow(image)
    else:
        plt.imshow(image)
    plt.pause(0.001)  # pause a bit so that plots are updated

# plot style
plt.figure()
imshow(style_img, title='Style Image')

# plot content & output images side by side for each processed image
for i, (c, o) in enumerate(zip(content_imgs, output_imgs)):
    fig = plt.figure(figsize=(12,8))
    ax1 = fig.add_subplot(121)
    imshow(c, title='content image {}'.format(i), ax=ax1)
    ax2 = fig.add_subplot(122)
    imshow(o, title='style transfer image {}'.format(i), ax=ax2)
# -
pytorch/style_transfer_interactive.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:metis] * # language: python # name: conda-env-metis-py # --- import spotipy from spotipy.oauth2 import SpotifyClientCredentials import json import pandas as pd import time from functools import reduce from sqlalchemy import create_engine # + #initialize connection to spotify API cid='CID' secret='secret' client_credentials_manager = SpotifyClientCredentials(client_id=cid, client_secret=secret) sp = spotipy.Spotify(client_credentials_manager=client_credentials_manager) sp.trace = False # - def getTrackIDs(user, playlist_id): #returns a list of specific track IDs from a spotify playlist and creator username track_ids = [] playlist = sp.user_playlist(user, playlist_id) for item in playlist['tracks']['items']: track = item['track'] track_ids.append(track['id']) return track_ids # + def getTrackFeatures(id): #returns a list of desired track features from track ID track_info = sp.track(id) features_info = sp.audio_features(id) #track metadata name = track_info['name'] album = track_info['album']['name'] artist = track_info['album']['artists'][0]['name'] release_date = track_info['album']['release_date'] length = track_info['duration_ms'] popularity = track_info['popularity'] #mostly spotify proprietary audio features danceability = features_info[0]['danceability'] energy = features_info[0]['energy'] key = features_info[0]['key'] loudness = features_info[0]['loudness'] mode = features_info[0]['mode'] speechiness = features_info[0]['speechiness'] acousticness = features_info[0]['acousticness'] instrumentalness = features_info[0]['instrumentalness'] liveness = features_info[0]['liveness'] valence = features_info[0]['valence'] tempo = features_info[0]['tempo'] time_signature = features_info[0]['time_signature'] track_data = [name, album, artist, release_date, length, popularity, 
danceability, energy, key, loudness, mode, speechiness, acousticness, instrumentalness, liveness, valence, tempo, time_signature] return track_data # - def get_dfs(track_ids): #returns pandas dataframe of track features from a list of track IDs track_data_list = [] for i in range(len(track_ids)): time.sleep(.1) try: track_data = getTrackFeatures(track_ids[i]) track_data_list.append(track_data) except: pass df = pd.DataFrame(track_data_list, columns = ['name', 'album', 'artist', 'release_date', 'length', 'popularity', 'danceability', 'energy', 'key', 'loudness', 'mode', 'speechiness', 'acousticness', 'instrumentalness', 'liveness', 'valence', 'tempo', 'time_signature']) return df # + #make list of playlists for each genre from poking around spotify hip_hop_list = ['spotify:playlist:37i9dQZF1DX0XUsuxWHRQd', 'spotify:playlist:37i9dQZF1DX2RxBh64BHjQ', 'spotify:playlist:37i9dQZF1DX6GwdWRQMQpq', 'spotify:playlist:37i9dQZF1DX9iGsUcr0Bpa', 'spotify:playlist:37i9dQZF1DWY4xHQp97fN6', 'spotify:playlist:37i9dQZF1DX9oh43oAzkyx', 'spotify:playlist:37i9dQZF1DX186v583rmzp', 'spotify:playlist:37i9dQZF1DX4SrOBCjlfVi', 'spotify:playlist:37i9dQZF1DX6OgmB2fwLGd', 'spotify:playlist:37i9dQZF1DWT5MrZnPU1zD', 'spotify:playlist:37i9dQZF1DX0HRj9P7NxeE', 'spotify:playlist:37i9dQZF1DWUFmyho2wkQU', 'spotify:playlist:37i9dQZF1DXaxIqwkEGFEh', 'spotify:playlist:37i9dQZF1DX6xZZEgC9Ubl', 'spotify:playlist:37i9dQZF1DX76t638V6CA8', 'spotify:playlist:37i9dQZF1DX0Tkc6ltcBfU', 'spotify:playlist:37i9dQZF1DWT6MhXz0jw61', 'spotify:playlist:37i9dQZF1DX9sQDbOMReFI'] jazz_list = ['spotify:playlist:37i9dQZF1DWTtzPKJEaTC4', 'spotify:playlist:37i9dQZF1DWW2c0C8Vb2IR', 'spotify:playlist:37i9dQZF1DWXIuW81skHVz', 'spotify:playlist:37i9dQZF1DX55dNU0PWnO5', 'spotify:playlist:37i9dQZF1DWTR4ZOXTfd9K', 'spotify:playlist:37i9dQZF1DXbITWG1ZJKYt', 'spotify:playlist:37i9dQZF1DWVqfgj8NZEp1', 'spotify:playlist:37i9dQZF1DX4wta20PHgwo', 'spotify:playlist:37i9dQZF1DWYxwmBaMqxsl', 'spotify:playlist:37i9dQZF1DWUb0uBnlJuTi', 
             'spotify:playlist:37i9dQZF1DWY3X53lmPYk9',
             'spotify:playlist:37i9dQZF1DX71VcjjnyaBQ',
             'spotify:playlist:37i9dQZF1DX5YTAi6JhwZm',
             'spotify:playlist:37i9dQZF1DX6G7arXBXa3A',
             'spotify:playlist:37i9dQZF1DWXSyfX6gqDNp',
             'spotify:playlist:37i9dQZF1DX6KyCRJzZneq',
             'spotify:playlist:37i9dQZF1DWZZfLKhEkflI']

# rock sample playlists
rock_list = ['spotify:playlist:37i9dQZF1DXcF6B6QPhFDv',
             'spotify:playlist:37i9dQZF1DWWJOmJ7nRx0C',
             'spotify:playlist:37i9dQZF1DX82GYcclJ3Ug',
             'spotify:playlist:37i9dQZF1DWXRqgorJj26U',
             'spotify:playlist:37i9dQZF1DX1rVvRgjX59F',
             'spotify:playlist:37i9dQZF1DX3oM43CtKnRV',
             'spotify:playlist:37i9dQZF1DX1spT6G94GFC',
             'spotify:playlist:37i9dQZF1DWWwzidNQX6jx',
             'spotify:playlist:37i9dQZF1DX3YMp9n8fkNx',
             'spotify:playlist:37i9dQZF1DX11ghcIxjcjE']

# pop sample playlists
pop_list = ['spotify:playlist:37i9dQZF1DXcBWIGoYBM5M',
            'spotify:playlist:37i9dQZF1DX0kbJZpiYdZl',
            'spotify:playlist:37i9dQZF1DX2L0iB23Enbq',
            'spotify:playlist:37i9dQZF1DWUa8ZRTfalHk',
            'spotify:playlist:37i9dQZF1DWWvvyNmW9V9a',
            'spotify:playlist:37i9dQZF1DXbYM3nMM0oPk',
            'spotify:playlist:37i9dQZF1DX0b1hHYQtJjp',
            'spotify:playlist:37i9dQZF1DWTwnEm1IYyoj',
            'spotify:playlist:37i9dQZF1DWYs83FtTMQFw',
            'spotify:playlist:37i9dQZF1DWWEcRhUVtL8n',
            'spotify:playlist:37i9dQZF1DXcxvFzl58uP7',
            'spotify:playlist:37i9dQZF1DX0s5kDXi1oC5']

# classical sample playlists
classical_list = ['spotify:playlist:37i9dQZF1DWWEJlAGA9gs0',
                  'spotify:playlist:37i9dQZF1DWV0gynK7G6pD',
                  'spotify:playlist:37i9dQZF1DWXtBjoO4Thyy',
                  'spotify:playlist:37i9dQZF1DWVFeEut75IAL',
                  'spotify:playlist:37i9dQZF1DXdLD1ufgw60J',
                  'spotify:playlist:37i9dQZF1DWYkztttC1w38',
                  'spotify:playlist:37i9dQZF1DX2XWJkYVfE4v',
                  'spotify:playlist:37i9dQZF1DXddGd6mP5X2a',
                  'spotify:playlist:37i9dQZF1DXah8e1pvF5oE',
                  'spotify:playlist:37i9dQZF1DWWQxllNWdnsR',
                  'spotify:playlist:37i9dQZF1DX0DHxkHcf1Tl']

# country sample playlists
country_list = ['spotify:playlist:37i9dQZF1DX1lVhptIYRda',
                'spotify:playlist:37i9dQZF1DXdgnLr18vPvu',
                'spotify:playlist:37i9dQZF1DWW7RgkOJG32Y',
                'spotify:playlist:37i9dQZF1DX1KHLbhJkg7B',
                'spotify:playlist:37i9dQZF1DWTkxQvqMy4WW',
'spotify:playlist:37i9dQZF1DWYvU2z6HruAo', 'spotify:playlist:37i9dQZF1DWVpjAJGB70vU', 'spotify:playlist:37i9dQZF1DXaJXCbmtHVHV', 'spotify:playlist:37i9dQZF1DXdxUH6sNtcDe', 'spotify:playlist:37i9dQZF1DX8WMG8VPSOJC', 'spotify:playlist:37i9dQZF1DWYnwbYQ5HnZU', 'spotify:playlist:37i9dQZF1DXaiEFNvQPZrM', 'spotify:playlist:37i9dQZF1DWXdiK4WAVRUW'] latin_list = ['spotify:playlist:37i9dQZF1DX10zKzsJ2jva', 'spotify:playlist:37i9dQZF1DWUoGbRYcteyC', 'spotify:playlist:37i9dQZF1DWY7IeIP1cdjF', 'spotify:playlist:37i9dQZF1DX1hVRardJ30X', 'spotify:playlist:37i9dQZF1DXbSwbJpH6lAw', 'spotify:playlist:37i9dQZF1DWZoF06RIo9el', 'spotify:playlist:37i9dQZF1DXbdrcAZnP3Cy', 'spotify:playlist:37i9dQZF1DX1QnNyJOBQBv', 'spotify:playlist:37i9dQZF1DX4OjfOteYnH8', 'spotify:playlist:37i9dQZF1DX7Qo2zphj7u3', 'spotify:playlist:37i9dQZF1DX4V5eXk4NKqu', 'spotify:playlist:37i9dQZF1DX4qKWGR9z0LI'] dance_electronic_list = ['spotify:playlist:37i9dQZF1DX4dyzvuaRJ0n', 'spotify:playlist:37i9dQZF1DX8tZsk68tuDw', 'spotify:playlist:37i9dQZF1DXa8NOEUWPn9W', 'spotify:playlist:37i9dQZF1DXdURFimg6Blm', 'spotify:playlist:37i9dQZF1DWSf2RDTDayIx', 'spotify:playlist:37i9dQZF1DX0BcQWzuB7ZO', 'spotify:playlist:37i9dQZF1DX0hvSv9Rf41p', 'spotify:playlist:37i9dQZF1DXaXB8fQg7xif', 'spotify:playlist:37i9dQZF1DX2TRYkJECvfC', 'spotify:playlist:37i9dQZF1DX6GJXiuZRisr', 'spotify:playlist:37i9dQZF1DX8CopunbDxgW'] # - engine = create_engine('postgresql://user:PASSWORD@localhost:5432/project3') def get_data_to_sql(playlists, tablename): #creates a SQL table of desired features from a list of playlists tracklist = [] for playlist in playlists: try: a = getTrackIDs('Spotify', playlist) tracklist.append(a) except: pass #flatten list tracklist = reduce(lambda x,y: x+y, tracklist) #create dataframe of song attributes for each song in track list df = get_dfs(tracklist) #push df to sql df.to_sql(tablename, engine) #getting data for some genres get_data_to_sql(hip_hop_list, 'hiphop') get_data_to_sql(jazz_list, 'jazz') 
get_data_to_sql(rock_list, 'rock')
get_data_to_sql(pop_list, 'pop')
get_data_to_sql(classical_list, 'classical')

# getting data for some more genres
get_data_to_sql(country_list, 'country')
get_data_to_sql(latin_list, 'latin')
get_data_to_sql(dance_electronic_list, 'electronic')

# getting data for the last genres
# NOTE(review): rock and pop were already loaded above; re-running to_sql on an
# existing table raises ValueError unless if_exists is set — confirm whether
# these duplicate calls are intentional re-pulls.
get_data_to_sql(rock_list, 'rock')
get_data_to_sql(pop_list, 'pop')

# +
# check: read the rock table back to confirm the load succeeded
query = 'SELECT * FROM rock;'
pd.read_sql(query, engine)
# -
API Pull attempt.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="Okfr_uhwhS1X" colab_type="text" # # Lambda School Data Science - Making Data-backed Assertions # # This is, for many, the main point of data science - to create and support reasoned arguments based on evidence. It's not a topic to master in a day, but it is worth some focused time thinking about and structuring your approach to it. # + [markdown] id="9dtJETFRhnOG" colab_type="text" # ## Lecture - generating a confounding variable # # The prewatch material told a story about a hypothetical health condition where both the drug usage and overall health outcome were related to gender - thus making gender a confounding variable, obfuscating the possible relationship between the drug and the outcome. # # Let's use Python to generate data that actually behaves in this fashion! # + id="WiBkgmPJhmhE" colab_type="code" outputId="fe37e841-2e7a-4807-e996-6fda9f4ac6e4" colab={"base_uri": "https://localhost:8080/", "height": 1000} import random dir(random) # Reminding ourselves what we can do here # + id="Ks5qFtpnq-q5" colab_type="code" outputId="e453948c-5d97-4355-cfd8-88aac79982ed" colab={"base_uri": "https://localhost:8080/", "height": 34} # Let's think of another scenario: # We work for a company that sells accessories for mobile phones. # They have an ecommerce site, and we are supposed to analyze logs # to determine what sort of usage is related to purchases, and thus guide # website development to encourage higher conversion. # The hypothesis - users who spend longer on the site tend # to spend more. Seems reasonable, no? # But there's a confounding variable! If they're on a phone, they: # a) Spend less time on the site, but # b) Are more likely to be interested in the actual products! 
# Let's use namedtuple to represent our data
from collections import namedtuple
# purchased and mobile are bools, time_on_site in seconds
User = namedtuple('User', ['purchased','time_on_site', 'mobile'])
example_user = User(False, 12, False)
print(example_user)

# + id="n4yIyIAaotVN" colab_type="code" outputId="734e4497-1275-4686-cba3-d7a53dfd3d47" colab={"base_uri": "https://localhost:8080/", "height": 34}
import numpy as np
np.random.normal(10,2)

# + id="lfPiHNG_sefL" colab_type="code" outputId="0da82189-47ca-438a-c17b-ae3184e62305" colab={"base_uri": "https://localhost:8080/", "height": 334}
# And now let's generate 1000 example users
# 750 mobile, 250 not (i.e. desktop)
# A desktop user has a base conversion likelihood of 10%
# And it goes up by 1% for each 15 seconds they spend on the site
# And they spend anywhere from 10 seconds to 10 minutes on the site (uniform)
# Mobile users spend on average half as much time on the site as desktop
# But have three times as much base likelihood of buying something
users = []

for _ in range(250):
    # Desktop users
    # time_on_site = random.uniform(10, 600)
    time_on_site = np.random.normal(9.4*60, 3*60)
    purchased = random.random() < 0.1 + (time_on_site / 1500)
    users.append(User(purchased, time_on_site, False))

for _ in range(750):
    # Mobile users
    # time_on_site = random.uniform(5, 300)
    time_on_site = np.random.normal(7.5*60, 2.5*60)
    purchased = random.random() < 0.3 + (time_on_site / 1500)
    users.append(User(purchased, time_on_site, True))

# Shuffle so the device groups are interleaved like a real log would be.
random.shuffle(users)
print(users[:10])

# + id="9gDYb5qGuRzy" colab_type="code" outputId="eaa08589-09c1-4414-9d85-956e0a708c4e" colab={"base_uri": "https://localhost:8080/", "height": 204}
# Let's put this in a dataframe so we can look at it more easily
import pandas as pd
user_data = pd.DataFrame(users)
user_data.head()

# + id="afWPgTXByawU" colab_type="code" colab={}


# + id="lXOqjHX3uxBq" colab_type="code" outputId="3cdd5965-182a-4fd7-9917-4777ea13e9ce" colab={"base_uri": "https://localhost:8080/", "height": 204}
# Convert seconds to minutes for easier reading in the plots below.
user_data['time_on_site_min'] = user_data['time_on_site']/60
user_data.head()

# + id="oQ90dcazoijy" colab_type="code" outputId="3cdc592e-3fdc-4f77-d7d0-5023b87c4e2e" colab={"base_uri": "https://localhost:8080/", "height": 354}
import matplotlib.pyplot as plt

num_bins = 10
plt.hist(user_data['time_on_site'], num_bins, facecolor='blue', alpha=0.5)

# + id="t18sTaRLrk3G" colab_type="code" outputId="a21a3158-fe1c-4cb4-94a9-b94e7065163d" colab={"base_uri": "https://localhost:8080/", "height": 398}
# Overlaid histograms of time-on-site per device type.
user_data.groupby('mobile').time_on_site_min.hist(bins=20, alpha=0.5, figsize=(10,6));

# + id="sr6IJv77ulVl" colab_type="code" outputId="cfaec64a-451c-41a7-c743-80793bae5c9f" colab={"base_uri": "https://localhost:8080/", "height": 471}
# Let's use crosstabulation to try to see what's going on
pd.crosstab(user_data['purchased'], user_data['time_on_site'])

# + id="hvAv6J3EwA9s" colab_type="code" outputId="4b846aa0-294c-415c-a1c6-2335ea81bcd6" colab={"base_uri": "https://localhost:8080/", "height": 235}
# OK, that's not quite what we want
# Time is continuous! We need to put it in discrete buckets
# Pandas calls these bins, and pandas.cut helps make them
time_bins = pd.cut(user_data['time_on_site_min'], 5)  # 5 equal-sized bins
pd.crosstab(time_bins, user_data['purchased'], normalize='index')

# + id="pjcXnJw0wfaj" colab_type="code" outputId="c5246178-797a-4991-85f6-77f209edd32e" colab={"base_uri": "https://localhost:8080/", "height": 235}
# We can make this a bit clearer by normalizing (getting %)
pd.crosstab(time_bins, user_data['purchased'], normalize='columns')

# + id="C3GzvDxlvZMa" colab_type="code" colab={}
# That seems counter to our hypothesis
# More time on the site can actually have fewer purchases

# But we know why, since we generated the data!
# Let's look at mobile and purchased

# + id="COqJD5IIw4cE" colab_type="code" outputId="4ab94bbf-0971-4611-df5d-c79309dba908" colab={"base_uri": "https://localhost:8080/", "height": 142}
pd.crosstab(user_data['purchased'], user_data['mobile'], normalize='columns')

# + id="KQb-wU60xCum" colab_type="code" outputId="007fc5d0-089b-4099-eb8b-7cecaf2578d2" colab={"base_uri": "https://localhost:8080/", "height": 266}
# Yep, mobile users are more likely to buy things
# But we're still not seeing the *whole* story until we look at all 3 at once

# Live/stretch goal - how can we do that?
pd.crosstab(time_bins, [user_data['purchased'], user_data['mobile']], normalize='columns')

# + [markdown] id="YDRv5PtdzeV4" colab_type="text"
# ## Stanford Open Police Project

# + id="fBmbdSTv0oKT" colab_type="code" outputId="cf46bc99-e5ea-408c-f202-04cbefcc8e7c" colab={"base_uri": "https://localhost:8080/", "height": 51}
# %%time
import pandas as pd
nj_data = pd.read_csv('nj_statewide_2019_02_25.csv')

# + id="R1XE6r-W5I3V" colab_type="code" outputId="79f36c64-c5d8-4b83-b1d2-c9ce06663599" colab={"base_uri": "https://localhost:8080/", "height": 674}
print(nj_data.shape)
nj_data.head()

# + id="sOAHHhkc0_q8" colab_type="code" outputId="f9e70a14-526f-44e6-9765-83ec653a02b0" colab={"base_uri": "https://localhost:8080/", "height": 374}
# Count missing values per column.
nj_data.isna().sum()

# + id="Xzk_gypI2uO1" colab_type="code" outputId="b2b061dc-22ce-40e2-84db-521fbf9a2167" colab={"base_uri": "https://localhost:8080/", "height": 204}
nj_data.violation.value_counts().head(10)

# + id="qI5Kdhmd2_Si" colab_type="code" outputId="aae4cce7-44d7-4e15-a0fe-3528a406e9c3" colab={"base_uri": "https://localhost:8080/", "height": 204}
nj_data.vehicle_make.value_counts(normalize=True).head(10)

# + id="bwz-MVEb3bBK" colab_type="code" outputId="cb1433ec-af7a-44ce-fa56-7ac79154947a" colab={"base_uri": "https://localhost:8080/", "height": 204}
# Make distribution restricted to speeding stops only.
nj_data[nj_data.violation == '39:4-98 RATES OF SPEED'].vehicle_make.value_counts(normalize=True).head(10)

# + id="EtuDl99c4HH_" colab_type="code" outputId="21f23fd1-7440-4695-c2e1-094a3b557f06" colab={"base_uri": "https://localhost:8080/", "height": 204}
nj_data[nj_data.violation == '39:4-98 RATES OF SPEED'].vehicle_color.value_counts(normalize=True).head(10)

# + [markdown] id="cGeSj5zH6gJN" colab_type="text"
# Use %%timeit to optimize code,
# import tqdm

# + [markdown] id="lOqaPds9huME" colab_type="text"
# ## Assignment - what's going on here?
#
# Consider the data in `persons.csv` (already prepared for you, in the repo for the week). It has four columns - a unique id, followed by age (in years), weight (in lbs), and exercise time (in minutes/week) of 1200 (hypothetical) people.
#
# Try to figure out which variables are possibly related to each other, and which may be confounding relationships.

# + id="TGUS79cOhPWj" colab_type="code" outputId="21a98e01-2365-441c-e4a2-48bcdea1ddbd" colab={"base_uri": "https://localhost:8080/", "height": 204}
# Import the person.csv dataset
import pandas as pd
person_data = pd.read_csv('https://raw.githubusercontent.com/JimKing100/DS-Unit-1-Sprint-1-Dealing-With-Data/master/module3-databackedassertions/persons.csv', index_col=0)
person_data.head()

# + id="i28rlhlCAwSL" colab_type="code" outputId="14cea79e-0176-4cc5-eba2-8df4fd507185" colab={"base_uri": "https://localhost:8080/", "height": 829}
# Let's take a look at the data using crosstab
age_bins = pd.cut(person_data['age'], 10)  # 10 equal-sized bins
pd.crosstab(age_bins, person_data['weight'])

# + id="RMzlypAiVcgb" colab_type="code" outputId="45c8ef70-3dea-4a8b-e1c0-47454b372b09" colab={"base_uri": "https://localhost:8080/", "height": 283}
# Let's plot a histogram of the data to look for patterns - weight first
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt

num_bins = 10
plt.hist(person_data['weight'], num_bins, facecolor='blue', alpha=0.5)
plt.xlabel('Weight')
plt.ylabel('Count')
plt.show()

# + id="IICMJ_VQeTJ8" colab_type="code" outputId="a7e821de-a98b-467d-942f-e9f42ae3ff4e" colab={"base_uri": "https://localhost:8080/", "height": 283}
# Let's plot a histogram of the data to look for patterns - age next
num_bins = 10
plt.hist(person_data['age'], num_bins, facecolor='blue', alpha=0.5)
plt.xlabel('Age')
plt.ylabel('Count')
plt.show()

# + id="QSTLFCrYeX-H" colab_type="code" outputId="3f3b80de-a955-43e8-c1de-fa72b1efd178" colab={"base_uri": "https://localhost:8080/", "height": 283}
# Let's plot a histogram of the data to look for patterns - exercise time
num_bins = 10
plt.hist(person_data['exercise_time'], num_bins, facecolor='blue', alpha=0.5)
plt.xlabel('Exercise Time')
plt.ylabel('Count')
plt.show()

# + id="vNig72GHefWb" colab_type="code" outputId="97b3467f-96b3-4764-e63a-028fba6e58c1" colab={"base_uri": "https://localhost:8080/", "height": 295}
# A scatter plot should show if a trend exists - check age and weight
plt.scatter(person_data['age'], person_data['weight'], alpha=0.5)
plt.title('Scatter plot')
plt.xlabel('Age')
plt.ylabel('Weight')
plt.show()

# + id="FOn8XCN9fHVw" colab_type="code" outputId="35737254-3998-430f-f011-a8154f7d2948" colab={"base_uri": "https://localhost:8080/", "height": 295}
# A scatter plot should show if a trend exists - check age and exercise time
plt.scatter(person_data['age'], person_data['exercise_time'], alpha=0.5)
plt.title('Scatter plot')
plt.xlabel('Age')
plt.ylabel('Exercise Time')
plt.show()

# + id="1SzKu8sWbnAQ" colab_type="code" outputId="8a3c6322-3299-4118-c5e2-60ffbf3f09dd" colab={"base_uri": "https://localhost:8080/", "height": 295}
# A scatter plot should show if a trend exists - exercise time and weight
plt.scatter(person_data['exercise_time'], person_data['weight'], alpha=0.5)
plt.title('Scatter plot')
plt.xlabel('Exercise Time')
plt.ylabel('Weight')
plt.show()

# + [markdown] id="BT9gdS7viJZa" colab_type="text"
# ### Assignment questions
#
# After you've worked on some code, answer the following questions in this text block:
#
# 1. What are the variable types in the data?
#
# They are all integer data types.
#
# 2. What are the relationships between the variables?
#
# Age and Weight are randomly distributed, no specific relationship appears to exist.
#
# Exercise Time does appear to decline starting around 60 years old.
#
# Weight does appear to fall with an increase in Exercise Time.
#
# 3. Which relationships are "real", and which spurious?
#
# Exercise Time and Age are "real",
# Weight and Exercise Time are "real",
# Age and Weight are spurious
#

# + [markdown] id="_XXg2crAipwP" colab_type="text"
# ## Stretch goals and resources
#
# Following are *optional* things for you to take a look at. Focus on the above assignment first, and make sure to commit and push your changes to GitHub.
#
# - [Spurious Correlations](http://tylervigen.com/spurious-correlations)
# - [NIH on controlling for confounding variables](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4017459/)
#
# Stretch goals:
#
# - Produce your own plot inspired by the Spurious Correlation visualizations (and consider writing a blog post about it - both the content and how you made it)
# - Pick one of the techniques that NIH highlights for confounding variables - we'll be going into many of them later, but see if you can find which Python modules may help (hint - check scikit-learn)

# + id="PjmD1WU67Vjd" colab_type="code" colab={}
# See the last cell for the Spurious Correlation

# + id="PlPonbZQmuOw" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="6a7d7119-2bde-4196-cb70-18e54740238e"
# Picked linear regression to plot the relationship between Exercise Time and Weight
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.linear_model import LinearRegression

# Use linear regression model
model = LinearRegression()

# Show scatter plot of data
plt.scatter(person_data['exercise_time'], person_data['weight'], alpha=0.5)
plt.title('Scatter plot')
plt.xlabel('Exercise Time')
plt.ylabel('Weight')
plt.show()

# + id="mueh2t5VpaPX" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="81c49140-11ef-4f0d-c3db-9fb20060a1e3"
# Show actual data
person_data.head()

# + id="lChEMuYGpu8O" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 312} outputId="47bddffa-0717-453f-bf5e-1f73b7e9c0e5"
# Run the linear regression of weight on exercise time
from sklearn.linear_model import LinearRegression

features = ['exercise_time'] # Set features to exercise time
target = 'weight' # Set target to weight

model = LinearRegression()
model.fit(person_data[features], person_data[target])

# create values to predict using the exercise range
exercise_time = [[w] for w in range(0, 300)]

# make predictions based on linear regression model
predictions = model.predict(exercise_time)

# graph it all
plt.scatter(person_data['exercise_time'], person_data['weight'], alpha=0.5)
plt.plot(exercise_time, predictions)
plt.title('Linear Regression')
plt.xlabel('Exercise Time')
plt.ylabel('Weight')
plt.show()

# Show the y = -.19x + 180 linear regression line values
model.coef_, model.intercept_

# + id="mKVD7Y1PvVJJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 380} outputId="c033039a-5941-4c7d-c130-1273a83dcc4e"
# Import the auto-mpg.csv dataset for a spurious correlation
import pandas as pd
car_data = pd.read_csv('auto-mpg.csv')
car_data.head()

# + [markdown] id="ISqIyOveDI53" colab_type="text"
#

# + id="hVU6xmwiA6Rb" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 380} outputId="842b46d7-1033-404f-8a50-2eed433110a3"
# Cast mpg to int before regressing/plotting
convert_dict = {'mpg': int}
car_data = car_data.astype(convert_dict)
car_data.head()

# + id="giC20pTA-Wge" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 312} outputId="95ce611e-d5b5-4ec5-d55c-931f50dc014b"
# Run the linear regression of mpg on horsepower
from sklearn.linear_model import LinearRegression

features = ['horsepower'] # Set features to horsepower
target = 'mpg' # Set target to mileage

model = LinearRegression()
model.fit(car_data[features], car_data[target])

# create values to predict using the horsepower range
horsepower_values = [[w] for w in range(50, 350)]

# make predictions based on linear regression model
predictions = model.predict(horsepower_values)

# graph it all
# BUG FIX: scatter mpg from car_data (original referenced person_data['mpg'],
# a column that does not exist in the persons dataset -> KeyError)
plt.scatter(car_data['horsepower'], car_data['mpg'], alpha=0.5)
# BUG FIX: plot the fitted line against horsepower_values (original reused
# exercise_time from the previous, unrelated regression)
plt.plot(horsepower_values, predictions)
plt.title('Linear Regression')
plt.xlabel('Horsepower')
plt.ylabel('Mileage')
plt.show()

# Show the fitted slope and intercept of the mpg ~ horsepower line
model.coef_, model.intercept_

# + [markdown] id="yEvKIdlADLgL" colab_type="text"
# ## Spurious Correlation!
#
# Weight and Exercise correlate with Horsepower and Mileage!
module3-databackedassertions/LS_DS_114_Making_Data_backed_Assertions.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:bank] # language: python # name: conda-env-bank-py # --- # + import matplotlib.pyplot as plt import numpy as np import pandas as pd import altair as alt from sklearn.model_selection import train_test_split alt.renderers.enable('mimetype') alt.data_transformers.enable('data_server') bank_add_full = pd.read_csv("../data/raw/bank-additional-full.csv", sep=';') df = bank_add_full train_df, test_df = train_test_split(df, test_size = 0.20, random_state=123) alt.data_transformers.disable_max_rows() numeric_features = ["age", "duration", "campaign", "pdays", "previous", "emp.var.rate", "cons.price.idx", "cons.conf.idx", "euribor3m", "nr.employed"] categorical_features = ["job", "marital", "default", "housing", "loan", "poutcome"] ordinal_features = ["education"] education_ordering = ['illiterate', 'basic.4y','basic.6y','basic.9y','high.school', 'professional.course','university.degree', 'unknown'] target = ["y"] # + jobs = list(train_df['job'].unique()) jobs_percent_subscribed = {} for job in jobs: subscribed = train_df.query("job == @job & y == 'yes'").shape[0] total_surveyed = train_df.query("job == @job").shape[0] jobs_percent_subscribed[job] = [total_surveyed, subscribed, round(subscribed/total_surveyed, 2)*100] jobs_percent_subscribed_df = pd.DataFrame.from_dict(jobs_percent_subscribed, orient='index') jobs_percent_subscribed_df = jobs_percent_subscribed_df.reset_index().rename(columns={"index": "job", 0: "total_surveyed", 1 : "subscribed", 2 : "percent_subscribed"}) jobs_plot = alt.Chart(jobs_percent_subscribed_df, title="Jobs").mark_bar().encode( x=alt.X('percent_subscribed', scale=alt.Scale(domain=(0, 100))), y=alt.Y('job', sort='x') ) # + def discretize(age): """Discretize numeric age into one of three categories.""" if not isinstance(age, (int, float)): raise TypeError("Input 
should be a number!") if 17 <= age <= 35: return "young adults" elif 36 <= age <= 55: return "middle-aged" elif age > 55: return "older adults" train_df_age_category = train_df train_df_age_category["age_category"] = train_df_age_category["age"].apply(lambda row: discretize(row)) age_categories = list(train_df_age_category['age_category'].unique()) age_percent_subscribed = {} for age_category in age_categories: subscribed = train_df_age_category.query("age_category == @age_category & y == 'yes'").shape[0] total_surveyed = train_df_age_category.query("age_category == @age_category").shape[0] age_percent_subscribed[age_category] = [total_surveyed, subscribed, round(subscribed/total_surveyed, 2)*100] age_percent_subscribed_df = pd.DataFrame.from_dict(age_percent_subscribed, orient='index') age_percent_subscribed_df = age_percent_subscribed_df.reset_index().rename(columns={"index": "age_category", 0: "total_surveyed", 1 : "subscribed", 2 : "percent_subscribed"}) age_categories_plot = alt.Chart(age_percent_subscribed_df, title="Age Category").mark_bar().encode( x=alt.X('percent_subscribed', scale=alt.Scale(domain=(0, 100))), y=alt.Y('age_category', sort=["older adults", "middle-aged", "young adults"]) ) # + poutcomes = list(train_df['poutcome'].unique()) poutcome_percent_subscribed = {} for poutcome in poutcomes: subscribed = train_df.query("poutcome == @poutcome & y == 'yes'").shape[0] total_surveyed = train_df.query("poutcome == @poutcome").shape[0] poutcome_percent_subscribed[poutcome] = [total_surveyed, subscribed, round(subscribed/total_surveyed, 2)*100] poutcome_percent_subscribed_df = pd.DataFrame.from_dict(poutcome_percent_subscribed, orient='index') poutcome_percent_subscribed_df = poutcome_percent_subscribed_df.reset_index().rename(columns={"index": "poutcome", 0: "total_surveyed", 1 : "subscribed", 2 : "percent_subscribed"}) poutcomes_plot = alt.Chart(poutcome_percent_subscribed_df, title="Previous outcome").mark_bar().encode( x=alt.X('percent_subscribed', 
scale=alt.Scale(domain=(0, 100))), y=alt.Y('poutcome', sort='x') ) # + default_values = list(train_df['default'].unique()) default_percent_subscribed = {} for default in default_values: subscribed = train_df.query("default == @default & y == 'yes'").shape[0] total_surveyed = train_df.query("default == @default").shape[0] default_percent_subscribed[default] = [total_surveyed, subscribed, round(subscribed/total_surveyed, 2)*100] default_percent_subscribed_df = pd.DataFrame.from_dict(default_percent_subscribed, orient='index') default_percent_subscribed_df = default_percent_subscribed_df.reset_index().rename(columns={"index": "default", 0: "total_surveyed", 1 : "subscribed", 2 : "percent_subscribed"}) default_plot = alt.Chart(default_percent_subscribed_df, title="Default").mark_bar().encode( x=alt.X('percent_subscribed', scale=alt.Scale(domain=(0, 100))), y=alt.Y('default', sort='x') ) # + housing_values = list(train_df['housing'].unique()) housing_percent_subscribed = {} for housing in housing_values: subscribed = train_df.query("housing == @housing & y == 'yes'").shape[0] total_surveyed = train_df.query("housing == @housing").shape[0] housing_percent_subscribed[housing] = [total_surveyed, subscribed, round(subscribed/total_surveyed, 2)*100] housing_percent_subscribed_df = pd.DataFrame.from_dict(housing_percent_subscribed, orient='index') housing_percent_subscribed_df = housing_percent_subscribed_df.reset_index().rename(columns={"index": "housing", 0: "total_surveyed", 1 : "subscribed", 2 : "percent_subscribed"}) housing_plot = alt.Chart(housing_percent_subscribed_df, title="Housing").mark_bar().encode( x=alt.X('percent_subscribed', scale=alt.Scale(domain=(0, 100))), y=alt.Y('housing', sort='x') ) # + loan_values = list(train_df['loan'].unique()) loan_percent_subscribed = {} for loan in loan_values: subscribed = train_df.query("loan == @loan & y == 'yes'").shape[0] total_surveyed = train_df.query("loan == @loan").shape[0] loan_percent_subscribed[loan] = 
[total_surveyed, subscribed, round(subscribed/total_surveyed, 2)*100] loan_percent_subscribed_df = pd.DataFrame.from_dict(loan_percent_subscribed, orient='index') loan_percent_subscribed_df = loan_percent_subscribed_df.reset_index().rename(columns={"index": "loan", 0: "total_surveyed", 1 : "subscribed", 2 : "percent_subscribed"}) loan_plot = alt.Chart(loan_percent_subscribed_df, title="Loan").mark_bar().encode( x=alt.X('percent_subscribed', scale=alt.Scale(domain=(0, 100))), y=alt.Y('loan', sort='x') ) # + marital_values = list(train_df['marital'].unique()) marital_percent_subscribed = {} for marital in marital_values: subscribed = train_df.query("marital == @marital & y == 'yes'").shape[0] total_surveyed = train_df.query("marital == @marital").shape[0] marital_percent_subscribed[marital] = [total_surveyed, subscribed, round(subscribed/total_surveyed, 2)*100] marital_percent_subscribed_df = pd.DataFrame.from_dict(marital_percent_subscribed, orient='index') marital_percent_subscribed_df = marital_percent_subscribed_df.reset_index().rename(columns={"index": "marital", 0: "total_surveyed", 1 : "subscribed", 2 : "percent_subscribed"}) marital_plot = alt.Chart(marital_percent_subscribed_df, title="Marital Status").mark_bar().encode( x=alt.X('percent_subscribed', scale=alt.Scale(domain=(0, 100))), y=alt.Y('marital', sort='x') ) # + education_values = list(train_df['education'].unique()) education_percent_subscribed = {} for education in education_values: subscribed = train_df.query("education == @education & y == 'yes'").shape[0] total_surveyed = train_df.query("education == @education").shape[0] education_percent_subscribed[education] = [total_surveyed, subscribed, round(subscribed/total_surveyed, 2)*100] education_percent_subscribed_df = pd.DataFrame.from_dict(education_percent_subscribed, orient='index') education_percent_subscribed_df = education_percent_subscribed_df.reset_index().rename(columns={"index": "education", 0: "total_surveyed", 1 : "subscribed", 2 : 
"percent_subscribed"}) education_plot = alt.Chart(education_percent_subscribed_df, title="Education").mark_bar().encode( x=alt.X('percent_subscribed', scale=alt.Scale(domain=(0, 100))), y=alt.Y('education', sort=education_ordering) ) # + contact_values = list(train_df['contact'].unique()) contact_percent_subscribed = {} for contact in contact_values: subscribed = train_df.query("contact == @contact & y == 'yes'").shape[0] total_surveyed = train_df.query("contact == @contact").shape[0] contact_percent_subscribed[contact] = [total_surveyed, subscribed, round(subscribed/total_surveyed, 2)*100] contact_percent_subscribed_df = pd.DataFrame.from_dict(contact_percent_subscribed, orient='index') contact_percent_subscribed_df = contact_percent_subscribed_df.reset_index().rename(columns={"index": "contact", 0: "total_surveyed", 1 : "subscribed", 2 : "percent_subscribed"}) contact_plot = alt.Chart(contact_percent_subscribed_df, title="Contact").mark_bar().encode( x=alt.X('percent_subscribed', scale=alt.Scale(domain=(0, 100))), y=alt.Y('contact') ) # - categorical_plots = jobs_plot & age_categories_plot & poutcomes_plot & default_plot & housing_plot & loan_plot & marital_plot & education_plot & contact_plot # ## Exploratory analysis on the training data set # # To gain an understanding on which features could be helpful in predicting the positive class, we plotted histograms of numeric features (didn't subscibe: blue and subscibed: orange) and percent subscribed bar graphs for each of the categorical features for all observations in the training data set. Although the histograms distributions for all of the numeric features overlap to a certain degree, they do show a difference in their centres and spreads, for example, `age` histogram. For the categorical features, some features are similar in the proportion subscribed, while others seem to be promising in predicting the positive class. 
The `poutcome` (previous outcome) feature seem to be the best as previous success is highly associated with the positive class. In addition, the features values (`contact`: cellphone, `education`:illitrate, `age_category`:older adults then young adults, and `job`s:retired and student) seem to be associated with the positive class. # alt.Chart() for feat in numeric_features: ax = train_df.groupby(target)[feat].plot.hist(bins=20, alpha=0.4, legend=True) plt.xlabel(feat) plt.title("Histogram of " + feat) plt.show() categorical_plots # + #categorical_plots.save('categorical_plots.png') # -
z_archive/banking_eda_part3.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <p align="center"> # <img src="https://github.com/GeostatsGuy/GeostatsPy/blob/master/TCG_color_logo.png?raw=true" width="220" height="240" /> # # </p> # # ## Subsurface Data Analytics # # ### Feature Transformations for Subsurface Data Analytics in Python # # # #### <NAME>, Associate Professor, University of Texas at Austin # # ##### [Twitter](https://twitter.com/geostatsguy) | [GitHub](https://github.com/GeostatsGuy) | [Website](http://michaelpyrcz.com) | [GoogleScholar](https://scholar.google.com/citations?user=QVZ20eQAAAAJ&hl=en&oi=ao) | [Book](https://www.amazon.com/Geostatistical-Reservoir-Modeling-Michael-Pyrcz/dp/0199731446) | [YouTube](https://www.youtube.com/channel/UCLqEr-xV-ceHdXXXrTId5ig) | [LinkedIn](https://www.linkedin.com/in/michael-pyrcz-61a648a1) # # ### Subsurface Machine Learning: Feature Transformations for Subsurface Data Analytics # # Here's a demonstration of feature transformations for subsurface modeling in Python. This is part of my Subsuface Machine Learning Course at the Cockrell School of Engineering at the University of Texas at Austin. # # #### Feature Transformations # # There are many reasons that we may want to perform feature transformations. # # * the make the features consistent for visualization and comparison # # * to avoid bias or impose feature weighting for methods (e.g. k nearest neighbours regression) that rely on distances calculated in predictor feature space # # * the method requires the variables to have a specific range or distribution: # # * artificial neural networks may require all features to range from [-1,1] # * parital correlation coefficients require a Gaussian distribution. 
# * statistical tests may require a specific distribution # * geostatistical sequential simulation requires an indicator or Gaussian transform # # Feature transformations is a common basic building blocks in many machine learning workflows. # # * Let's learn how to perform feature transformations. # # #### Objective # # In the Stochastic Machine Learning class, I want to provide hands-on experience with solving complicated subsurface modeling problems with data analytics, machine learning. Python provides an excellent vehicle to accomplish this. I have coded a package called GeostatsPy with GSLIB: Geostatistical Library (Deutsch and Journel, 1998) functionality that provides basic building blocks for building subsurface modeling workflows. # # The objective is to remove the hurdles of subsurface modeling workflow construction by providing building blocks and sufficient examples. This is not a coding class per se, but we need the ability to 'script' workflows working with numerical methods. # # #### Getting Started # # Here's the steps to get setup in Python with the GeostatsPy package: # # 1. Install Anaconda 3 on your machine (https://www.anaconda.com/download/). # 2. From Anaconda Navigator (within Anaconda3 group), go to the environment tab, click on base (root) green arrow and open a terminal. # 3. In the terminal type: pip install geostatspy. # 4. Open Jupyter and in the top block get started by copy and pasting the code block below from this Jupyter Notebook to start using the geostatspy functionality. # # You will need to copy the data file to your working directory. They are available here: # # * Tabular data - unconv_MV_v4.csv at https://git.io/fhHLT. # # #### Importing Packages # # We will import the GeostatsPy package. # # * we will use a convenient transformation method # # There are examples below with GeostatsPy functions. You can go here to see a list of the available functions, https://git.io/fh4eX, and for other example workflows and source code. 
import geostatspy.GSLIB as GSLIB # GSLIB utilities, visualization and wrapper
import geostatspy.geostats as geostats # GSLIB methods convert to Python

# We will also need some standard packages. These should have been installed with Anaconda 3.

import numpy as np # ndarrays for gridded data
import pandas as pd # DataFrames for tabular data
import os # set working directory, run executables
import matplotlib.pyplot as plt # for plotting
from scipy import stats # summary statistics
import math # trigonometry etc.
import scipy.signal as signal # kernel for moving window calculation
import random # for random numbers
import seaborn as sns # for matrix scatter plots
from scipy import linalg # for linear regression
from sklearn import preprocessing

# #### Declare Functions
#
# Let's define a single function to streamline plotting correlation matrices.

def histograms(df):
    """Plot a 3x3 grid of histograms for the standard unconventional-well features.

    Expects columns Por, Perm, AI, Brittle, TOC, VR and Prod; each histogram
    spans that feature's own min/max. The figure is also saved as
    'hist_Porosity_Multiple_bins.tif' in the working directory.
    """
    # (column, axis label, subplot title, subplot position); position 337 is
    # intentionally left empty, matching the original layout.
    specs = [
        ('Por', 'Porosity (%)', 'Porosity', 331),
        ('Perm', 'Permeability (mD)', 'Permeability', 332),
        ('AI', 'Acoustic Impedance (kg/m2s*10^6)', 'Acoustic Impedance', 333),
        ('Brittle', 'Brittleness Ratio (%)', 'Brittleness', 334),
        ('TOC', 'Total Organic Carbon (%)', 'Total Organic Carbon', 335),
        ('VR', 'Vitrinite Reflectance (%)', 'Vitrinite Reflectance', 336),
        ('Prod', 'Normalized Initial Production (MCFPD)', 'Normalized Initial Production', 338),
    ]
    # DRY refactor: one loop replaces seven copy-pasted min/max + hist_st stanzas.
    for col, name, title, pos in specs:
        plt.subplot(pos)
        GSLIB.hist_st(df[col].values, min(df[col]), max(df[col]), log=False, cumul=False,
                      bins=20, weights=None, xlabel=name, title=title)
    plt.subplots_adjust(left=0.0, bottom=0.0, right=3.0, top=3.5, wspace=0.2, hspace=0.2)
    plt.savefig('hist_Porosity_Multiple_bins.tif', dpi=600, bbox_inches="tight")
    plt.show()

# #### Set the Working Directory
#
# I always like to do this so I don't lose files and to simplify subsequent read and writes (avoid including the full address each time).

os.chdir("c:/PGE383") # set the working directory

# #### Loading Tabular Data
#
# Here's the command to load our comma delimited data file in to a Pandas' DataFrame object.

df = pd.read_csv('unconv_MV_v4.csv') # load our data table

# Visualizing the DataFrame would be useful and we already learned about these methods in this demo (https://git.io/fNgRW).
#
# We can preview the DataFrame by utilizing the 'head' DataFrame member function (with a nice and clean format, see below). With the head command, add parameter 'n=13' to see the first 13 rows of the dataset.

df.head(n=13) # we could also use this command for a table preview

# This dataset has features from 200 unconventional wells including:
#
# 0. well index
# 1. well average porosity (%)
# 2. permeability (mD)
# 3. accoustic impedance (kg/m2s*10^6)
# 4. brittness ratio (%)
# 5. total organic carbon (%)
# 6. 
vitrinite reflectance (%) # 8. normalized initial production 90 day average (MCFPD). # # Note, the dataset is synthetic, but has realistic ranges and general multivariate relationships. # # Ranking features is really an effort to understand the features and their relationships with eachother. We will start with basic data visualization and move to more complicated methods such are partial correlation and recursive feature elimination. # # ### Summary Statistics # # In any multivariate work we should start with the univariate analysis, summary statistics of one variable at a time. The summary statistic ranking method is qualitative, we are asking: # # * are there data issues? # * do we trust the features? do we we trust the features all equally? # * are there issues that need to be taken care of before we develop any multivariate workflows? # # There are a lot of efficient methods to calculate summary statistics from tabular data in DataFrames. The describe() command provides count, mean, minimum, maximum, and quartiles all in a compact data table. We use transpose() command to flip the table so that features are on the rows and the statistics are on the columns. df.describe().transpose() # Let's remove the well index as we will not want to transform it. df = df.iloc[:,1:] df.head() # We should also take a look at the histograms. We will use our convenient function (declared above) to visualize all the distribuitons. histograms(df) # our function to visualize all feature histograms # There are just a couple slighly negative values, let's just truncate them at zero. We can use this command below to set all TOC values in the DataFrame that are less than 0.0 as 0.0, otherwise we keep the original TOC value. # # ### Truncation # # It is possible that the features may extend beyond the palusible range of values. 
# # * truncation is simply assigning values outside the range with a specific value # # * it is common to assign the minimum permissible value to outliers on the lower tail and visa versa # # Truncation can be handled easily with numpy operators applied to the feature array within the Pandas DataFrame # # ```python # num = df._get_numeric_data() # num[num < data_min] = data_min # num[num > data_max] = data_max # ``` num = df._get_numeric_data() # get the numerical values num[num < 0] = 0 # truncate negative values to 0.0 df.describe().transpose() # get the summary statistics of all features # Let's look at the summary statistics again to confirm that we were successful in truncating TOC to $\ge 0$. df.describe().transpose() # From the summary statistics you can see that the truncation was successful, we now have a minimum of 0.0. # # ### Affine Correction # # The affine correction is the transform of the feature distribution to a new mean and variance. # # * this is a shift and stretch / squeeze of the original property distribution # # * no shape change is assumed # # The following equation is applied to each sample in the original distribution # # \begin{equation} # y = \frac{\sigma_y}{\sigma_x}\left( x - \overline{x} \right) + \overline{y} # \end{equation} # # where $\sigma_x$ is the original standard deviation, $\sigma_y$ is the target standard deviation, $\overline{x}$ is the original mean and $\overline{y}$ is the target mean. # # * there is a affine function in GeostatsPy that we may use # # * the function is not set up to accept multiple features at once so we will just transform a single feature # # Let's transform porosity to have an arbitrary mean and standard deviation ($\overline{x} = 1.0$ and $\sigma_x = 1.0$) df['aPor'] = GSLIB.affine(df['Por'].values,tmean = 1.0,tstdev = 1.0) df.head(n = 13) # Let's check the summary statistics of our new feature. df['aPor'].describe().transpose() # Let's remove the affine transformed feature. 
df = df.drop(columns = ['aPor']) # ### Standardization # # Standardization is the transform of the feature distribution to a mean of zero and a variance of one. # # * this is a shift and stretch / squeeze of the original property distribution # # * no shape change is assumed # # The transform is effectively a specific case of the affine correction, with $\overline{y} = 0$ and $\sigma_y = 1.0$. # # \begin{equation} # y = \frac{1}{\sigma_x}\left( x - \overline{x} \right) # \end{equation} # # where $\sigma_x$ is the original standard deviation and $\overline{x}$ is the original mean. # # Let's standardize the feature to have: # # * mean = 0.0 # * variance = standard deviation = 1.0 # # To do this we: # # 1. instantiate the StandardScaler from scikit learn. We assign it as 'scaler' so we can use it to conveniently reverse the transformation if we like. We will need to do that to get our predictions back into regular production units. # # ```python # scaler = StandardScaler() # ``` # # 2. we then extract all the values from our DataFrame and apply the by-column standardization. The result is a 2D ndarray # # ```python # sfeatures = scaler.fit_transform(df_mv.values) # ``` # 3. we make an new empty DataFrame # # ```python # df_nmv = pd.DataFrame() # ``` # # 4. 
then we add the transformed value to the new DataFrame while keeping the sample index and feature names from the old DataFramae # # ```python # df_nmv = pd.DataFrame(sfeatures, index=df_mv.index, columns=df_mv.columns) # ``` # + from sklearn.preprocessing import StandardScaler scaler = StandardScaler() # instantiate the scaler sfeatures = scaler.fit_transform(df.values) # standardize all the values extracted from the DataFrame df_st = pd.DataFrame() # instantiate a new DataFrame df_st = pd.DataFrame(sfeatures, index=df.index, columns=df.columns) # copy the standardized values into the new DataFrame df_st.head() # preview the the new DataFrame # - # Let's close the loop and reverse the transformation and confirm that we get back to the original data. # # To do this we: # # 5. call the 'fit' features' scaler's inverse transform function transformed value to the new DataFrame while keeping the sample index and feature names from the old DataFramae # # ```python # rfeatures = scaler.inverse_transform(df_st.values) # ``` # # the output from this is a 2D numpy array. # # 6. We will put it into a new DataFrame. # # ```python # df_reverse = pd.DataFrame(rfeatures, index=df.index, columns=df.columns) # ``` rfeatures = scaler.inverse_transform(df_st.values) df_reverse = pd.DataFrame(rfeatures, index=df.index, columns=df.columns) # copy the standardized values into the new DataFrame df_reverse.head() # You can confirm the result is the same as the original, prior to standardization DatatFrame. # # We were just testing, so let's get rid of (delete) the new DataFrame. We can use the following to delete an instantiated object in Python. # # ```python # del df_reverse # ``` # # For more complicated workflows it may be a good idea to remove intermediate products to save memmory and to prevent clutter! del df_reverse # ### Normalization # # Also known as the min / max transform, recales the features to have a minimum of 0 and a maximum of 1. 
# # \begin{equation} # y = \frac{x - min(x)}{max(x) - min(x)} # \end{equation} # # where $min(x)$ and $max(x)$ are the minimum and maximum values for each feature. # # * scikit learn has a built in min / max transform method # # ```python # min_max_scaler = preprocessing.MinMaxScaler() # scaled_array = min_max_scaler.fit_transform(float_array) # ``` norm_scaler = preprocessing.MinMaxScaler() # instantiate the scaler nfeatures = norm_scaler.fit_transform(df.values) # standardize all the values extracted from the DataFrame df_n = pd.DataFrame(nfeatures, index=df.index, columns=df.columns) # copy the standardized values into the new DataFrame df_n.head() # Let's check the summary statistics. df_n.describe().transpose() # Let's close the loop and reverse the transformation (back-transform) and confirm that we get back to the original data. rfeatures = norm_scaler.inverse_transform(df_n.values) df_reverse = pd.DataFrame() # instantiate a new DataFrame df_reverse = pd.DataFrame(rfeatures, index=df.index, columns=df.columns) # copy the standardized values into the new DataFrame df_reverse.head() # You can confirm the result is the same as the original, prior to standardization DataFrame. # # Once again we were just testing, so let's delete the back-transformed DataFrame. del df_reverse # #### L1 / L2 Normalizer # # Another type of normalization is performed independently on each sample to force the $L1$ or $L2$ norm to be 1.0. # # For the L1 norm: # # \begin{equation} # \sum^m_{i,\alpha = 1} x_{i, \alpha} = 1.0, \quad i = 1, \ldots, n # \end{equation} # # where we have $x_{i, \alpha}, \alpha = 1, \dots, m$ features over $i = 1, \dots, n$ samples. # # For the L2 norm: # # \begin{equation} # \sum^m_{i,\alpha = 1}\left( x_{i, \alpha} \right)^2 = 1.0, \quad i = 1, \ldots, n # \end{equation} # # where we have $x_{i, \alpha}, \alpha = 1, \dots, m$ features over $i = 1, \dots, n$ samples. 
# # * this may be applied in text classification or clustering
# #
# # We demonstrate the L1 and L2 normalizer below.
# #
# # * there is no reverse to this transform
# #
# # Let's start with the $L2$ norm.

from sklearn.preprocessing import Normalizer

l2normalizer = Normalizer(norm = 'l2')       # scale each sample (row) to unit L2 norm
l2features = l2normalizer.fit_transform(df)  # L2-normalize all the values extracted from the DataFrame
df_nL2 = pd.DataFrame(l2features, index=df.index, columns=df.columns) # copy the normalized values into a new DataFrame, keeping the sample index and feature names
df_nL2.head()

# Now let's demonstrate the $L1$ norm.

from sklearn.preprocessing import Normalizer

l1normalizer = Normalizer(norm = 'l1')       # scale each sample (row) to unit L1 norm
# BUG FIX: the original applied `l2normalizer` here, so the "L1" demo actually
# produced L2-normalized values; use the L1 normalizer instantiated just above.
l1features = l1normalizer.fit_transform(df)  # L1-normalize all the values extracted from the DataFrame
df_nL1 = pd.DataFrame(l1features, index=df.index, columns=df.columns) # copy the normalized values into a new DataFrame, keeping the sample index and feature names
df_nL1.head()

# ### Binary or Indicator Transform
#
# For the many problems that we need to perform a binary transform to convert our continuous feature to 0's and 1's based on a threshold, $x_t$
#
# * for the binary transform, $x_i = 0$ if $<= x_t$ and $x_i = 1$ otherwise
# * for the indicator transform, $x_i = 1$ if $<= x_t$ and $x_i = 0$ otherwise
#
# There is a scikit-learn function for the binary transform

from sklearn.preprocessing import Binarizer

binarizer = Binarizer(threshold = 13.0)  # values <= 13.0 map to 0, values > 13.0 map to 1
bPor = binarizer.fit_transform(df['Por'].values.reshape(-1, 1)) # binarize porosity; reshape to the 2D column vector scikit-learn expects
bPor

# ### K Bins Discretization
#
# With K bins discretization we bin the range of the feature into K bins and then expand each sample for our continuous feature to k features with the assignment of a value of 1 if the sample is within a bin and 0 if outside the bin
#
# * strategies include uniform width bins (uniform) and uniform number of data in each bin (quantile)
#
# Let's make 5 uniform bins
and then concatenate the original porosity values so we can compare the original porosity values and our K bins discretization. from sklearn.preprocessing import KBinsDiscretizer kbins = KBinsDiscretizer(n_bins=5, strategy='uniform') kbins_por = kbins.fit_transform(df['Por'].values.reshape(-1, 1)) # standardize all the values extracted from the DataFrame np.concatenate((df['Por'].values.reshape(-1, 1),kbins_por.toarray()),axis=1) # Let's look at the bin boundaries to better understand the result above. kbins.bin_edges_ # Spot check the first sample, $12.08\%$ porosity is $\in [9.95\%,13.35\%]$ so we have a 1 in the second bin (second column in our table) and zeros in the other bins. # # ### Gaussian Transform / Gaussian Anamorphosis # # This is a quantile transformation of the feature to follow a Gaussian distribution. We actually map each of the quantiles from the original feature distribution to the target distribution. This is required for a wide range of statistical methods. # # \begin{equation} # y = G_y^{-1}(F_x(x)) # \end{equation} # # where $G_y$ is the Gaussian cumulative distribution function and $F_x$ is the original feature cumulative distribution function. from sklearn.preprocessing import QuantileTransformer nscore = QuantileTransformer(n_quantiles=100, random_state=73, output_distribution = 'normal') nsfeatures = nscore.fit_transform(df) # standardize all the values extracted from the DataFrame df_ns = pd.DataFrame() # instantiate a new DataFrame df_ns = pd.DataFrame(nsfeatures, index=df.index, columns=df.columns) # copy the standardized values into the new DataFrame df_ns.head() # Let's visualize the Gaussian transformed feature histograms. # # * we should see a nice bell shape centered on 0.0 with a range of about -4 to 4. histograms(df_ns) # Once again, let's check out the reverse transform. 
# We just reverse the previous quantile transform
#
# \begin{equation}
# x = F_x^{-1}(G_y(y))
# \end{equation}
#

rfeatures = nscore.inverse_transform(df_ns.values) # back-transform from Gaussian space to the original feature distributions
df_reverse = pd.DataFrame() # instantiate a new DataFrame
df_reverse = pd.DataFrame(rfeatures, index=df.index, columns=df.columns) # copy the back-transformed values into the new DataFrame, keeping the sample index and feature names
df_reverse.head() # should match the original DataFrame prior to the Gaussian transform

# We can also perform a uniform transform. Let's show the quantile transformation to a uniform distribution.

from sklearn.preprocessing import QuantileTransformer

uniform = QuantileTransformer(n_quantiles=100, random_state=73, output_distribution = 'uniform')
unifeatures = uniform.fit_transform(df) # quantile-transform all the values extracted from the DataFrame to a uniform [0,1] distribution
df_uni = pd.DataFrame() # instantiate a new DataFrame
df_uni = pd.DataFrame(unifeatures, index=df.index, columns=df.columns) # copy the transformed values into the new DataFrame
df_uni.head() # preview the uniform-transformed DataFrame

# Let's visualize the uniform transformed feature histograms.
#
# * we should see a uniform frequencies (with some noise due to limited sampling) $\in [0,1]$.

histograms(df_uni)

# We now have features with uniform distributions $[0,1]$.
#
# ### Custom Transformers
#
# We can also create our own custom transform. We can specify our own transform within a scikit learn preprocessing function
#
# * this allows us to have a convenient method for forward and reverse transforms of our features as we have seen above
#
# Let's demonstrate with the natural log for the forward transform and the exponential for the reverse transform.
# # # \begin{equation} # y = log(x) # \end{equation} # # \begin{equation} # x = exp(y) # \end{equation} # # ```python # custom_transformer = FunctionTransformer(func = np.log, inverse_func = np.exp) # ``` # # from sklearn.preprocessing import FunctionTransformer custom_transformer = FunctionTransformer(func = np.log, inverse_func = np.exp, check_inverse = True, validate=True) custom_features = custom_transformer.fit_transform(df['Perm'].values.reshape(-1, 1)) # standardize all the values extracted from the DataFrame df_custom = pd.DataFrame(custom_features, columns = ['LogPerm']) # instantiate a new DataFrame df_custom.head() # Let's demonstrate that our custom transform is reversible. rfeatures = custom_transformer.inverse_transform(df_custom.values) df_reverse = pd.DataFrame(rfeatures, index=df_custom.index, columns=['Perm']) # copy the standardized values into the new DataFrame df_reverse.head() # Compare the back-transformed permeability values to the original dataset. The reverse transform that we speficied with our custom transformation works! # # #### Comments # # This was a basic demonstration of feature transformations. This might not seem very interesting at this time, but these transformations are important building blocks of workflow construction for data analytics and machine learning! # # I have other demonstrations on the basics of working with DataFrames, ndarrays, univariate statistics, plotting data, declustering, data transformations, trend modeling and many other workflows available at https://github.com/GeostatsGuy/PythonNumericalDemos and https://github.com/GeostatsGuy/GeostatsPy. 
# # I hope this was helpful, # # *Michael* # # #### The Author: # # ### <NAME>, Associate Professor, University of Texas at Austin # *Novel Data Analytics, Geostatistics and Machine Learning Subsurface Solutions* # # With over 17 years of experience in subsurface consulting, research and development, Michael has returned to academia driven by his passion for teaching and enthusiasm for enhancing engineers' and geoscientists' impact in subsurface resource development. # # For more about Michael check out these links: # # #### [Twitter](https://twitter.com/geostatsguy) | [GitHub](https://github.com/GeostatsGuy) | [Website](http://michaelpyrcz.com) | [GoogleScholar](https://scholar.google.com/citations?user=QVZ20eQAAAAJ&hl=en&oi=ao) | [Book](https://www.amazon.com/Geostatistical-Reservoir-Modeling-Michael-Pyrcz/dp/0199731446) | [YouTube](https://www.youtube.com/channel/UCLqEr-xV-ceHdXXXrTId5ig) | [LinkedIn](https://www.linkedin.com/in/michael-pyrcz-61a648a1) # # #### Want to Work Together? # # I hope this content is helpful to those that want to learn more about subsurface modeling, data analytics and machine learning. Students and working professionals are welcome to participate. # # * Want to invite me to visit your company for training, mentoring, project review, workflow design and / or consulting? I'd be happy to drop by and work with you! # # * Interested in partnering, supporting my graduate student research or my Subsurface Data Analytics and Machine Learning consortium (co-PIs including Profs. Foster, Torres-Verdin and van Oort)? My research combines data analytics, stochastic modeling and machine learning theory with practice to develop novel methods and workflows to add value. We are solving challenging subsurface problems! # # * I can be reached at <EMAIL>. # # I'm always happy to discuss, # # *Michael* # # <NAME>, Ph.D., P.Eng. 
Associate Professor The Hildebrand Department of Petroleum and Geosystems Engineering, Bureau of Economic Geology, The Jackson School of Geosciences, The University of Texas at Austin # # #### More Resources Available at: [Twitter](https://twitter.com/geostatsguy) | [GitHub](https://github.com/GeostatsGuy) | [Website](http://michaelpyrcz.com) | [GoogleScholar](https://scholar.google.com/citations?user=QVZ20eQAAAAJ&hl=en&oi=ao) | [Book](https://www.amazon.com/Geostatistical-Reservoir-Modeling-Michael-Pyrcz/dp/0199731446) | [YouTube](https://www.youtube.com/channel/UCLqEr-xV-ceHdXXXrTId5ig) | [LinkedIn](https://www.linkedin.com/in/michael-pyrcz-61a648a1) #
SubsurfaceDataAnalytics_Feature_Transformations.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (Data Science) # language: python # name: python3__SAGEMAKER_INTERNAL__arn:aws:sagemaker:us-east-1:081325390199:image/datascience-1.0 # --- # # Perform A/B Test using REST Endpoints # # TODO: This notebook requires that model.tar.gz already contains code/inference.py # # # This won't work unless that's the case. (I can't seem to specify entry_point and soruce_dir_ # You can test and deploy new models behind a single SageMaker Endpoint with a concept called “production variants.” These variants can differ by hardware (CPU/GPU), by data (comedy/drama movies), or by region (US West or Germany North). You can shift traffic between the models in your endpoint for canary rollouts and blue/green deployments. You can split traffic for A/B tests. And you can configure your endpoint to automatically scale your endpoints out or in based on a given metric like requests per second. As more requests come in, SageMaker will automatically scale the model prediction API to meet the demand. # <img src="img/model_ab.png" width="80%" align="left"> # We can use traffic splitting to direct subsets of users to different model variants for the purpose of comparing and testing different models in live production. The goal is to see which variants perform better. Often, these tests need to run for a long period of time (weeks) to be statistically significant. The figure shows 2 different recommendation models deployed using a random 50-50 traffic split between the 2 variants. 
# +
import boto3
import sagemaker
import pandas as pd

sess = sagemaker.Session()
bucket = sess.default_bucket()         # default S3 bucket for this SageMaker session
role = sagemaker.get_execution_role()  # IAM execution role the endpoints will run under
region = boto3.Session().region_name

sm = boto3.Session().client(service_name="sagemaker", region_name=region)   # SageMaker control-plane client
cw = boto3.Session().client(service_name="cloudwatch", region_name=region)  # CloudWatch client, used later for variant metrics
# -

# # Clean Up Previous Endpoints to Save Resources

# %store -r autopilot_endpoint_name

try:
    autopilot_endpoint_name  # deliberately raises NameError if %store -r restored nothing
    sm.delete_endpoint(EndpointName=autopilot_endpoint_name)
    print("Autopilot Endpoint has been deleted to save resources. This is good.")
except Exception:
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are no
    # longer swallowed; NameError (no stored endpoint) and boto3's ClientError
    # (endpoint already deleted) still fall through to the friendly message.
    print("Endpoints are cleaned up. This is good. Keep moving forward!")

# %store -r training_job_name

print(training_job_name)

# # Copy the Model to the Notebook

# !aws s3 cp s3://$bucket/$training_job_name/output/model.tar.gz ./model.tar.gz

# !mkdir -p ./model/
# !tar -xvzf ./model.tar.gz -C ./model/

# # Show the Prediction Signature

# !saved_model_cli show --all --dir ./model/tensorflow/saved_model/0/

# # Show `inference.py`

# !pygmentize ./model/code/inference.py

# # Create Variant A Model From the Training Job in a Previous Section
#
# # Notes:
# * `primary_container_image` is required because the inference and training images are different.
# * By default, the training image will be used, so we need to override it.
# * See https://github.com/aws/sagemaker-python-sdk/issues/1379 # * If you are not using a US-based region, you may need to adapt the container image to your current region using the following table: # # https://docs.aws.amazon.com/deep-learning-containers/latest/devguide/deep-learning-containers-images.html inference_image_uri = sagemaker.image_uris.retrieve( framework="tensorflow", region=region, version="2.3.1", py_version="py37", instance_type="ml.m5.4xlarge", image_scope="inference", ) print(inference_image_uri) # + import time timestamp = "{}".format(int(time.time())) model_a_name = "{}-{}-{}".format(training_job_name, "varianta", timestamp) sess.create_model_from_job( name=model_a_name, training_job_name=training_job_name, role=role, image_uri=inference_image_uri ) # - # # Create Variant B Model From the Training Job in a Previous Section # Notes: # * `primary_container_image` is required because the inference and training images are different. # * By default, the training image will be used, so we need to override it. See https://github.com/aws/sagemaker-python-sdk/issues/1379 # * If you are not using a US-based region, you may need to adapt the container image to your current region using the following table: # # https://docs.aws.amazon.com/deep-learning-containers/latest/devguide/deep-learning-containers-images.html # + model_b_name = "{}-{}-{}".format(training_job_name, "variantb", timestamp) sess.create_model_from_job( name=model_b_name, training_job_name=training_job_name, role=role, image_uri=inference_image_uri ) # - # # Canary Rollouts and A/B Testing # Canary rollouts are used to release new models safely to only a small subset of users such as 5%. They are useful if you want to test in live production without affecting the entire user base. Since the majority of traffic goes to the existing model, the cluster size of the canary model can be relatively small since it’s only receiving 5% traffic. 
# Instead of `deploy()`, we can create an `Endpoint Configuration` with multiple variants for canary rollouts and A/B testing. # + from sagemaker.session import production_variant timestamp = "{}".format(int(time.time())) endpoint_config_name = "{}-{}-{}".format(training_job_name, "abtest", timestamp) variantA = production_variant( model_name=model_a_name, instance_type="ml.m5.4xlarge", initial_instance_count=1, variant_name="VariantA", initial_weight=50, ) variantB = production_variant( model_name=model_b_name, instance_type="ml.m5.4xlarge", initial_instance_count=1, variant_name="VariantB", initial_weight=50, ) endpoint_config = sm.create_endpoint_config( EndpointConfigName=endpoint_config_name, ProductionVariants=[variantA, variantB] ) # + from IPython.core.display import display, HTML display( HTML( '<b>Review <a target="blank" href="https://console.aws.amazon.com/sagemaker/home?region={}#/endpointConfig/{}">REST Endpoint Configuration</a></b>'.format( region, endpoint_config_name ) ) ) # + model_ab_endpoint_name = "{}-{}-{}".format(training_job_name, "abtest", timestamp) endpoint_response = sm.create_endpoint(EndpointName=model_ab_endpoint_name, EndpointConfigName=endpoint_config_name) # - # # Store Endpoint Name for Next Notebook(s) # %store model_ab_endpoint_name # # Track the Deployment Within our Experiment # %store -r experiment_name print(experiment_name) # %store -r trial_name print(trial_name) # + from smexperiments.trial import Trial timestamp = "{}".format(int(time.time())) trial = Trial.load(trial_name=trial_name) print(trial) # + from smexperiments.tracker import Tracker tracker_deploy = Tracker.create(display_name="deploy", sagemaker_boto_client=sm) deploy_trial_component_name = tracker_deploy.trial_component.trial_component_name print("Deploy trial component name {}".format(deploy_trial_component_name)) # - # # Attach the `deploy` Trial Component and Tracker as a Component to the Trial trial.add_trial_component(tracker_deploy.trial_component) # # 
Track the Endpoint Name # + tracker_deploy.log_parameters( { "endpoint_name": model_ab_endpoint_name, } ) # must save after logging tracker_deploy.trial_component.save() # + from sagemaker.analytics import ExperimentAnalytics lineage_table = ExperimentAnalytics( sagemaker_session=sess, experiment_name=experiment_name, metric_names=["validation:accuracy"], sort_by="CreationTime", sort_order="Ascending", ) lineage_df = lineage_table.dataframe() lineage_df.shape # - lineage_df # + from IPython.core.display import display, HTML display( HTML( '<b>Review <a target="blank" href="https://console.aws.amazon.com/sagemaker/home?region={}#/endpoints/{}">REST Endpoint</a></b>'.format( region, model_ab_endpoint_name ) ) ) # - # # _Wait Until the Endpoint is Deployed_ waiter = sm.get_waiter("endpoint_in_service") waiter.wait(EndpointName=model_ab_endpoint_name) # # _Wait Until the ^^ Endpoint ^^ is Deployed_ # # Simulate a Prediction from an Application # + from sagemaker.tensorflow.model import TensorFlowPredictor from sagemaker.serializers import JSONLinesSerializer from sagemaker.deserializers import JSONLinesDeserializer predictor = TensorFlowPredictor( endpoint_name=model_ab_endpoint_name, sagemaker_session=sess, model_name="saved_model", model_version=0, content_type="application/jsonlines", accept_type="application/jsonlines", serializer=JSONLinesSerializer(), deserializer=JSONLinesDeserializer(), ) # - # ### Waiting for the Endpoint to be ready to Serve Predictions # + import time time.sleep(30) # - # # Predict the `star_rating` with Ad Hoc `review_body` Samples # + inputs = [{"features": ["This is great!"]}, {"features": ["This is bad."]}] predicted_classes = predictor.predict(inputs) for predicted_class in predicted_classes: print("Predicted star_rating: {}".format(predicted_class)) # - # # Predict the `star_rating` with `review_body` Samples from our TSV's # + import csv df_reviews = pd.read_csv( "./data/amazon_reviews_us_Digital_Software_v1_00.tsv.gz", 
delimiter="\t", quoting=csv.QUOTE_NONE, compression="gzip", ) df_sample_reviews = df_reviews[["review_body", "star_rating"]].sample(n=50) df_sample_reviews = df_sample_reviews.reset_index() df_sample_reviews.shape # + import pandas as pd def predict(review_body): inputs = [{"features": [review_body]}] predicted_classes = predictor.predict(inputs) return predicted_classes[0]["predicted_label"] df_sample_reviews["predicted_class"] = df_sample_reviews["review_body"].map(predict) df_sample_reviews.head(5) # - # # Review the REST Endpoint Performance Metrics in CloudWatch # + from IPython.core.display import display, HTML display( HTML( '<b>Review <a target="blank" href="https://console.aws.amazon.com/sagemaker/home?region={}#/endpoints/{}">REST Endpoint Performance Metrics</a></b>'.format( region, model_ab_endpoint_name ) ) ) # - # # Review the REST Endpoint Performance Metrics in a Dataframe # # Amazon SageMaker emits metrics such as Latency and Invocations (full list of metrics [here](https://alpha-docs-aws.amazon.com/sagemaker/latest/dg/monitoring-cloudwatch.html)) for each variant in Amazon CloudWatch. Let’s query CloudWatch to get the InvocationsPerVariant to show how invocations are split across variants. 
# + from datetime import datetime, timedelta import boto3 import pandas as pd def get_invocation_metrics_for_endpoint_variant( endpoint_name, namespace_name, metric_name, variant_name, start_time, end_time ): metrics = cw.get_metric_statistics( Namespace=namespace_name, MetricName=metric_name, StartTime=start_time, EndTime=end_time, Period=60, Statistics=["Sum"], Dimensions=[{"Name": "EndpointName", "Value": endpoint_name}, {"Name": "VariantName", "Value": variant_name}], ) if metrics["Datapoints"]: return ( pd.DataFrame(metrics["Datapoints"]) .sort_values("Timestamp") .set_index("Timestamp") .drop("Unit", axis=1) .rename(columns={"Sum": variant_name}) ) else: return pd.DataFrame() def plot_endpoint_metrics_for_variants(endpoint_name, namespace_name, metric_name, start_time=None): try: start_time = start_time or datetime.now() - timedelta(minutes=60) end_time = datetime.now() metrics_variantA = get_invocation_metrics_for_endpoint_variant( endpoint_name=model_ab_endpoint_name, namespace_name=namespace_name, metric_name=metric_name, variant_name=variantA["VariantName"], start_time=start_time, end_time=end_time, ) metrics_variantB = get_invocation_metrics_for_endpoint_variant( endpoint_name=model_ab_endpoint_name, namespace_name=namespace_name, metric_name=metric_name, variant_name=variantB["VariantName"], start_time=start_time, end_time=end_time, ) metrics_variants = metrics_variantA.join(metrics_variantB, how="outer") metrics_variants.plot() except: pass # - # # Show the Metrics for Each Variant # If you see `Metrics not yet available`, please be patient as metrics may take a few mins to appear in CloudWatch. # # Also, make sure the predictions ran successfully above. 
# + import matplotlib.pyplot as plt # %matplotlib inline # %config InlineBackend.figure_format='retina' time.sleep(20) plot_endpoint_metrics_for_variants( endpoint_name=model_ab_endpoint_name, namespace_name="/aws/sagemaker/Endpoints", metric_name="CPUUtilization" ) # + import matplotlib.pyplot as plt # %matplotlib inline # %config InlineBackend.figure_format='retina' time.sleep(5) plot_endpoint_metrics_for_variants( endpoint_name=model_ab_endpoint_name, namespace_name="AWS/SageMaker", metric_name="Invocations" ) # + import matplotlib.pyplot as plt # %matplotlib inline # %config InlineBackend.figure_format='retina' time.sleep(5) plot_endpoint_metrics_for_variants( endpoint_name=model_ab_endpoint_name, namespace_name="AWS/SageMaker", metric_name="InvocationsPerInstance" ) # + import matplotlib.pyplot as plt # %matplotlib inline # %config InlineBackend.figure_format='retina' time.sleep(5) plot_endpoint_metrics_for_variants( endpoint_name=model_ab_endpoint_name, namespace_name="AWS/SageMaker", metric_name="ModelLatency" ) # - # # Shift All Traffic to Variant B # _**No downtime** occurs during this traffic-shift activity._ # # This may take a few minutes. Please be patient. updated_endpoint_config = [ { "VariantName": variantA["VariantName"], "DesiredWeight": 0, }, { "VariantName": variantB["VariantName"], "DesiredWeight": 100, }, ] sm.update_endpoint_weights_and_capacities( EndpointName=model_ab_endpoint_name, DesiredWeightsAndCapacities=updated_endpoint_config ) # + from IPython.core.display import display, HTML display( HTML( '<b>Review <a target="blank" href="https://console.aws.amazon.com/sagemaker/home?region={}#/endpoints/{}">REST Endpoint</a></b>'.format( region, model_ab_endpoint_name ) ) ) # - # # _Wait for the ^^ Endpoint Update ^^ to Complete Above_ # This may take a few minutes. Please be patient. 
waiter = sm.get_waiter("endpoint_in_service") waiter.wait(EndpointName=model_ab_endpoint_name) # # Run Some More Predictions df_sample_reviews["predicted_class"] = df_sample_reviews["review_body"].map(predict) df_sample_reviews.head(5) # # Show the Metrics for Each Variant # If you see `Metrics not yet available`, please be patient as metrics may take a few mins to appear in CloudWatch. # # Also, make sure the predictions ran successfully above. # + import matplotlib.pyplot as plt # %matplotlib inline # %config InlineBackend.figure_format='retina' time.sleep(20) plot_endpoint_metrics_for_variants( endpoint_name=model_ab_endpoint_name, namespace_name="/aws/sagemaker/Endpoints", metric_name="CPUUtilization" ) # + import matplotlib.pyplot as plt # %matplotlib inline # %config InlineBackend.figure_format='retina' time.sleep(5) plot_endpoint_metrics_for_variants( endpoint_name=model_ab_endpoint_name, namespace_name="AWS/SageMaker", metric_name="Invocations" ) # + import matplotlib.pyplot as plt # %matplotlib inline # %config InlineBackend.figure_format='retina' time.sleep(5) plot_endpoint_metrics_for_variants( endpoint_name=model_ab_endpoint_name, namespace_name="AWS/SageMaker", metric_name="InvocationsPerInstance" ) # + import matplotlib.pyplot as plt # %matplotlib inline # %config InlineBackend.figure_format='retina' time.sleep(5) plot_endpoint_metrics_for_variants( endpoint_name=model_ab_endpoint_name, namespace_name="AWS/SageMaker", metric_name="ModelLatency" ) # - # # Remove Variant A to Reduce Cost # Modify the Endpoint Configuration to only use variant B. # # _**No downtime** occurs during this scale-down activity._ # # This may take a few mins. Please be patient. 
# + import time timestamp = "{}".format(int(time.time())) updated_endpoint_config_name = "{}-{}".format(training_job_name, timestamp) updated_endpoint_config = sm.create_endpoint_config( EndpointConfigName=updated_endpoint_config_name, ProductionVariants=[ { "VariantName": variantB["VariantName"], "ModelName": model_b_name, # Only specify variant B to remove variant A "InstanceType": "ml.m5.4xlarge", "InitialInstanceCount": 1, "InitialVariantWeight": 100, } ], ) # - sm.update_endpoint(EndpointName=model_ab_endpoint_name, EndpointConfigName=updated_endpoint_config_name) # # _If You See An ^^ Error ^^ Above, Please Wait Until the Endpoint is Updated_ # + from IPython.core.display import display, HTML display( HTML( '<b>Review <a target="blank" href="https://console.aws.amazon.com/sagemaker/home?region={}#/endpoints/{}">REST Endpoint</a></b>'.format( region, model_ab_endpoint_name ) ) ) # - # # _Wait for the ^^ Endpoint Update ^^ to Complete Above_ # This may take a few minutes. Please be patient. waiter = sm.get_waiter("endpoint_in_service") waiter.wait(EndpointName=model_ab_endpoint_name) # # Run Some More Predictions df_sample_reviews["predicted_class"] = df_sample_reviews["review_body"].map(predict) df_sample_reviews # # Show the Metrics for Each Variant # If you see `Metrics not yet available`, please be patient as metrics may take a few mins to appear in CloudWatch. # # Also, make sure the predictions ran successfully above. 
# + import matplotlib.pyplot as plt # %matplotlib inline # %config InlineBackend.figure_format='retina' time.sleep(20) plot_endpoint_metrics_for_variants( endpoint_name=model_ab_endpoint_name, namespace_name="/aws/sagemaker/Endpoints", metric_name="CPUUtilization" ) # + import matplotlib.pyplot as plt # %matplotlib inline # %config InlineBackend.figure_format='retina' time.sleep(5) plot_endpoint_metrics_for_variants( endpoint_name=model_ab_endpoint_name, namespace_name="AWS/SageMaker", metric_name="Invocations" ) # + import matplotlib.pyplot as plt # %matplotlib inline # %config InlineBackend.figure_format='retina' time.sleep(5) plot_endpoint_metrics_for_variants( endpoint_name=model_ab_endpoint_name, namespace_name="AWS/SageMaker", metric_name="InvocationsPerInstance" ) # + import matplotlib.pyplot as plt # %matplotlib inline # %config InlineBackend.figure_format='retina' time.sleep(5) plot_endpoint_metrics_for_variants( endpoint_name=model_ab_endpoint_name, namespace_name="AWS/SageMaker", metric_name="ModelLatency" ) # - # # More Links # * Optimize Cost with TensorFlow and Elastic Inference # https://aws.amazon.com/blogs/machine-learning/optimizing-costs-in-amazon-elastic-inference-with-amazon-tensorflow/ # # * Using API Gateway with SageMaker Endpoints # https://aws.amazon.com/blogs/machine-learning/creating-a-machine-learning-powered-rest-api-with-amazon-api-gateway-mapping-templates-and-amazon-sagemaker/ # # Release Resources sm.delete_endpoint(EndpointName=model_ab_endpoint_name) # + language="html" # # <p><b>Shutting down your kernel for this notebook to release resources.</b></p> # <button class="sm-command-button" data-commandlinker-command="kernelmenu:shutdown" style="display:none;">Shutdown Kernel</button> # # <script> # try { # els = document.getElementsByClassName("sm-command-button"); # els[0].click(); # } # catch(err) { # // NoOp # } # </script> # + language="javascript" # # try { # Jupyter.notebook.save_checkpoint(); # 
Jupyter.notebook.session.delete(); # } # catch(err) { # // NoOp # }
09_deploy/05_Perform_AB_Test_Reviews_BERT_TensorFlow_REST_Endpoints.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/alirezash97/GenericDecoding2018/blob/main/DS2018.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="4CVtkGYYYm6X" # # Download the data from openneuro using aws # + id="jSJi919H_8v3" # # !pip install awscli # + id="HlrioVew-qLq" # # !aws s3 sync --no-sign-request s3://openneuro.org/ds001246 ds001246-download/ # + [markdown] id="iUtWRsbEYgxR" # # Visualization and description of different types of the data # + id="IqfKUH44DzJ-" # import nibabel as nib # import matplotlib.pyplot as plt # def show_slices(slices): # """ Function to display row of image slices """ # fig, axes = plt.subplots(1, len(slices)) # for i, slice in enumerate(slices): # axes[i].imshow(slice.T, cmap="gray", origin="lower") # anat_img = nib.load('/content/ds001246-download/derivatives/preproc-spm/output/sub-01/ses-anatomy/anat/sub-01_ses-anatomy_T1w_preproc.nii.gz') # anat_img_data = anat_img.get_fdata() # print(anat_img_data.shape) # show_slices([anat_img_data[120, :, :], # anat_img_data[:, 120, :], # anat_img_data[:, :, 120]]) # plt.suptitle("Center slices for anatomical image") # + id="8vrj_BxYNWtq" # mid_slice_x = anat_img_data[120, :, :] # print(mid_slice_x.shape) # plt.imshow(mid_slice_x.T, cmap='gray', origin='lower') # plt.xlabel('First axis') # plt.ylabel('Second axis') # plt.colorbar(label='Signal intensity') # plt.suptitle("Center slice for anatomical image") # plt.show() # + id="s4_3AyqbEGRB" # f_img = nib.load('/content/ds001246-download/derivatives/preproc-spm/output/sub-01/ses-imageryTest01/func/sub-01_ses-imageryTest01_task-imagery_run-01_bold_preproc.nii.gz') # f_img_data = f_img.get_fdata() # 
# print(f_img_data.shape)
# show_slices([f_img_data[38, :, :, 0],
#              f_img_data[:, 38, :, 0],
#              f_img_data[:, :, 25, 0]])
# plt.suptitle("Center slices for functional image")

# + id="G1yxrpUlMxCE"
# mid_slice_x_fmri = f_img_data[38, :, :, 0]  # x = 38, t = 0
# print("Shape of slice: %s" % (mid_slice_x_fmri.shape,))
# plt.imshow(mid_slice_x_fmri.T, cmap='gray', origin='lower')
# plt.xlabel('First axis')
# plt.ylabel('Second axis')
# plt.colorbar(label='Signal intensity')
# plt.suptitle("Center slice for functional image")
# plt.show()

# + id="YKerinsV8kGr"
# mask_img = nib.load('/content/ds001246-download/sourcedata/sub-01/anat/sub-01_mask_RH_V1d.nii.gz')
# mask_img_data = mask_img.get_fdata()
# print(mask_img_data.shape)
# show_slices([mask_img_data[32, :, :],
#              mask_img_data[:, 32, :],
#              mask_img_data[:, :, 25]])
# plt.suptitle("Brain Regions Mask For sub-01")

# + id="6TdTGWGw-UjW"
# import pandas as pd
# events_df = pd.read_csv("/content/ds001246-download/sub-01/ses-imageryTest01/func/sub-01_ses-imageryTest01_task-imagery_run-01_events.tsv", sep='\t')
# events_df.head(5)

# + id="w39ACExAk6ms"
# events_df2 = pd.read_csv("/content/ds001246-download/sub-01/ses-perceptionTest01/func/sub-01_ses-perceptionTest01_task-perception_run-01_events.tsv", sep="\t")
# events_df2.head(5)

# + [markdown] id="QoY-ahhlZvjI"
# # creating a map for data generator

# + id="MLgNuoyohGcJ"
import os
import os.path


def get_paths(dir_addr):
    """Recursively collect the paths of all event (.tsv) files under dir_addr."""
    file_paths = list()
    for dirpath, dirnames, filenames in os.walk(dir_addr):
        for filename in [f for f in filenames if f.endswith(".tsv")]:
            file_paths.append(os.path.join(dirpath, filename))
    return file_paths


# + id="kLDeUHB8JMbX"
import pandas as pd


def create_map(file_path):
    """Build the DataFrame with all the information needed by the data generator.

    Parameters
    ----------
    file_path : list of str
        Paths of the event .tsv files (as returned by ``get_paths``).

    Returns
    -------
    pandas.DataFrame
        One row per event with columns Onset, Offset, Stimuli/Category_ID,
        Event_Type and Image_Address.
    """
    # Collect plain dicts and build the DataFrame once at the end:
    # DataFrame.append in a loop was O(n^2) and is removed in pandas >= 2.0.
    rows = []
    # BUG FIX: the original iterated over the *global* `file_paths` and
    # silently ignored its own argument; iterate over the parameter instead.
    for addr in file_path:
        tsv_file = pd.read_csv(addr, sep="\t")
        # Map the event-file path onto the matching preprocessed BOLD image.
        # assumes addr[27:-10] strips the "/content/ds001246-download/" prefix
        # and the "events.tsv" suffix -- TODO confirm for paths rooted elsewhere
        image_addr = "/content/ds001246-download/derivatives/preproc-spm/output/" + addr[27:-10] + "bold_preproc.nii.gz"
        for index, row in tsv_file.iterrows():
            onset = row["onset"]
            offset = onset + row["duration"]
            event_type = row["event_type"]
            # BUG FIX: the original bound this to the builtin-shadowing name
            # `id` and left it undefined when neither branch matched (NameError);
            # default to 0, matching the existing "n/a" handling.
            event_id = 0
            if "imagery" in image_addr:
                try:
                    event_id = int(row["category_id"])
                except ValueError:  # "n/a" entries parse to NaN -> ValueError
                    event_id = 0
            elif "perception" in image_addr:
                try:
                    event_id = int(row["stim_id"])
                except ValueError:  # "n/a" entries parse to NaN -> ValueError
                    event_id = 0
            rows.append({"Onset": onset,
                         "Offset": offset,
                         "Stimuli/Category_ID": event_id,
                         "Event_Type": event_type,
                         "Image_Address": image_addr})
    return pd.DataFrame(rows, columns=["Onset", "Offset", "Stimuli/Category_ID",
                                       "Event_Type", "Image_Address"])


# + id="cFKLOgFycj0-" outputId="8792e4cb-c53c-4e14-c8e8-adc53d1d0f2d" colab={"base_uri": "https://localhost:8080/", "height": 206}
file_paths = get_paths("/content/ds001246-download/")
map = create_map(file_paths)  # NOTE: shadows the builtin `map`; name kept for compatibility with later cells
map.head(5)

# + id="EjDTRrl-gQSt"
# print("Number of Unique Image Addresses: ", len(map["Image_Address"].unique()))
# print("Number of Unique IDs: ", len(map["Stimuli/Category_ID"].unique()))
# print("Here is Top 10 (in terms of frequency)\n", map["Stimuli/Category_ID"].value_counts()[1:11])
# print("Number of total events", len(map))

# + [markdown] id="uiyLJMpP5ZWE"
# # Here we select preferred categories and event types

# + id="yihqpPwpgRSx" colab={"base_uri": "https://localhost:8080/"} outputId="7328a5d3-aa9f-4715-9921-3ca2e5a27593"
categories = list(map["Stimuli/Category_ID"].value_counts()[1:11].index)  # most frequent categories
f_map = map.loc[(map['Event_Type'].isin(['imagery', 'stimulus']))
                & (map['Stimuli/Category_ID'].isin(categories))]
len(f_map)

# + id="e9_kmkCEfSeT" outputId="50cf8c65-5e5d-4fbe-8ca5-b11210580f67" colab={"base_uri": "https://localhost:8080/", "height": 409}
from sklearn.preprocessing import OneHotEncoder

# creating instance of one-hot-encoder
encoder_df = pd.get_dummies(f_map['Stimuli/Category_ID'])
final_map = f_map.join(encoder_df) print(final_map['Event_Type'].value_counts()) final_map = final_map.sample(frac=1, random_state=14) final_map.head(5) # + id="EusliSR3lgZT" final_map = final_map.sample(n=24) ################################ # + [markdown] id="5KJfBSQDSBlU" # # Preparing DataLoader # + id="_J1xaizFP3oq" outputId="3fd5020d-04ca-4316-a735-796290f9f43e" colab={"base_uri": "https://localhost:8080/"} import nibabel as nib import matplotlib.pyplot as plt from __future__ import print_function, division import torch from skimage import io, transform import numpy as np import matplotlib.pyplot as plt from torch.utils.data import Dataset, DataLoader from torchvision import transforms, utils # + id="Azfl6sbmSbwV" # for stimulus experiment, there are 175 samples for 534 seconds # for imagery experiment, there are 210 samples for 639 seconds # sampling rate ==> 1 sample every 3 seconds (fMRI limitation) import random import math import torchvision.transforms as transforms class Gdecoder(Dataset): def __init__(self, map, transform=None): """ Args: """ self.map = map self.transform = transform def __len__(self): return len(self.map) def __getitem__(self, idx): if torch.is_tensor(idx): idx = idx.tolist() image_path = self.map.iloc[idx]['Image_Address'] onset = math.floor((self.map.iloc[idx]['Onset'])/3) offset = math.floor((self.map.iloc[idx]['Offset']/3)) image_obj = nib.load(image_path) event_type = self.map.iloc[idx]['Event_Type'] # here we select a sequence of 3 numbers in imagery image time points which is 5 # in order to solve input size incompatibility issue if event_type == 'imagery': imagery_onset = random.randint(onset, offset-3) imagery_offset = imagery_onset + 3 image = image_obj.dataobj[:, :, :, imagery_onset:imagery_offset] else: image = image_obj.dataobj[:, :, :, onset:offset] label = np.array(self.map.iloc[idx, -10:], dtype=float) label = torch.from_numpy(label) # label = torch.unsqueeze(label, 0) # ============> # label = label.repeat(3, 1) # 
============> label = label.type(torch.FloatTensor) if image.shape[-1] != 3: # correction image = np.array(image[:, :, :, :], dtype=float) rpt_image = image[:, :, :, 0:1] image = np.concatenate((image, rpt_image), axis=3) if self.transform: image = np.array(image[:, :, :, :], dtype=np.float16) sample = {'image' : torch.from_numpy((image.T)), 'event type': event_type, 'label': label} else: sample = {'image' : image.T, 'event type': event_type, 'label': label} return sample # + id="mVs2zhF8Q5VC" # transform = transforms.Compose([transforms.ToTensor(), # transforms.Normalize()]) # dataset = Gdecoder(map=final_map, transform = transform) # loader = DataLoader( # dataset, # batch_size=4, # num_workers=2, # shuffle=False # ) # mean = 0. # std = 0. # nb_samples = 0. # for data in loader: # data = data['image'] # batch_samples = data.size(0) # data = data.view(batch_samples, data.size(1), -1) # mean += data.mean(2).sum(0) # std += data.std(2).sum(0) # nb_samples += batch_samples # mean /= nb_samples # std /= nb_samples # print(mean) # print(std) # + id="UbsbEo_WXoRX" # dataset = Gdecoder(map=final_map, transform = None) # + id="i16Pq7caUUQM" # for i in range(len(dataset)): # if dataset[i]['event type'] == 'stimulus': # sample = dataset[i] # print('one hot of the label:\n ', sample['label']) # print("Event Type: ", sample['event type']) # f_img_data = sample['image'].T # print("transposed sample shapes: ", f_img_data.shape) # show_slices([f_img_data[38, :, :, 2], # f_img_data[:, 38, :, 2], # f_img_data[:, :, 25, 2]]) # plt.suptitle("Center slices for functional image (stimulus experiment)") # break # + id="9MhvesSimBHQ" # for i in range(len(dataset)): # if dataset[i]['event type'] == 'imagery': # sample = dataset[i] # print('one hot of the label:\n ', sample['label']) # print("Event Type: ", sample['event type']) # f_img_data = sample['image'].T # print("transposed sample shapes: ", f_img_data.shape) # show_slices([f_img_data[38, :, :, 2], # f_img_data[:, 38, :, 2], # 
f_img_data[:, :, 25, 2]]) # plt.suptitle("Center slices for functional image (imagery experiment)") # break # + id="yIIRoTgRhJER" from torch.utils.data.sampler import SubsetRandomSampler import torchvision.transforms as transforms transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((258.7493, 258.8581, 258.8850), (347.9486, 348.1261, 388.1579))]) dataset = Gdecoder(map=final_map, transform = transform) batch_size = 4 test_split = .1 valid_split = .1 shuffle_dataset = True random_seed= 42 # Creating data indices for training and test splits: dataset_size = len(dataset) indices = list(range(dataset_size)) split_test = int(np.floor(test_split * dataset_size)) split_valid = int( split_test + (np.floor(valid_split * dataset_size))) if shuffle_dataset : np.random.seed(random_seed) np.random.shuffle(indices) test_indices = indices[:split_test] valid_indices = indices[split_test:split_valid] train_indices = indices[split_valid:] # Creating PT data samplers and loaders: train_sampler = SubsetRandomSampler(train_indices) valid_sampler = SubsetRandomSampler(valid_indices) test_sampler = SubsetRandomSampler(test_indices) train_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, sampler=train_sampler) valid_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, sampler=valid_sampler) test_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, sampler=test_sampler) # + id="vFJR3DCkNS4H" from torch import nn from torchvision import models import torch.nn.functional as F class SubUnet_orig(nn.Module): def __init__(self,hidden_size,n_layers,dropt,bi,N_classes): super(SubUnet_orig, self).__init__() self.hidden_size=hidden_size self.num_layers=n_layers self.dim_feats = 512 # self.cnn=models.alexnet(pretrained=True) # self.cnn.classifier[-1]=Identity() self.conv1 = nn.Conv2d(50, 128, 7) self.pool = nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(128, 512, 7) self.fc1 = nn.Linear(512 * 11 * 11, 4096) self.fc2 = 
nn.Linear(4096, 1024) self.fc3 = nn.Linear(1024, 512) self.rnn = nn.LSTM( input_size=self.dim_feats, hidden_size=self.hidden_size, bias = True, num_layers=self.num_layers, dropout=dropt, bidirectional=True, batch_first=True) self.n_cl=N_classes self.softmax = nn.Softmax(dim=1) self.sigmoid = nn.Sigmoid() self.Leaky_ReLU = nn.LeakyReLU() self.last_linear = nn.Linear(2*self.hidden_size, self.n_cl) def forward(self, x): batch_size, timesteps, C,H, W = x.size() c_in = x.view(batch_size * timesteps, C, H, W) # c_out = self.cnn(c_in) h = self.pool(self.Leaky_ReLU(self.conv1(c_in))) h = self.pool(self.Leaky_ReLU(self.conv2(h))) h = torch.flatten(h, 1) h = self.Leaky_ReLU(self.fc1(h)) h = self.Leaky_ReLU(self.fc2(h)) c_out = self.Leaky_ReLU(self.fc3(h)) r_out, (h_n, h_c) = self.rnn(c_out.view(batch_size, 3, self.dim_feats)) r_out = r_out[:, -1, :] # get rid of temporal dimension r_out2 = self.last_linear(r_out) return r_out2 # + id="bsVU45fzXkYz" net = SubUnet_orig(hidden_size=128, n_layers=8, dropt=0.1, bi=True, N_classes=10 ) # + id="3qEG3zK-afnh" # # initialize model # def init_weights(m): # if isinstance(m, nn.Linear): # torch.nn.init.kaiming_uniform(m.weight) # m.bias.data.fill_(0.01) # net.apply(init_weights) # + id="5XlWBrM_YgC9" import torch.optim as optim # def one_hot_ce_loss(outputs, targets): # criterion = nn.CrossEntropyLoss() # # _, labels = torch.max(targets, dim=1) # return criterion(outputs, labels) criterion = nn.CrossEntropyLoss() optimizer = optim.Adam(net.parameters(), lr=0.01) # + id="7sMVOgG9a94m" outputId="9e34dfe7-829b-488b-9f2e-816058921324" colab={"base_uri": "https://localhost:8080/", "height": 1000} for epoch in range(20): # loop over the dataset multiple times corrects = 0 running_loss = 0.0 for i, data in enumerate(train_loader, 0): # get the inputs; data is a list of [inputs, labels] inputs, labels = data['image'], data['label'].type(torch.float16) # forward + backward + optimize outputs = net(inputs.float()) print(outputs.shape) 
print(labels.shape) loss = criterion(outputs, labels) # zero the parameter gradients optimizer.zero_grad() loss.backward() optimizer.step() prediction = torch.max(outputs, dim=1).indices target = torch.max(labels, dim=1).indices corrects =+ torch.eq(prediction, target).sum() print("label: ", labels) print("outputs: ", outputs) print("prediction", prediction) print("target: ", target) print("corrects: ", corrects) # print statistics running_loss += loss.item() if i % 1 == 0: # print every 2000 mini-batches print(f'[{epoch + 1}, {i + 1:5d}] loss: {running_loss / 5:.10f}') accuracy = torch.Tensor.numpy( (corrects / (target.shape[0] * 5))) # batch_size x 5 print("Accuracy = {}".format(accuracy)) running_loss = 0.0 corrects = 0 print('Finished Training') # + id="OvUz_lz4nkd2"
DS2018.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/jchzener/Algorithms-Notebooks/blob/master/discrete_2d.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="6VDRCmQPIyno"
# Here, we implement one-step actor-critic with neural networks and test it on a simple 2D reaching environment. We will start with the environment first. The agent is confined in a 2D arena defined by state $S \in [-1, +1]^2$. An example of a state is then $[0.5, -0.2]^\top$. The agent has actions available that change the state by one of the following possible increments $f(A)$: $[-b, -b], [-b, 0], [-b, b], [0, -b], [0, 0], [0, b], [b, -b], [b, 0], \text{ and } [b, b]$, where $b>0$ is a small scalar. After choosing an action $A_t$, the state is changed as follows: $S_{t+1} = S_t + f(A_t)$, except when the state goes outside the arena, in which case it remains within the arena.

# + [markdown] id="ZXK014t0TKCA"
# It is an episodic task. Each episode, the agent is spawned from a uniformly randomly drawn state from the arena. The episode terminates when the agent reaches a small square around $(0, 0)$. The goal of the agent is to reach the terminal state as soon as possible. We formulate this goal by giving $-1$ rewards every time step.
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="uP9UUy6jIwcb" outputId="56793b0e-bb39-4f11-b1e4-e5ad3fee05a6"
# Actor critic agent: one-step actor-critic on the 2D reaching task
# described in the markdown above.
# %reset -f
import torch as tor
import matplotlib.pyplot as plt

# Problem: arena [-1, 1]^2, candidate state increments scaled by 0.03.
tor.manual_seed(1)
nacts = 8
# NOTE(review): `aval` lists 9 increments but only `nacts`=8 logits are
# produced below, so the last increment [1, 1] is never sampled — confirm
# this is intended.
aval = 0.03*tor.tensor([
    [-1, -1], [-1, 0], [-1, 1],
    [0, -1], [0, 0], [0, 1],
    [1, -1], [1, 0], [1, 1]
], dtype=tor.float32)
LB = tor.tensor([-1., -1.])  # arena lower bounds
UB = tor.tensor([1., 1.])    # arena upper bounds

# Agent: small MLPs for the actor (body + preference head) and the critic.
nhid = 10
alpha = 0.0003
actor_body = tor.nn.Sequential(tor.nn.Linear(2, nhid), tor.nn.ReLU(),
                               tor.nn.Linear(nhid, nhid), tor.nn.ReLU()
                               )
actor_pref = tor.nn.Sequential(tor.nn.Linear(nhid, nacts))
# Zero the preference head so the initial policy is uniform over actions.
actor_pref[-1].weight.data[:] = 0; actor_pref[-1].bias.data[:] = 0
critic = tor.nn.Sequential(tor.nn.Linear(2, nhid), tor.nn.ReLU(),
                           tor.nn.Linear(nhid, nhid), tor.nn.ReLU(),
                           tor.nn.Linear(nhid, 1))
# Critic learns with a 10x larger step size than the actor.
popt = tor.optim.Adam(list(actor_body.parameters())+list(actor_pref.parameters()),lr=alpha)
copt = tor.optim.Adam(critic.parameters(), lr=10*alpha)

# Experiment
EP = 600
rets = []    # per-episode returns
Slogs = []   # per-episode state trajectories
i = 0        # completed-episode counter
for ep in range(EP):
    Slogs.append([])
    S = tor.rand((1, 2))*(UB-LB) + LB  # uniform random spawn inside the arena
    Slogs[-1].append(S)
    ret = 0
    while True:
        # Take action
        feat = actor_body(S)
        pref = actor_pref(feat)
        # NOTE(review): the bare except only prints; if Categorical() raises,
        # `pol` is stale from the previous step (or undefined on the very
        # first step) when sampled below.
        try:
            pol = tor.distributions.Categorical(logits=pref)
        except:
            print("E", pref)
        Ai = pol.sample()

        # Receive reward and next state (clamped so the agent stays in the arena)
        noise = tor.rand(2)*0.06-0.03
        SP = tor.clamp(S + aval[Ai] + noise, LB, UB)
        R = -0.01  # constant step cost (plotted below scaled by -100)

        # Learning: one-step TD update; (1-done) zeroes the bootstrap value
        # at termination, and the advantage estimate is detached so only the
        # log-probability path carries actor gradients.
        done = tor.allclose(SP, tor.zeros(2), atol=0.2)  # terminal square around the origin
        vs = critic(S); vsp = critic(SP)
        pobj = pol.log_prob(Ai)*(R + (1-done)*vsp - vs).detach()
        ploss = -pobj
        closs = (R + (1-done)*vsp.detach() - vs)**2
        popt.zero_grad()
        ploss.backward()
        popt.step()
        copt.zero_grad()
        closs.backward()
        copt.step()

        # Log
        Slogs[-1].append(SP)
        ret += R

        # Termination
        if done:
            print(vsp)
            rets.append(ret)
            i += 1
            print(i, len(Slogs[-1]))
            break
        S = SP

# Plotting: episode lengths (returns scaled to step counts), then the last
# (up to) 30 trajectories, with alpha fading in along each path.
# NOTE(review): the inner loop reuses the name `i`, shadowing the outer loop
# variable (harmless since `for` rebinds it, but easy to misread).
plt.plot(-100*tor.tensor(rets))
plt.figure()
colors = ["tab:blue", "tab:green", "tab:orange", "tab:purple", "tab:red", "tab:brown"]
for i in range(-min(30, EP), 0):
    color = colors[i%len(colors)]
    Slog = tor.cat(Slogs[i])
    for i in range(Slog.shape[0]-1):
        plt.plot(Slog[i:i+2,0], Slog[i:i+2,1], alpha=(i+1)/Slog.shape[0], color=color)
plt.xlim([LB[0], UB[0]])
plt.ylim([LB[1], UB[1]])
plt.gca().set_aspect('equal', adjustable='box')
plt.grid()
plt.show()

# + id="LKGe3DGXUH2X"
discrete_2d.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:PROJ_irox_oer] *
#     language: python
#     name: conda-env-PROJ_irox_oer-py
# ---

# # Obtaining the indices of the atoms that make up the active octahedra
# ---

# ### Import Modules

# +
import os
print(os.getcwd())
import sys

import time; ti = time.time()

import pickle
import random

import numpy as np
import pandas as pd
pd.set_option("display.max_columns", None)

# #########################################################
from misc_modules.pandas_methods import reorder_df_columns

# #########################################################
from methods import (
    get_df_jobs_anal,
    get_df_atoms_sorted_ind,
    get_df_active_sites,
    get_df_octa_info,
    get_df_struct_drift,
    get_df_jobs,
    get_df_init_slabs,
    )

# #########################################################
from local_methods import get_octahedra_atoms
# -

# Use the notebook-aware tqdm when running interactively.
from methods import isnotebook
isnotebook_i = isnotebook()
if isnotebook_i:
    from tqdm.notebook import tqdm
    verbose = True
else:
    from tqdm import tqdm
    verbose = False

root_dir = os.path.join(
    os.environ["PROJ_irox_oer"],
    "workflow/octahedra_info",
    )

# ### Read Data

# +
df_jobs_anal = get_df_jobs_anal()
df_jobs_anal_i = df_jobs_anal

df_atoms_sorted_ind = get_df_atoms_sorted_ind()

df_active_sites = get_df_active_sites()

df_octa_info_prev = get_df_octa_info()

df_jobs = get_df_jobs()

df_init_slabs = get_df_init_slabs()

df_struct_drift = get_df_struct_drift()

# +
# df_octa_info_prev[df_octa_info_prev.index.duplicated(keep=False)]

# +
# assert df_octa_info_prev.index.is_unique, "SIDFISDI"
# -

# ### Filtering down to `oer_adsorbate` jobs

# +
df_ind = df_jobs_anal.index.to_frame()
df_jobs_anal = df_jobs_anal.loc[
    df_ind[df_ind.job_type == "oer_adsorbate"].index
    ]
df_jobs_anal = df_jobs_anal.droplevel(level=0)

df_ind = df_atoms_sorted_ind.index.to_frame()
df_atoms_sorted_ind = df_atoms_sorted_ind.loc[
    df_ind[df_ind.job_type == "oer_adsorbate"].index
    ]
df_atoms_sorted_ind = df_atoms_sorted_ind.droplevel(level=0)

# + active=""
#
#

# +
sys.path.insert(0, os.path.join(
    os.environ["PROJ_irox_oer"],
    "workflow/feature_engineering"))
from feature_engineering_methods import get_df_feat_rows

df_feat_rows = get_df_feat_rows(
    df_jobs_anal=df_jobs_anal,
    df_atoms_sorted_ind=df_atoms_sorted_ind,
    df_active_sites=df_active_sites,
    ads_to_include=["o", "oh", "bare", ],
    )

df_feat_rows = df_feat_rows.set_index([
    "compenv", "slab_id", "ads",
    # "active_site_orig", "att_num", "from_oh",
    "active_site", "att_num", "from_oh",
    ], drop=False)
# -

# #########################################################
# First pass: split the feature rows into indices already present in
# df_octa_info_prev (skip) and indices that still need processing.
data_dict_list = []
indices_to_process = []
indices_to_not_process = []
# #########################################################
iterator = tqdm(df_feat_rows.index, desc="1st loop")
for i_cnt, index_i in enumerate(iterator):
    # #####################################################
    row_i = df_feat_rows.loc[index_i]
    # #####################################################
    compenv_i = row_i.compenv
    slab_id_i = row_i.slab_id
    ads_i = row_i.ads
    active_site_orig_i = row_i.active_site_orig
    att_num_i = row_i.att_num
    job_id_max_i = row_i.job_id_max
    active_site_i = row_i.active_site
    from_oh_i = row_i.from_oh
    # #####################################################

    # Rebind index_i to the tuple form used by df_octa_info_prev's index.
    index_i = (compenv_i, slab_id_i, ads_i, active_site_i, att_num_i, from_oh_i, )
    if index_i in df_octa_info_prev.index:
        indices_to_not_process.append(index_i)
    else:
        indices_to_process.append(index_i)

# +
# # TEMP
# print(222 * "TEMP | ")

# # # DO NUMBER OF RANDOM SYSTEMS
# # indices_to_process = random.sample(indices_to_not_process, 20)

# # # DO NUMBER OF RANDOM SYSTEMS
# # indices_to_process = random.sample(indices_to_process, 20)

# # # DO EVERYTHING
# # indices_to_process = indices_to_not_process

# # # DO SPECIFIC SYSTEMS
# # indices_to_process = [
# #     ('sherlock', 'sifebelo_94', 'o', 63.0, 1, False),
# #     ('sherlock', 'sifebelo_94', 'o', 63.0, 1, True),
# #     ("sherlock", "kapapohe_58", "oh", 29.0, 0, True, ),
# #     ("sherlock", "kamevuse_75", "o", 49.0, 1, False, ),
# #     ]
# -

# ### Main Loop

df_feat_rows_2 = df_feat_rows.loc[
    indices_to_process
    ]

# +
# #########################################################
# Second pass: run get_octahedra_atoms on every still-unprocessed row and
# collect one result dict per row.
data_dict_list = []
# #########################################################
iterator = tqdm(df_feat_rows_2.index, desc="1st loop")
for i_cnt, index_i in enumerate(iterator):
    # print(20 * "-")
    # print(index_i)

    # #####################################################
    row_i = df_feat_rows.loc[index_i]
    # #####################################################
    compenv_i = row_i.compenv
    slab_id_i = row_i.slab_id
    ads_i = row_i.ads
    active_site_orig_i = row_i.active_site_orig
    att_num_i = row_i.att_num
    job_id_max_i = row_i.job_id_max
    active_site_i = row_i.active_site
    from_oh_i = row_i.from_oh
    # #####################################################

    # #################################################
    # Look up the struct-drift row for this job (either endpoint column).
    df_struct_drift_i = df_struct_drift[df_struct_drift.job_id_0 == job_id_max_i]
    if df_struct_drift_i.shape[0] == 0:
        df_struct_drift_i = df_struct_drift[df_struct_drift.job_id_1 == job_id_max_i]
    # #################################################
    # NOTE(review): octahedra_atoms_i is computed here but never used below.
    octahedra_atoms_i = None
    if df_struct_drift_i.shape[0] > 0:
        octahedra_atoms_i = df_struct_drift_i.iloc[0].octahedra_atoms
    # #################################################

    # NOTE(review): this unconditionally overwrites the from_oh_i value read
    # from the row above.
    if active_site_orig_i == "NaN":
        from_oh_i = False
    else:
        from_oh_i = True

    # #################################################
    name_i = (
        row_i.compenv, row_i.slab_id, row_i.ads,
        row_i.active_site_orig, row_i.att_num, )
    # #################################################
    row_atoms_i = df_atoms_sorted_ind.loc[name_i]
    # #################################################
    atoms_i = row_atoms_i.atoms_sorted_good
    # #################################################

    data_out = get_octahedra_atoms(
        df_jobs=df_jobs,
        df_init_slabs=df_init_slabs,
        atoms_0=atoms_i,
        job_id_0=job_id_max_i,
        active_site=active_site_i,
        compenv=compenv_i,
        slab_id=slab_id_i,
        ads_0=ads_i,
        active_site_0=active_site_orig_i,
        att_num_0=att_num_i,
        )

    # #################################################
    data_dict_i = dict()
    # #################################################
    data_dict_i["job_id_max"] = job_id_max_i
    data_dict_i["from_oh"] = from_oh_i
    data_dict_i["active_site"] = active_site_i
    data_dict_i["compenv"] = compenv_i
    data_dict_i["slab_id"] = slab_id_i
    data_dict_i["ads"] = ads_i
    data_dict_i["active_site_orig"] = active_site_orig_i
    data_dict_i["att_num"] = att_num_i
    # #################################################
    data_dict_i.update(data_out)
    # #################################################
    data_dict_list.append(data_dict_i)
    # #################################################

# #########################################################
df_octa_info = pd.DataFrame(data_dict_list)

col_order_list = ["compenv", "slab_id", "ads", "active_site", "att_num"]
df_octa_info = reorder_df_columns(col_order_list, df_octa_info)

if df_octa_info.shape[0] > 0:
    df_octa_info = df_octa_info.set_index([
        "compenv", "slab_id", "ads",
        # "active_site_orig", "att_num", ],
        # "active_site_orig", "att_num", "from_oh", ],
        "active_site", "att_num", "from_oh", ],
        drop=True)
# #########################################################
# -

# ### Combine previous and current `df_octa_info` to create new one

# +
# # TEMP
# print(111 * "TEMP | ")

# # Set save current version of df_octa_info
# df_octa_info_new = df_octa_info
# -

df_octa_info_new = pd.concat([
    df_octa_info,
    df_octa_info_prev,
    ], axis=0)

# ### Save data to pickle

# #########################################################
# Pickling data ###########################################
directory = os.path.join(
    root_dir,
    "out_data")
if not os.path.exists(directory):
    os.makedirs(directory)
with open(os.path.join(directory, "df_octa_info.pickle"), "wb") as fle:
    pickle.dump(df_octa_info_new, fle)
# #########################################################

# #########################################################
print(20 * "# # ")
print("All done!")
print("Run time:", np.round((time.time() - ti) / 60, 3), "min")
print("get_octahedra_atoms.ipynb")
print(20 * "# # ")
# #########################################################

# + active=""
#
#
workflow/octahedra_info/get_octahedra_atoms-Copy1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5"
import numpy as np
import pandas as pd
import os
print(os.listdir("../input"))

# + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a"
# Load the PUBG data and build the train/test design matrices.
df_train = pd.read_csv("../input/train_V2.csv")
df_test = pd.read_csv("../input/test_V2.csv")
df_train = df_train.dropna(axis=0)
# NOTE(review): dropping NA rows from the *test* set removes those Ids from
# the submission file — confirm the competition scorer tolerates that.
df_test = df_test.dropna(axis=0)

features = df_train.columns.drop(["winPlacePerc", "Id", "groupId", "matchId"])
train_X = df_train[features]
train_y = df_train['winPlacePerc']
test_X = df_test[features]

# one hot encode
train_X = pd.get_dummies(train_X)
test_X = pd.get_dummies(test_X)
# BUGFIX: get_dummies encodes train and test independently, so a category
# present in only one frame leaves the two matrices with different columns
# (or a different column order) and the model would be fed misaligned
# features. Align test to the train columns, filling dummies unseen in test
# with 0; this is a no-op when the column sets already match.
train_X, test_X = train_X.align(test_X, join="left", axis=1, fill_value=0)

# + _uuid="18ac82dcd67776dae7bc8fa0b9b925b2cbc7ee35"
from xgboost import XGBRegressor

# learningrate 0.01, maxdepth 6, colsamplebytree 1, nestimators 1000
XGBRegressor_model = XGBRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1,
                                  colsample_bytree=1, gamma=0, learning_rate=0.01, max_delta_step=0,
                                  max_depth=6, min_child_weight=1, missing=None, n_estimators=1000,
                                  n_jobs=1, nthread=None, objective='reg:linear', random_state=42,
                                  reg_alpha=0, reg_lambda=1, scale_pos_weight=1, seed=None,
                                  silent=0, subsample=1)
XGBRegressor_model.fit(train_X, train_y, verbose=False)
predict_y = XGBRegressor_model.predict(test_X)

# + _uuid="ee0cd0d6701e41d300dfb309f117822e04236919"
# Write the submission: one winPlacePerc prediction per kept test Id.
output = pd.DataFrame({'Id': df_test.Id, 'winPlacePerc': predict_y})
output.to_csv('submission.csv', index=False)
output
pubg-finish-placement-prediction-xgb.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Solutions to the Rootfinding Chapter Exercises # 1: Write down as many questions as you can, about this section. # What is the oldest method for finding roots of equations? What is the best way to solve polynomials? Can you solve them analytically? _Is_ there a best way to solve them? How do you solve multivariate polynomials (more than one equation in more than one unknown)? Can you _count_ the number of solutions independently so that you know when you have found them all? What's the difference between an equation and an identity? Can there be infinitely many solutions to a single equation? (Sure: $\sin(x)=0$ has solutions $k\pi$ for any integer $k$) Can an equation have no solutions? (Sure: $\exp(z) = 0$ has no finite solutions). Can an equation have _accumulation points_ of solutions? (An accumulation point is a place where things pile up infinitely; an example is the set $1/k$ for integers $k$, which has an accumulation point at $0$). Can we solve every polynomial equation in terms of continued fractions? Are there polynomial matrix equations, and can they be solved by Newton's method? What happens with _multiple_ roots, such as $z^2-2z+1=0$? If there are two solutions to an equation, which one is the right one? Can you find a formula for the _integer_ solutions of an equation? # 2: Sometimes Newton iteration is "too expensive"; a cheaper alternative is the so-called _secant iteration_, which goes as follows: $z_{n+1} = z_n - f(z_n)(z_{n}-z_{n-1})/(f(z_n) - f(z_{n-1}))$. You need not one, but _two_ initial approximations for this. Put $f(z) = z^2-2$ and start with the two initial approximations $z_0 = 1$, $z_1 = 3/2$. Carry out several steps of this (in exact arithmetic is better). Convert each rational $z_n$ to continued fraction form. 
Discuss what you find. # + import numpy as np niter = 10 z = np.zeros(niter) y = np.zeros(niter) f = lambda t: t**2 - 2.0 z[0] = 1.0 y[0] = f(z[0]) z[1] = 1.5 y[1] = f(z[1]) for k in range(2,niter): z[k] = z[k-1] - y[k-1]*(z[k-1]-z[k-2])/(y[k-1]-y[k-2]) y[k] = f( z[k] ) print( z, y ) # - # If you do this problem using exact arithmetic, and convert the rational answers to continued fraction form, you get $[1]$, $[1;2]$, $[1;2,2]$, $[1;2,2,2,2]$, and so on, similar to Newton's method. But there are $1$, $2$, $3$, $5$, $8$, $13$, $21$, and so on partial quotients---these are Fibonacci numbers! Newton's method converges _quadratically_ in that the number of correct entries in the continued fraction doubles every time (or nearly; it's off by one, maybe). The secant method converges at the rate of $\phi = (1+\sqrt{5})/2 \approx 1.618$ because the ratio of successive Fibonacci numbers approaches $\phi$ (quite quickly, really). # 3: Try Newton and secant iteration on some functions of your own choosing. You should see that Newton iteration usually takes fewer iterations to converge, but since it needs a derivative evaluation while the secant method does not, each iteration is "cheaper" in terms of computational cost(if $f(z)$ is at all expensive to evaluate, $f'(z)$ usually is too; there are exceptions, of course). The consensus seems to be that the secant method is a bit more practical; but in some sense it is just a variation on Newton's method. # This question doesn't need an _answer_ per se, because there are lots of examples all over the place, but perhaps your solutions need someone to look at them. Things that can happen: first, and most usual, your code works and the roots get approximated well quite quickly, and the residuals behave something like $r_n = f(z_n) \approx C r_{n-1}^2$ where $C$ is some constant. This is usually visible. If your residuals do _not_ get small quickly, then you might have a bug in your code, or you might have a multiple root. 
Sometimes you can divide by zero (if $f'(z)=0$ anywhere near), and sometimes the iteration can go off to infinity or minus infinity (if $f(z) = \exp(z)$ then it could well go to $-\infty$). # 4: Both the Newton iteration and the secant iteration applied to $f(z) = z^2-a^2$ can be _solved analytically_ by the transformation $z = a\coth \theta$. [Hyperbolic functions](https://en.wikipedia.org/wiki/Hyperbolic_functions) The iteration $z_{n+1} = (z_n + a^2/z_n)/2$ becomes (you can check this) $\coth \theta_{n+1} = \cosh 2\theta_n/\sinh 2\theta_n = \coth 2\theta_n$, and so we may take $\theta_{n+1} = 2\theta_n$. This can be solved to get $\theta_n = 2^n\theta_0$ and so we have an analytical formula for each $z_n = a \coth( 2^n \theta_0 )$. Try this on $a^2=2$; you should find that $\theta_0 = \mathrm{invcoth}(1/\sqrt{2})$. By "invcoth" we mean the functional inverse of coth, i.e.: $\coth\theta_0 = 1/\sqrt{2}$. It may surprise you that that number is complex. Nevertheless, you will find that all subsequent iterates are real, and $\coth 2^n\theta_0$ goes to $1$ very quickly. # # NB This was inadvertently difficult. Neither numpy nor scipy has an invcoth (or arccoth) function. The Digital Library of Mathematical Functions says (equation 4.37.6) that arccoth(z) = arctanh(1/z). Indeed we had to go to Maple to find out that invcoth$(1/\sqrt{2}) = \ln(1+\sqrt{2}) - i\pi/2$. niter = 5 r2 = np.sqrt(2) x = 1; #theta0 = np.arctanh(r2) # Apparently this is real only? theta0 = np.log(1+r2) - (np.pi/2)*1j coth = lambda z: np.cosh(z)/np.sinh(z) # Amazing that even this has to be defined z = r2*coth(theta0) print( r2, x, theta0, z ) for k in range(niter): x = (x + 2/x)/2 z = r2*coth(2**(k+1)*theta0) print( x, z ) # We see numerically that the formula matches the iteration, up to rounding error. # 5: Try the above with $a^2=-1$. NB the initial guess $z_0 = 1$ fails! Try $z_0 = e = \exp(1) = 2.71828...$ instead. For this, the $\theta_0 = 1j\arctan(e^{-1})$. 
Then you might enjoy reading Gil Strang's lovely article [A Chaotic Search for $i$](https://www.jstor.org/stable/2686733). niter = 5 x = np.exp(1); #theta0 = np.arctanh(r2) # Apparently this is real only? theta0 = 1j*np.arctan(np.exp(-1)) # strange initial guess coth = lambda z: np.cosh(z)/np.sinh(z) # Amazing that even this has to be defined z = 1j*coth(theta0) print( 1j, x, theta0, z ) for k in range(niter): x = (x - 1/x)/2 z = 1j*coth(2**(k+1)*theta0) print( x, z ) # Again we see that the formula matches the iteration up to roundoff error, although it's working with complex numbers. If we went for a few more iterations, though, we would see the rounding errors take over---this is a case where the iteration _depends sensitively on initial conditions_. This is a hallmark of [chaos](https://en.wikipedia.org/wiki/Chaos_theory). # 6: Try to solve the _secant_ iteration for $z^2-a^2$ analytically. You should eventually find a connection to Fibonacci numbers. # This is a fun exercise in trig identities. The solution can be found in the paper cited in problem 8 below. # 7: People keep inventing new rootfinding iterations. Usually they are reinventions of methods that others have invented before, such as so-called _Schroeder_ iteration and _Householder_ iteration. One step along the way is the method known as _Halley iteration_, which looks like this: # \begin{equation*} # z_{n+1} = z_n - \frac{f(z_n)}{f'(z_n) - \frac{f(z_n)f''(z_n)}{2f'(z_n)}} # \end{equation*} # which, as you can see, also involves the _second_ derivative of $f$. When it works, it works quickly, typically converging in fewer iterations than Newton (although, typically, each step is more expensive computationally). Try the method out on some examples. It may help you to reuse your code (or Maple's code) if you are told that Newton iteration on $F(z) = f(z)/\sqrt{f'(z)}$ turns out to be identical to Halley iteration on $f(z)$. # We've done a lot of these. 
In the next unit, you will see fractal images from both Newton's method and from Halley's method. Here is Eunice with the resulting pictures from twenty functions, solved using Newton's method and using Halley's method. # # ```{image} ../Figures/Rootfinding/FortyFractals.jpg # :height: 300px # :alt: Eunice and the Forty Fractals. # :align: center # ``` # 8: Try to solve Halley's iteration for $x^2-a$ analytically. Then you might enjoy reading [Revisiting Gilbert Strang's "A Chaotic Search for i"](https://doi.org/10.1145/3363520.3363521) by Ao Li and Robert M. Corless; Ao was a (graduate) student in the first iteration of this course at Western, and she solved&mdash;in class!&mdash;what was then an _open_ problem (this problem!). # The complete solution is in the paper linked above.
book/Solutions/Solutions to the Rootfinding Chapter Exercises.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # APIs: requêtes HTTP

# ## Imports

import json
import requests

# ## Utiliser l'Open Data Wallonie Bruxelles pour retrouver le nom actuel des écoles de la ville de Bruxelles sur base de l'adresse
# https://www.odwb.be/

# +
"""Retrieve data from Open Data Wallonie Bruxelles of the school based on Rue des Eburons n°46"""

# Build the query explicitly instead of hard-coding a pre-encoded query string:
# `requests` URL-encodes each parameter (including the degree sign in "n°46").
base_url = "https://www.odwb.be/api/records/1.0/search/"
params = {
    "dataset": "signaletique-fase",
    "q": "",
    "lang": "fr",
    "facet": "adresse_de_l_etablissement",
    "refine.adresse_de_l_etablissement": "Rue des Eburons n°46",
    "format": "json",
}

resp = requests.get(base_url, params=params)
# Fail fast on HTTP errors instead of trying to parse an error page as JSON.
resp.raise_for_status()

# Response.json() decodes the body directly; no need for json.loads(resp.text).
json_list = resp.json()
json_list
TP1/TP1_API.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Day1: Exercises

# #### Dataset link: https://github.com/skathirmani/datasets

import pandas as pd
import nltk
import numpy as np
from nltk.stem import PorterStemmer
from sklearn.feature_extraction.text import CountVectorizer

# ### Analysis on Amazon Reviews

amazon_data_path = 'https://github.com/skathirmani/datasets/raw/master/amazon_reviews_11.zip'
amazon = pd.read_csv(amazon_data_path)
# Missing reviews become empty strings so string operations below don't fail on NaN.
amazon['reviewText'] = amazon['reviewText'].fillna('')
#amazon.head(1)

# ### Exercises
# 1. Create word cloud using wordclouds api
# 2. Create a bar chart for bag of word analysis
# 3. Building Document Term Matrix
# 4. Identify top 5 large documents
# 5. Identify percentage of zeros in the document term matrix
# 6. Create a bar chart for top 20 bigrams
#
# ### Optional
# 7. Correlated Features
#    - Identify top 50 unigrams
#    - Extract the relevant columns from DTM
#    - Create a correlation matrix. Identify highly correlated features

# ## Twitter Analysis
# 1. Identify all hashtags used by Modi
# 2. Identify top 5 hashtags
# 3. Pick one hashtags which is among top 5 and see its monthwise trending
#    (i.e. how many times he has used the hashtags in each month)

# NOTE(review): `tweets` is never defined in this notebook — presumably a cell
# loading the tweets dataset (e.g. narendramodi_tweets from the repo above) was
# removed; confirm and restore it before running the cells below.

# Clean the tweet text: lower-case, keep only letters, '#', '@' and spaces.
docs = tweets['text']
docs = docs.str.lower()
# `regex=True` is required for the character-class pattern on pandas >= 2.0,
# where str.replace defaults to literal matching (the call was silently a no-op).
docs = docs.str.replace('[^a-z#@ ]', '', regex=True)
docs = docs.str.split(' ')
words_rows = docs.tolist()

# Flatten the per-tweet token lists, keeping only hashtag tokens.
hashtags = [word for row in words_rows for word in row if word.startswith('#')]

top_hashtags = pd.Series(hashtags).value_counts().head()
top_hashtags

top_hashtags.index

tweets['date'] = pd.to_datetime(tweets['created_at'])
tweets['date'].head()

tweets['date'].min(), tweets['date'].max()

# Derive calendar features for month-wise trend analysis.
tweets['month'] = tweets['date'].dt.month
tweets['day'] = tweets['date'].dt.day
tweets['year'] = tweets['date'].dt.year
tweets['month_name'] = tweets['date'].dt.strftime('%b')
tweets['weekday'] = tweets['date'].dt.strftime('%A')

tweets['month_name'].value_counts().head()

tweets['text'] = tweets['text'].str.lower().str.replace('[^a-z#@ ]', '', regex=True)
# Flag tweets containing the exact token '#mannkibaat' (split on spaces so
# substrings of longer hashtags are not counted).
tweets['mannkibaat'] = tweets['text'].apply(lambda v: 1 if '#mannkibaat' in v.split(' ') else 0)
tweets['mannkibaat'].value_counts()

# Index by timestamp and resample to get the monthly usage trend of the hashtag.
tweets.index = tweets['date']
tweets.resample('1M')['mannkibaat'].sum().plot.line()
Notebooks/Day 1/.ipynb_checkpoints/Day1_Exercises-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

#import required libraries
import matplotlib.pyplot as plt
from matplotlib import style
# %matplotlib inline

#define temp, wind, humidity, precipitation data and Time hrs data (24 hourly samples)
temp_data = [79,75,74,75,73,81,77,81,95,93,95,97,98,99,98,98,97,92,94,92,83,83,83,81]
wind_data = [14,12,10,13,9,13,12,13,17,13,17,18,18,7,25,10,10,16,0,16,9,9,9,5]
time_hrs = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24]
humidity_data = [73,76,78,81,81,84,84,79,71,64,58,55,51,48,46,46,45,46,48,53,60,67,69,73]
precipitation_data = [26,42,69,48,11,16,5,11,3,48,26,21,58,69,74,53,79,53,58,26,74,95,69,58]

#draw subplots for (1,2,1) and (1,2,2): temperature and wind side by side
plt.figure(figsize=(8,4))
plt.subplots_adjust(hspace=.25)
plt.subplot(1,2,1)
plt.title('Temp')
plt.plot(time_hrs,temp_data,color ='b',linestyle = '-',linewidth=1)
plt.subplot(1,2,2)
plt.title('Wind')
plt.plot(time_hrs, wind_data, color= 'r',linestyle='-',linewidth=1)

#draw subplots for (2,1,1) and (2,1,2): humidity stacked above precipitation
plt.figure(figsize =(6,6))
plt.subplots_adjust(hspace=.25)
plt.subplot(2,1,1)
plt.title('Humidity')
plt.plot(time_hrs,humidity_data, color='b',linestyle='-',linewidth=1)
plt.subplot(2,1,2)
# Fixed label typo: this subplot shows precipitation_data, not "Prescription".
plt.title('Precipitation')
plt.plot(time_hrs, precipitation_data,color='r',linestyle='-',linewidth=1)
plt.show()

#draw subplots for (2,2,1), (2,2,2), (2,2,3) and (2,2,4): all four series in a 2x2 grid
plt.figure(figsize=(9,9))
plt.subplots_adjust(hspace=.3)
plt.subplot(2,2,1)
plt.title('Temp (F)')
plt.plot(time_hrs,temp_data,color ='b',linestyle = '-',linewidth=1)
plt.subplot(2,2,2)
plt.title('Wind (MPH)')
plt.plot(time_hrs, wind_data, color= 'r',linestyle='-',linewidth=1)
plt.subplot(2,2,3)
plt.title('Humidity (%)')
plt.plot(time_hrs,humidity_data, color='g',linestyle='-',linewidth=1)
plt.subplot(2,2,4)
# Fixed label typo: precipitation, not "Prescription".
plt.title('Precipitation (%)')
plt.plot(time_hrs, precipitation_data,color='y',linestyle='-',linewidth=1)
plt.show()
Matplot_Subplot.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Exercise 2 # Add the specified code for each code cell, running the cells _in order_. # In this exercise, you'll practice working with a data API: specifically, the [New York Times API](https://developer.nytimes.com/) for movie reviews. To learn more about the API, see the [developer console](https://developer.nytimes.com/movie_reviews_v2.json). # You will need to register with the NYT site at **<https://developer.nytimes.com/signup>** in order to get an API Key. Fill out the form (you can use fake information and a single-use email if you wish). Under "API" select **"Movie Reviews API"** (and note other APIs available for future projects)! The API key should be emailed to you once you sign up. # # In order to utilize this key in your script, create a **separate* Python script called `apikey.py` inside this directory (the one with this Jupyter notebook). Assign the key that was emailed to you to a variable: # # ```python # nyt_apikey = "<KEY>" # ``` # # You should also modify the included `.gitignore` file inside the directory to list your `apikey.py` script so that it doesn't get committed! # Import your `nyt_apikey` variable from your `apikey` script. Print out the _length_ of the key (but not the key itself!) to confirm that it has been imported. # + ##Update "developer console" link above: https://developer.nytimes.com/docs/movie-reviews-api/1/overview ##Try running without first, get the error, ## Call out, need to actually create the file. Don't do it manually ## "use command line!" 
# then in the command line demonstrate:
# #echo "nyt_apikey = '...akikey...'" > apikey.py
##article on hiding your gitignore file: http://www.blacktechdiva.com/hide-api-keys/

# Import the key from the (git-ignored) local `apikey` module; print only its
# length so the secret itself never appears in the notebook output.
from apikey import nyt_apikey
print(len(nyt_apikey))
# -

# Create a variable **`movie_name`** that contains the name of a movie of your choice.

movie_name = "Reservoir Dogs"

# Construct an HTTP request to search for reviews for that movie.
# - The base URI is `https://api.nytimes.com/svc/movies/v2/`
# - The resource is `reviews/search.json`
# - Your request will need two query parameters: the movie name as the `query` and YOUR api key as the `api-key` parameter.
#
# Then send the HTTP request and output its `status_code` to demonstrate that it worked.

# +
import requests

base_uri = "https://api.nytimes.com/svc/movies/v2/"
resource = "reviews/search.json"
# Passing the parameters as a dict lets requests URL-encode them (the movie
# name may contain spaces or other reserved characters).
query_params = {'query': movie_name, 'api-key': nyt_apikey}

response = requests.get(base_uri + resource, params = query_params)
response.status_code  # 200 means the request succeeded
# -

# Access the response JSON content as a Python data structure, saving it as a variable (e.g., `data`). Use the `type()` function to determine if the content is a list or a dictionary.

data = response.json()
type(data)

# Since the `data` is a dictionary, check its keys and identify the one with the value of interest&mdash;where is the _list of dictionaries_ with movie review data? Store the first item from that list in a variable `movie_review` and print it out.

data.keys()

# The 'results' key holds the list of review dictionaries; take the first match.
movie_review = data['results'][0]
print(movie_review)

# Print out the _headline_, _short summary_, and a _link to the full article_ of the `movie_review`.

print(movie_review['headline'])
print(movie_review['summary_short'])
# 'link' is itself a dictionary; its 'url' entry points at the full article.
print(movie_review['link']['url'])
exercise-2/exercise.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from qft import qft_framework from dft import dft_framework from fft import fft_framework from frontend import signal, transform, primeTime # + print("Initializing Signal") y = signal(samplingRate=16000, amplification=1, duration=1, nSamples=65536) y.addFrequency(600) y.addFrequency(800) y.addFrequency(1000) y.addFrequency(2000) y.addFrequency(5000) y.show() # + print("Processing DFT") try: dft = transform(dft_framework) y_hat, f = dft.forward(y) dft.show(y_hat, f) except Exception as e: print(e) # + print("Processing FFT") fft = transform(fft_framework) y_hat, f = fft.forward(y) fft.show(y_hat, f) # + print("Processing QFT") qft = transform(qft_framework) y_hat, f = qft.forward(y) qft.show(y_hat, f)
sc_harmonicSignal.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# default_exp synth
# -

# # Synth Dataset Generator
#
# > Module to generate Synthetic Datasets to perform tests

# This module is used to initialize datasets to test the utils from this library

# hide
import os
import sys
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path:
    sys.path.append(module_path)

# +
# export
import pandas as pd
import numpy as np
import random

from lightfm import LightFM
from lightfm.data import Dataset

from fastcore.all import *

# Seed the module-level RNG so the generated "random" datasets are reproducible.
random.seed(42)
# -

# export
class SynthDataset():
    '''
    Instance of two states of a dataset, one at time **T** and the other at time **T+1**
    where some users and items could had been added, deleted, and also their features
    '''
    def __init__(self):
        # Universe of ids/features from which both snapshots are drawn.
        self.all_users = []
        self.all_items = []
        self.all_user_features = []
        self.all_item_features = []
        # Snapshot at time T ('before') and at time T+1 ('after').
        self.before = {}
        self.after = {}
        # Deltas between the two snapshots; filled by build_synth_dataset.
        # items_added/items_deleted were previously only created inside
        # build_synth_dataset — initialized here for consistency.
        self.users_added = []
        self.users_deleted = []
        self.items_added = []
        self.items_deleted = []

# export
def gen_values(n_values=10, prefix='u'):
    "Generates a list of values that will be used for generate the dataset"
    return [prefix + str(i) for i in range(n_values)]

gen_values()

# export
def gen_added_n_deleted(l_values, max_added=3, max_deleted=3):
    '''
    Generates two lists of values, one list will contain the values that will be
    deleted from the dataset, and the second one will contain the values that
    will be added to the dataset.
    '''
    deleted = []
    added = []
    for value in l_values:
        r = random.random()
        # Each value is first considered for deletion, then for addition; the
        # elif keeps the two lists disjoint.
        if len(deleted) < max_deleted and r < 0.8:
            deleted.append(value)
        elif len(added) < max_added and r > 0.2:
            added.append(value)
    return added, deleted

gen_added_n_deleted(gen_values())

# export
def exclude_element(l, values_to_exclude, shuffle=False):
    "Excludes the elements from **values_to_exclude** from **l**"
    # Build the exclusion set once for O(1) membership tests.
    excluded = set(values_to_exclude)
    new_l = [x for x in l if x not in excluded]
    if shuffle:
        # Fixed: lists have no .shuffle() method (the old new_l.shuffle()
        # raised AttributeError); shuffle in place via the random module.
        random.shuffle(new_l)
    return new_l

# +
# hide
a = [1,2,3,4]
b = [2,3]
assert exclude_element(a, b) == [1,4], 'Both lists should be equal'
# -

# export
def build_interactions(l1, l2, l1_col_name='user_id', l2_col_name='item_id', sparsity=0.5):
    '''
    Builds interactions between l1 and l2. `sparsity` is the probability that a
    given (l1, l2) pair interacts, so a higher value yields a denser matrix.
    '''
    interactions = {l1_col_name: [], l2_col_name: []}
    for i in l1:
        for j in l2:
            if random.random() < sparsity:
                interactions[l1_col_name].append(i)
                interactions[l2_col_name].append(j)
    return pd.DataFrame(interactions)

# hide
build_interactions(gen_values(prefix='u'), gen_values(prefix='i'))

# export
def build_features_from_df(feature_interactions_df, element_id_column, feature_column, tolist=True):
    '''
    Builds tuples of elements and its features to build the dataset
    '''
    unique_elements = feature_interactions_df[element_id_column].unique()
    tuples = []
    for e in unique_elements:
        # All rows belonging to this element -> its unique feature values.
        filtered_rows = feature_interactions_df[feature_interactions_df[element_id_column] == e]
        feature_list = filtered_rows[feature_column].unique()
        if tolist:
            feature_list = feature_list.tolist()
        tuples.append((e, feature_list))
    return tuples

# +
# hide
user_features = build_interactions(gen_values(n_values=4, prefix='u'),
                                   gen_values(n_values=4, prefix='f'),
                                   l1_col_name='user_id',
                                   l2_col_name='feature_id',
                                   sparsity=0.5
                                   )
build_features_from_df(user_features, 'user_id', 'feature_id')
# -

# hide
users = gen_values(prefix='u')
items = gen_values(prefix='i')
all_user_features = gen_values(prefix='uf')
all_item_features = gen_values(prefix='if')
user_features = build_features_from_df(
    build_interactions(
        users,
        all_user_features,
        l1_col_name='user_id',
        l2_col_name='feature_id',
        sparsity=0.5
    ),
    element_id_column='user_id',
    feature_column='feature_id'
)
item_features = build_features_from_df(
    build_interactions(
        items,
        all_item_features,
        l1_col_name='item_id',
        l2_col_name='feature_id',
        sparsity=0.5
    ),
    element_id_column='item_id',
    feature_column='feature_id'
)
users, items, all_user_features, all_item_features, user_features, item_features

#export
@patch
def build_synth_dataset(self:SynthDataset, n_users=10, n_items=10, max_added=3, max_deleted=3, print_added_n_deleted=False):
    '''
    This function generates two **datasets** to simulate changes through time from
    one dataset. The first generated **dataset** is the state from the data in a
    time *t* and the second dataset simulates the state from the data at a time
    *t+1* where some users and items where added and deleted, and their metadata
    could be also updated (new metadata that expresses better the characteristics
    from that item, or just corrections)
    '''
    # Universe of user/item ids and feature ids.
    self.all_users = gen_values(n_values=n_users, prefix='u')
    self.all_items = gen_values(n_values=n_items, prefix='i')
    self.all_user_features = gen_values(prefix='uf')
    self.all_item_features = gen_values(prefix='if')

    # Decide which ids appear only after T (added) or disappear at T+1 (deleted).
    self.users_added, self.users_deleted = gen_added_n_deleted(self.all_users, max_added=max_added, max_deleted=max_deleted)
    if print_added_n_deleted:
        print('added users: {}\t deleted users: {}'.format(self.users_added, self.users_deleted))
    self.items_added, self.items_deleted = gen_added_n_deleted(self.all_items, max_added=max_added, max_deleted=max_deleted)
    if print_added_n_deleted:
        print('added items: {}\t deleted items: {}'.format(self.items_added, self.items_deleted))

    # 'before' lacks the values that will only be added later;
    # 'after' lacks the values that were deleted.
    self.before['user_id'] = exclude_element(self.all_users, self.users_added)
    self.before['item_id'] = exclude_element(self.all_items, self.items_added)
    self.after['user_id'] = exclude_element(self.all_users, self.users_deleted)
    self.after['item_id'] = exclude_element(self.all_items, self.items_deleted)
    if print_added_n_deleted:
        print('users before:\t{}\nusers after:\t{}'.format(self.before['user_id'], self.after['user_id']))
    if print_added_n_deleted:
        print('items before:\t{}\nitems after:\t{}'.format(self.before['item_id'], self.after['item_id']))

x = SynthDataset()
x.build_synth_dataset(print_added_n_deleted=True)
# x.before['user_id'], x.after['user_id'], x.before['item_id'], x.after['item_id']
notebooks/01_synth_dataset.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" id="MQoxjirY4S5o"
# Installing `caer` and `canaro` since they don't come pre-installed
# !pip install --upgrade caer canaro

# + _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a" _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" id="_yldS7bG4S58"
import os
import caer
import canaro
import numpy as np
import cv2 as cv
import gc
#pylint:disable=no-member (Removes linting problems with cv)

# + id="TuC64gTq4S6K"
# Global preprocessing parameters: 80x80 single-channel (grayscale) images.
IMG_SIZE = (80,80)
channels = 1
char_path = r'../input/the-simpsons-characters-dataset/simpsons_dataset'

# + id="RD5OHUE84S6U"
# Creating a character dictionary, sorting it in descending order
# (character name -> number of images in its folder).
char_dict = {}
for char in os.listdir(char_path):
    char_dict[char] = len(os.listdir(os.path.join(char_path,char)))

# Sort in descending order
# NOTE(review): caer.sort_dict appears to return (name, count) tuples, since
# items are indexed with i[0] below — confirm against the caer docs.
char_dict = caer.sort_dict(char_dict, descending=True)
char_dict

# + id="OQ09DqmI4S6g"
# Getting the first 10 categories with the most number of images
characters = []
count = 0
for i in char_dict:
    characters.append(i[0])
    count += 1
    if count >= 10:
        break
characters

# + id="X4UnTWk74S6q"
# Create the training data
train = caer.preprocess_from_dir(char_path, characters, channels=channels, IMG_SIZE=IMG_SIZE, isShuffle=True)

# + id="ZaSuzC2J4S6z"
# Number of training samples
len(train)

# + id="hSw-V2H24S7A"
# Visualizing the data (OpenCV doesn't display well in Jupyter notebooks)
import matplotlib.pyplot as plt
plt.figure(figsize=(30,30))
plt.imshow(train[0][0], cmap='gray')
plt.show()

# + id="arO-90034S7J"
# Separating the array and corresponding labels
featureSet, labels = caer.sep_train(train, IMG_SIZE=IMG_SIZE)

# + id="Sl8VnLCY4S7O"
from tensorflow.keras.utils import to_categorical

# Normalize the featureSet ==> (0,1)
featureSet = caer.normalize(featureSet)
# Converting numerical labels to binary class vectors (one-hot encoding)
labels = to_categorical(labels, len(characters))

# + id="pzXXrqbt4S7S"
from sklearn.model_selection import train_test_split
# Hold out 20% of the data for validation.
x_train, x_val, y_train, y_val = train_test_split(featureSet, labels, test_size=0.2)

# + id="emsrpYWZ4S7W"
# Free the large intermediate arrays; only the train/val splits are needed now.
del train
del featureSet
del labels
gc.collect()

# + id="NkS1ceD94S7a"
# Useful variables when training
BATCH_SIZE = 32
EPOCHS = 10

# + id="_atEyygG4S7g"
# Image data generator (introduces randomness in network ==> better accuracy)
datagen = canaro.generators.imageDataGenerator()
train_gen = datagen.flow(x_train, y_train, batch_size=BATCH_SIZE)

# + id="Y8fjXBuH4S7m"
# Create our model (returns a compiled model)
# Fixed: use categorical cross-entropy — this is 10-class, single-label
# classification with one-hot labels; 'binary_crossentropy' trains against the
# wrong objective and reports misleading accuracy for multi-class outputs.
model = canaro.models.createSimpsonsModel(IMG_SIZE=IMG_SIZE, channels=channels, output_dim=len(characters),
                                          loss='categorical_crossentropy', decay=1e-7, learning_rate=0.001,
                                          momentum=0.9, nesterov=True)

# + id="CepcT54J4S7t"
model.summary()

# + id="AknG90ch4S7-"
# Training the model
from tensorflow.keras.callbacks import LearningRateScheduler
callbacks_list = [LearningRateScheduler(canaro.lr_schedule)]
training = model.fit(train_gen,
                     steps_per_epoch=len(x_train)//BATCH_SIZE,
                     epochs=EPOCHS,
                     validation_data=(x_val,y_val),
                     validation_steps=len(y_val)//BATCH_SIZE,
                     callbacks = callbacks_list)

# + id="CUifBqOG4S8G"
characters

# + [markdown] id="qiNY_zSy6Quj"
# ## Testing

# + id="ETnmB3DC4S8M"
test_path = r'../input/the-simpsons-characters-dataset/kaggle_simpson_testset/kaggle_simpson_testset/charles_montgomery_burns_0.jpg'
img = cv.imread(test_path)
plt.imshow(img)
plt.show()

# + id="yJjmMuvj4S8T"
def prepare(image):
    """Convert a BGR image to the grayscale IMG_SIZE tensor the model expects."""
    image = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
    image = cv.resize(image, IMG_SIZE)
    image = caer.reshape(image, IMG_SIZE, 1)
    # NOTE(review): the training images went through caer.normalize() but this
    # test image does not — presumably caer.reshape leaves raw 0-255 values;
    # confirm whether predictions need the same normalization.
    return image

# + id="hZTjUKKn4S8a"
predictions = model.predict(prepare(img))

# + id="3a4AW8qT4S8g"
# Getting class with the highest probability
print(characters[np.argmax(predictions[0])])
Section #4 - Capstone/simpsons.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Requirements # - Artefacts: # - Models are uploaded in a Cloud Storage or Repo # - required Container are pushed to a Container Registry # - Clean Kubernetes Cluster (e.g. minikube, Microk8s, Azure AKS, AWS EKS, ...) # - minikube start --cpus 10 --memory 17000 --kubernetes-version=v1.17.11 -p demo # - Python Dependencies: [requirements.txt](./requirements.txt) # - kubectl Access: # - az aks get-credentials --resource-group myResourceGroup --name myAKSCluster # - aws eks update-kubeconfig --name cluster_name # # Install KFServing Standalone # See: https://github.com/kubeflow/kfserving#install-kfserving # ### Install Istio # %%writefile ./istio/istio_ns.yaml apiVersion: v1 kind: Namespace metadata: name: istio-system labels: istio-injection: disabled # !kubectl apply -f ./istio/istio_ns.yaml # %%writefile ./istio/istio-minimal-operator.yaml apiVersion: install.istio.io/v1alpha1 kind: IstioOperator spec: values: global: proxy: autoInject: disabled useMCP: false # The third-party-jwt is not enabled on all k8s. 
# See: https://istio.io/docs/ops/best-practices/security/#configure-third-party-service-account-tokens jwtPolicy: first-party-jwt addonComponents: pilot: enabled: false tracing: enabled: false kiali: enabled: false prometheus: enabled: false grafana: enabled: false components: ingressGateways: - name: istio-ingressgateway enabled: true - name: cluster-local-gateway enabled: true label: istio: cluster-local-gateway app: cluster-local-gateway k8s: service: type: ClusterIP ports: - port: 15020 name: status-port - port: 80 name: http2 - port: 443 name: https # + import time import platform import subprocess os_system = platform.system() os_machine = platform.machine() start = time.time() # Install Istio if os_system == 'Windows': # !curl -L https://github.com/istio/istio/releases/download/1.6.2/istioctl-1.6.2-win.zip -o istioctl-1.6.2-win.zip # !tar -xf istioctl-1.6.2-win.zip elif os_system == 'Linux': if os_machine == 'AMD64': # !curl -L https://github.com/istio/istio/releases/download/1.6.2/istioctl-1.6.2-linux-amd64.tar.gz -o istioctl-1.6.2-linux.tar.gz if os_machine == 'armv7l': # !curl -L https://github.com/istio/istio/releases/download/1.6.2/istioctl-1.6.2-linux-armv7.tar.gz -o istioctl-1.6.2-linux.tar.gz if os_machine == 'aarch64': print('Not supported') # !tar -zxvf istioctl-1.6.2-linux.tar.gz subprocess.run(["istioctl.exe", "manifest", "apply", "-f", "./istio/istio-minimal-operator.yaml"]) end = time.time() print(end-start) # + start = time.time() # Install Knative-Serving # !kubectl apply --filename https://github.com/knative/serving/releases/download/v0.18.0/serving-crds.yaml # !kubectl apply --filename https://github.com/knative/serving/releases/download/v0.18.0/serving-core.yaml # !kubectl apply --filename https://github.com/knative/net-istio/releases/download/v0.18.0/release.yaml # Install Cert Manager # !kubectl apply --validate=false -f https://github.com/jetstack/cert-manager/releases/download/v0.15.1/cert-manager.yaml # !kubectl wait 
--for=condition=available --timeout=600s deployment/cert-manager-webhook -n cert-manager # Install KFServing # !kubectl apply -f https://raw.githubusercontent.com/kubeflow/kfserving/master/install/v0.5.0/kfserving_crds.yaml # !kubectl apply -f https://raw.githubusercontent.com/kubeflow/kfserving/master/install/v0.5.0/kfserving.yaml # Install Knative-Eventing # !kubectl apply --filename https://github.com/knative/eventing/releases/download/v0.18.0/eventing.yaml # Install Knative-Monitoring # !kubectl apply --filename https://github.com/knative/serving/releases/download/v0.18.0/monitoring.yaml end = time.time() print(end-start) # - # ## Deploy InfluxDB with Helm # + start = time.time() # !helm repo add influxdata https://helm.influxdata.com/ # !helm repo update # !helm search repo influxdata # !helm install --name-template release-influxdb stable/influxdb end = time.time() print(end-start) # - # ## Deploy ServiceAccount to store AWS Credentials for S3 Bucket Access # + import os from IPython.core.magic import register_line_cell_magic from dotenv import load_dotenv load_dotenv() @register_line_cell_magic def writetemplate(line, cell): with open(line, 'w') as f: f.write(cell.format(**globals())) AWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID'] AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY'] # - # %%writetemplate ./credentials/aws-secret_serviceaccount.yaml apiVersion: v1 kind: Secret metadata: name: aws-secret namespace: kfserving-test type: Opaque stringData: AWS_ACCESS_KEY_ID: {AWS_ACCESS_KEY_ID} AWS_SECRET_ACCESS_KEY: {AWS_SECRET_ACCESS_KEY} --- apiVersion: v1 kind: ServiceAccount metadata: name: sa namespace: kfserving-test secrets: - name: aws-secret # !kubectl create ns kfserving-test # !kubectl apply -f ./credentials/aws-secret_serviceaccount.yaml # ## Deploy docker-registry secret to access the private Gitlab Container Registry # !kubectl create secret docker-registry gitlab \ # --docker-server=https://registry.gitlab.com/\ # 
--docker-username=%DOCKER_USERNAME%\ # --docker-password=%DOCKER_PASSWORD%\ # -n kfserving-test # ## Architektur: # <img src="./architektur.png" width="650"> # ## Deploy Knative Broker # %%writefile ./broker.yaml apiVersion: eventing.knative.dev/v1 kind: broker metadata: name: product-recommender namespace: kfserving-test # !kubectl create -f ./broker.yaml # # Deploy Product Recommender # %%writefile ./tf-deployment-recommender.yaml apiVersion: "serving.kubeflow.org/v1beta1" kind: "InferenceService" metadata: namespace: "kfserving-test" name: "product-recommender" spec: transformer: containers: - image: registry.gitlab.com/felix.exel/container_registry/kfserving/model-performance-monitoring name: user-container imagePullPolicy: Always imagePullSecrets: - name: gitlab predictor: serviceAccountName: "sa" # service account for aws credentials minReplicas: 1 # if 0: replica will scale down to 0 when there are no requests tensorflow: runtimeVersion: "2.4.0" #TensorFlow Serving Version storageUri: "s3://bucket-fex/0/719f2437c2a147d89ab6268cf7379cda/artifacts/saved_model/tfmodel/" # subfolder must contain numbers only for tf serving logger: mode: all url: http://broker-ingress.knative-eventing.svc.cluster.local/kfserving-test/product-recommender # !kubectl apply -f ./tf-deployment-recommender.yaml # # Deploy Anomaly Detection (Autoencoder) # %%writefile ./outlier_detection/outlier-detection.yaml apiVersion: serving.kubeflow.org/v1beta1 kind: InferenceService metadata: namespace: kfserving-test name: autoencoder-recommender spec: transformer: containers: - image: registry.gitlab.com/felix.exel/container_registry/kfserving/outlier-detection name: user-container imagePullPolicy: Always imagePullSecrets: - name: gitlab predictor: serviceAccountName: "sa" # service account for aws credentials minReplicas: 1 # if 0: replica will scale down to 0 when there are no requests tensorflow: runtimeVersion: "2.4.0" #TensorFlow Serving Version storageUri: 
"s3://bucket-fex/autoencoder_recommender/d052e637a7314c14a092585baf512672/" # subfolder must contain numbers only for tf serving # !kubectl apply -f ./outlier_detection/outlier-detection.yaml # ### Trigger Anomaly Detection (Autoencoder) # %%writefile ./outlier_detection/trigger.yaml apiVersion: eventing.knative.dev/v1 kind: Trigger metadata: name: outlier-trigger namespace: kfserving-test spec: broker: product-recommender filter: attributes: type: org.kubeflow.serving.inference.request subscriber: uri: http://autoencoder-recommender-transformer-default.kfserving-test/v1/models/autoencoder-recommender:predict # !kubectl apply -f ./outlier_detection/trigger.yaml # <img src="./architektur.png" width="650"> # # Grafana # %%writefile ./istio/loadbalancer.yaml apiVersion: v1 kind: Service metadata: name: grafana-load-balancer namespace: knative-monitoring spec: type: LoadBalancer selector: app: grafana ports: - protocol: TCP port: 3000 targetPort: 3000 # !kubectl apply -f ./istio/loadbalancer.yaml # + cluster_type = 'aws' # 'azure', 'aws', 'local' if cluster_type == 'azure': # azure aks # INGRESS_HOST_LIST = !kubectl -n istio-system get service istio-ingressgateway -o jsonpath={.status.loadBalancer.ingress[0].ip} INGRESS_HOST = INGRESS_HOST_LIST[0] INGRESS_PORT = 80 # GRAFANA_HOST_LIST = !kubectl -n knative-monitoring get service grafana-load-balancer -o jsonpath={.status.loadBalancer.ingress[0].ip} GRAFANA_HOST = GRAFANA_HOST_LIST[0] GRAFANA_PORT = 3000 elif cluster_type == 'aws': # aws eks # INGRESS_HOST_LIST = !kubectl -n istio-system get service istio-ingressgateway -o jsonpath={.status.loadBalancer.ingress[0].hostname} INGRESS_HOST = INGRESS_HOST_LIST[0] INGRESS_PORT = 80 # GRAFANA_HOST_LIST = !kubectl -n knative-monitoring get service grafana-load-balancer -o jsonpath={.status.loadBalancer.ingress[0].hostname} GRAFANA_HOST = GRAFANA_HOST_LIST[0] GRAFANA_PORT = 3000 elif cluster_type == 'local': # e.g. 
minikube or microk8s # INGRESS_HOST_LIST = !kubectl get po -l istio=ingressgateway -n istio-system -o jsonpath={.items[0].status.hostIP} INGRESS_HOST = INGRESS_HOST_LIST[0] #eg. '192.168.52.86' # INGRESS_PORT_LIST = !kubectl get svc -l istio=ingressgateway -n istio-system -o jsonpath={.items[0].spec.ports[1].nodePort} INGRESS_PORT = int(INGRESS_PORT_LIST[0]) GRAFANA_HOST = INGRESS_HOST # GRAFANA_PORT_LIST = !kubectl -n knative-monitoring get service grafana-load-balancer -o jsonpath={.spec.ports[0].nodePort} GRAFANA_PORT = GRAFANA_PORT_LIST[0] print(f"http://{GRAFANA_HOST}:{GRAFANA_PORT}/d/drTDt1LGz/model-performance?orgId=1&refresh=10s&from=now-5m&to=now") # - # # Test the ML-Service # ### Load Test Data # + import pandas as pd import numpy as np import time import json import requests import urllib3 from IPython.core.interactiveshell import InteractiveShell urllib3.disable_warnings() InteractiveShell.ast_node_interactivity = "all" np.set_printoptions(precision=5) sessions_padded = np.load('list_sessions_padded.npy') print(sessions_padded.shape) last_clicked = np.load('list_last_clicked.npy') print(last_clicked.shape) id_mapping = pd.read_csv('ID_Mapping.csv') # + def request_kf_serving_http(np_array, ground_truth, MODEL_NAME, NAMESPACE, INGRESS_HOST, INGRESS_PORT): data = json.dumps({"instances": np_array.tolist(), 'id': ground_truth.tolist()}) headers = {"content-type": "application/json", 'Host': f'{MODEL_NAME}.{NAMESPACE}.example.com'} json_response = requests.post( f'http://{INGRESS_HOST}:{INGRESS_PORT}/v1/models/{MODEL_NAME}:predict', data=data, headers=headers) try: predictions = json.loads(json_response.text)['predictions'] except Exception as e: print(json_response.text) raise e return np.array(predictions).astype(np.float32) NAMESPACE = 'kfserving-test' MODEL_NAME = 'product-recommender' # - # ## HTTP Request # + idx = 15 # 15, 169, 14 anomaly: 169 start = time.time() pred = request_kf_serving_http(sessions_padded[idx:idx+1], last_clicked[idx:idx+1], 
MODEL_NAME, NAMESPACE, INGRESS_HOST, INGRESS_PORT) end = time.time() print(f'Time required in Seconds: {end - start}') # top 5 predictions top = pred.argsort()[0][::-1][:5] print("Session:") session = pd.DataFrame() session['category_code'] = [id_mapping['category_code'][int(i)-1] for i in sessions_padded[idx,:,0] if i>0] session['Item_ID'] = [id_mapping['Item_ID'][int(i)-1] for i in sessions_padded[idx,:,0] if i>0] session['Item_ID_Mapped'] = [int(i) for i in sessions_padded[idx,:,0] if i>0] session print("Prediction:") prediction = pd.DataFrame() prediction['category_code'] = [id_mapping['category_code'][int(i)-1] for i in top if i>0] prediction['Item_ID'] = [id_mapping['Item_ID'][int(i)-1] for i in top if i>0] prediction['Item_ID_Mapped'] = [int(i) for i in top if i>0] prediction['probability'] = pred[0, top] prediction print("Ground Truth:", last_clicked[idx]) # - # ## Grafana Dashboard print(f"http://{GRAFANA_HOST}:{GRAFANA_PORT}/d/drTDt1LGz/model-performance?orgId=1&refresh=10s&from=now-5m&to=now")
install_and_deploy_m3.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Jupyter Widgets (Optional) # + # In this brief lecture I want to introduce you to one of the more advanced features of the # Jupyter notebook development environment called widgets. Sometimes you want # to interact with a function you have created and call it multiple times with different # parameters. For instance, if we wanted to draw a red box around a portion of an # image to try and fine tune the crop location. Widgets are one way to do this quickly # in the browser without having to learn how to write a large desktop application. # # Lets check it out. First we want to import the Image and ImageDraw classes from the # PILLOW package from PIL import Image, ImageDraw # Then we want to import the interact class from the widgets package from ipywidgets import interact # We will use interact to annotate a function. Lets bring in an image that we know we # are interested in, like the storefront image from a previous lecture image=Image.open('readonly/storefront.jpg') # Ok, our setup is done. Now we're going to use the interact decorator to indicate # that we want to wrap the python function. We do this using the @ sign. This will # take a set of parameters which are identical to the function to be called. Then Jupyter # will draw some sliders on the screen to let us manipulate these values. Decorators, # which is what the @ sign is describing, are standard python statements and just a # short hand for functions which wrap other functions. 
# Interactively preview a crop rectangle: @interact renders one slider per
# keyword argument and re-invokes the function whenever a slider moves.
@interact(left=100, top=100, right=200, bottom=200)
def draw_border(left, top, right, bottom):
    # Work on a copy so the module-level `image` is never mutated between calls.
    preview = image.copy()
    pen = ImageDraw.Draw(preview)
    # An unfilled red rectangle marks the candidate crop region.
    pen.rectangle((left, top, right, bottom), fill=None, outline='red')
    display(preview)
course_5/module_2-lec6.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import matplotlib.pyplot as plt # + from sklearn import datasets X, y = datasets.make_moons(n_samples=500, noise=0.3, random_state=42) # - plt.scatter(X[y==0, 0], X[y==0, 1]) plt.scatter(X[y==1, 0], X[y==1, 1]) plt.show() # + from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42) # - # ### AdaBoosting # # + from sklearn.ensemble import AdaBoostClassifier from sklearn.tree import DecisionTreeClassifier ada_clf = AdaBoostClassifier(base_estimator = DecisionTreeClassifier(max_depth=2), n_estimators=500, random_state=666) ada_clf.fit(X_train, y_train) # - ada_clf.score(X_test, y_test) # ### Gradient Boosting # # > 1. 训练一个模型m1, 产生错误e1 # > 2. 针对e1训练第二个模型m2, 产生错误e2 # > 3. 以此类推,最终结果是 m1 + m2 + ... # + from sklearn.ensemble import GradientBoostingClassifier gb_clf = GradientBoostingClassifier(max_depth=2, n_estimators=30, random_state=666) gb_clf.fit(X_train, y_train) # - gb_clf.score(X_test, y_test)
ml/ensemble/adaboosting.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Step 1: Load the Data/Filtering for Chosen Zipcodes # + import pandas as pd import numpy as np import matplotlib.pyplot as plt # %matplotlib inline import warnings warnings.filterwarnings("ignore") # - #load the data df = pd.read_csv('zillow_data.csv') df.head() df.shape # ## Step 1(i) : Filter specific Zip codes according to their Sizerank # # #### "SizeRank" is a value given to every zipcode based on the Urbanization. "More Urbanization = Higher SizeRank" # + #filter zipcodes based of sizeRank and keep the top 15%. #Find the Cutoff value for Sizerank df['SizeRank'].quantile(0.15) # + #apply the cutoff value to filter zipcodes and keep 2209 only Filtered_df = df[df['SizeRank']<2209] Filtered_df.head() # - Filtered_df.shape # + #drop extra FEATURES that we think are not helpful filtered_zipcopes = Filtered_df.drop(['RegionID','City','State','Metro','CountyName','SizeRank'],axis =1) filtered_zipcopes.head() # + #add a column which shows MEDIAN house price for last two years in that specific zipcode filtered_zipcopes['Average_Price_Last_2_Years ']= filtered_zipcopes.iloc[:,-24:].median(skipna=True, axis=1) filtered_zipcopes.head() # + # check the Overall median house price for our zipcodes filtered_zipcopes['Average_Price_Last_2_Years '].median() # - # ### " We Prefer zipcodes where average house Price for last 2 years fall between 40th percentile and 60th percentile. We select values that are closer to our mean and fall inside a Normal distribution. 
We would further filter our data based on 'Risk Management' & 'Return on Investment (ROI)' for every zip code
def adf_test(series, title=''):
    """Run an Augmented Dickey-Fuller test on *series* and print a report.

    Prints the test statistic, p-value, lag/observation counts and critical
    values, then a plain-language verdict on stationarity at the 5% level.
    """
    print(f'Augmented Dickey-Fuller Test: {title}')
    # NaNs produced by differencing would break the test, so drop them first.
    stats = adfuller(series.dropna(), autolag='AIC')
    report = pd.Series(
        stats[0:4],
        index=['ADF test statistic', 'p-value', '# lags used', '# observations'],
    )
    for level, threshold in stats[4].items():
        report[f'critical value ({level})'] = threshold
    # .to_string() suppresses the trailing "dtype: float64" line.
    print(report.to_string())

    if stats[1] <= 0.05:
        print("Strong evidence against the null hypothesis")
        print("Reject the null hypothesis")
        print("Data has no unit root and is stationary")
    else:
        print("Weak evidence against the null hypothesis")
        print("Fail to reject the null hypothesis")
        print("Data has a unit root and is non-stationary")
# ## Retrain the model on entire data to forecast 2 years into future model = SARIMAX(df_melted['Price'],order=(2,1,3)) results = model.fit() forecast = results.predict(len(df_melted),len(df_melted)+24,typ='levels') # + # Plot predictions against known values title = 'Price Predictions for next 2 years' ##xlabel='Year' #ax = df_melted['Price'].plot(legend=True,figsize=(12,6))#,title=title) forecast.plot(title=title,figsize=(6,6)) plt.xlabel('Month') plt.ylabel('Price') #ax.autoscale(axis='x',tight=True) #ax.set(xlabel=xlabel, ylabel=ylabel); # + # I could not run the code above to plot my predictions against Real values, #because my Time freq is still None, i could't convert it to Monthly format. # - forecast.head(12)
MOD_4_PROJECT_NOTEBOOK_3.ipynb
In 2014, UN Women launched He for She, a global campaign to include men in the gender equality discussions, as well.
So, to find the factors influencing the ranking, we will look at the maximum positive difference that occurred between successive years during 2006-2016.
In Section 1, we will be looking at some general observations about the ranking of all countries.
We will try to support our findings with external sources.
It might be the case that when WEF started measuring the gap for the first time in 2005, the ranking created awareness and many countries took concrete steps in decreasing gap parity between women and men. That may explain the difference in plots between the early years (2006 and 2007) and later years (2015 and 2016). # ## Average Overall Global Gender Rank across years 2006-2016 # # Since a lot of changes and fluctuations are observed in the dataset, we decided to look at the average rank to capture any interesting patterns in the data. The starting point is getting a general idea of the global ranking of countries. For this purpose, we first create a new column, Average Rank, and then look at the world map. # + overall=ranking.copy() years=[] for i in range(2006,2017): years.append(str(i)) overall['Average Rank']=overall[years].mean(numeric_only=True,axis=1).astype(int) overall.sort_values(by='Average Rank', ascending=True).head(10) # - # The dataset shows that when we sort the data according to the average overall rank, the first seven countries are the first seven countries in 2015 and 2016. Denmark's overall average rank is eight even though their overall rank gradually decreased in 2015 and 2016. 
def world_rank_map(rank):
    """Render a plotly choropleth world map coloured by the given rank column.

    Parameters
    ----------
    rank : str
        Name of a numeric column in the module-level ``overall`` dataframe
        (e.g. 'Average Rank') supplying the colour values.

    Returns
    -------
    The value of ``iplot(fig)`` (renders inline in the notebook).
    """
    # NOTE: the original defined a `color_scale` list that was never passed to
    # the trace (the trace relies on the default colorscale with
    # autocolorscale=False / reversescale=True), so it was removed as dead code.
    trace = dict(
        type='choropleth',
        text=overall['Country Name'],
        locations=overall['Country ISO3'],   # ISO3 codes position the countries
        z=overall[rank].astype(float),
        marker=dict(line=dict(color='rgb(180,180,180)', width=0.5)),
        autocolorscale=False,
        reversescale=True,                   # flip the default colour scale
        colorbar={'title': 'Rank'},
    )
    fig = {
        'data': [trace],
        'layout': {
            'title': 'Overall Global Gender Gap Index - ' + rank,
            'geo': {'scope': 'world',
                    'projection': {'type': 'equirectangular'}},
            'showlegend': True,
        },
    }
    return iplot(fig)
When we look at the countries in Top 20 in 2016, we see that there are thirteen countries in Europe, four in Africa, one in Asia, one in Oceania and one in North America.
# # It is worth noting that while rank of Nicaragua, the only North American country in Top 20, was 62 in 2006, its rank decreased to 10 in 2016 (it was even 6 in 2014 and its average rank is 34). # # Another notable example is Slovenia (rank=8 in 2016 and average rank=36) who improved their rank significantly. According to UN Women, Slovenia committed to improve the gender gap in their country and took several positive steps including a call for support from men and boys to support policies and activities regarding gender gap issues (http://www.unwomen.org/en/get-involved/step-it-up/commitments/slovenia) # # In addition to Nicaragua and Slovenia, Namibia(rank=14 in 2016 and average rank=31) and France (rank=17 in 2016 and average rank=36) also took many positive steps to increase their ranking. According to a report published by the European Parliement http://www.europarl.europa.eu/RegData/etudes/IDAN/2015/510024/IPOL_IDA(2015)510024_EN.pdf ...... # # # Section 2: Which countries improved their ranking the most? What are the factors influencing these positive changes? # # There are several ways to find out which countries improved their ranking the most. Below, each one is explained in detail. # # After creating a new column according to each critera, we will look at Top 3 countries in detail by using their subindexes measuring economic participation, educational attainments, health and survival and political empowerment. To get more information about the factors influencing these positive changes in these countries, we will provide two different types of plots. # # The following two functions are defined to plot data related to subindexes. The first function helps us to see the change in each subindex by using a line graph. Note that the plots still uses ranks rather than indexes because it is more clear to show how the overall ranking depend on subindexes. However, this does not capture all information about the subindexes. 
def _country_rank_frame(country):
    """Return (rank rows for one country indexed by subindex name, index list).

    Shared by country_scatterplot / country_boxplot, which previously
    duplicated this filter-and-reindex logic.  Assumes the module-level
    ``gender`` dataframe holds exactly six 'Rank' rows per country in the
    order listed below -- TODO confirm against the dataset.
    """
    index_list = ['Economic Participation', 'Education', 'Health',
                  'Political', 'Overall', 'Wage Equality']
    frame = gender[(gender['Country Name'] == country) &
                   (gender['Subindicator Type'] == 'Rank')]
    frame.index = index_list
    return frame, index_list


def country_scatterplot(country):
    """Line plot of every gender-gap subindex rank over the years for one country."""
    gender_country, index_list = _country_rank_frame(country)
    colors = ['red', 'blue', 'green', 'brown', 'purple', 'orange']
    scatters_list = []
    for i in range(0, len(index_list)):
        scatters_list.append(go.Scatter(
            x=years,
            y=gender_country.loc[index_list[i], years],
            marker=dict(color=colors[i]),
            name=index_list[i],
            mode='lines'))
    fig = {'data': scatters_list,
           'layout': {'title': country,
                      'xaxis': {'title': 'Years'},
                      'yaxis': {'title': 'Rank'}}}
    return iplot(fig)


def country_boxplot(country):
    """Box plot (raw yearly points overlaid) of each subindex's rank spread."""
    gender_country, index_list = _country_rank_frame(country)
    colors = ['red', 'blue', 'green', 'black', 'purple', 'orange']
    box_list = []
    for i in range(0, len(index_list)):
        box_list.append(go.Box(
            y=gender_country.loc[index_list[i], years],
            boxpoints='all',   # show every yearly rank alongside the box
            marker=dict(color=colors[i]),
            name=index_list[i]))
    fig = {'data': box_list,
           'layout': {'title': country,
                      'yaxis': {'title': 'Rank'}}}
    return iplot(fig)
def changed(row):
    """Classify the 2006 -> 2016 rank movement for one country row.

    Returns 'positive' when the 2016 rank is better (numerically lower),
    'negative' when it is worse, and 'missing 2006' / 'missing 2016' when
    exactly one of the two years has no record.

    NOTE: returns None (NaN after .apply) when both years are missing or the
    rank did not change -- kept for backward compatibility with the existing
    groupby counts.
    """
    # The original tested `pd.isna(a)==True & pd.notna(b)==True`, which only
    # produced the intended result via Python's chained-comparison and
    # `&`-precedence rules; spelled out explicitly with `and` here.
    has_2006 = pd.notna(row["2006"])
    has_2016 = pd.notna(row["2016"])
    if not has_2006 and has_2016:
        return "missing 2006"
    if has_2006 and not has_2016:
        return "missing 2016"
    if has_2006 and has_2016:
        if row["2006"] > row["2016"]:
            return "positive"
        if row["2006"] < row["2016"]:
            return "negative"
    return None


def _rank_delta(row, start, end="2016"):
    """row[start] - row[end]; NaN when exactly one side is missing.

    (When both sides are missing the subtraction itself yields NaN, matching
    the original functions' behaviour.)
    """
    if pd.isna(row[start]) != pd.isna(row[end]):
        return np.nan
    return row[start] - row[end]


def change_val(row):
    """Rank improvement between 2006 and 2016 (positive = rank got better)."""
    return _rank_delta(row, "2006")


def change_val2(row):
    """Rank improvement between 2015 and 2016 (positive = rank got better)."""
    return _rank_delta(row, "2015")
# + overall_stat=overall_mean.copy() overall_stat=overall_stat[['2016','Average Rank','Numerical Change 2006-2016']] overall_stat=overall_stat.rename(columns={'2016':'mean 2016','Average Rank':'mean of Average Rank', 'Numerical Change 2006-2016':'mean change'}) overall_stat['count']=overall_count['Country Name'] overall_stat # - # Statistics table above confirms our observation: There are 83 countries whose ranks got worse while there are 30 countries whose ranks got better. The mean change in rank for the countries who changed their rank positively is 17.4 while the mean change in rank of the countries whose rank changed negatively is -22.42. # # Next, we look at the Top 3 countries who decreased their ranking the most in detail and try to find the factors contributing to the positive change in their ranking. # ### Bolivia country_scatterplot('Bolivia') country_boxplot('Bolivia') # For Bolivia, we see that the overall ranking (purple color) change more like the changes in their political empowerment subindex(brown). This is more clear between the years 2011 and 2012. Note that only political empowerment ranking improved while all other subindexes were either increased or stayed the same. Accordingly, overall rank got better. As for other years, this trends continues. The other subindex affecting the overall rank is economic participation. # ### France country_scatterplot('France') country_boxplot('France') # The second country who improved their rank the most is France. Line plot clearly shows that their health and education parity is excellent (rank=1) over all years. The line plot clearly shows that overall ranking closely follows political empowerment subindex. For more information about France's progress, please look at the report at http://www.europarl.europa.eu/RegData/etudes/IDAN/2015/510024/IPOL_IDA(2015)510024_EN.pdf. 
# ### Nicaragua country_scatterplot('Nicaragua') country_boxplot('Nicaragua') # In Nicaragua, the line plot is very complicated than Bolivia and France. It seems like the improvement in their overall ranking is a result of improvements in health and political empowerment. Educational attainment ranking fluctuates a lot but it may also have some positive effect on the overall ranking. Nicaragua's overall ranking decreased and stayed around 10 even though economic participation ranking increased between the years 2011 and 2015. It seems like the increase in their rank from 2014 to 2015 was due to the increase in the economic participation subindex. # ## 2. Maximum positive difference in the ranking # So far, we focused on changes between the years 2006 and 2016 and looked at the factors leading up to that change in top 3 counties in that list. However, this gives us a rough information about the ranking. As we can see from the dataset, there might be some fluctations over the years, i.e., a country's ranking might change significantly from year to year. A possible reason contributing to this change could be the change in ranking of other countries. It might also be due to a significant event or a policy change in that country. To explore sudden changes in ranks over the years, we will create a new column, max difference, which gives us the max positive change observed during the years 2006 and 2016. In addition, we create a new column showing the years to find out whether the change occurred is more recent or not. 
# +
# For every country, find the largest single-step rank improvement (drop in
# rank number) between consecutive report years, and remember which pair of
# years it happened in.
df_max_diff = ranking.copy()
max_diffs = []
max_diff_years = []
for _, country_row in df_max_diff.iterrows():
    best_drop = 0
    best_years = '2006'
    # Positions 4..13 hold the yearly rank columns; compare each
    # consecutive pair (year i vs. year i+1).
    for col in range(4, 14):
        left, right = country_row[col], country_row[col + 1]
        if pd.isna(left) or pd.isna(right):
            continue
        drop = left - right
        # Only a positive drop (rank improved) can beat the running best.
        if drop > best_drop:
            best_drop = drop
            best_years = df_max_diff.columns[col] + '-' + df_max_diff.columns[col + 1]
    max_diffs.append(best_drop)
    max_diff_years.append(best_years)
df_max_diff["max difference"] = max_diffs
df_max_diff["max difference-years"] = max_diff_years
df_max_diff.sort_values(by='max difference', ascending=False).head()
# -

# ### Kenya

country_scatterplot('Kenya')

country_boxplot('Kenya')

# When the overall ranks are sorted according to the newly created maximum difference column, we see that Kenya and Estonia decreased their ranks by 41 points (Kenya in 2013-2014 and Estonia in 2014-2015). Over the years since 2006, we see that Kenya's overall rank got worse until 2011 (when its rank=99). After 2011, their overall rank got better mainly due to the decrease in economic participation rank. After 2011, political empowerment and economic participation rank graphs follow a similar trend. Accordingly, graph for their overall rank also follows a similar pattern. After 2014, their rank started to increase again and it was 63 in 2016.

# ### Estonia

country_scatterplot('Estonia')

country_boxplot('Estonia')

# Estonia's overall rank decreased from 62 to 21 in 2015, which was the maximum drop in any given successive years. This improvement might be due to their commitment to achieving gender equality by promoting women's rights, taking measures to reduce and prevent violence against women and increasing their efforts in closing the gender pay gap (http://www.unwomen.org/en/get-involved/step-it-up/commitments/estonia). The fact that their rank was still around 21 in 2016 might be an indication that they are working on addressing these issues but more data is needed to support this guess.
# ### Ghana

country_scatterplot('Ghana')

country_boxplot('Ghana')

# In Ghana, the line plots show that the overall rank is affected by the economic participation and health and survival subindexes more than the other subindexes. In 2014, all of its subindexes got worse, economic participation subindex increased to 64 from 24 and overall rank increased to 101 from 76. However, after 2014, their index gradually improved to 38 in 2016. Their rank in 2016 was better than all other ranks in years 2006-2015.

# ## 3. Which countries improved their ranking or preserved it multiple times between years 2006 and 2016?

# +
# Count, for each country, how many year-over-year transitions did NOT make
# its rank worse (rank stayed the same or improved).
df_pos_count = ranking.copy()
improve_counts = {}
for idx, country_row in df_pos_count.iterrows():
    n_not_worse = 0
    for year in range(2006, 2016):
        this_year = country_row[str(year)]
        next_year = country_row[str(year + 1)]
        if pd.notna(this_year) and pd.notna(next_year) and this_year >= next_year:
            n_not_worse += 1
    improve_counts[idx] = n_not_worse
df_pos_count["count_positive"] = improve_counts.values()
df_pos_count_sorted = df_pos_count.sort_values(by='count_positive', ascending=False)
df_pos_count_sorted.head()
# -

# ### Iceland

country_scatterplot('Iceland')

country_boxplot('Iceland')

# For the last 8 years, Iceland has the highest global overall rank. From the line graph above, we see that once they improved their educational attainment rank, they were able to preserve their rank as number one.
#
# According to President Johannesson's statement, gender pay gap in Iceland is around 5.7-18.3% (https://www.heforshe.org/en/impact). This shows that even in the number one country, the gender gap is around 10% on average. For more details, please look at https://www.weforum.org/agenda/2017/11/why-iceland-ranks-first-gender-equality/.

# ### Philippines

country_scatterplot('Philippines')

country_boxplot('Philippines')

# Philippines was always in Top 10 since 2006 in the overall ranking. Between 2006 and 2016, their overall rank got worse only two times in 2009 and in 2014.
# According to the line graph above, we see that Philippines overall rank graph is very similar to their political empowerment rank graph. Another factor contributing to their overall rank would be economic participation.
#
# Note that their overall rank increased by 2 points in 2015 even though their educational attainment rank dropped by 34 points in 2015. It seems like the improvement in their overall rank is due to the improvement in their economic participation rank (from 24 to 16) since political empowerment and health and survival ranks stayed the same in that year.
#
# For more details about their efforts, we refer to the State of Filipino Women Report at (https://pcw.gov.ph/sites/default/files/documents/resources/ESTADO%20NI%20JUANA_THE%20STATE%20OF%20FILIPINO%20WOMEN%20REPORT.pdf)

# ### Finland

country_scatterplot('Finland')

country_boxplot('Finland')

# Finland is one of the countries that are always in Top 3 in overall ranking. It seems like their overall rank is more dependent on their political empowerment rank since overall rank didn't get affected by their educational attainment rank, which varied a lot between years 2006 and 2016, and their economic participation rank, which was always between 8 and 22. For more information, we refer the reader to http://www.stat.fi/tup/tasaarvo/index_en.html.

country_scatterplot('United States')

country_boxplot('United States')

# Overall Global Gender Gap rank of United States was 45 in 2016, which was 22 points less than the Overall Rank in 2006. United States' overall rank got worse in 2015 and 2016 even though there was improvement several times between 2006-2016. Overall rank line graph above looks more similar to the line graph for the economic participation but its values were not as low as economic participation ranking values. It seems like high political empowerment ranking also affected overall gender gap ranking.
# However, more data is needed to support this claim as the political empowerment ranking fluctuates a lot.

overall[overall["Country Name"]=='United States']

# # CONCLUSION
#
# In this report, we focused on positive changes and improvements and explored some factors influencing these changes. A similar study can be done by exploring the countries who decreased their ranking, and the events leading up to the decrease in their ranks.
#
# Since WEF started publishing their reports for the first time in 2005, there are many positive steps taken in many countries to decrease the gap in their countries as well as on a global level. However, even in Iceland, #1 in the last 8 years, the gender gap is around 5.7-18.3% (https://www.heforshe.org/en/impact).
#
#
GenderGap-CodeSY.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Variational Recurrent Network (VRNN)
#
# Implementation based on Chung's *A Recurrent Latent Variable Model for Sequential Data* [arXiv:1506.02216v6].
#
# ### 1. Network design
#
# There are three types of layers: input (x), hidden (h) and latent (z). We can compare VRNN side by side with RNN to see how it works in the generation phase.
#
# - RNN: $h_o + x_o -> h_1 + x_1 -> h_2 + x_2 -> ...$
# - VRNN: with $ h_o \left\{
# \begin{array}{ll}
# h_o -> z_1 \\
# z_1 + h_o -> x_1\\
# z_1 + x_1 + h_o -> h_1 \\
# \end{array}
# \right .$
# with $ h_1 \left\{
# \begin{array}{ll}
# h_1 -> z_2 \\
# z_2 + h_1 -> x_2\\
# z_2 + x_2 + h_1 -> h_2 \\
# \end{array}
# \right .$
#
# It is clearer to see how it works in the code blocks below. This loop is used to generate new text when the network is properly trained. x is the wanted output, h is the deterministic hidden state, and z is the latent state (stochastic hidden state). Both h and z are changing with respect to time.
#
# ### 2. Training
#
# The VRNN above contains three components, a latent layer generator $h_o -> z_1$, a decoder net to get $x_1$, and a recurrent net to get $h_1$ for the next cycle.
#
# The training objective is to make sure $x_1$ is realistic. To do that, an encoder layer is added to transform $x_1 + h_0 -> z_1$. Then the decoder should transform $z_1 + h_o -> x_1$ correctly. This implies a cross-entropy loss in the "tiny shakespeare" case or MSE in image reconstruction.
#
# Another loose end is $h_o -> z_1$. Statistically, $x_1 + h_0 -> z_1$ should be the same as $h_o -> z_1$, if $x_1$ is sampled randomly. This constraint is formalized as a KL divergence between the two.
#
# >#### KL Divergence of Multivariate Normal Distribution
# >![](https://wikimedia.org/api/rest_v1/media/math/render/svg/8dad333d8c5fc46358036ced5ab8e5d22bae708c)
#
# Now putting everything together for one training cycle.
#
# $\left\{
# \begin{array}{ll}
# h_o -> z_{1,prior} \\
# x_1 + h_o -> z_{1,infer}\\
# z_1 <- sampling N(z_{1,infer})\\
# z_1 + h_o -> x_{1,reconstruct}\\
# z_1 + x_1 + h_o -> h_1 \\
# \end{array}
# \right . $
# =>
# $
# \left\{
# \begin{array}{ll}
# loss\_latent = DL(z_{1,infer} | z_{1,prior}) \\
# loss\_reconstruct = x_1 - x_{1,reconstruct} \\
# \end{array}
# \right .
# $

# +
import torch
from torch import nn, optim
from torch.autograd import Variable

# NOTE(review): this code uses the pre-0.4 PyTorch API (`Variable`,
# `.data[0]`); on modern PyTorch, `Variable` is a no-op wrapper and
# `.data[0]` on a 0-dim tensor should be `.item()` — confirm target version.

class VRNNCell(nn.Module):
    """One step of a Variational RNN over 7-bit character ids (0..127).

    Sizes are hard-coded: embedding/hidden/latent width 64, vocabulary 128.
    Linear layers that end in `64*2` emit (mu, log_sigma) pairs.
    """
    def __init__(self):
        super(VRNNCell,self).__init__()
        # Character id -> 64-d feature vector.
        self.phi_x = nn.Sequential(nn.Embedding(128,64), nn.Linear(64,64), nn.ELU())
        self.encoder = nn.Linear(128,64*2) # output hyperparameters
        self.phi_z = nn.Sequential(nn.Linear(64,64), nn.ELU())
        self.decoder = nn.Linear(128,128) # logits
        self.prior = nn.Linear(64,64*2) # output hyperparameters
        self.rnn = nn.GRUCell(128,64)
    def forward(self, x, hidden):
        # x: LongTensor of character ids, shape (batch,); hidden: (batch, 64).
        x = self.phi_x(x)
        # 1. h => z
        z_prior = self.prior(hidden)
        # 2. x + h => z
        z_infer = self.encoder(torch.cat([x,hidden], dim=1))
        # sampling (reparameterization: z = eps * sigma + mu)
        z = Variable(torch.randn(x.size(0),64))*z_infer[:,64:].exp()+z_infer[:,:64]
        z = self.phi_z(z)
        # 3. h + z => x
        x_out = self.decoder(torch.cat([hidden, z], dim=1))
        # 4. x + z => h
        hidden_next = self.rnn(torch.cat([x,z], dim=1),hidden)
        return x_out, hidden_next, z_prior, z_infer
    def calculate_loss(self, x, hidden):
        """Return (reconstruction loss, KL loss, next hidden state)."""
        x_out, hidden_next, z_prior, z_infer = self.forward(x, hidden)
        # 1. logistic regression loss
        loss1 = nn.functional.cross_entropy(x_out, x)
        # 2. KL Divergence between Multivariate Gaussian
        # (closed-form KL between the inferred and the prior diagonal
        # Gaussians, per the formula pictured above)
        mu_infer, log_sigma_infer = z_infer[:,:64], z_infer[:,64:]
        mu_prior, log_sigma_prior = z_prior[:,:64], z_prior[:,64:]
        loss2 = (2*(log_sigma_infer-log_sigma_prior)).exp() \
                + ((mu_infer-mu_prior)/log_sigma_prior.exp())**2 \
                - 2*(log_sigma_infer-log_sigma_prior) - 1
        loss2 = 0.5*loss2.sum(dim=1).mean()
        return loss1, loss2, hidden_next
    def generate(self, hidden=None, temperature=None):
        """Sample one character id and the next hidden state."""
        if hidden is None:
            hidden=Variable(torch.zeros(1,64))
        if temperature is None:
            temperature = 0.8
        # 1. h => z
        z_prior = self.prior(hidden)
        # sampling
        z = Variable(torch.randn(z_prior.size(0),64))*z_prior[:,64:].exp()+z_prior[:,:64]
        z = self.phi_z(z)
        # 2. h + z => x
        x_out = self.decoder(torch.cat([hidden, z], dim=1))
        print (x_out.shape)  # debug print — consider removing
        # sampling (temperature-scaled softmax via exp of scaled logits)
        x_sample = x = x_out.div(temperature).exp().multinomial(1).squeeze()
        x = self.phi_x(x)
        print (x.size())  # debug print — consider removing
        # 3. x + z => h
        hidden_next = self.rnn(torch.cat([x,z], dim=1),hidden)
        return x_sample, hidden_next
    def generate_text(self, hidden=None,temperature=None, n=100):
        """Generate n characters; note the `hidden` argument is overwritten."""
        res = []
        hidden = None
        for _ in range(n):
            x_sample, hidden = self.generate(hidden,temperature)
            res.append(chr(x_sample.data[0]))
        return "".join(res)

# Test
net = VRNNCell()
x = Variable(torch.LongTensor([12,13,14]))
hidden = Variable(torch.rand(3,64))
# NOTE(review): forward() returns (x_out, hidden_next, z_prior, z_infer);
# the names below bind z_infer/z_prior in swapped order — confirm intent.
output, hidden_next, z_infer, z_prior = net(x, hidden)
loss1, loss2, _ = net.calculate_loss(x, hidden)
loss1, loss2

hidden = Variable(torch.zeros(1,64))
net.generate_text()
# -

# ## Download tiny shakespeare text

# +
from six.moves.urllib import request
url = "https://raw.githubusercontent.com/jcjohnson/torch-rnn/master/data/tiny-shakespeare.txt"
text = request.urlopen(url).read().decode()
print('-----SAMPLE----\n')
print(text[:100])
# -

# ### A convenient function to sample text

# +
import numpy as np

def batch_generator(seq_size=300, batch_size=64):
    """Yield lists of `seq_size` LongTensors, each holding `batch_size`
    character codes taken from random, independently advancing offsets."""
    cap = len(text) - seq_size*batch_size
    while True:
        idx = np.random.randint(0, cap, batch_size)
        res = []
        for _ in range(seq_size):
            batch = torch.LongTensor([ord(text[i]) for i in idx])
            res.append(batch)
            idx += 1
        yield res

g = batch_generator()
batch = next(g)
# -

# ## Model Training

# +
net = VRNNCell()
max_epoch = 2000
optimizer = optim.Adam(net.parameters(), lr=0.001)
g = batch_generator()
hidden = Variable(torch.zeros(64,64)) #batch_size x hidden_size
for epoch in range(max_epoch):
    batch = next(g)
    loss_seq = 0
    loss1_seq, loss2_seq = 0, 0
    optimizer.zero_grad()
    for x in batch:
        loss1, loss2, hidden = net.calculate_loss(Variable(x),hidden)
        loss1_seq += loss1.data[0]
        loss2_seq += loss2.data[0]
        loss_seq = loss_seq + loss1+loss2
    loss_seq.backward()
    optimizer.step()
    # Truncate backpropagation at the epoch boundary.
    hidden.detach_()
    if epoch%100==0:
        print('>> epoch {}, loss {:12.4f}, decoder loss {:12.4f}, latent loss {:12.4f}'.format(epoch, loss_seq.data[0], loss1_seq, loss2_seq))
        print(net.generate_text())
        print()
# -

# ## Evaluation

sample = net.generate_text(n=1000, temperature=1)
print(sample)

# ## Comments
#
# - Definitely train longer to get better results.
# - Keep in mind the rnn kernel only has 1 layer, with 64 neurons.
# - Seems no need to tune temperature here. temperature = 0.8 generates a lot of obscure spelling. temperature = 1 works fine.
vrnn.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Evaluating Your Forecast # # So far you have prepared your data, and generated your first Forecast. Now is the time to pull down the predictions from this Predictor, and compare them to the actual observed values. This will let us know the impact of accuracy based on the Forecast. # # You can extend the approaches here to compare multiple models or predictors and to determine the impact of improved accuracy on your use case. # # Overview: # # * Setup # * Obtaining a Prediction # * Plotting the Actual Results # * Plotting the Prediction # * Comparing the Prediction to Actual Results # ## Setup # Import the standard Python Libraries that are used in this lesson. # + import json import time import dateutil.parser import boto3 import pandas as pd # - # The line below will retrieve your shared variables from the earlier notebooks. # %store -r # Once again connect to the Forecast APIs via the SDK. session = boto3.Session(region_name=region) forecast = session.client(service_name='forecast') forecastquery = session.client(service_name='forecastquery') # ## Obtaining a Prediction: # # Now that your predictor is active we will query it to get a prediction that will be plotted later. forecastResponse = forecastquery.query_forecast( ForecastArn=forecast_arn_deep_ar, Filters={"item_id":"client_12"} ) # ## Plotting the Actual Results # # In the first notebook we created a file of observed values, we are now going to select a given date and customer from that dataframe and are going to plot the actual usage data for that customer. 
# The validation CSV has no header row, so column names are supplied here.
actual_df = pd.read_csv("data/item-demand-time-validation.csv", names=['timestamp','value','item'])
actual_df.head()

# Next we need to reduce the data to just the day we wish to plot, which is the First of November 2014.

# String comparison is safe here because the timestamps are ISO-8601
# formatted, so lexicographic order equals chronological order.
actual_df = actual_df[(actual_df['timestamp'] >= '2014-11-01') & (actual_df['timestamp'] < '2014-11-02')]

# Lastly, only grab the items for client_12
actual_df = actual_df[(actual_df['item'] == 'client_12')]
actual_df.head()

actual_df.plot()

# ## Plotting the Prediction:
#
# Next we need to convert the JSON response from the Predictor to a dataframe that we can plot.

# Generate DF
prediction_df_p10 = pd.DataFrame.from_dict(forecastResponse['Forecast']['Predictions']['p10'])
prediction_df_p10.head()

# Plot
prediction_df_p10.plot()

# The above merely did the p10 values, now do the same for p50 and p90.

prediction_df_p50 = pd.DataFrame.from_dict(forecastResponse['Forecast']['Predictions']['p50'])
prediction_df_p90 = pd.DataFrame.from_dict(forecastResponse['Forecast']['Predictions']['p90'])

# ## Comparing the Prediction to Actual Results
#
# After obtaining the dataframes the next task is to plot them together to determine the best fit.
# We start by creating a dataframe to house our content, here source will be which dataframe it came from.
# NOTE: `DataFrame.append` was removed in pandas 2.0 (and was O(n^2) when
# called in a loop), so we collect plain dict records first and build the
# frame in a single constructor call.

def _records_from(df, ts_col, val_col, source):
    """Convert one dataframe into [{'timestamp', 'value', 'source'}, ...].

    Timestamps are parsed into datetime objects so the actual and the
    predicted series align on the same axis when pivoted below.
    """
    return [
        {'timestamp': dateutil.parser.parse(row[ts_col]),
         'value': row[val_col],
         'source': source}
        for _, row in df.iterrows()
    ]

# Import the observed values into the dataframe:

records = _records_from(actual_df, 'timestamp', 'value', 'actual')

# Now add the P10, P50, and P90 Values
# (the prediction frames use capitalized 'Timestamp'/'Value' columns)

records += _records_from(prediction_df_p10, 'Timestamp', 'Value', 'p10')
records += _records_from(prediction_df_p50, 'Timestamp', 'Value', 'p50')
records += _records_from(prediction_df_p90, 'Timestamp', 'Value', 'p90')

results_df = pd.DataFrame(records, columns=['timestamp', 'value', 'source'])

# To show the new dataframe
results_df.head()

results_df

# +
# One column per source, indexed by timestamp, so all four series share axes.
pivot_df = results_df.pivot(columns='source', values='value', index="timestamp")
pivot_df
# -

pivot_df.plot()

# Once you are done exploring this Forecast you can cleanup all the work that was done by executing the cells inside `Cleanup.ipynb` within this folder.
notebooks/legacy/Tutorial/3.Evaluating_Your_Predictor.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/julianovale/pythonparatodos/blob/main/M%C3%B3dulo12Aula09.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="560-Kp0JJ-Bc" class A: def __init__(self,x): self.__x = x def fun(self): print('Public: Método A: fun, x = ', self.__x) def _fun(self): print('Protected: Método A: _fun, x = ', self.__x) def __fun(self): print('Private: Método A: __fun, x = ', self.__x) def mPrivate(self): self.__fun() # + id="KA76FhoHKpg5" obj = A(1.0) # + colab={"base_uri": "https://localhost:8080/"} id="_xfWGfI1Lsmg" outputId="8f94ea63-1374-424b-e8c7-42aa3cef2315" # Método public obj.fun() # + colab={"base_uri": "https://localhost:8080/"} id="nbT-Sj7VLzZO" outputId="6d57bc62-fbc8-483a-e405-7c4105f2664a" # Método protected obj._fun() # + colab={"base_uri": "https://localhost:8080/"} id="xa886Ax9Md_H" outputId="3d506959-323d-4a61-cd49-ac9eae13b30d" obj.mPrivate() # + colab={"base_uri": "https://localhost:8080/"} id="65CFEuVWPNyO" outputId="578bec58-e5e0-43fc-fd32-6dd555cad261" # Burlando o acesso private (campo e método) obj._A__fun() print("A.x = ", obj._A__x)
Módulo12Aula09.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# +
import tensorflow as tf
from tensorflow.keras import datasets, layers, models
import matplotlib.pyplot as plt

# +
(train_images, train_labels), (test_images, test_labels) = datasets.cifar10.load_data()

# Normalize pixel values to be between 0 and 1
train_images, test_images = train_images / 255.0, test_images / 255.0

# +
class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']

plt.figure(figsize=(10,10))
for i in range(25):
    plt.subplot(5,5,i+1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(train_images[i])
    # The CIFAR labels happen to be arrays,
    # which is why you need the extra index
    plt.xlabel(class_names[train_labels[i][0]])
plt.show()
# -

# Convolutional base: three conv layers with max-pooling in between.
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))

model.summary()

# Classifier head on top of the convolutional base (10 logits, one per class).
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(10))

model.summary()

# +
# `from_logits=True` because the final Dense layer has no softmax.
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])

history = model.fit(train_images, train_labels, epochs=10,
                    validation_data=(test_images, test_labels))

# +
plt.plot(history.history['accuracy'], label='accuracy')
plt.plot(history.history['val_accuracy'], label = 'val_accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.ylim([0.5, 1])
plt.legend(loc='lower right')

test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
# -

print(test_acc)

# Second part: compare a plain dense network (ann) with a CNN (cnn).
import tensorflow as tf
from tensorflow.keras import datasets, layers, models
import matplotlib.pyplot as plt
import numpy as np

(X_train, y_train), (X_test,y_test) = datasets.cifar10.load_data()
X_train.shape
X_test.shape
y_train.shape
y_train[:5]
# Flatten the (n, 1) label arrays to shape (n,).
y_train = y_train.reshape(-1,)
y_train[:5]
y_test = y_test.reshape(-1,)
classes = ["airplane","automobile","bird","cat","deer","dog","frog","horse","ship","truck"]

def plot_sample(X, y, index):
    """Display image X[index] labelled with its class name."""
    plt.figure(figsize = (15,2))
    plt.imshow(X[index])
    plt.xlabel(classes[y[index]])

plot_sample(X_train, y_train, 0)
plot_sample(X_train, y_train, 1)
X_train = X_train / 255.0
X_test = X_test / 255.0

# +
ann = models.Sequential([
        layers.Flatten(input_shape=(32,32,3)),
        layers.Dense(3000, activation='relu'),
        layers.Dense(1000, activation='relu'),
        layers.Dense(10, activation='softmax')
    ])
ann.compile(optimizer='SGD',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
ann.fit(X_train, y_train, epochs=5)

# +
from sklearn.metrics import confusion_matrix , classification_report
import numpy as np
y_pred = ann.predict(X_test)
y_pred_classes = [np.argmax(element) for element in y_pred]
print("Classification Report: \n", classification_report(y_test, y_pred_classes))
# -

cnn = models.Sequential([
    layers.Conv2D(filters=32, kernel_size=(3, 3), activation='relu', input_shape=(32, 32, 3)),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(filters=64, kernel_size=(3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Flatten(),
    layers.Dense(64, activation='relu'),
    layers.Dense(10, activation='softmax')
])
cnn.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
cnn.fit(X_train, y_train, epochs=10)
cnn.evaluate(X_test,y_test)
y_pred = cnn.predict(X_test)
y_pred[:5]
y_classes = [np.argmax(element) for element in y_pred]
y_classes[:5]
y_test[:5]
plot_sample(X_test, y_test,3)
classes[y_classes[3]]
classes[y_classes[3]]

# +
plt.plot(history.history['accuracy'], label='accuracy')
plt.plot(history.history['val_accuracy'], label = 'val_accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.ylim([0.5, 1])
plt.legend(loc='lower right')
test_loss, test_acc = model.evaluate(X_test, y_test, verbose=2)
# -

import os.path
# Idiomatic truthiness check instead of `... is False`.
if not os.path.isfile('models/medical_trial_model.h5'):
    model.save('models/medical_trial_model.h5')
from tensorflow.keras.models import load_model
new_model = load_model('models/medical_trial_model.h5')
new_model.summary()
new_model.get_weights()
new_model.optimizer
json_string = model.to_json()
json_string
from tensorflow.keras.models import model_from_json
model_architecture = model_from_json(json_string)
model_architecture.summary()
import os.path
if not os.path.isfile('models/my_model_weights.h5'):
    model.save_weights('models/my_model_weights.h5')
# FIX: `Sequential` and `Dense` were referenced bare but never imported
# (only `models`/`layers` are in scope here), which raised a NameError.
model2 = models.Sequential([
    layers.Dense(units=16, input_shape=(1,), activation='relu'),
    layers.Dense(units=32, activation='relu'),
    layers.Dense(units=2, activation='softmax')
])
models/CNN.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # [<NAME>](http://www.sebastianraschka.com) # import time print('Last updated: %s' %time.strftime('%d/%m/%Y')) # <hr> # I would be happy to hear your comments and suggestions. # Please feel free to drop me a note via # [twitter](https://twitter.com/rasbt), [email](mailto:<EMAIL>), or [google+](https://plus.google.com/118404394130788869227). # <hr> # # Linear regression via the least squares fit method # <a name="sections"></a> # <br> # <br> # # # Sections # - [Introduction](#Introduction) # # - [Least squares fit implementations](#Least-squares-fit-implementations) # # - [The matrix approach in Python and NumPy](#The-matrix-approach-in-Python-and-NumPy) # # - [The classic approach in Python](#The-classic-approach-in-Python) # # - [Visualization](#Visualization) # # - [Performance growth rates](#Performance-growth-rates) # # - [Results](#Results) # <a name="introduction"></a> # <br> # <br> # # Introduction # [[back to top](#Sections)] # Linear regression via the least squares method is the simplest approach to performing a regression analysis of a dependent and a explanatory variable. The objective is to find the best-fitting straight line through a set of points that minimizes the sum of the squared offsets from the line. # The offsets come in 2 different flavors: perpendicular and vertical - with respect to the line. 
# ![](https://raw.githubusercontent.com/rasbt/python_reference/master/Images/least_squares_vertical.png)
# ![](https://raw.githubusercontent.com/rasbt/python_reference/master/Images/least_squares_perpendicular.png)
#
# As <NAME> summarizes it nicely in his article "[Problems of Linear Least Square Regression - And Approaches to Handle Them](http://www.arsa-conf.com/archive/?vid=1&aid=2&kid=60101-220)": "the perpendicular offset method delivers a more precise result but is more complicated to handle. Therefore normally the vertical offsets are used."
# Here, we will also use the method of computing the vertical offsets.
#
#
# In more mathematical terms, our goal is to compute the best fit to *n* points $(x_i, y_i)$ with $i=1,2,...n,$ via linear equation of the form
# $f(x) = a\cdot x + b$.
# We further have to assume that the y-component is functionally dependent on the x-component.
# In a cartesian coordinate system, $b$ is the intercept of the straight line with the y-axis, and $a$ is the slope of this line.

# In order to obtain the parameters for the linear regression line for a set of multiple points, we can re-write the problem as matrix equation
# $\pmb X \; \pmb a = \pmb y$

# $\Rightarrow\Bigg[ \begin{array}{cc}
# x_1 & 1 \\
# ... & 1 \\
# x_n & 1 \end{array} \Bigg]$
# $\bigg[ \begin{array}{c}
# a \\
# b \end{array} \bigg]$
# $=\Bigg[ \begin{array}{c}
# y_1 \\
# ... \\
# y_n \end{array} \Bigg]$

# With a little bit of calculus, we can rearrange the term in order to obtain the parameter vector $\pmb a = [a\;b]^T$

# $\Rightarrow \pmb a = (\pmb X^T \; \pmb X)^{-1} \pmb X^T \; \pmb y$

# <br>
# The more classic approach to obtain the slope parameter $a$ and y-axis intercept $b$ would be:

# $a = \frac{S_{x,y}}{\sigma_{x}^{2}}\quad$ (slope)
#
#
# $b = \bar{y} - a\bar{x}\quad$ (y-axis intercept)

# where
#
#
# $S_{xy} = \sum_{i=1}^{n} (x_i - \bar{x})(y_i - \bar{y})\quad$ (covariance)
#
#
# $\sigma{_x}^{2} = \sum_{i=1}^{n} (x_i - \bar{x})^2\quad$ (variance)

# <a name="implementations"></a>
# <br>
# <br>
# # Least squares fit implementations
# [[back to top](#Sections)]

# <a name='matrix_approach'></a>
# <br>
# <br>
# ### The matrix approach in Python and NumPy
# [[back to top](#Sections)]

# First, let us implement the equation:
# $\pmb a = (\pmb X^T \; \pmb X)^{-1} \pmb X^T \; \pmb y$
# which I will refer to as the "matrix approach".

# #### Matrix approach implemented in NumPy and (C)Python

# +
import numpy as np

def matrix_lstsqr(x, y):
    """ Computes the least-squares solution to a linear matrix equation.

    Solves the normal equations a = (X^T X)^-1 X^T y for the design
    matrix X = [x, 1]; x and y are 1D sequences of equal length.
    Returns a numpy array [slope, intercept].
    """
    X = np.vstack([x, np.ones(len(x))]).T
    return (np.linalg.inv(X.T.dot(X)).dot(X.T)).dot(y)
# -

# <a name='classic_approach'></a>
# <br>
# <br>
# ### The classic approach in Python
# [[back to top](#Sections)]

# Next, we will calculate the parameters separately, using standard library functions in Python only, which I will call the "classic approach".

# $a = \frac{S_{x,y}}{\sigma_{x}^{2}}\quad$ (slope)
#
#
# $b = \bar{y} - a\bar{x}\quad$ (y-axis intercept)

# Note: I refrained from using list comprehensions and convenience functions such as `zip()` in
# order to maximize the performance for the Cython compilation into C code in the later sections.
# NOTE(review): the loop below does use `zip()` — the note above seems stale.
#
# #### Implemented in (C)Python

def classic_lstsqr(x_list, y_list):
    """ Computes the least-squares solution to a linear matrix equation.

    Uses the textbook formulas slope = cov(x, y) / var(x) and
    intercept = mean(y) - slope * mean(x); returns (slope, y_intercept).
    NOTE(review): raises ZeroDivisionError when all x values are equal
    (var_x == 0) — confirm inputs always vary.
    """
    N = len(x_list)
    x_avg = sum(x_list)/N
    y_avg = sum(y_list)/N
    var_x, cov_xy = 0, 0
    for x,y in zip(x_list, y_list):
        temp = x - x_avg
        var_x += temp**2
        cov_xy += temp * (y - y_avg)
    slope = cov_xy / var_x
    y_interc = y_avg - slope*x_avg
    return (slope, y_interc)

# <a name='sample_data'></a>
# <br>
# <br>
# <br>
# <br>
# ## Visualization
# [[back to top](#Sections)]

# To check how our dataset is distributed, and how the least squares regression line looks like, we will plot the results in a scatter plot.

# Note that we are only using our "matrix approach" to visualize the results - for simplicity. We expect both approaches to produce similar results, which we confirm with the code snippet below.

# +
import random
import numpy as np

random.seed(12345)

x = [x_i*random.randrange(8,12)/10 for x_i in range(500)]
y = [y_i*random.randrange(8,12)/10 for y_i in range(100,600)]

np.testing.assert_almost_equal(
    classic_lstsqr(x, y), matrix_lstsqr(x, y), decimal=5)

print('ok')
# -

# %matplotlib inline

# +
from matplotlib import pyplot as plt
import random

random.seed(12345)

x = [x_i*random.randrange(8,12)/10 for x_i in range(500)]
y = [y_i*random.randrange(8,12)/10 for y_i in range(100,600)]

slope, intercept = matrix_lstsqr(x, y)

line_x = [round(min(x)) - 1, round(max(x)) + 1]
line_y = [slope*x_i + intercept for x_i in line_x]

plt.figure(figsize=(8,8))
plt.scatter(x,y)
plt.plot(line_x, line_y, color='red', lw='2')

plt.ylabel('y')
plt.xlabel('x')
plt.title('Linear regression via least squares fit')

# NOTE(review): this label prints slope in the constant position and
# intercept as the x coefficient — the order looks swapped; confirm.
ftext = 'y = ax + b = {:.3f} + {:.3f}x'\
        .format(slope, intercept)
plt.figtext(.15,.8, ftext, fontsize=11, ha='left')

plt.show()
# -

# <br>
# <br>

# <a name='performance1'></a>
# <br>
# <br>
# # Performance growth rates
# [[back to top](#Sections)]

# Now, finally let us take a look at the effect of different sample sizes on the relative performances for each approach.
# + import timeit import random random.seed(12345) funcs = ['classic_lstsqr', 'matrix_lstsqr'] orders_n = [10**n for n in range(1,5)] timings = {f:[] for f in funcs} for n in orders_n: x_list = ([x_i*np.random.randint(8,12)/10 for x_i in range(n)]) y_list = ([y_i*np.random.randint(10,14)/10 for y_i in range(n)]) x_ary = np.asarray(x_list) y_ary = np.asarray(y_list) timings['classic_lstsqr'].append(min(timeit.Timer('classic_lstsqr(x_list, y_list)', 'from __main__ import classic_lstsqr, x_list, y_list')\ .repeat(repeat=3, number=1000))) timings['matrix_lstsqr'].append(min(timeit.Timer('matrix_lstsqr(x_ary, y_ary)', 'from __main__ import matrix_lstsqr, x_ary, y_ary')\ .repeat(repeat=3, number=1000))) # + import platform import multiprocessing def print_sysinfo(): print('\nPython version :', platform.python_version()) print('compiler :', platform.python_compiler()) print('NumPy version :', np.__version__) print('\nsystem :', platform.system()) print('release :', platform.release()) print('machine :', platform.machine()) print('processor :', platform.processor()) print('CPU count :', multiprocessing.cpu_count()) print('interpreter:', platform.architecture()[0]) print('\n\n') # + import matplotlib.pyplot as plt def plot(timings, title, labels, orders_n): plt.rcParams.update({'font.size': 12}) fig = plt.figure(figsize=(11,10)) for lb in labels: plt.plot(orders_n, timings[lb], alpha=0.5, label=labels[lb], marker='o', lw=3) plt.xlabel('sample size n') plt.ylabel('time per computation in milliseconds') #plt.xlim([min(orders_n) / 10, max(orders_n)* 10]) plt.legend(loc=2) plt.grid() plt.xscale('log') plt.yscale('log') plt.title(title) plt.show() # - # <br> # <br> # # Results # [[back to top](#Sections)] # + title = 'Performance of Linear Regression Least Squares Fits' labels = {'classic_lstsqr': '"classic" least squares in (C)Python', 'matrix_lstsqr': '"matrix" least squares in in (C)Python + NumPy', } print_sysinfo() plot(timings, title, labels, orders_n) # - test complete; 
Gopal
tests/others/linregr_least_squares_fit.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Tensor Flow and MNIST Data Set

# ## TensorFlow
#
# In this note I use [MNIST Data Set](http://yann.lecun.com/exdb/mnist/) to learn how to use TensorFlow.

# {% asset_img output_26_1.png %}
#
# Firstly we'll briefly go through the installation and common elements of TensorFlow. Then we will start to input the MNIST data, and use Matplotlib to display some of our MNIST images. Then we start to construct our TensorFlow model with cross entropy error measure and gradient descent optimization. Finally we will compare our output y to the test dataset to estimate our model performance.
#
#
# ### Installation of TensorFlow for Windows 10
#
# As TensorFlow is not compatible with Windows 10, we need to install TensorFlow into a virtual environment. The following steps indicate how to do this with Anaconda.

# 1. Install [Anaconda](https://www.anaconda.com/distribution/)
# 2. Open 'Anaconda Prompt'
# 3. Create a virtual environment named 'virtualEnv' with anaconda:
# ``` Bash
# conda create --name virtualEnv python=3.6 anaconda
# ```
# After the packages are installed, we can use these commands to activate / deactivate the virtual environment:
# ``` Bash
# conda activate virtualEnv
# conda deactivate
# ```
# 4. Install Tensorflow in virtualEnv:
# ``` Bash
# conda install tensorflow
# ```
# Then we have finished the TensorFlow installation.
#
# Test:
# After TensorFlow is installed, we can activate the 'virtualEnv', and type the code below into Jupyter Notebook:
# ``` Python
# import tensorflow as tf
# ```
# If no error / warning occurs, then it means we have installed TensorFlow correctly.
# # Reference: <https://makerpro.cc/2019/01/introduction-of-ai-experiments-and-the-installation-of-tensorflow-in-windows-system/>
#
#
# ### Import TF

# NOTE(review): this notebook uses the TensorFlow 1.x graph/session API
# (tf.Session, tf.placeholder); it does not run unmodified on TF 2.x.
import tensorflow as tf

# ### TF Elements
# 1. Simple Constants

x = tf.constant(100)

# 2. Running Sessions

sess = tf.Session()

# Evaluate the constant node inside the session.
sess.run(x)

# To line up multiple Tensorflow operations in a session:

with tf.Session() as sess:
    print(x ** 2)
    print(x + 4)

# 3. Place Holder
# When we don't know the constants right now (for example, batch input need to update x and y for each iteration), we can declare place holder and use '**feed_dict={x:`x_batch`, y:`y_batch`}**' later to feed in the data.

x = tf.placeholder(tf.int32)
y = tf.placeholder(tf.int32)

# 4. Defining Operations

add = tf.add(x,y)
sub = tf.subtract(x,y)
mul = tf.multiply(x,y)

# Example of use placeholder with operation objects:

# Feed dictionary supplying concrete values for the placeholders above.
d = {x:20,y:30}

with tf.Session() as sess:
    print('Operations with Constants')
    print('Addition',sess.run(add,feed_dict=d))
    print('Subtraction',sess.run(sub,feed_dict=d))
    print('Multiplication',sess.run(mul,feed_dict=d))

# ## Get the MNIST Data
# ### MNIST data
# [Official Website](http://yann.lecun.com/exdb/mnist/)
# [Chinese Data Source](https://scidm.nchc.org.tw/dataset/mnist)
#
# data sets: (datasize by 'mnist.train.num_examples')
#
# mnist.train 55000
# mnist.test 10000
# mnist.validation 5000
#
# image and label:
#
# X = mnist.train.images
# y = mnist.train.label
#
# type(mnist):
#
# tensorflow.contrib.learn.python.learn.datasets.base.Datasets

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# Downloads (if necessary) and loads MNIST; one_hot=True encodes each
# label as a length-10 indicator vector.
mnist = input_data.read_data_sets("MNIST_data/",one_hot=True)

# ### Visualizing the Data

import matplotlib.pyplot as plt
# %matplotlib inline

# Each image is stored flattened as a 784-element vector (28*28 pixels).
mnist.train.images[0].shape

plt.figure(figsize=(10, 5))
plt.subplot(1,2,1)
plt.imshow(mnist.train.images[5].reshape(28,28))
plt.subplot(1,2,2)
plt.imshow(mnist.train.images[2].reshape(28,28))

# Show eight sample digits in a 2x4 grid.
plt.figure(figsize=(15, 7))
for i in range(8):
    plt.subplot(2,4,i+1)
    plt.imshow(mnist.train.images[2*i+1].reshape(28,28))

mnist.train.labels[5]

mnist.train.labels[9]

# Pixel values are normalized to [0, 1].
mnist.train.images[8].max()

plt.figure(figsize=(10, 5))
plt.imshow(mnist.train.images[5].reshape(28,28),cmap='gist_gray')

# The same image shown as its raw flattened 784x1 vector.
plt.figure(figsize=(12, 5))
plt.imshow(mnist.train.images[5].reshape(784,1),aspect=0.01)

# ## Create the Model
# **Variable Objects**
#
# tf.placeholder
#
# tf.Variable
#
# **Math Operation Objects**
#
# tf.matmul
#
# tf.reduce_mean(array, axis)
# return the mean value in each vector
# tf.reduce_max(array, axis)
# return the max value in each vector
#
# tf.argmax(input, axis(=0:row; =1:column))
# if input=array, return the index of max value in each vector
# if input=vector, return the index of max value in the vector
#
# tf.equal(input, pred)
# return True or False for each element comparison. The dim. of return is same as input array's dim.
#
# tf.nn.softmax_cross_entropy_with_logits_v2
# tf.train.GradientDescentOptimizer
# tf.train.GradientDescentOptimizer.minimize
# tf.global_variables_initializer
#
#

x = tf.placeholder(tf.float32,shape=[None,784])

# 10 because 0-9 possible numbers
W = tf.Variable(tf.zeros([784,10]))
b = tf.Variable(tf.zeros([10]))

# Output = Possibility
# Logits of a single-layer (multinomial logistic regression) model.
y = tf.matmul(x,W) + b

# Loss and Optimizer
y_true = tf.placeholder(tf.float32,[None,10])

# Cross Entropy
# softmax_cross_entropy_with_logits_v2 applies softmax internally, so `y`
# must be raw logits, not probabilities.
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_true, logits=y))

# Use Gradient Descent Optimizer
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.5)

# Train
train = optimizer.minimize(cross_entropy)

# ### Create Session
# Everything except of variables (**constants** and **placeholders**) do not require initialization .
# But **Variable (even if it is a constant)** should be initialized.

init = tf.global_variables_initializer()

# A Session object encapsulates the environment in which **Operation objects are executed, and Tensor objects are evaluated**.
with tf.Session() as sess: sess.run(init) # Train the model for 1000 steps on the training set # Using built in batch feeder from mnist for convenience for step in range(1000): batch_x , batch_y = mnist.train.next_batch(100) sess.run(train,feed_dict={x:batch_x,y_true:batch_y}) # Test the Train Model matches = tf.equal(tf.argmax(y,1),tf.argmax(y_true,1)) acc = tf.reduce_mean(tf.cast(matches,tf.float32)) print(sess.run(tf.reduce_max(y,1), feed_dict={x:mnist.test.images})) print(sess.run(tf.argmax(y,1), feed_dict={x:mnist.test.images})) print(sess.run(acc,feed_dict={x:mnist.test.images,y_true:mnist.test.labels})) # ## Reference: # # <https://www.udemy.com/python-for-data-science-and-machine-learning-bootcamp/> # # <https://www.tensorflow.org/api_docs/python/tf/math/argmax)> # # <https://stackoverflow.com/questions/34987509/tensorflow-max-of-a-tensor-along-an-axis> #
TensorFlow/Tensor Flow and MNIST Data Set.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python2 # --- # # Testing notebook # # I use this notebook to test recently developed functions # ### Default imports # + # %config InlineBackend.figure_format = 'retina' # %matplotlib inline import numpy as np import scipy as sp import matplotlib.pyplot as plt import seaborn as sns sns.set_style('white') from misshapen import nonshape # - # ### Sample data x = np.load('./exampledata.npy') Fs = 1000 # # 1. oscdetect_whitten # Function inputs f_range = (10,20) f_slope = ((2,8),(25,35)) window_size_slope = 1000 window_size_spec = 350 isosc = nonshape.oscdetect_whitten(x, f_range, Fs, f_slope, window_size_slope=window_size_slope, window_size_spec=window_size_spec, plot_spectral_slope_fit = True, plot_powerts = True)
notebooks/testing notebook.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import matplotlib.pyplot as plt
import numpy as np
import pyskip

# %%time
# Rasterize a filled disc of radius R centered in a D x D grid, one row
# (scanline) at a time.
D = 1024
R = D // 4
disc = pyskip.Tensor((D, D), 0)
for y in range(D):
    # Chord of the circle on this row: x^2 = R^2 - (y - cy)^2.
    discriminant = R**2 - (y - D // 2 + 0.5)**2
    if discriminant < 0:
        continue  # row lies entirely outside the circle
    x_0 = int(D // 2 - 0.5 - discriminant**0.5)
    x_1 = int(D // 2 - 0.5 + discriminant**0.5)
    disc[x_0:x_1, y] = 1

plt.imshow(disc.to_numpy())

# %%timeit
conv_disc = disc[D // 4:-D // 4, D // 4:-D // 4].eval()

plt.imshow(disc[D // 4:-D // 4, D // 4:-D // 4].to_numpy())


def conv_2d(tensor, kernel, padding=0, fill=0):
    """2-D convolution of `tensor` with `kernel` ("valid" after padding).

    Parameters
    ----------
    tensor : pyskip.Tensor
        2-D input.
    kernel : pyskip.Tensor
        2-D kernel; applied unflipped (i.e. cross-correlation in
        signal-processing terms).
    padding : int
        Number of border cells of value `fill` added on every side.
    fill : scalar
        Value used for the border padding.

    Returns
    -------
    pyskip.Tensor of shape (H + 2*padding - kH + 1, W + 2*padding - kW + 1).
    """
    pad_shape = (
        tensor.shape[0] + 2 * padding,
        tensor.shape[1] + 2 * padding,
    )
    if padding > 0:
        pad = pyskip.Tensor(shape=pad_shape, dtype=tensor.dtype, val=fill)
        pad[padding:-padding, padding:-padding] = tensor
    else:
        # Fix: with padding == 0 the original slice [0:-0, 0:-0] was empty,
        # so the input was silently dropped; use the input directly instead.
        pad = tensor
    out_shape = (
        pad_shape[0] - kernel.shape[0] + 1,
        pad_shape[1] - kernel.shape[1] + 1,
    )
    out = pyskip.Tensor(shape=out_shape, dtype=tensor.dtype, val=0)
    # Accumulate one shifted, scaled copy of the padded input per kernel tap.
    for y in range(kernel.shape[1]):
        for x in range(kernel.shape[0]):
            stop_x = pad_shape[0] - kernel.shape[0] + x + 1
            stop_y = pad_shape[1] - kernel.shape[1] + y + 1
            out += kernel[x, y] * pad[x:stop_x, y:stop_y]
    return out


# +
# %%time
# Horizontal Sobel filter over the disc: responds at the left/right edges.
edges = conv_2d(
    tensor=disc,
    kernel = pyskip.Tensor.from_list([
        [-1, 0, 1],
        [-2, 0, 2],
        [-1, 0, 1],
    ]),
    padding=1,
).eval()
# -

plt.imshow(np.abs(edges.to_numpy()))

from scipy.signal import convolve2d

disc_np = disc.to_numpy()

# +
# %%time
# Same Sobel filter via scipy, for a timing/result comparison.
edges_np = convolve2d(
    disc_np,
    np.array([
        [-1, 0, 1],
        [-2, 0, 2],
        [-1, 0, 1],
    ]),
)
# -

plt.imshow(np.abs(edges_np))


def gaussian_kernel(size=3, std=1.0):
    """Returns a 2D Gaussian kernel array."""
    # Fix: the original referenced the undefined name `signal` (only
    # `convolve2d` was imported above), raising NameError on every call.
    # The Gaussian window lives in scipy.signal.windows.
    from scipy.signal import windows
    K = windows.gaussian(size, std=std).reshape(size, 1)
    return np.outer(K, K)
notebooks/taylor_tensor_test.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/Cloblak/aipi540_deeplearning/blob/main/1D_CNN_Attempts/1D_CNN_asof_111312FEB.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="Xj0pR3efRVrc" colab={"base_uri": "https://localhost:8080/"} outputId="bf48ad35-f7fc-4399-fbc2-f4a209f0092e" # !pip install alpaca_trade_api # + [markdown] id="hdKRKIogGAu6" # Features To Consider # - Targets are only predicting sell within market hours, i.e. at 1530, target is prediciting price for 1100 the next day. Data from pre and post market is taken into consideration, and a sell or buy will be indicated if the price will flucuate after close. # + id="J1fWNRnTQZX-" # Import Dependencies import numpy as np import pandas as pd import torch from torch.utils.data import DataLoader, TensorDataset from torch.autograd import Variable from torch.nn import Linear, ReLU, CrossEntropyLoss, Sequential, Conv2d, MaxPool2d, Module, Softmax, BatchNorm2d, Dropout from torch.optim import Adam, SGD from torch.utils.data import DataLoader, TensorDataset from torch.utils.tensorboard import SummaryWriter from torchsummary import summary import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from tqdm.notebook import tqdm import alpaca_trade_api as tradeapi from datetime import datetime, timedelta, tzinfo, timezone, time import os.path import ast import threading import math import seaborn as sns import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler, MinMaxScaler import warnings # + colab={"base_uri": "https://localhost:8080/"} id="DrI_WR501Iis" 
outputId="ba4cf4c7-9ae1-4630-ca9a-9fc731d149ee" random_seed = 182 torch.manual_seed(random_seed) # + id="IXnO8ykgRIuv" PAPER_API_KEY = "<KEY>" PAPER_SECRET_KEY = "<KEY>" PAPER_BASE_URL = 'https://paper-api.alpaca.markets' # + id="_3XShkLcRQMs" api = tradeapi.REST(PAPER_API_KEY, PAPER_SECRET_KEY, PAPER_BASE_URL, api_version='v2') # + id="tINNlljbRaDs" colab={"base_uri": "https://localhost:8080/"} outputId="2c5107d2-98da-4fbe-8b2e-76fa79da3399" def prepost_train_test_validate_offset_data(api, ticker, interval, train_days=180, test_days=60, validate_days=30, offset_days = 0): ticker_data_dict = None ticker_data_dict = {} monthly_data_dict = None monthly_data_dict = {} interval_loop_data = None interval_loop_data = pd.DataFrame() stock_data = None days_to_collect = train_days + test_days + validate_days + offset_days TZ = 'US/Eastern' start = pd.to_datetime((datetime.now() - timedelta(days=days_to_collect)).strftime("%Y-%m-%d %H:%M"), utc=True) end = pd.to_datetime(datetime.now().strftime("%Y-%m-%d %H:%M"), utc=True) stock_data = api.get_bars(ticker, interval, start = start.isoformat(), end=end.isoformat(), adjustment="raw").df interval_loop_data = interval_loop_data.append(stock_data) df_start_ref = interval_loop_data.index[0] start_str_ref = pd.to_datetime(start, utc=True) while start_str_ref.value < ( pd.to_datetime(df_start_ref, utc=True) - pd.Timedelta(days=2.5)).value: end_new = pd.to_datetime(interval_loop_data.index[0].strftime("%Y-%m-%d %H:%M"), utc=True).isoformat() stock_data_new = None stock_data_new = api.get_bars(ticker, interval, start=start, end=end_new, adjustment="raw").df #stock_data_new = stock_data_new.reset_index() interval_loop_data = interval_loop_data.append(stock_data_new).sort_values(by=['index'], ascending=True) df_start_ref = interval_loop_data.index[0] stock_yr_min_df = interval_loop_data.copy() stock_yr_min_df["Open"] = stock_yr_min_df['open'] stock_yr_min_df["High"]= stock_yr_min_df["high"] stock_yr_min_df["Low"] = stock_yr_min_df["low"] 
stock_yr_min_df["Close"] = stock_yr_min_df["close"] stock_yr_min_df["Volume"] = stock_yr_min_df["volume"] stock_yr_min_df["VolumeWeightedAvgPrice"] = stock_yr_min_df["vwap"] stock_yr_min_df["Time"] = stock_yr_min_df.index.tz_convert(TZ) stock_yr_min_df.index = stock_yr_min_df.index.tz_convert(TZ) final_df = stock_yr_min_df.filter(["Time", "Open", "High", "Low", "Close", "Volume", "VolumeWeightedAvgPrice"], axis = 1) first_day = final_df.index[0] traintest_day = final_df.index[-1] - pd.Timedelta(days= test_days+validate_days+offset_days) valtest_day = final_df.index[-1] - pd.Timedelta(days= test_days+offset_days) last_day = final_df.index[-1] - pd.Timedelta(days= offset_days) training_df = final_df.loc[first_day:traintest_day] #(data_split - pd.Timedelta(days=1))] validate_df = final_df.loc[traintest_day:valtest_day] testing_df = final_df.loc[valtest_day:last_day] full_train = final_df.loc[first_day:last_day] offset_df = final_df.loc[last_day:] return training_df, validate_df, testing_df, full_train, offset_df, final_df, traintest_day, valtest_day from datetime import date train_start = date(2017, 1, 1) train_end = date(2020, 3, 29) train_delta = train_end - train_start print(f'Number of days of Training Data {train_delta.days}') val_day_num = 400 print(f'Number of days of Validation Data {val_day_num}') test_start = train_end + timedelta(val_day_num) test_end = date.today() test_delta = (test_end - test_start) print(f'Number of days of Holdout Test Data {test_delta.days}') ticker = "CORN" # Ticker Symbol to Test interval = "5Min" # Interval of bars train_day_int = train_delta.days # Size of training set (Jan 2010 - Oct 2017) val_day_int = val_day_num # Size of validation set test_day_int = test_delta.days # Size of test set offset_day_int = 0 # Number of days to off set the training data train_raw, val_raw, test_raw, full_raw, offset_raw, complete_raw, traintest_day, testval_day = prepost_train_test_validate_offset_data(api, ticker, interval, 
train_days=train_day_int, test_days=test_day_int, validate_days=val_day_int, offset_days = offset_day_int) def timeFilterAndBackfill(df): """ Prep df to be filled out for each trading day: Time Frame: 0930-1930 Backfilling NaNs Adjusting Volume to Zero if no Trading data is present - Assumption is that there were no trades duing that time We will build over lapping arrays by 30 min to give ourselfs more oppurtunities to predict during a given trading day """ df = df.between_time('07:29','17:29') # intial sorting of data TZ = 'US/Eastern' # define the correct timezone start_dateTime = pd.Timestamp(year = df.index[0].year, month = df.index[0].month, day = df.index[0].day, hour = 7, minute = 25, tz = TZ) end_dateTime = pd.Timestamp(year = df.index[-1].year, month = df.index[-1].month, day = df.index[-1].day, hour = 17, minute = 35, tz = TZ) # build blank index that has ever 5 min interval represented dateTime_index = pd.date_range(start_dateTime, end_dateTime, freq='5min').tolist() dateTime_index_df = pd.DataFrame() dateTime_index_df["Time"] = dateTime_index filtered_df = pd.merge_asof(dateTime_index_df, df, on='Time').set_index("Time").between_time('09:29','17:29') # create the close array by back filling NA, to represent no change in close closeset_list = [] prev_c = None for c in filtered_df["Close"]: if prev_c == None: if math.isnan(c): prev_c = 0 closeset_list.append(0) else: prev_c = c closeset_list.append(c) elif prev_c != None: if c == prev_c: closeset_list.append(c) elif math.isnan(c): closeset_list.append(prev_c) else: closeset_list.append(c) prev_c = c filtered_df["Close"] = closeset_list # create the volume volumeset_list = [] prev_v = None for v in filtered_df["Volume"]: if prev_v == None: if math.isnan(v): prev_v = 0 volumeset_list.append(0) else: prev_v = v volumeset_list.append(v) elif prev_v != None: if v == prev_v: volumeset_list.append(0) prev_v = v elif math.isnan(v): volumeset_list.append(0) prev_v = 0 else: volumeset_list.append(v) prev_v = v 
filtered_df["Volume"] = volumeset_list adjvolumeset_list = [] prev_v = None for v in filtered_df["VolumeWeightedAvgPrice"]: if prev_v == None: if math.isnan(v): prev_v = 0 adjvolumeset_list.append(0) else: prev_v = v adjvolumeset_list.append(v) elif prev_v != None: if v == prev_v: adjvolumeset_list.append(0) prev_v = v elif math.isnan(v): adjvolumeset_list.append(0) prev_v = 0 else: adjvolumeset_list.append(v) prev_v = v filtered_df["VolumeWeightedAvgPrice"] = adjvolumeset_list preped_df = filtered_df.backfill() return preped_df # + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="J6Cw_PVrh2lJ" outputId="5d7fd0a2-0aa0-43a6-db2d-1d9c268c0169" train_raw[275:300] # + id="fWqLBPQPbjYZ" outputId="7728dc2d-2eb5-4a0a-d25b-2fec64721193" colab={"base_uri": "https://localhost:8080/", "height": 392} def buildTargets_VolOnly(full_df = full_raw, train_observations = train_raw.shape[0], val_observations = val_raw.shape[0], test_observations = test_raw.shape[0], alph = .55, volity_int = 10): """ This function will take a complete set of train, val, and test data and return the targets. 
Volitility will be calculated over the 252 5min incriments The Target shift is looking at 2 hours shift from current time """ returns = np.log(full_df['Close']/(full_df['Close'].shift())) returns.fillna(0, inplace=True) volatility = returns.rolling(window=(volity_int)).std()*np.sqrt(volity_int) return volatility #return train_targets, val_targets, test_targets, full_targets volatility = buildTargets_VolOnly() fig = plt.figure(figsize=(15, 7)) ax1 = fig.add_subplot(1, 1, 1) volatility.plot(ax=ax1, color = "red") ax1.set_xlabel('Date') ax1.set_ylabel('Volatility', color = "red") ax1.set_title(f'Annualized volatility for {ticker}') ax2 = ax1.twinx() full_raw.Close.plot(ax=ax2, color = "blue") ax2.set_ylabel('Close', color = "blue") ax2.axvline(x=full_raw.index[train_raw.shape[0]]) ax2.axvline(x=full_raw.index[val_raw.shape[0]+train_raw.shape[0]]) plt.show() # + id="0_yU14VEM37m" train = timeFilterAndBackfill(train_raw) val = timeFilterAndBackfill(val_raw) test = timeFilterAndBackfill(test_raw) train = train[train.index.dayofweek <= 4].copy() val = val[val.index.dayofweek <= 4].copy() test = test[test.index.dayofweek <= 4].copy() train["Open"] = np.where((train["Volume"] == 0), train["Close"], train["Open"]) train["High"] = np.where((train["Volume"] == 0), train["Close"], train["High"]) train["Low"] = np.where((train["Volume"] == 0), train["Close"], train["Low"]) val["Open"] = np.where((val["Volume"] == 0), val["Close"], val["Open"]) val["High"] = np.where((val["Volume"] == 0), val["Close"], val["High"]) val["Low"] = np.where((val["Volume"] == 0), val["Close"], val["Low"]) test["Open"] = np.where((test["Volume"] == 0), test["Close"], test["Open"]) test["High"] = np.where((test["Volume"] == 0), test["Close"], test["High"]) test["Low"] = np.where((test["Volume"] == 0), test["Close"], test["Low"]) def strided_axis0(a, L, overlap=1): if L==overlap: raise Exception("Overlap arg must be smaller than length of windows") S = L - overlap nd0 = ((len(a)-L)//S)+1 if 
nd0*S-S!=len(a)-L: warnings.warn("Not all elements were covered") m,n = a.shape s0,s1 = a.strides return np.lib.stride_tricks.as_strided(a, shape=(nd0,L,n), strides=(S*s0,s0,s1)) # OLDER CODE WITHOUT OVERLAP OF LABELING # def blockshaped(arr, nrows, ncols): # """ # Return an array of shape (n, nrows, ncols) where # n * nrows * ncols = arr.size # If arr is a 2D array, the returned array should look like n subblocks with # each subblock preserving the "physical" layout of arr. # """ # h, w = arr.shape # assert h % nrows == 0, f"{h} rows is not evenly divisible by {nrows}" # assert w % ncols == 0, f"{w} cols is not evenly divisible by {ncols}" # return np.flip(np.rot90((arr.reshape(h//nrows, nrows, -1, ncols) # .swapaxes(1,2) # .reshape(-1, nrows, ncols)), axes = (1, 2)), axis = 1) def blockshaped(arr, nrows, ncols, overlapping_5min_intervals = 12): """ Return an array of shape (n, nrows, ncols) where n * nrows * ncols = arr.size If arr is a 2D array, the returned array should look like n subblocks with each subblock preserving the "physical" layout of arr. 
""" h, w = arr.shape assert h % nrows == 0, f"{h} rows is not evenly divisible by {nrows}" assert w % ncols == 0, f"{w} cols is not evenly divisible by {ncols}" return np.flip(np.rot90((strided_axis0(arr, 24, overlap=overlapping_5min_intervals).reshape(-1, nrows, ncols)), axes = (1, 2)), axis = 1) train_tonp = train[["Open", "High", "Low", "Close", "Volume"]] val_tonp = val[["Open", "High", "Low", "Close", "Volume"]] test_tonp = test[["Open", "High", "Low", "Close", "Volume"]] train_array = train_tonp.to_numpy() val_array = val_tonp.to_numpy() test_array = test_tonp.to_numpy() X_train_pre_final = blockshaped(train_array, 24, 5, overlapping_5min_intervals = 12) X_val_pre_final = blockshaped(val_array, 24, 5, overlapping_5min_intervals = 12) X_test_pre_final = blockshaped(test_array, 24, 5, overlapping_5min_intervals = 12) # X_train_pre_final = blockshaped(train_array, 24, 5) # X_val_pre_final = blockshaped(val_array, 24, 5) # X_test_pre_final = blockshaped(test_array, 24, 5) # + colab={"base_uri": "https://localhost:8080/"} id="V8KcE_XCidzo" outputId="c25cfa07-04e9-4ad2-b0b3-ca548fda67f0" X_train_pre_final[0] # + id="Pe89LdnsLltO" # create target from OHLC and Volume Data def buildTargets(obs_array, alph = .55, volity_int = 10): """ This function will take a complete set of train, val, and test data and return the targets. Volitility will be calculated over the 24 5min incriments. The Target shift is looking at 2 hours shift from current time shift_2hour = The amount of time the data interval take to equal 2 hours (i.e. 
5 min data interval is equal to 24) alph = The alpha value for calculating the shift in price volity_int = the number of incriments used to calculate volitility """ target_close_list =[] for arr in obs_array: target_close_list.append(arr[3][-1]) target_close_df = pd.DataFrame() target_close_df["Close"] = target_close_list target_close_df["Volitility"] = target_close_df["Close"].rolling(volity_int).std() # print(len(volatility), len(target_close_df["Close"])) targets = [2] * len(target_close_df.Close) targets = np.where(target_close_df.Close.shift() >= (target_close_df.Close * (1 + alph * target_close_df["Volitility"])), 1, targets) targets = np.where(target_close_df.Close.shift() <= (target_close_df.Close * (1 - alph * target_close_df["Volitility"])), 0, targets) return targets # + id="4aYPOa7INyAl" volity_val = 10 alph = .015 y_train_pre_final = buildTargets(X_train_pre_final, alph=alph, volity_int = volity_val) y_val_pre_final = buildTargets(X_val_pre_final, alph=alph, volity_int = volity_val) y_test_pre_final = buildTargets(X_test_pre_final, alph=alph, volity_int = volity_val) # + id="vWIY2rwEYCfM" def get_class_distribution(obj): count_dict = { "up": 0, "flat": 0, "down": 0, } for i in obj: if i == 1: count_dict['up'] += 1 elif i == 0: count_dict['down'] += 1 elif i == 2: count_dict['flat'] += 1 else: print("Check classes.") return count_dict # + id="-BsVCfr8YCiX" outputId="841f576c-43c6-4654-f5ab-05d7ca29f7d7" colab={"base_uri": "https://localhost:8080/", "height": 288} bfig, axes = plt.subplots(nrows=1, ncols=3, figsize=(25,7)) # Train sns.barplot(data = pd.DataFrame.from_dict([get_class_distribution(y_train_pre_final)]).melt(), x = "variable", y="value", hue="variable", ax=axes[0]).set_title('Class Distribution in Train Set') # Validation sns.barplot(data = pd.DataFrame.from_dict([get_class_distribution(y_val_pre_final)]).melt(), x = "variable", y="value", hue="variable", ax=axes[1]).set_title('Class Distribution in Val Set') # Test sns.barplot(data = 
pd.DataFrame.from_dict([get_class_distribution(y_test_pre_final)]).melt(), x = "variable", y="value", hue="variable", ax=axes[2]).set_title('Class Distribution in Test Set') # + id="1XdpMcVCo2_b" def createFinalData_RemoveLateAfternoonData(arr, labels): assert arr.shape[0] == len(labels), "X data do not match length of y labels" step_count = 0 filtered_y_labels = [] for i in range(arr.shape[0]): if i == 0: final_arr = arr[i] filtered_y_labels.append(labels[i]) #print(f'Appending index {i}, step_count: {step_count}') step_count += 1 elif i == 1: final_arr = np.stack((final_arr, arr[i])) filtered_y_labels.append(labels[i]) step_count += 1 elif step_count == 0: final_arr = np.vstack((final_arr, arr[i][None])) filtered_y_labels.append(labels[i]) #print(f'Appending index {i}, step_count: {step_count}') step_count += 1 elif (step_count) % 5 == 0: #print(f'skipping {i} array, step_count: {step_count}') step_count += 1 elif (step_count) % 6 == 0: #print(f'skipping {i} array, step_count: {step_count}') step_count += 1 elif (step_count) % 7 == 0: #print(f'skipping {i} array, step_count: {step_count}') step_count = 0 else: final_arr = np.vstack((final_arr, arr[i][None])) filtered_y_labels.append(labels[i]) #print(f'Appending index {i}, step_count: {step_count}') step_count += 1 return final_arr, filtered_y_labels X_train, y_train = createFinalData_RemoveLateAfternoonData(X_train_pre_final, y_train_pre_final) X_val, y_val = createFinalData_RemoveLateAfternoonData(X_val_pre_final, y_val_pre_final) X_test, y_test = createFinalData_RemoveLateAfternoonData(X_test_pre_final, y_test_pre_final) y_train = np.array(y_train) y_val = np.array(y_val) y_test = np.array(y_test) # + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="ZyvritE4qPNR" outputId="b5a5abcb-f11d-415c-89ed-f9c87ca726c0" # Check it arrays are made correctly train[12:48] # + colab={"base_uri": "https://localhost:8080/"} id="Bi-1VYrmn0Eb" outputId="823c1ee7-8f38-4358-b8a6-0111571a20b1" 
np.set_printoptions(threshold=200) y_train_pre_final[0:24] # + id="xPkrkhqV4Ef-" ###### # Code fro scaling at a later date ###### # from sklearn.preprocessing import MinMaxScaler scalers = {} for i in range(X_train.shape[1]): scalers[i] = MinMaxScaler() X_train[:, i, :] = scalers[i].fit_transform(X_train[:, i, :]) for i in range(X_val.shape[1]): scalers[i] = MinMaxScaler() X_val[:, i, :] = scalers[i].fit_transform(X_val[:, i, :]) for i in range(X_test.shape[1]): scalers[i] = MinMaxScaler() X_test[:, i, :] = scalers[i].fit_transform(X_test[:, i, :]) # + id="kNH38ORXLGfn" def get_class_distribution(obj): count_dict = { "up": 0, "flat": 0, "down": 0, } for i in obj: if i == 1: count_dict['up'] += 1 elif i == 0: count_dict['down'] += 1 elif i == 2: count_dict['flat'] += 1 else: print("Check classes.") return count_dict # + outputId="2612551f-6215-4e62-94ef-797f0a58ea4b" colab={"base_uri": "https://localhost:8080/", "height": 288} id="qqTz9-J7LGft" bfig, axes = plt.subplots(nrows=1, ncols=3, figsize=(25,7)) # Train sns.barplot(data = pd.DataFrame.from_dict([get_class_distribution(y_train)]).melt(), x = "variable", y="value", hue="variable", ax=axes[0]).set_title('Class Distribution in Train Set') # Validation sns.barplot(data = pd.DataFrame.from_dict([get_class_distribution(y_val)]).melt(), x = "variable", y="value", hue="variable", ax=axes[1]).set_title('Class Distribution in Val Set') # Test sns.barplot(data = pd.DataFrame.from_dict([get_class_distribution(y_test)]).melt(), x = "variable", y="value", hue="variable", ax=axes[2]).set_title('Class Distribution in Test Set') # + id="tdeD0Rsc4rqJ" ###### ONLY EXECUTE FOR 2D CNN ##### X_train = X_train.reshape(X_train.shape[0], 1, X_train.shape[1], X_train.shape[2]) X_val = X_val.reshape(X_val.shape[0], 1, X_val.shape[1], X_val.shape[2]) X_test = X_test.reshape(X_test.shape[0], 1, X_test.shape[1], X_test.shape[2]) # + colab={"base_uri": "https://localhost:8080/"} id="QLv4oER44svp" 
outputId="fff37c5e-6bb1-4cff-a2bd-340f04bfe005" print(f'X Train Length {X_train.shape}, y Train Label Length {y_train.shape}') print(f'X Val Length {X_val.shape}, y Val Label Length {y_val.shape}') print(f'X Test Length {X_test.shape}, y Test Label Length {y_test.shape}') # + [markdown] id="KHDKw2Fj3Q9Z" # # 2D CNN Build Model # + id="-s53Ic5bo3vy" trainset = TensorDataset(torch.from_numpy(X_train).float(), torch.from_numpy(y_train).long()) valset = TensorDataset(torch.from_numpy(X_val).float(), torch.from_numpy(y_val).long()) testset = TensorDataset(torch.from_numpy(X_test).float(), torch.from_numpy(y_test).long()) # + colab={"base_uri": "https://localhost:8080/"} id="oTp_I6ku4l5S" outputId="3872a917-be5c-4bed-87f4-02d2f6cc55d3" trainset # + colab={"base_uri": "https://localhost:8080/"} id="DOj73NY0o3yx" outputId="91faedc8-ff53-4e27-c068-ef2112c6591e" batch_size = 1 # train_data = [] # for i in range(len(X_train)): # train_data.append([X_train[i].astype('float'), y_train[i]]) train_loader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=False) i1, l1 = next(iter(train_loader)) print(i1.shape) # val_data = [] # for i in range(len(X_val)): # val_data.append([X_val[i].astype('float'), y_val[i]]) val_loader = torch.utils.data.DataLoader(valset, batch_size=batch_size, shuffle=False) i1, l1 = next(iter(val_loader)) print(i1.shape) test_loader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False) i1, l1 = next(iter(test_loader)) print(i1.shape) # + colab={"base_uri": "https://localhost:8080/"} id="atpanhjQ1reK" outputId="c13f9423-3095-4cc9-cbca-1bfd1e419543" # Get next batch of training images windows, labels = iter(train_loader).next() print(windows) windows = windows.numpy() # plot the windows in the batch, along with the corresponding labels for idx in range(batch_size): print(labels[idx]) # + id="O94NjECN1rjZ" # Set up dict for dataloaders dataloaders = {'train':train_loader,'val':val_loader} # Store size of training and 
validation sets dataset_sizes = {'train':len(trainset),'val':len(valset)} # Get class names associated with labels classes = [0,1,2] # + id="vXEmYChM1rm6" class StockShiftClassification(nn.Module): def __init__(self): super(StockShiftClassification, self).__init__() self.conv1 = nn.Conv2d(1, 32, kernel_size = (1,3), stride=1, padding = 1) self.pool1 = nn.MaxPool2d((1,4),4) self.conv2 = nn.Conv2d(32, 64, kernel_size = (1,3), stride=1, padding = 1) self.pool2 = nn.MaxPool2d((1,3),3) self.conv3 = nn.Conv2d(64, 128, kernel_size = (1,3), stride=1, padding = 1) self.pool3 = nn.MaxPool2d((1,2),2) self.fc1 = nn.Linear(256,1000) #calculate this self.fc2 = nn.Linear(1000, 500) #self.fc3 = nn.Linear(500, 3) def forward(self, x): x = F.relu(self.conv1(x)) x = self.pool1(x) x = F.relu(self.conv2(x)) x = self.pool2(x) x = F.relu(self.conv3(x)) x = self.pool3(x) #print(x.size(1)) x = x.view(x.size(0), -1) # Linear layer x = self.fc1(x) x = self.fc2(x) #x = self.fc3(x) output = x#F.softmax(x, dim=1) return output # + colab={"base_uri": "https://localhost:8080/"} id="54Ia_q7D1rp5" outputId="1390682b-5118-4c53-ed0b-63f176a76775" # Instantiate the model net = StockShiftClassification().float() # Display a summary of the layers of the model and output shape after each layer summary(net,(windows.shape[1:]),batch_size=batch_size,device="cpu") # + id="QrmujRIy6z_K" def train_model(model, criterion, optimizer, train_loaders, device, num_epochs=50, scheduler=onecycle_scheduler): model = model.to(device) # Send model to GPU if available writer = SummaryWriter() # Instantiate TensorBoard iter_num = {'train':0,'val':0} # Track total number of iterations for epoch in range(num_epochs): print('Epoch {}/{}'.format(epoch, num_epochs - 1)) print('-' * 10) # Each epoch has a training and validation phase for phase in ['train', 'val']: if phase == 'train': model.train() # Set model to training mode else: model.eval() # Set model to evaluate mode running_loss = 0.0 running_corrects = 0 # Get the 
input images and labels, and send to GPU if available for inputs, labels in dataloaders[phase]: inputs = inputs.to(device) labels = labels.to(device) # Zero the weight gradients optimizer.zero_grad() # Forward pass to get outputs and calculate loss # Track gradient only for training data with torch.set_grad_enabled(phase == 'train'): outputs = model(inputs) # print(outputs) _, preds = torch.max(outputs, 1) loss = criterion(outputs, labels) # Backpropagation to get the gradients with respect to each weight # Only if in train if phase == 'train': loss.backward() # Update the weights optimizer.step() # Convert loss into a scalar and add it to running_loss running_loss += loss.item() * inputs.size(0) # Track number of correct predictions running_corrects += torch.sum(preds == labels.data) # Iterate count of iterations iter_num[phase] += 1 # Write loss for batch to TensorBoard writer.add_scalar("{} / batch loss".format(phase), loss.item(), iter_num[phase]) # scheduler.step() # Calculate and display average loss and accuracy for the epoch epoch_loss = running_loss / dataset_sizes[phase] epoch_acc = running_corrects.double() / dataset_sizes[phase] print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc)) # Write loss and accuracy for epoch to TensorBoard writer.add_scalar("{} / epoch loss".format(phase), epoch_loss, epoch) writer.add_scalar("{} / epoch accuracy".format(phase), epoch_acc, epoch) writer.close() return # + id="9rIib1fI60C5" outputId="9eb1a798-5a73-4d18-fe94-1db7c2885789" colab={"base_uri": "https://localhost:8080/"} # Train the model device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # Cross entropy loss combines softmax and nn.NLLLoss() in one single class. 
weights = torch.tensor([1.5, 2.25, 1.]).to(device) criterion_weighted = nn.CrossEntropyLoss(weight=weights) criterion = nn.CrossEntropyLoss() # Define optimizer #optimizer = optim.SGD(net.parameters(), lr=0.001) optimizer = optim.Adam(net.parameters(), lr=0.001, weight_decay=0.00001) n_epochs= 10 # For demo purposes. Use epochs>100 for actual training onecycle_scheduler = optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, base_momentum = 0.8, steps_per_epoch=len(train_loader), epochs=n_epochs) train_model(net, criterion, optimizer, dataloaders, device, num_epochs=n_epochs) #, scheduler=onecycle_scheduler) # + id="5PPJOMlc60Gg" def test_model(model,val_loader,device): # Turn autograd off with torch.no_grad(): # Set the model to evaluation mode model = model.to(device) model.eval() # Set up lists to store true and predicted values y_true = [] test_preds = [] # Calculate the predictions on the test set and add to list for data in val_loader: inputs, labels = data[0].to(device), data[1].to(device) # Feed inputs through model to get raw scores logits = model.forward(inputs) #print(f'Logits: {logits}') # Convert raw scores to probabilities (not necessary since we just care about discrete probs in this case) probs = F.log_softmax(logits, dim=1) #print(f'Probs after LogSoft: {probs}') # Get discrete predictions using argmax preds = np.argmax(probs.cpu().numpy(),axis=1) # Add predictions and actuals to lists test_preds.extend(preds) y_true.extend(labels) # Calculate the accuracy test_preds = np.array(test_preds) y_true = np.array(y_true) test_acc = np.sum(test_preds == y_true)/y_true.shape[0] # Recall for each class recall_vals = [] for i in range(3): class_idx = np.argwhere(y_true==i) total = len(class_idx) correct = np.sum(test_preds[class_idx]==i) recall = correct / total recall_vals.append(recall) return test_acc, recall_vals # + id="sveM96JI60I1" outputId="6cd9df76-cc65-48a5-8c76-0079b1e0e96d" colab={"base_uri": "https://localhost:8080/"} # Calculate the test set 
def plot_confusion_matrix(cm,
                          target_names,
                          title='Confusion matrix',
                          cmap=None,
                          normalize=True):
    """Render a sklearn confusion matrix as an annotated matplotlib image.

    Arguments
    ---------
    cm:            confusion matrix from sklearn.metrics.confusion_matrix
    target_names:  class names used to label both axes, e.g. [0, 1, 2]
    title:         text displayed at the top of the figure
    cmap:          matplotlib colormap; defaults to 'Blues' when None
    normalize:     if True, annotate cells with row-normalised proportions;
                   if False, annotate with raw counts.  (The colour image
                   itself is always drawn from the raw counts, as in the
                   scikit-learn example this is adapted from.)

    Citation
    --------
    http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html
    """
    import matplotlib.pyplot as plt
    import numpy as np
    import itertools

    # Overall accuracy / error rate, computed from the raw counts.
    accuracy = np.trace(cm) / np.sum(cm).astype('float')
    misclass = 1 - accuracy

    if cmap is None:
        cmap = plt.get_cmap('Blues')

    plt.figure(figsize=(8, 6))
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()

    if target_names is not None:
        ticks = np.arange(len(target_names))
        plt.xticks(ticks, target_names, rotation=45)
        plt.yticks(ticks, target_names)

    # Row-normalise only for the per-cell annotations.
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]

    # Threshold that flips the annotation colour from black to white.
    thresh = cm.max() / 1.5 if normalize else cm.max() / 2
    for row, col in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        if normalize:
            label = "{:0.4f}".format(cm[row, col])
        else:
            label = "{:,}".format(cm[row, col])
        plt.text(col, row, label,
                 horizontalalignment="center",
                 color="white" if cm[row, col] > thresh else "black")

    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label\naccuracy={:0.4f}; misclass={:0.4f}'.format(accuracy, misclass))
    plt.show()
def train(epoch, train_x, train_y, val_x, val_y):
    """Run one full-batch training step and record train/val losses.

    Relies on the module-level ``model``, ``optimizer``, ``criterion``,
    ``train_losses`` and ``val_losses`` globals defined earlier in this
    notebook.

    Parameters
    ----------
    epoch : int
        Current epoch index (used only for periodic logging).
    train_x, train_y : torch.Tensor
        Full training inputs and integer class labels.
    val_x, val_y : torch.Tensor
        Full validation inputs and labels.
    """
    model.train()

    # NOTE: torch.autograd.Variable is deprecated and a no-op on modern
    # PyTorch; plain tensors are used directly.
    x_train, y_train = train_x, train_y
    x_val, y_val = val_x, val_y

    # Move the full batches to the GPU when one is available.
    if torch.cuda.is_available():
        x_train = x_train.cuda()
        y_train = y_train.cuda()
        x_val = x_val.cuda()
        y_val = y_val.cuda()

    # Clear gradients accumulated by the previous step.
    optimizer.zero_grad()

    output_train = model(x_train)
    loss_train = criterion(output_train, y_train)

    # FIX: compute the validation loss without building an autograd graph.
    with torch.no_grad():
        output_val = model(x_val)
        loss_val = criterion(output_val, y_val)

    # FIX: store plain floats, not live tensors — appending tensors kept
    # every epoch's computation graph alive and broke the later
    # plt.plot(train_losses) call.
    train_losses.append(loss_train.item())
    val_losses.append(loss_val.item())

    # Backpropagate and update the weights.
    loss_train.backward()
    optimizer.step()

    if epoch % 2 == 0:
        # printing the validation loss
        print('Epoch : ', epoch + 1, '\t', 'loss :', loss_val.item())
def train_model(model, criterion, optimizer, train_loaders, device, num_epochs=50):
    """Train *model* with a train/val phase per epoch, logging to TensorBoard.

    Parameters
    ----------
    model : torch.nn.Module
    criterion : callable
        Loss mapping (outputs, labels) -> scalar loss.
    optimizer : torch.optim.Optimizer
    train_loaders : dict
        ``{'train': DataLoader, 'val': DataLoader}``.
    device : torch.device or str
    num_epochs : int
    """
    model = model.to(device)     # Send model to GPU if available
    writer = SummaryWriter()     # Instantiate TensorBoard
    iter_num = {'train': 0, 'val': 0}  # Track total number of iterations

    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)

        # Each epoch has a training and a validation phase.
        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()   # Set model to training mode
            else:
                model.eval()    # Set model to evaluate mode

            running_loss = 0.0
            running_corrects = 0

            # FIX: iterate the loaders passed in as ``train_loaders`` —
            # the original ignored this parameter and read the module-level
            # ``dataloaders`` global instead.
            for inputs, labels in train_loaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)

                # Zero the weight gradients.
                optimizer.zero_grad()

                # Forward pass; track gradients only during training.
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)

                    # Backpropagate and update weights — train phase only.
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()

                # Accumulate loss (scaled by batch size) and correct count.
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)

                iter_num[phase] += 1
                writer.add_scalar("{} / batch loss".format(phase),
                                  loss.item(), iter_num[phase])

            # FIX: derive the dataset size from the loader itself rather
            # than the ``dataset_sizes`` global.
            phase_size = len(train_loaders[phase].dataset)
            epoch_loss = running_loss / phase_size
            epoch_acc = running_corrects.double() / phase_size

            print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))

            # Write loss and accuracy for the epoch to TensorBoard.
            writer.add_scalar("{} / epoch loss".format(phase), epoch_loss, epoch)
            writer.add_scalar("{} / epoch accuracy".format(phase), epoch_acc, epoch)

    writer.close()
    return
Use epochs>100 for actual training # onecycle_scheduler = optim.lr_scheduler.OneCycleLR(optimizer, # max_lr=0.01, # base_momentum = 0.8, # steps_per_epoch=len(train_loader), # epochs=n_epochs) train_model(net, criterion, optimizer, dataloaders, device, num_epochs=n_epochs) #, scheduler=onecycle_scheduler) # + id="P5TjVx1_GvyB" def test_model(model,val_loader,device): # Turn autograd off with torch.no_grad(): # Set the model to evaluation mode model.eval() # Set up lists to store true and predicted values y_true = [] test_preds = [] # Calculate the predictions on the test set and add to list for data in val_loader: inputs, labels = data[0].to(device), data[1].to(device) # Feed inputs through model to get raw scores logits = model.forward(inputs) #print(f'Logits: {logits}') # Convert raw scores to probabilities (not necessary since we just care about discrete probs in this case) probs = F.softmax(logits, dim=0) # print(f'Probs after LogSoft: {probs}') # Get discrete predictions using argmax preds = np.argmax(probs.cpu().numpy(),axis=1) # Add predictions and actuals to lists test_preds.extend(preds) y_true.extend(labels) # Calculate the accuracy test_preds = np.array(test_preds) y_true = np.array(y_true) test_acc = np.sum(test_preds == y_true)/y_true.shape[0] # Recall for each class recall_vals = [] for i in range(2): class_idx = np.argwhere(y_true==i) total = len(class_idx) correct = np.sum(test_preds[class_idx]==i) recall = correct / total recall_vals.append(recall) return test_acc, recall_vals # + id="ZD3M-mxzGv22" outputId="06b11fd0-9e9a-4543-da6f-ee499a45949e" colab={"base_uri": "https://localhost:8080/"} # Calculate the test set accuracy and recall for each class acc,recall_vals = test_model(net,test_loader,device) print('Test set accuracy is {:.3f}'.format(acc)) for i in range(2): print('For class {}, recall is {}'.format(classes[i],recall_vals[i])) # + id="KatHomsPGv6w" # + id="9toj4OljGv99" # + id="l67afvmlC1Xo" import time def train(model, optimizer, 
def train(model, optimizer, loss_fn, train_dl, val_dl, epochs=100, device='cpu'):
    """Keras-style fit loop: train *model* and collect per-epoch metrics.

    Parameters
    ----------
    model : torch.nn.Module
    optimizer : torch.optim.Optimizer
    loss_fn : callable
        Loss mapping (outputs, labels) -> scalar loss.
    train_dl, val_dl : torch.utils.data.DataLoader
        Loaders exposing a ``.dataset`` attribute (used for loss averaging).
    epochs : int
    device : torch.device or str

    Returns
    -------
    dict
        ``{'loss', 'val_loss', 'acc', 'val_acc'}``, one float per epoch,
        mirroring Keras' ``fit()`` history.
    """
    print('train() called: model=%s, opt=%s(lr=%f), epochs=%d, device=%s\n' % \
          (type(model).__name__, type(optimizer).__name__,
           optimizer.param_groups[0]['lr'], epochs, device))

    # Collects per-epoch loss and acc like Keras' fit().
    history = {'loss': [], 'val_loss': [], 'acc': [], 'val_acc': []}

    start_time_sec = time.time()

    for epoch in range(1, epochs + 1):
        # --- TRAIN AND EVALUATE ON TRAINING SET -----------------------------
        model.train()
        train_loss = 0.0
        num_train_correct = 0
        num_train_examples = 0

        for batch in train_dl:
            optimizer.zero_grad()
            x = batch[0].to(device)
            y = batch[1].to(device)
            yhat = model(x)
            loss = loss_fn(yhat, y)
            loss.backward()
            optimizer.step()

            # Batch-size-weighted running loss; accuracy from argmax.
            train_loss += loss.item() * x.size(0)
            num_train_correct += (torch.max(yhat, 1)[1] == y).sum().item()
            num_train_examples += x.shape[0]

        train_acc = num_train_correct / num_train_examples
        train_loss = train_loss / len(train_dl.dataset)

        # --- EVALUATE ON VALIDATION SET -------------------------------------
        model.eval()
        val_loss = 0.0
        num_val_correct = 0
        num_val_examples = 0

        # FIX: validation needs no autograd graph — saves memory and time.
        with torch.no_grad():
            for batch in val_dl:
                x = batch[0].to(device)
                y = batch[1].to(device)
                yhat = model(x)
                loss = loss_fn(yhat, y)

                val_loss += loss.item() * x.size(0)
                num_val_correct += (torch.max(yhat, 1)[1] == y).sum().item()
                num_val_examples += y.shape[0]

        val_acc = num_val_correct / num_val_examples
        val_loss = val_loss / len(val_dl.dataset)

        if epoch == 1 or epoch % 10 == 0:
            print('Epoch %3d/%3d, train loss: %5.2f, train acc: %5.2f, val loss: %5.2f, val acc: %5.2f' % \
                  (epoch, epochs, train_loss, train_acc, val_loss, val_acc))

        history['loss'].append(train_loss)
        history['val_loss'].append(val_loss)
        history['acc'].append(train_acc)
        history['val_acc'].append(val_acc)

    # END OF TRAINING LOOP
    end_time_sec = time.time()
    total_time_sec = end_time_sec - start_time_sec
    time_per_epoch_sec = total_time_sec / epochs
    print()
    print('Time total: %5.2f sec' % (total_time_sec))
    print('Time per epoch: %5.2f sec' % (time_per_epoch_sec))

    return history
def plot_confusion_matrix(cm,
                          target_names,
                          title='Confusion matrix',
                          cmap=None,
                          normalize=True):
    """Render a sklearn confusion matrix as an annotated matplotlib image.

    Arguments
    ---------
    cm:            confusion matrix from sklearn.metrics.confusion_matrix
    target_names:  class names used to label both axes, e.g. [0, 1]
    title:         text displayed at the top of the figure
    cmap:          matplotlib colormap; defaults to 'Blues' when None
    normalize:     if True, annotate cells with row-normalised proportions;
                   if False, annotate with raw counts.  (The colour image
                   itself is always drawn from the raw counts, as in the
                   scikit-learn example this is adapted from.)

    Citation
    --------
    http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html
    """
    import matplotlib.pyplot as plt
    import numpy as np
    import itertools

    # Overall accuracy / error rate, computed from the raw counts.
    accuracy = np.trace(cm) / np.sum(cm).astype('float')
    misclass = 1 - accuracy

    if cmap is None:
        cmap = plt.get_cmap('Blues')

    plt.figure(figsize=(8, 6))
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()

    if target_names is not None:
        ticks = np.arange(len(target_names))
        plt.xticks(ticks, target_names, rotation=45)
        plt.yticks(ticks, target_names)

    # Row-normalise only for the per-cell annotations.
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]

    # Threshold that flips the annotation colour from black to white.
    thresh = cm.max() / 1.5 if normalize else cm.max() / 2
    for row, col in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        if normalize:
            label = "{:0.4f}".format(cm[row, col])
        else:
            label = "{:,}".format(cm[row, col])
        plt.text(col, row, label,
                 horizontalalignment="center",
                 color="white" if cm[row, col] > thresh else "black")

    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label\naccuracy={:0.4f}; misclass={:0.4f}'.format(accuracy, misclass))
    plt.show()
torch.max(outputs, 1) # Append batch prediction results predlist=torch.cat([predlist,preds.view(-1).cpu()]) lbllist=torch.cat([lbllist,classes.view(-1).cpu()]) # Confusion matrix conf_mat=confusion_matrix(lbllist.numpy(), predlist.numpy()) plot_confusion_matrix(conf_mat, [0,1]) from sklearn.metrics import precision_score precision_score(lbllist.numpy(), predlist.numpy(), average='weighted') from sklearn.metrics import classification_report print(classification_report(lbllist.numpy(), predlist.numpy(), target_names=["down","up"], digits=4)) # + id="Qii8iha3QCuD" # + id="Yr3OmJwIQCxo" # + id="_4PW1DAfQC1M" # + id="4scWYtShQC4O" # + id="qC_czvrtQC7l" # + id="sD-NBQzbQC-u" # + id="fVjXxkcJQDB0"
1D_CNN_Attempts/1D_CNN_asof_111312FEB.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import pandas as pd import geopandas as gpd from IPython.display import HTML, display import matplotlib.pyplot as plt # - # This notebook joins and exports DFPS datasets for manual review of violations for potential injuries and other characteristics. It uses data exported from the state data portal on Feb 1, 2018 and searches keywords in the violation narrative field for possible injuries. # # - [non-compliance](https://data.texas.gov/Social-Services/DFPS-CCL-Non-Compliance-Data/tqgd-mf4x) # - [operations](https://data.texas.gov/Social-Services/DFPS-CCL-Daycare-and-Residential-Operations-Data/bc5r-88dy) # - [inspections](https://data.texas.gov/Social-Services/DFPS-CCL-Inspection-Investigation-Assessment-Data/m5q4-3y3d) non_compliance = pd.read_csv('../src/dfps-2018-02-01/non_compliance.csv').drop('Unnamed: 0', axis=1) operations = pd.read_csv('../src/dfps-2018-02-01/operations.csv').drop('Unnamed: 0', axis=1) inspections = pd.read_csv('../src/dfps-2018-02-01/assessment.csv').drop('Unnamed: 0', axis=1) # + import re # Get potential injury violations keywords = [ 'hurt', 'bruis', 'injur', 'fractur', 'conscious', 'lacerat' ] keywords_high = [ 'burn(?! 
cream)', 'sprain', 'concuss', 'seizure', 'vomit', 'tooth (?!brush|paste)', 'teeth ', 'responsiv', ] POTENTIAL_INJURY_KEYWORDS = '|'.join(keywords) POTENTIAL_INJURY_KEYWORDS_HIGH = '|'.join(keywords_high) potential_injury_violations = pd.concat([ non_compliance[ non_compliance.STANDARD_NUMBER_DESCRIPTION.str.contains( 'injur', regex=True, flags=re.IGNORECASE ) | non_compliance.NARRATIVE.str.contains( POTENTIAL_INJURY_KEYWORDS, regex=True, flags=re.IGNORECASE ) ], non_compliance[ non_compliance.NARRATIVE.fillna('').str.contains( POTENTIAL_INJURY_KEYWORDS_HIGH, regex=True, flags=re.IGNORECASE ) ].query( 'STANDARD_RISK_LEVEL == "High" | STANDARD_RISK_LEVEL == "Medium High"' ), non_compliance[ non_compliance.NARRATIVE.astype(str).str.contains( 'brok|break(?!fast)', regex=True, flags=re.IGNORECASE ) ].query('STANDARD_RISK_LEVEL == "High"') ]) # + # Get all violations from inspections # where a potential injury occurred. potential_injury_violations_and_related = non_compliance[ non_compliance.ACTIVITY_ID.isin( potential_injury_violations.ACTIVITY_ID.unique() ) ] # - records_for_manual_review = potential_injury_violations_and_related.merge( operations, on='OPERATION_ID' ).merge( inspections.drop( 'OPERATION_ID', axis=1 ), on='ACTIVITY_ID' ).assign( # add a column to signify if violation has a potential injury potential_injury = lambda x: x.STANDARD_NUMBER_DESCRIPTION.str.contains( 'injur', regex=True, flags=re.IGNORECASE ) | x.NARRATIVE.str.contains( POTENTIAL_INJURY_KEYWORDS, regex=True, flags=re.IGNORECASE ) | x.NARRATIVE.str.contains( POTENTIAL_INJURY_KEYWORDS_HIGH, regex=True, flags=re.IGNORECASE ) ).query( 'OPERATION_TYPE != "Child Placing Agency" & OPERATION_TYPE != "General Residential Operation"' ).where( # Only take 2016-02-01 to 2018-02-01 lambda x: pd.to_datetime(x.ACTIVITY_DATE) >= pd.to_datetime('2016-02-01') ) # This code below analyzes day cares and inspections with violations that were manually determined to have involved injuries. 
# # The `dat_injure` dataframe has a row for every injury detail and related violation for every operation. Because of this, records are deduplicated by `operation_id` when counting day cares.
notebooks/manual_injury_analysis.ipynb
def costly_compute(data, column_index=0):
    """Simulate an expensive computation.

    Sleeps for five seconds — a stand-in for real heavy number crunching —
    then returns the slice of *data* selected by *column_index*.
    """
    time.sleep(5)
    selected = data[column_index]
    return selected
def costly_compute_cached(data, column_index=0):
    """Simulate an expensive computation.

    Same five-second "work" as ``costly_compute`` above; defined separately
    so it can be wrapped by ``joblib.Memory.cache`` without touching the
    original function.
    """
    time.sleep(5)
    selected_column = data[column_index]
    return selected_column
#
#

# +
def _costly_compute_cached(data, column):
    """Module-level expensive computation, cacheable because it is a free
    function (no ``self``), unlike a bound method."""
    time.sleep(5)
    return data[column]


class Algorithm(object):
    """A class which is using the previous function."""

    def __init__(self, column=0):
        # Column of the input array that ``transform`` will extract.
        self.column = column

    def transform(self, data):
        # Wrap the free function with the cache at call time; the heavy
        # lifting (and therefore the caching) happens outside the class.
        cached_call = memory.cache(_costly_compute_cached)
        return cached_call(data, self.column)


transformer = Algorithm()

start = time.time()
data_trans = transformer.transform(data)
end = time.time()

print('\nThe function took {:.2f} s to compute.'.format(end - start))
print('\nThe transformed data are:\n {}'.format(data_trans))

# +
start = time.time()
data_trans = transformer.transform(data)
end = time.time()

print('\nThe function took {:.2f} s to compute.'.format(end - start))
print('\nThe transformed data are:\n {}'.format(data_trans))
# -

# As expected, the second call to the ``transform`` method load the results
# which have been cached.
#
#
# ## Clean up cache directory
#
#

memory.clear(warn=False)
python/memory_basic_usage.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # <div align="center">Introduction to Tensorflow</div>
# ---------------------------------------------------------------------
#
# You can find me on GitHub:
# > ###### [ GitHub](https://github.com/lev1khachatryan)
#
#
# Tensorflow is an open source numerical computing library for implementing production-ready machine learning models. It was originally developed by the Google Brain team. Since its release it has been steadily taking over other deep learning libraries like Theano or Caffe.
#
# Several high level user-friendly deep learning libraries like Keras, TfLearn have been built on top of tensorflow already. Its flexible architecture allows users to create and deploy machine learning and deep learning models on CPU, GPU, distributed machines and even mobile devices. It also supports interfaces for many programming languages including Python and C++.
#
# You can use Tensorflow Playground to see an implementation of a neural network in playground for practical experience. Google is using Tensorflow for search ranking, computer vision (Inception model), speech recognition, Youtube recommendations, machine translation for Google translate and in many other areas.
#
# In this notebook, we'll explore tensorflow and work on a regression problem to predict Airbnb rental listing prices from Boston Airbnb Open Data. We'll learn about basic concepts of tensorflow like ***Tensors*** and ***computational graph***, learn how to execute simple programs and implement a linear regression model from scratch first. Then we will learn how to use the high level estimator API for quickly building and testing models on the Airbnb data.
#
# ## Tensorflow fundamentals
#
# Tensorflow provides multiple API’s at different levels of abstraction.
The high level API’s like Keras, tf.estimator are built on top of the tensorflow core functionalities. Tensorflow estimators have been integrated with the core Tensorflow like Keras. # # For users who just want to use the common models, Tensorflow provides pre-made estimators or “Canned Estimators” which refer to implementations of common machine learning models. We will use the canned estimators in this tutorial to predict airbnb rental prices with linear regression and learn how to use the estimator API. However, before using estimators we’ll go through the basic building blocks in tensorflow. # # We can see how different parts fit into tensorflow architecture in this diagram: # # <img src='assets/20190918/1.png'> # # # ## Tensors and Computational Graphs # # Tensors are the central unit of data in tensorflow. Tensors are like numpy arrays, we can conceptually think of them as the n-dimensional abstraction of matrices. A zero-dimensional tensor is a scalar or a constant. A 1-dimensional tensor is a list or vector. A 2-D tensor is same as a n x m matrix where n = rows and m = columns. Above that we can just say n-dimensional tensors. # These tensors are passed to operations that perform computations on them. An ***operation*** is commonly known as an ***op*** . Operations take zero or more tensors as inputs, performs computation with them and outputs zero or more tensors. For example, an operation like ***tf.add*** may take two inputs 3 and 5 and output their summation 8. # The tensors and the operations are connected to each other in a computational graph. A computational graph is defined by considering the operations as nodes and the tensors as edges. The operations are mathematical operations that are done on data and the data is passed to the operations with the tensors. # We can visualize a computational graph like this. 
#
# <img src='assets/20190918/2.png'>
#
# Here we have two input tensors that pass the constants 5 and 3 to the add operation and the operation outputs 8.
#
#
# ## Constants and Running a Session :
#

import tensorflow as tf
print(tf.__version__)

# We will define two constant tensors a and b with tf.constant with constants 5 and 3 and add them up with tf.add as shown in the computational graph.

# Two constant nodes plus an add op; note that this only BUILDS the graph.
a = tf.constant(5, name="a")
b = tf.constant(3, name="b")
result = tf.add(a, b, name='add_a_b')
result

# Unfortunately enough, our code has not produced the expected output. We can think of tensorflow core programs as having two distinct sections: first we have to define a computational graph that specifies the computations we want to do, then we have to run the code to get our actual results. We have defined our computational graph in this case, but we have not run the graph yet.
# To evaluate result and get the output we have to run the code under a ***session***. A session takes a computational graph or part of a graph and executes it. It also holds the intermediate values and the results of performing the computation. We can create an instance of a session object from ***tf.Session class***.

# Executing the graph inside a session yields the concrete value (8).
sess = tf.Session()
sess.run(result)

#
#
# ## Variables and Placeholders
#
# Since in machine learning we want to update the paramaters of the models when training, simply using constants whose values don't change is not enough; we need some mechanism to add trainable parameters to the computational graph. In Tensorflow we accomplish it using ***variables***. Variables require us to specify an ***initial value*** and the ***data type***. We create variables with the ***tf.Variable*** op.
# A graph can also be fed ***external inputs*** using ***placeholders*** so that we can feed an arbitrary number of inputs from the training sets to the model. Placeholders act like tensor objects that do not have their values specified and are created using the op ***tf.placeholder***.
# ***Placeholder values must be fed when we run them***. We can use ***sess.run method's feed_dict argument*** to feed the values to the placeholders. We have to specify the shape and datatype of the placeholder when we add them to the graph. A shape of None indicates that the placeholder can get any arbitrary input.
# ***feed_dict*** is just a dictionary that maps graph elements like variables, constants or placeholders to values. We use it to overwrite the existing values of tensors. We can also use it to change variable values when running them.

# A trainable variable (initial value 3) and an unconstrained placeholder.
c = tf.Variable(3, dtype=tf.float32)
d = tf.placeholder(dtype=tf.float32, shape=None)

# Unlike constants, variables are not initialized when we call ***tf.Variable***. We'll have to run a special operation called ***tf.global_variables_initializer*** to initialize the variables by a session.

sess.run(tf.global_variables_initializer())
# feed_dict overrides the variable's stored value for this run only.
print(sess.run(c, feed_dict={c: 14}))
print(sess.run(d, feed_dict={d: [1, 1, 3, 5]}))

# The variable c was initialized with 3, but we have changed it to 14 with the feed_dict parameter. The placeholders didn't have any specified value when we initialized it, but we fed it a list of values when we ran the code.
#
#
# ## Tensorboard
#
# Tensorboard is a visualization tool that comes packaged with tensorflow. It’s very useful to visualize large scale machine learning models to debug them and understand what’s going on under the hood. With tensorboard we can also track our loss metrics and other values to see how they are changing over training steps.
#
# For using tensorboard, we can save our graphs by writing summaries about them with summary writers. Summaries are like condensed information about models. Tensorboard creates visualizations out of this information.
#
# We have to pass the directory name where our graph log files will be saved and the computational graph we want to save into the summary writer object when calling it.
# ***sess.graph*** contains the default computational graph for this session and writer writes it into the directory provided in logdir parameter.
#
#

writer = tf.summary.FileWriter(logdir="../first_graph", graph=sess.graph)
writer.close()

# +
# tensorboard --logdir=first_graph
# -

# Tensorboard’s default port is 6006. So if you go to http://localhost:6006/#graphs tensorboard will be there.
#
#
#
# ## Linear Regression From Scratch :
#
# In a simple dataset with only one feature and one output to predict, the form of the equation looks like
# <img src='assets/20190918/4.png'>
# We can see that for different values of input X we can get the predictions by using the equation. We can also visualize it.
# <img src='assets/20190918/5.png'>
# We try to find out the best possible value for the weight and bias parameters using optimization technique against a loss function in order to fit a line through the data using the weight and the bias parameter when it comes to single feature. Loss functions tell us how good our predicted value is compared to the actual output. If we have n features the general equation is like this.
# <img src='assets/20190918/6.png'>
#
#
#
# ## Introducing Tensorflow Estimators
#
# As mentioned earlier, estimators is a high level API integrated with Tensorflow that allows us to work with pre-implemented models and provides tools for quickly creating new models as need by customizing them. The interface is loosely scikit-learn inspired and follows a train-evaluate-predict loop similar to scikit-learn. Estimators is the base class, canned estimators or pre-implemented models are the sub-class. We are using the canned estimators in this notebook.
# Estimators deal with all the details of creating computational graphs, initializing variables, training the model and saving checkpoint and logging files for Tensorboard behind the scene.
# But to work with the estimators, we’ve to become comfortable with two new concepts, ***feature columns*** and ***input functions***. Input functions are used for passing input data to the model for training and evaluation. Feature columns are specifications for how the model should interpret the input data. We will cover the feature columns and input function in detail in the later sections.
#
# Our general workflow will be to follow these steps :
#
# * Loading the libraries and dataset.
#
#
# * Data proprocessing.
#
#
# * Defining the feature columns.
#
#
# * Building input function.
#
#
# * Model instantiation, training and evaluation.
#
#
# * Generating prediction.
#
#
# * Visualizing the model and the loss metrics using Tensorboard.

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf

plt.style.use("seaborn-colorblind")
# %matplotlib inline

# only displays the most important warnings
tf.logging.set_verbosity(tf.logging.FATAL)

# Columns kept from the raw listings file; 'price' is the regression target.
used_features = ['property_type', 'room_type', 'bathrooms', 'bedrooms', 'beds', 'bed_type',
                 'accommodates', 'host_total_listings_count', 'number_of_reviews',
                 'review_scores_value', 'neighbourhood_cleansed', 'cleaning_fee',
                 'minimum_nights', 'security_deposit', 'host_is_superhost',
                 'instant_bookable', 'price']

boston = pd.read_csv('inputs/boston-airbnb-open-data/listings.csv', usecols=used_features)
print(boston.shape)
boston.head(2)

# +
# Monetary columns arrive as strings like "$1,200.00": strip the symbols,
# cast to float, and impute missing values with the column median.
for feature in ["cleaning_fee", "security_deposit", "price"]:
    boston[feature] = boston[feature].map(lambda x: x.replace("$", '').replace(",", ''),
                                          na_action='ignore')
    boston[feature] = boston[feature].astype(float)
    boston[feature].fillna(boston[feature].median(), inplace=True)

# Numeric columns with occasional gaps: median imputation as well.
for feature in ["bathrooms", "bedrooms", "beds", "review_scores_value"]:
    boston[feature].fillna(boston[feature].median(), inplace=True)

# 'Apartment' is used as the fallback category for missing property types.
boston['property_type'].fillna('Apartment', inplace=True)
# -

boston["price"].plot(kind='hist', grid=True)
plt.title("Price histogram before subsetting and log-transformation");

# Drop extreme listings and log-transform the target to reduce skew.
boston = boston[(boston["price"] > 50) & (boston["price"] < 500)]
target = np.log(boston.price)
target.hist()
plt.title("Price distribution after the subsetting and log-transformation");

features = boston.drop('price', axis=1)
features.head()

from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(
    features, target, test_size=0.33, random_state=42)

# Get all the numeric feature names
numeric_columns = ['host_total_listings_count', 'accommodates', 'bathrooms', 'bedrooms', 'beds',
                   'security_deposit', 'cleaning_fee', 'minimum_nights', 'number_of_reviews',
                   'review_scores_value']

# Get all the categorical feature names that contains strings
categorical_columns = ['host_is_superhost', 'neighbourhood_cleansed', 'property_type',
                       'room_type', 'bed_type', 'instant_bookable']

numeric_features = [tf.feature_column.numeric_column(key=column) for column in numeric_columns]
print(numeric_features[0])

# Stand-alone illustration of a vocabulary-based categorical column.
Property_type = ["apartment", "condo", "apartment", "villa", "house", "house"]
gender_column = tf.feature_column.categorical_column_with_vocabulary_list(
    key="Property_type",
    vocabulary_list=["house", "apartment", "condo", "villa"])

categorical_features = [
    tf.feature_column.categorical_column_with_vocabulary_list(
        key=column, vocabulary_list=features[column].unique())
    for column in categorical_columns]
print(categorical_features[3])

linear_features = numeric_features + categorical_features

# Create training input function
training_input_fn = tf.estimator.inputs.pandas_input_fn(
    x=X_train, y=y_train, batch_size=32, shuffle=True, num_epochs=None)

# create testing input function
eval_input_fn = tf.estimator.inputs.pandas_input_fn(
    x=X_test, y=y_test, batch_size=32, shuffle=False, num_epochs=1)

linear_regressor = tf.estimator.LinearRegressor(feature_columns=linear_features,
                                                model_dir="linear_regressor")
linear_regressor.train(input_fn=training_input_fn, steps=2000)

# BUG FIX: `evaluate` returns a dict of metrics; the original discarded it and
# then printed an undefined name `loss` (NameError). Capture the dict and read
# the 'loss' entry instead.
eval_metrics = linear_regressor.evaluate(input_fn=eval_input_fn)
loss = eval_metrics['loss']
print("Loss is " + str(loss))

pred = list(linear_regressor.predict(input_fn=eval_input_fn))
pred = [p['predictions'][0] for p in pred]
# Undo the log-transform to get prices back on the original dollar scale.
prices = np.exp(pred)
print(prices)

#
#
Lectures/2019.09.18. Part 1 - Intro to Tensorflow.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import os
import sys
import tarfile
import urllib
import pdb

import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from keras.optimizers import RMSprop, SGD
from keras.models import load_model
import keras.backend as K
import data_preprocessing, data_generator, visual, label_parser, data_postprocessing, evaluator
import tensorflow as tf

# os.environ["CUDA_VISIBLE_DEVICES"]="1"
config = tf.ConfigProto()  # to limit TensorFlow resources
config.gpu_options.allow_growth = True
tf.Session(config=config)

# %matplotlib inline
# %load_ext autoreload
# %autoreload 2
# -

# Load the ground-truth annotations for the test set.
TEST_PATH = '../power/'
test_ann = label_parser.parse_annotation_json(TEST_PATH, 'via_region_data.json')
print("len(test_ann) = ", len(test_ann))

# Generator configuration: one batch holding the whole test set, no augmentation.
size = 300
config_test = {'grid_w': 9,
               'grid_h': 9,
               'img_w': size,
               'img_h': size,
               'is_augment': False,
               'batch_size': len(test_ann)}
GRID_W = config_test['grid_w']
GRID_H = config_test['grid_h']
input_w = config_test['img_w']
input_h = config_test['img_h']

testGenerator = data_generator.BatchGenerator(test_ann, config_test)

import tensorflow as tf


def YOLO_loss(y_true, y_pred):
    """YOLO-style loss: full (p,x,y,w,h) error on object cells, confidence-only
    error (weighted 5x) on empty cells, each normalised by its cell count."""
    mask_obj = K.expand_dims(y_true[..., 0], axis=-1)
    mask_no_obj = 1 - mask_obj
    loss_obj = K.sum(K.square(y_true - y_pred) * mask_obj)  # Loss_xywhp for object
    # only confidence
    loss_no_obj = K.sum(K.square(y_true[..., 0] - y_pred[..., 0]) * mask_no_obj[..., 0])
    N_obj = tf.reduce_sum(tf.to_float(mask_obj > 0.0))
    N_no_obj = tf.reduce_sum(tf.to_float(mask_no_obj > 0.0))
    # because only 1 from 5 values (p,x,y,w,h)
    return loss_obj / (N_obj + 1e-06) + 5 * loss_no_obj / (N_no_obj + 1e-06)


from keras.utils.generic_utils import get_custom_objects
from keras.models import load_model

get_custom_objects().update({"YOLO_loss": YOLO_loss})
model = load_model("YOLO_Udacity", custom_objects={'loss': YOLO_loss})


def add_chunk(indices, y_GT, y_pred):
    """Predict one chunk and append ground truth / predictions in place."""
    x_chunk, y_chunk = testGenerator.get_XY(indices)
    y_GT += [y_chunk]
    y_pred += [model.predict(x_chunk)]


y_GT = []
y_pred = []

# we add to 'y_pred' data by chunk because of memory limitation
chunk_size = 50  # we get 'chunk_size' images and forward pass the model on them
N = len(test_ann)  # images number for to evaluaet mAP
num_of_chunk = int(N / chunk_size)

for chunk_idx in range(num_of_chunk):
    lo, hi = chunk_idx * chunk_size, (chunk_idx + 1) * chunk_size
    add_chunk(np.array(range(lo, hi)), y_GT, y_pred)
    print("predicted images (" + str(lo) + " - " + str(hi) + ")")

# Trailing partial chunk, if N is not a multiple of chunk_size.
if num_of_chunk * chunk_size < N:
    add_chunk(np.array(range(num_of_chunk * chunk_size, N)), y_GT, y_pred)
    print("predicted images (" + str(num_of_chunk * chunk_size) + " - " + str(N) + ")")

# merge chunks to shape : y[i] = output
y_pred_reshaped = np.zeros((N, GRID_W, GRID_H, 4 + 1))
y_GT_reshaped = np.zeros((N, GRID_W, GRID_H, 4 + 1))
y_pred = np.array(y_pred)
y_GT = np.array(y_GT)

last_ind = 0
for i in range(len(y_pred)):
    size = len(y_pred[i])
    y_pred_reshaped[last_ind:last_ind + size] = y_pred[i][:]
    y_GT_reshaped[last_ind:last_ind + size] = y_GT[i][:]
    last_ind += size

y_pred_reshaped.shape

# Decode grid outputs into rectangles; confidence threshold 0 keeps everything.
decoder = data_postprocessing.Decoder(GRID_W, GRID_H, input_w, input_h, 0.0)
pred_rects_decoded, scores = decoder.decode(y_pred_reshaped)
GT_rects_decoded = decoder.decode_GT(y_GT_reshaped)
scores[3]

# +
# Pack predictions and ground truth into the dict layout the evaluator expects.
pred_boxes = {}
gt_boxes = {}
for i in range(len(scores)):
    pred_boxes[str(i)] = {'boxes': pred_rects_decoded[i],
                          'scores': scores[i]}
    gt_boxes[str(i)] = GT_rects_decoded[i]

res = evaluator.get_avg_precision_at_iou(gt_boxes, pred_boxes, 0.5)
ap = res['avg_prec']
prec = res['precisions']
recall = res['recalls']
print("ap = %.3f" % ap)
# -

indices = np.array(range(20))
x_batch, y_batch = testGenerator.get_XY(indices)
# Higher threshold (0.5) for visualisation only.
decoder = data_postprocessing.Decoder(GRID_W, GRID_H, input_w, input_h, 0.5)
GT_rects_to_show = decoder.decode_GT(y_batch)
pred_rects_to_show, scores_to_show = decoder.decode(y_pred_reshaped[indices])

# +
img_num = 3
test_image = x_batch[img_num] * 255.0
GT_rects = GT_rects_to_show[img_num]
pred_rects = pred_rects_to_show[img_num]

plt.rcParams['figure.figsize'] = 10, 10
# plt.imshow(visual.draw_image_pred_GT(test_image, rects_pred = pred_rects) )
plt.imshow(visual.draw_image_pred_GT(test_image, GT_rects, pred_rects))
# plt.imshow(visual.draw_image_pred_GT(test_image, GT_rects) )

# +
import matplotlib.pyplot as plt

plt.rcParams['figure.figsize'] = 20, 15
plt.rcParams['lines.markersize'] = 30
plt.rcParams['lines.linewidth'] = 5

fig, ax = plt.subplots()
for tick in ax.xaxis.get_major_ticks():
    tick.label.set_fontsize(26)
for tick in ax.yaxis.get_major_ticks():
    tick.label.set_fontsize(26)

# summarize history for accuracy
plt.plot(recall, prec)
plt.title('model accuracy', fontsize=26)
plt.ylabel('precision', fontsize=26)
plt.xlabel('recall', fontsize=26)
plt.show()
# -
evaluation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# %matplotlib inline
# %config InlineBackend.figure_format = 'svg'

from matplotlib import pyplot as plt
from ipywidgets import interact, interactive, fixed, interact_manual
import ipywidgets as widgets
import numpy as np

# Sample points along a full turn of the unit circle.
N = 100
R = 10
t = np.linspace(0, 2*np.pi, N)


def P(z):
    """The polynomial whose image we visualise."""
    return z**2 - z + 10+10j


@interact(R=(0, 10, 0.2), θ=(0, 2*np.pi, 0.1))
def fta(R, θ):
    """Plot the circle |z| = R (left) and its image under P (right),
    with the point at angle θ and its image highlighted in red."""
    fig, (ax_src, ax_img) = plt.subplots(1, 2, figsize=(12, 6))

    # Same cosmetics for both panels, only the scale differs.
    for ax, lim in ((ax_src, 10), (ax_img, 100)):
        ax.grid()
        ax.set_xlim(-lim, lim)
        ax.set_ylim(-lim, lim)

    circle = R * np.exp(1j*t)
    image = P(circle)
    ax_src.plot(circle.real, circle.imag)
    ax_img.plot(image.real, image.imag)

    marker = R * np.exp(1j*θ)
    marker_img = P(marker)
    ax_src.plot(marker.real, marker.imag, 'or')
    ax_img.plot(marker_img.real, marker_img.imag, 'or')
FTA.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# +
try:
    import pickle5 as pickle
except:
    # !pip install pickle5
    import pickle5 as pickle

import os
import sys

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, GlobalMaxPooling1D
from keras.layers import Conv1D, MaxPooling1D, Embedding, Flatten
from keras.models import Model
from sklearn.metrics import roc_auc_score, roc_curve, auc
from numpy import random
from keras.layers import LSTM, Bidirectional, GlobalMaxPool1D, Dropout
from keras.optimizers import Adam
from keras.utils.vis_utils import plot_model

# Make the shared project helpers importable from Drive.
sys.path.insert(0, '/content/drive/MyDrive/ML_Data/')
import functions as f
# -


# +
def load_data(randomize=False):
    """Load the training and independent test DataFrames.

    Tries the mounted Google Drive path first and falls back to a local
    Windows path; optionally shuffles both frames together.
    """
    def _read_frame(drive_path, local_path):
        # Prefer the Drive copy; fall back to the local checkout.
        try:
            with open(drive_path, "rb") as handle:
                return pickle.load(handle)
        except:
            return pd.read_pickle(local_path)

    df_train = _read_frame("/content/drive/MyDrive/ML_Data/hyppi-train.pkl",
                           "C:/Users/nik00/py/proj/hyppi-train.pkl")
    df_test = _read_frame("/content/drive/MyDrive/ML_Data/hyppi-independent.pkl",
                          "C:/Users/nik00/py/proj/hyppi-independent.pkl")

    if randomize:
        return shuff_together(df_train, df_test)
    return df_train, df_test


df_train, df_test = load_data()
print('The data used will be:')
df_train[['Joined']]
# -

# +
# Inspect the sequence-length distribution to pick a sensible padding length.
rows = df_train['Joined'].shape[0]
lengths = sorted(len(s) for s in df_train['Joined'])
print("Median length of Joined sequence is", lengths[len(lengths)//2])
_ = f.sns.displot(lengths)
_ = plt.title("Most Joined sequences seem to be less than 2000 in length")
# -

# +
# Tokenise / pad the joined sequences (vocab cap 1000, length cap 1500).
data_1D_join, data_test_1D_join, num_words_1D_join, MAX_SEQUENCE_LENGTH_1D, MAX_VOCAB_SIZE_1D = \
    f.get_seq_data_join(1000, 1500, df_train, df_test, pad='pre', show=True)
# -

# +
# Hyper-parameters for the 1-D CNN.
EMBEDDING_DIM_1D = 5
VALIDATION_SPLIT = 0.2
BATCH_SIZE = 128
EPOCHS = 10
M_1D = 10
# -

# +
# Small 1-D CNN over the token sequence ending in a sigmoid classifier.
ip = Input(shape=(MAX_SEQUENCE_LENGTH_1D,))
net = Embedding(num_words_1D_join, EMBEDDING_DIM_1D,
                input_length=MAX_SEQUENCE_LENGTH_1D, trainable=True)(ip)
net = Conv1D(32, 3, activation='relu')(net)
net = Dropout(0.2)(net)
net = MaxPooling1D(3)(net)
net = f.Flatten()(net)
net = Dropout(0.2)(net)
net = Dense(128, activation='relu')(net)
net = Dropout(0.2)(net)
output = Dense(1, activation='sigmoid')(net)

model1D_CNN_join = Model(ip, output)
model1D_CNN_join.compile(loss='binary_crossentropy',
                         optimizer='adam',
                         metrics=['accuracy'])
# plot_model(model1D_CNN_join, to_file='model_plot.png', show_shapes=True, show_layer_names=False)

model1D_CNN_join.fit(data_1D_join, df_train['label'].values,
                     epochs=EPOCHS,
                     validation_data=(data_test_1D_join, df_test['label'].values))

print(roc_auc_score(df_test['label'].values,
                    model1D_CNN_join.predict(data_test_1D_join)))
# -

# +

# +

# +
2. CNN/1D_join.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Tutorial 2: Working with Approximate Numbers # # Welcome to TenSEAL's third tutorial, where we will show how to use the library for operations on encrypted real numbers. We will also present another use case for encrypted evaluations over convolutions. # # This tutorial is inspired by the "Introduction to CKKS" talk at [Microsoft Private AI Bootcamp](https://www.microsoft.com/en-us/research/event/private-ai-bootcamp). # # We recommend checking out the other tutorials first: # - ['Tutorial 0 - Getting Started'](https://github.com/OpenMined/TenSEAL/blob/master/tutorials/Tutorial%200%20-%20Getting%20Started.ipynb). # - ['Tutorial 1: Training and Evaluation of Logistic Regression on Encrypted Data'](https://github.com/OpenMined/TenSEAL/blob/master/tutorials/Tutorial%201%20-%20Training%20and%20Evaluation%20of%20Logistic%20Regression%20on%20Encrypted%20Data.ipynb). # # Authors: # - <NAME> - Twitter: [@y0uben11](https://twitter.com/y0uben11) # - <NAME> - Twitter: [@bcebere](https://twitter.com/bcebere) # ## Introduction # # # # TenSEAL is a library for doing homomorphic encryption operations on tensors. It's built on top of [Microsoft SEAL](https://github.com/Microsoft/SEAL), a C++ library implementing the BFV and CKKS homomorphic encryption schemes. # # # In this tutorial, we will briefly introduce and explain the CKKS scheme, highlighting its advantages. For more in-depth explanations, be sure to check the excellent "CKKS explained" series: # # - ['Part 1, Vanilla Encoding and Decoding'](https://blog.openmined.org/ckks-explained-part-1-simple-encoding-and-decoding/). # - ['Part 2, Full Encoding and Decoding'](https://blog.openmined.org/ckks-explained-part-2-ckks-encoding-and-decoding/). 
# - ['Part 3, Encryption and Decryption'](https://blog.openmined.org/ckks-explained-part-3-encryption-and-decryption/). # - ['Part 4, Multiplication and Relinearization'](https://blog.openmined.org/ckks-explained-part-4-multiplication-and-relinearization/). # - ['Part 5, Rescaling'](https://blog.openmined.org/ckks-explained-part-5-rescaling/). # # # ## Theory: CKKS scheme # # __Definition__ : Cheon-Kim-Kim-Song(CKKS) is a scheme for Leveled Homomorphic Encryption that supports approximate arithmetics over complex numbers (hence, real numbers). # # # # A high-level overview of the CKKS scheme is presented in the following diagram: # # <img src="https://blog.openmined.org/content/images/2020/08/Cryptotree_diagrams-2.svg" alt="ckks-high-level" width="600"/> # # ## Theory: CKKS Parameters # # #### The scaling factor # The first step of the CKKS scheme is encoding a vector of real numbers into a plaintext polynomial. # # # The scaling factor defines the encoding precision for the binary representation of the number. Intuitively, we are talking about binary precision as pictured below: # # # # <img src="assets/floating_point.png" alt="ckks-high-level" width="400"/> # # #### The polynomial modulus degree(poly_modulus_degree) # # The polynomial modulus($N$ in the diagram) directly affects: # - The number of coefficients in plaintext polynomials # - The size of ciphertext elements # - The computational performance of the scheme (bigger is worse) # - The security level (bigger is better). # # In TenSEAL, as in Microsoft SEAL the degree of the polynomial modulus must be a power of 2 (e.g. $1024$, $2048$, $4096$, $8192$, $16384$, or $32768$). # # #### The coefficient modulus sizes # # The last parameter required for the scheme is a list of binary sizes. # Using this list, SEAL will generate a list of primes of those binary sizes, called the coefficient modulus($q$ in the diagram). 
# # The coefficient modulus directly affects: # - The size of ciphertext elements # - The length of the list indicates the level of the scheme(or the number of encrypted multiplications supported). # - The security level (bigger is worse). # # In TenSEAL, as in Microsoft SEAL each of the prime numbers in the coefficient modulus must be at most 60 bits and must be congruent to 1 modulo 2*poly_modulus_degree. # ## Theory: CKKS Keys # # #### The secret key # The secret key is used for decryption. DO NOT SHARE IT. # # #### The public encryption key # The key is used for encryption in the public key encryption setup. # # #### The relinearization keys # Every new ciphertext has a size of 2, and multiplying ciphertexts of sizes $K$ and $L$ results in a ciphertext of size $K+L-1$. Unfortunately, this growth in size slows down further multiplications and increases noise growth. # # Relinearization is the operation that reduces the size of ciphertexts back to 2. This operation requires another type of public keys, the relinearization keys created by the secret key owner. # # The operation is needed for encrypted multiplications. The plain multiplication is fundamentally different from normal multiplication and does not result in ciphertext size growth. # # #### The Galois Keys(optional) # Galois keys are another type of public keys needed to perform encrypted vector rotation operations on batched ciphertexts. # # One use case for vector rotations is summing the batched vector that is encrypted. # ## Theory: CKKS internal operations # These operations are automatically executed by TenSEAL, unless the user opts-out. # # #### Relinearization # The operation is executed automatically by TenSEAL after each encrypted multiplication. # # The operations relinearizes a ciphertext, reducing its size down to $2$. If the size of encrypted ciphertext is $K+1$, the given relinearization keys need to have the size at least $K-1$. 
# # #### Rescaling # The operation is executed automatically by TenSEAL after each encrypted or plain multiplication. # # The approximation error exponentially grows up on the number of homomorphic multiplications. # To overcome this problem, most HE schemes usually use a modulus-switching technique. In the case of CKKS, the modulus-switching procedure is called rescaling. Applying the rescaling algorithm after a homomomorphic multiplication, the approximation error grows linearly, not exponentially. # # Given a ciphertext encrypted modulo $q_1...q_k$, this function switches the modulus down to $q_1...q_{k-1}$, scales the message down accordingly. # # This step consumes one prime from the coefficient modulus. And when you consume all of them, you won't be able to perform more multiplications. # ## Setup # # All modules are imported here. Make sure everything is installed by running the cell below. # + import torch from torchvision import transforms from random import randint import pickle from PIL import Image import numpy as np from matplotlib.pyplot import imshow from typing import Dict import tenseal as ts # - # ## TenSEAL CKKS Context # # The first step is to create a CKKS TenSEAL context. # # One potential example is # ``` # ctx = ts.context(ts.SCHEME_TYPE.CKKS, 8192, coeff_mod_bit_sizes=[60, 40, 40, 60]) # ``` # which specifies: # - scheme type: ts.SCHEME_TYPE.CKKS # - poly_modulus_degree: $8192$. # - coeff_mod_bit_sizes: The coefficient modulus sizes, here [60, 40, 40, 60]. This means that the coefficient modulus will contain 4 primes of 60 bits, 40 bits, 40 bits, and 60 bits. # - global_scale: the scaling factor, here set to $2^{40}$. # - optionally, TenSEAL supports switching between the public key and symmetric key encryption. By default, we will use public-key encryption. # # # By default, the relinearization keys are created, with automatic relinearization and rescaling enabled by default. 
# The user can create the Galois keys by calling generate_galois_keys.

# +
def create_context():
    """Create the CKKS TenSEAL context used throughout this tutorial.

    Parameters baked in:
    - scheme: CKKS
    - poly_modulus_degree: 8192
    - coeff_mod_bit_sizes: [60, 40, 40, 60] (two 60-bit outer primes, two
      40-bit middle primes -> supports 2 rescaling levels)
    - global_scale: 2 ** 40 (matches the 40-bit middle primes)

    Galois keys are generated so that rotation-based operations work later on.
    """
    # The original cell named this function `context` and then immediately
    # rebound the name with its own result (`context = context()`); renamed to
    # avoid that shadowing while keeping the module-level `context` object.
    ctx = ts.context(ts.SCHEME_TYPE.CKKS, 8192, coeff_mod_bit_sizes=[60, 40, 40, 60])
    ctx.global_scale = pow(2, 40)
    ctx.generate_galois_keys()
    return ctx

context = create_context()
# -

# ## Plain tensor creation
#
# PlainTensor class works as a translation layer from common tensor representations to the encrypted forms offered by TenSEAL. It is the first step required for creating an encrypted tensor using TenSEAL.
#
# Observation: This translation is also automatically done by the encrypted tensor constructors, and you can skip it.
#
# <img src="assets/plaintensor_indepth.png" align="center" style="display: block; margin: auto;" />
#

# +
# A 2x2 plain tensor built from a flat list plus an explicit shape.
plain1 = ts.plain_tensor([1,2,3,4], [2,2])
print(" First tensor: Shape = {} Data = {}".format(plain1.shape, plain1.tolist()))

# The shape can also be inferred from a NumPy array.
plain2 = ts.plain_tensor(np.array([5,6,7,8]).reshape(2,2))
print(" Second tensor: Shape = {} Data = {}".format(plain2.shape, plain2.tolist()))
# -

# ## Theory: Encrypted tensor creation
#
# CKKS requires two operations for encrypting a new message:
#
# ### CKKS Encoding and Decoding
# The operation encodes vectors of complex or real numbers into plaintext polynomials to be encrypted and computed using the CKKS scheme.
#
# If the polynomial modulus degree is $N$, then the encoding converts vectors of N/2 complex numbers into plaintext elements. Homomorphic operations performed on such encrypted vectors are applied coefficient (slot-)wise, enabling powerful SIMD functionality for computations that are vectorizable. (also known as batching)
#
#
# The following diagram shows the detailed encoding-decoding flow(credits to Yongsoo Song, Introduction to CKKS, [Microsoft Private AI Bootcamp])
#
# <img src="assets/ckks_encoding.png" alt="ckks-high-level" width="600"/>
#
# ### CKKS Encryption and Decryption
# This operation converts a plaintext polynomial to a ciphertext.
# # # The following diagram shows the detailed encryption-decryption flow(credits to Yongsoo Song, Introduction to CKKS, [Microsoft Private AI Bootcamp]) # <img src="assets/ckks_encryption.png" alt="ckks-high-level" width="600"/> # ## Practice: Encrypted tensor creation # # For creating a new encrypted tensor, TenSEAL executes the encoding and encryption automatically. # This applies to both CKKS and BFV schemes. # # The encrypted tensor encrypts a PlainTensor and stores the ciphertexts and shapes internally. # # We have a few variants of encrypted tensors: # - **BFVVector** - for 1D integer arrays. # - **CKKSVector** - for 1D float arrays. This version has a smaller memory footprint, but it is less flexible. # - **CKKSTensor** - for N-dimensional float arrays. This version supports tensorial operations on encrypted data, like reshaping or broadcasting. # # # <img src="assets/encrypted_tensor_relation.png" align="center" style="display: block; margin: auto;" /> # + encrypted_tensor1 = ts.ckks_tensor(context, plain1) encrypted_tensor2 = ts.ckks_tensor(context, plain2) print(" Shape = {}".format(encrypted_tensor1.shape)) print(" Encrypted Data = {}.".format(encrypted_tensor1)) encrypted_tensor_from_np = ts.ckks_tensor(context, np.array([5,6,7,8]).reshape([2,2])) print(" Shape = {}".format(encrypted_tensor_from_np.shape)) # - # ## Basic operations # # The following table enumerates the operations supported by CKKS tensors variants. 
# # | Operation | Description | # | --- | --- | # | negate | Negate an encrypted tensor | # | square | Compute the square of an encrypted tensor | # | power | Compute the power of an encrypted tensor | # | add | Addition between two encrypted tensors | # | add\_plain | Addition between an encrypted tensor and a plain tensor | # | sub | Subtraction between two encrypted tensors | # | sub\_plain | Subtraction between an encrypted tensor and a plain tensor | # | mul | Multiplication between two encrypted tensors | # | mul\_plain | Multiplication between an encrypted tensor and a plain tensor | # | dot | Dot product between two encrypted tensors | # | dot\_plain | Dot product between an encrypted tensor and a plain tensor | # | polyval | Polynomial evaluation with an encrypted tensor as variable | # | matmul | Multiplication between an encrypted vector and a plain matrix | # | matmul\_plain | Encrypted matrix multiplication with plain vector | # # # The CKKSVector variant contains the following additional operations: # # # | Operation | Description | # | --- | --- | # | conv2d\_im2col | Image Block to Columns | # def decrypt(enc): return enc.decrypt().tolist() # ### Addition of two encrypted tensors. result = encrypted_tensor1 + encrypted_tensor2 print("Plain equivalent: {} + {}\nDecrypted result: {}.".format(plain1.tolist(), plain2.tolist(), decrypt(result))) # ### Subtraction of two encrypted tensors. result = encrypted_tensor1 - encrypted_tensor2 print("Plain equivalent: {} - {}\nDecrypted result: {}.".format(plain1.tolist(), plain2.tolist(), decrypt(result))) # ### Multiplication of two encrypted tensors. 
#
# The following diagram shows the detailed flow for multiplication and relinearization(credits to <NAME>, Introduction to CKKS, [Microsoft Private AI Bootcamp])
#
#
# <img src="assets/ckks_mul.png" alt="ckks-high-level" width="600"/>

# Element-wise encrypted multiplication; TenSEAL relinearizes and rescales
# automatically afterwards.
result = encrypted_tensor1 * encrypted_tensor2
print("Plain equivalent: {} * {}\nDecrypted result: {}.".format(plain1.tolist(), plain2.tolist(), decrypt(result)))

# ### Multiplication with plain tensor

# +
# Plain multiplication does not grow the ciphertext size, but it still
# consumes one rescaling level.
plain = ts.plain_tensor([5,6,7,8], [2,2])
result = encrypted_tensor1 * plain
print("Plain equivalent: {} * {}\nDecrypted result: {}.".format(plain1.tolist(), plain.tolist(), decrypt(result)))
# -

# ### Negation

# +
result = -encrypted_tensor1
print("Plain equivalent: -{}\nDecrypted result: {}.".format(plain1.tolist(), decrypt(result)))
# -

# ### Power

result = encrypted_tensor1 ** 3
print("Plain equivalent: {} ^ 3\nDecrypted result: {}.".format(plain1.tolist(), decrypt(result)))

# ### Polynomial evaluation $1 + X^2 + X^3$

# +
# polyval takes coefficients in increasing degree order: [c0, c1, c2, c3].
result = encrypted_tensor1.polyval([1,0,1,1])
print("X = {}".format(plain1.tolist()))
print("1 + X^2 + X^3 = {}.".format(decrypt(result)))
# -

# ### Sigmoid approximation
# $\sigma(x) = 0.5 + 0.197 x - 0.004 x^3$
#
# Reference: ["Logistic regression over encrypted data from fully homomorphic encryption", <NAME> et al](https://eprint.iacr.org/2018/462.pdf)

# +
# Degree-3 polynomial approximation of sigmoid; coefficients again in
# increasing degree order (constant, x, x^2, x^3).
result = encrypted_tensor1.polyval([0.5, 0.197, 0, -0.004])
print("X = {}".format(plain1.tolist()))
# Fixed the printed formula: the evaluated polynomial is 0.5 + 0.197 X - 0.004 X^3
# (the original message said "0.004 x^X", which is not the computed expression).
print("0.5 + 0.197 X - 0.004 X^3 = {}.".format(decrypt(result)))
# -

# # Encrypted inference demo
#
# Now that we introduced the CKKS scheme let's see it in action.
#
# The next example contains a classification over the MNIST dataset using a single convolution and two fully connected layers with a square activation function.
#
# It illustrates one of the prominent use cases for homomorphic encryption, as depicted here.
#
# <img src="https://blog.openmined.org/content/images/2020/04/OM---CKKS-Graphic-v.01@2x.png" align="center" style="display: block; margin: auto;"/>
#
#
# Adapted from https://github.com/youben11/encrypted-evaluation

# ## Client Helpers

# +
# Create the TenSEAL security context
def create_ctx():
    """Helper for creating the CKKS context.
    CKKS params:
    - Polynomial degree: 8192.
    - Coefficient modulus size: [40, 21, 21, 21, 21, 21, 21, 40].
    - Scale: 2 ** 21.
    - The setup requires the Galois keys for evaluating the convolutions.
    """
    poly_mod_degree = 8192
    coeff_mod_bit_sizes = [40, 21, 21, 21, 21, 21, 21, 40]
    ctx = ts.context(ts.SCHEME_TYPE.CKKS, poly_mod_degree, -1, coeff_mod_bit_sizes)
    ctx.global_scale = 2 ** 21
    ctx.generate_galois_keys()
    return ctx


# Sample an image
def load_input():
    """Pick one of the bundled MNIST sample images at random, returning it
    normalized (mean=0.1307, std=0.3081) as a 28x28 nested list, together
    with the raw PIL image for display."""
    transform = transforms.Compose(
        [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
    )
    idx = randint(1, 6)
    img_name = "data/mnist-samples/img_{}.jpg".format(idx)
    print(img_name)
    img = Image.open(img_name)
    return transform(img).view(28, 28).tolist(), img


# Helper for encoding the image
def prepare_input(ctx, plain_input):
    """Encrypt a 28x28 image using the im2col layout expected by the conv
    layer (7x7 kernel, stride 3, which yields exactly 64 windows)."""
    enc_input, windows_nb = ts.im2col_encoding(ctx, plain_input, 7, 7, 3)
    assert windows_nb == 64
    return enc_input
# -

# ## Server Model
#
# - We are using a pretrained plain model, stored in "tutorials/parameters/ConvMNIST-0.1.pickle".

# Load a pretrained model and adapt the forward call for encrypted input
class ConvMNIST():
    """CNN for classifying MNIST data.
    Input should be an encoded 28x28 matrix representing the image.
    TenSEAL can be used for encoding `tenseal.im2col_encoding(ctx, input_matrix, 7, 7, 3)`
    The input should also be normalized with a mean=0.1307 and an std=0.3081 before encryption.
    """

    def __init__(self, parameters: Dict[str, list]):
        # Pretrained plain weights/biases for one conv layer and two FC layers.
        self.conv1_weight = parameters["conv1_weight"]
        self.conv1_bias = parameters["conv1_bias"]
        self.fc1_weight = parameters["fc1_weight"]
        self.fc1_bias = parameters["fc1_bias"]
        self.fc2_weight = parameters["fc2_weight"]
        self.fc2_bias = parameters["fc2_bias"]
        self.windows_nb = parameters["windows_nb"]

    def forward(self, enc_x: ts.CKKSVector) -> ts.CKKSVector:
        """Encrypted forward pass: conv -> square -> fc1 -> square -> fc2.

        Square is used as the activation because CKKS cannot evaluate
        non-polynomial non-linearities.
        """
        # conv layer: one encrypted conv per output channel, then pack all
        # channel outputs into a single CKKSVector.
        channels = []
        for kernel, bias in zip(self.conv1_weight, self.conv1_bias):
            y = enc_x.conv2d_im2col(kernel, self.windows_nb) + bias
            channels.append(y)
        out = ts.CKKSVector.pack_vectors(channels)
        # squaring activation (in-place)
        out.square_()
        # no need to flat
        # fc1 layer
        out = out.mm_(self.fc1_weight) + self.fc1_bias
        # squaring activation (in-place)
        out.square_()
        # output layer (logits; softmax is done client-side after decryption)
        out = out.mm_(self.fc2_weight) + self.fc2_bias
        return out

    @staticmethod
    def prepare_input(context: bytes, ckks_vector: bytes) -> ts.CKKSVector:
        """Deserialize a client's public context and encrypted input, and
        verify the context carries the Galois keys the convolution needs."""
        # Catch Exception instead of a bare `except:` (which would also swallow
        # KeyboardInterrupt/SystemExit), and chain the original error so the
        # root cause is preserved in the traceback.
        try:
            ctx = ts.context_from(context)
            enc_x = ts.ckks_vector_from(ctx, ckks_vector)
        except Exception as e:
            raise DeserializationError("cannot deserialize context or ckks_vector") from e

        try:
            _ = ctx.galois_keys()
        except Exception as e:
            raise InvalidContext("the context doesn't hold galois keys") from e

        return enc_x

    def __call__(self, *args, **kwargs):
        return self.forward(*args, **kwargs)

# ## Server helpers

# +
import pickle
import os


def load_parameters(file_path: str) -> dict:
    """Load the pickled model parameters from disk.

    Raises OSError if the file cannot be opened/read. The file handle is
    managed with a context manager so it is closed deterministically (the
    original `pickle.load(open(...))` leaked the handle on error).
    """
    try:
        with open(file_path, "rb") as f:
            parameters = pickle.load(f)
        print(f"Model loaded from '{file_path}'")
    except OSError as ose:
        print("error", ose)
        # Bare `raise` re-raises the active exception with its original traceback.
        raise
    return parameters

parameters = load_parameters("parameters/ConvMNIST-0.1.pickle")
model = ConvMNIST(parameters)
# -

# ## Client Query
# The client has to create the CKKS context for the first query.
# Then, he samples and encrypts a random image from the dataset.
#
# The serialized context and encrypted image are sent to the server for evaluation.

# +
# CKKS context generation.
context = create_ctx() # Random image sampling image, orig = load_input() # Image encoding encrypted_image = prepare_input(context, image) print("Encrypted image ", encrypted_image) print("Original image ") imshow(np.asarray(orig)) # We prepare the context for the server, by making it public(we drop the secret key) server_context = context.copy() server_context.make_context_public() # Context and ciphertext serialization server_context = server_context.serialize() encrypted_image = encrypted_image.serialize() client_query = { "data" : encrypted_image, "context" : server_context, } # - # ## Server inference # # The server deserializes the context and ciphertext. # It executes the inference, serializes the result and sends it back to the client. # + encrypted_query = model.prepare_input(client_query["context"], client_query["data"]) encrypted_result = model(encrypted_query).serialize() server_response = { "data" : encrypted_result } # - # ## Client process response # # The client deserializes and decrypts the result. # Since we cannot run the non-linearity over the CKKSVector, we run the last softmax step on the client side. # # Finally, we retrieve the final result. # + result = ts.ckks_vector_from(context, server_response["data"]).decrypt() probs = torch.softmax(torch.tensor(result), 0) label_max = torch.argmax(probs) print("Maximum probability for label {}".format(label_max)) # - # # Congratulations!!! - Time to Join the Community! # # Congratulations on completing this notebook tutorial! If you enjoyed this and would like to join the movement toward privacy preserving, decentralized ownership of AI and the AI supply chain (data), you can do so in the following ways! # # ### Star TenSEAL on GitHub # # The easiest way to help our community is just by starring the Repos! This helps raise awareness of the cool tools we're building. # # - [Star TenSEAL](https://github.com/OpenMined/TenSEAL) # # ### Join our Slack! 
#
# The best way to keep up to date on the latest advancements is to join our community! You can do so by filling out the form at [http://slack.openmined.org](http://slack.openmined.org)
#
# ### Donate
#
# If you don't have time to contribute to our codebase, but would still like to lend support, you can also become a Backer on our Open Collective. All donations go toward our web hosting and other community expenses such as hackathons and meetups!
#
# [OpenMined's Open Collective Page](https://opencollective.com/openmined)

# ## References
#
# 1. <NAME>, Introduction to CKKS, [Private AI Bootcamp](https://www.microsoft.com/en-us/research/event/private-ai-bootcamp/#!videos).
# 2. [Microsoft SEAL](https://github.com/microsoft/SEAL).
# 3. <NAME>, [CKKS Explained Series](https://blog.openmined.org/ckks-explained-part-1-simple-encoding-and-decoding/)
tutorials/Tutorial 2 - Working with Approximate Numbers.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Adding Entities to VDMS # # Now that we know how to retrieve information from VDMS, let's take a look at an example where we can insert new information and query that information back. # # We will start by connecting to the VDMS instance, and use the "AddEntity" command. # + import vdms db = vdms.vdms() db.connect("localhost") # + query = """ [ { "AddEntity" : { "class" : "Person", "properties" : { "name": "Regina", "lastname": "George" } } } ] """ response, images = db.query(query) print (db.get_last_response_str()) # - # Now we can query and make sure that VDMS actually stored that new data: # + query = """ [ { "FindEntity" : { "class" : "Person", "constraints": { "name": ["==", "Regina"] }, "results" : { "list" : [ "name"] } } } ] """ response, images = db.query(query) print (db.get_last_response_str()) # -
docker/demo/examples/2 - AddEntity.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.7.1 64-bit (''pytorch'': conda)'
#     name: python3
# ---

import math
from pathlib import Path

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# Pairwise attribute results: each row is an attribute pair (p1, p2) with its
# verification accuracy and confusion-matrix rates.
# NOTE(review): sep='\s+' should be a raw string r'\s+' to avoid the invalid
# escape-sequence warning in newer Python versions.
data = pd.read_csv('../../test_assets/AttributeCrossPairs/vgg_covariance_matrix_results.log',header=None,sep='\s+',names=['p1','p2','acc','tpr', 'fpr', 'fnr', 'tnr'])
print(data.shape)

import math
# Log-scale the false-positive rate; exact zeros are mapped to 0 rather than
# -inf (log10(0) is undefined).
data['fpr_log'] = data['fpr'].apply(lambda x: 0 if x == 0 else math.log10(x) )

# Make the pair table symmetric by appending the (p2, p1) mirror of every row,
# so the later pivot produces a full (symmetric) matrix.
data_sym = pd.concat([data[['p1','p2','fpr_log']],data[['p2','p1','fpr_log']].rename(columns={'p2':'p1','p1':'p2'})],axis=0, ignore_index=True)
data_sym.head()

# Human-readable display names for the raw attribute labels.
renaming = {'Monolid':'monolid eye',
            'bald':'bald',
            'big' : 'full lips',
            'black' :'black hair',
            'blonde':'blonde hair',
            'brown':'brown hair',
            'curly':'curly hair',
            'gray':'gray hair',
            'red':'red hair',
            'small':'small lips',
            'straight':'straight hair',
            'type1': 'type 1 skin',
            'type2':'type 2 skin',
            'type3':'type 3 skin',
            'type4':'type 4 skin',
            'type5':'type 5 skin',
            'type6':'type 6 skin',
            'wavy':'wavy hair',
            'Other': 'other eye',
            'narrow': 'narrow nose',
            'wide':'wide nose'
            }
data_sym['p1'] = data_sym['p1'].replace(renaming).str.title()
data_sym['p2'] = data_sym['p2'].replace(renaming).str.title()

def triu_anti(m, k=0):
    """Return a copy of `m` with the entries covered by the left-right-flipped
    lower triangle (one side of the anti-diagonal, offset by `k`) replaced by
    zeros. Operates on the last two axes of `m`, mirroring `np.triu`'s shape
    handling."""
    m = np.asanyarray(m)
    mask = np.fliplr(np.tri(*m.shape[-2:], k=k-1, dtype=bool))
    return np.where(mask, np.zeros(1, m.dtype), m)

# Display order of the attributes along both heatmap axes.
order = [
    'Type 6 Skin',
    'Type 5 Skin',
    'Type 4 Skin',
    'Type 3 Skin',
    'Type 2 Skin',
    'Monolid Eye',
    'Other Eye',
    'Narrow Nose',
    'Wide Nose',
    'Small Lips',
    'Full Lips',
    'Wavy Hair',
    'Straight Hair',
    'Gray Hair',
    'Curly Hair',
    'Brown Hair',
    'Blonde Hair',
    'Black Hair',
    'Bald',
]

# Pivot the symmetric pair table into an attribute x attribute matrix of
# log10(FPR) values; drop the two attributes excluded from the figure.
pxd = data_sym.pivot_table(index='p2',columns='p1',values='fpr_log')
pxd = pxd.drop(index=['Type 1 Skin','Red Hair'],columns=['Type 1 Skin','Red Hair'])
pxd = pxd.reindex(order[::-1])
pxd = pxd[order]
pxd.index.name = 'Phenotype Attributes'
pxd.columns.name = 'Phenotype Attributes'

# Blank out one redundant half of the symmetric matrix (cells under the
# anti-diagonal mask) so each pair appears only once in the figure.
trilmask = (triu_anti(np.ones(pxd.shape,dtype=int),k=1) == 1)
pxd[trilmask] = np.nan

# +
plt.style.use(["science", "ieee", "vibrant", "grid"])
plt.rcParams["grid.alpha"] = 0.35
plt.rcParams["grid.color"] = "#000000"
plt.rcParams["xtick.color"] = "#000000"

fig, ax = plt.subplots(figsize=(7, 5.25))
ax.autoscale(tight=True)
im = ax.imshow(pxd, cmap='RdBu_r')

# We want to show all ticks...
ax.set_xticks(np.arange(len(pxd.columns)))
ax.set_yticks(np.arange(len(pxd.index)))
# ... and label them with the respective list entries
ax.set_xticklabels(pxd.columns)
ax.set_yticklabels(pxd.index)

# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor")

vals = pxd.values
# Loop over data dimensions and create text annotations.
for i in range(vals.shape[0]):
    for j in range(vals.shape[1]):
        # Mid-range cells of the diverging colormap get gray text for
        # contrast; everything else is white.
        text_color = 'w'
        if -1.93 > vals[i, j] > -2.34:
            text_color = 'gray'
        text = ax.text(j, i,"{:.2f}".format(vals[i, j]),
                       ha="center", va="center", color=text_color,fontsize='x-small')

# ax.set_title("asdasdas")
fig.tight_layout()
ax.grid(False)
ax.set_axisbelow(True)
ax.tick_params(which='minor', width=0, direction = 'out')
ax.tick_params(which='major', direction = 'out')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
# fig.savefig('matched-covariance.pdf',dpi=300)
fig.savefig('../matched-covariance.svg',dpi=200)
#fig.savefig('matched-covariance.png',dpi=3000)
plt.show()
figures/code/matched_covariance.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # "50 startups."

# ### _"Predict which companies to invest for maximizing profit" (Regression task)._

# ## Table of Contents
#
#
# ## Part 0: Introduction
#
# ### Overview
# The dataset we see here contains data about 50 startups. It has 7 columns: “ID”, “R&D Spend”, “Administration”, “Marketing Spend”, “State”, “Category”, “Profit”.
#
#
# **Metadata:**
#
# * **ID** - startup ID
#
# * **R&D Spend** - how much each startup spends on Research and Development
#
# * **Administration** - how much they spend on Administration cost
#
# * **Marketing Spend** - how much they spend on Marketing
#
# * **State** - which state the startup is based in
#
# * **Category** - which business category the startup belongs to
#
# * **Profit** - the profit made by the startup
#
#
# ### Questions:
#
#
# * #### Predict which companies to invest for maximizing profit (choose the model with the best score; create predictions; choose companies)
#
#
# ## [Part 1: Import, Load Data](#Part-1:-Import,-Load-Data.)
# * ### Import libraries, Read data from ‘.csv’ file
#
# ## [Part 2: Exploratory Data Analysis](#Part-2:-Exploratory-Data-Analysis.)
# * ### Info, Head
# * ### Observation of target variable (describe + visualisation:distplot)
# * ### Numerical and Categorical features
# * #### List of Numerical and Categorical features
# * ### Missing Data
# * #### List of data features with missing values
# * #### Filling missing values
# * ### Numerical and Categorical features
# * #### Visualisation of Numerical and categorical features (regplot + barplot)
#
# ## [Part 3: Data Wrangling and Transformation](#Part-3:-Data-Wrangling-and-Transformation.)
# * ### One-Hot Encoding # * ### Standard Scaler (optional) # * ### Creating datasets for ML part # * ### 'Train\Test' splitting method # # ## [Part 4: Machine Learning](#Part-4:-Machine-Learning.) # * ### ML Models (Linear regression, Gradient Boosting Regression) # * ### Build, train, evaluate and visualise models # * ### Creating final predictions with Test set # * ### Model comparison # # # ## [Conclusion](#Conclusion.) # * ### Submission of ‘.csv’ file with predictions # ## Part 1: Import, Load Data. # * ### Import # + # import standard libraries import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt import pylab as pl from scipy import stats # import models and metrics from sklearn.metrics import r2_score, mean_squared_error, mean_squared_log_error, mean_absolute_error from sklearn.model_selection import cross_val_score, train_test_split from sklearn.preprocessing import StandardScaler from sklearn.linear_model import LinearRegression from sklearn.ensemble import GradientBoostingRegressor # - # * ### Load Data # + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a" # read data from '.csv' files train = pd.read_csv('train.csv') test = pd.read_csv('test.csv') # identify target target = train['Profit'] # - # ## Part 2: Exploratory Data Analysis. 
# * ### Info # print the full summary of the Train dataset train.info() # print the full summary of the Test dataset test.info() # * ### Head # preview of the first 5 lines of the loaded Train data train.head() # preview of the first 5 lines of the loaded Test data test.head() # * ### Observation of target variable # target variable train['Profit'].describe() # visualisation of 'Profit' distribution sns.histplot(data=train, x='Profit', kde=True, bins=10, color='g') plt.title('Profit distribution') plt.show() # set 'ID' to index train = train.set_index('ID') test = test.set_index('ID') # * ### Numerical and Categorical features # #### List of Numerical and Categorical features # + # check for Numerical and Categorical features in Train numerical_feats_train = train.dtypes[train.dtypes != 'object'].index.to_list() print('Quantity of Numerical features:', len(numerical_feats_train)) print(numerical_feats_train) print() categorical_feats_train = train.dtypes[train.dtypes == 'object'].index.to_list() print('Quantity of Categorical features: ', len(categorical_feats_train)) print(categorical_feats_train) # - # * ### Missing values # #### List of data features with missing values # check the Train features with missing values missing_info = train.isna().sum() missing_info = missing_info[missing_info > 0] print(f'There are {missing_info.shape[0]} columns with NAN values') missing_info # check the Test features with missing values missing_info = test.isna().sum() missing_info = missing_info[missing_info > 0] print(f'There are {missing_info.shape[0]} columns with NAN values') missing_info # #### Filling missing values # Fields where NAN values have meaning. 
#
# Explaining in further depth:
#
# * 'R&D Spend': Numerical - replacement of NAN by 'mean';
# * 'Administration': Numerical - replacement of NAN by 'mean';
# * 'Marketing Spend': Numerical - replacement of NAN by 'mean';
# * 'State': Categorical - replacement of NAN by 'None';
# * 'Category': Categorical - replacement of NAN by 'None'.

# +
# Numerical NAN columns to fill in Train and Test datasets
num_nan_columns_fill = [
    'R&D Spend',
    'Administration',
    'Marketing Spend'
]

# Replace NAN with the Train-set mean in these columns.
# The Train mean is deliberately reused for Test (no leakage from Test data).
# Direct column assignment is used instead of `train[col].fillna(..., inplace=True)`:
# in-place fillna on a column selection is a chained-assignment pattern that is
# deprecated and stops working under pandas Copy-on-Write.
for col in num_nan_columns_fill:
    mean_value = train[col].mean()
    train[col] = train[col].fillna(mean_value)
    test[col] = test[col].fillna(mean_value)

# Categorical NAN columns to fill in Train and Test datasets
cat_nan_columns_fill = [
    'State',
    'Category'
]

# Replace NAN with the explicit category 'None' in these columns.
for col in cat_nan_columns_fill:
    train[col] = train[col].fillna('None')
    test[col] = test[col].fillna('None')
# -

# check if there are any missing values left in Train
train.isnull().sum().sum()

# check if there are any missing values left in Test
test.isnull().sum().sum()

# #### Visualisation of Numerical features (regplot)

# +
# numerical features visualisation: one regression plot per feature against
# 'Profit', annotated with the Pearson correlation and its p-value.
nr_rows = 2
nr_cols = 2

fig, axs = plt.subplots(nr_rows, nr_cols, figsize=(nr_cols*3.5,nr_rows*3))

num_feats = list(numerical_feats_train)
not_plot = ['Id', 'Profit']
plot_num_feats = [c for c in list(numerical_feats_train) if c not in not_plot]

for r in range(0,nr_rows):
    for c in range(0,nr_cols):
        i = r*nr_cols + c
        if i < len(plot_num_feats):
            sns.regplot(x=plot_num_feats[i], y='Profit', data=train, ax=axs[r][c], color='#5081ac')
            stp = stats.pearsonr(train[plot_num_feats[i]], train['Profit'])
            str_title = "r = " + "{0:.2f}".format(stp[0]) + " " "p = " + "{0:.2f}".format(stp[1])
            axs[r][c].set_title(str_title, fontsize=11)

plt.tight_layout()
plt.show()
# -

# categorical features visualisation
# 'Profit' split in 'State' level
sns.barplot(x='State', y='Profit', data=train, palette='Blues_d')
plt.title('Profit by State')
plt.show()

# categorical features visualisation
# 'Profit' split in 'Category' level
sns.barplot(x='Category', y='Profit', data=train, palette='Blues_d')
plt.xticks(rotation=90)
plt.title('Profit by Category')
plt.show()

# ## Part 3: Data Wrangling and Transformation.

# * ### One-Hot Encoding

# +
# One-Hot Encoding Train dataset
train = pd.get_dummies(train, columns=['State', 'Category'])

# Drop target variable (already saved as `target` in Part 1)
train = train.drop(columns=['Profit'])
# -

# preview of the first 5 lines of the loaded Train data
train.head()

# Train data shape
train.shape

# One Hot-Encoding Test dataset
test = pd.get_dummies(test, columns=['State', 'Category'])

# preview of the first 5 lines of the loaded Test data
test.head()

# Test data shape
test.shape

# Drop unnecessary variables
# NOTE(review): different dummy columns are dropped from Train ('Category_None')
# and Test ('State_None'). Unless the two frames happen to end up with identical
# column sets afterwards, `scaler.transform(test)` below would fail or misalign —
# verify against the actual data.
train = train.drop(columns=['Category_None'])
test = test.drop(columns=['State_None'])

# * ### StandardScaler

# +
# Fit the scaler on Train only, then apply the same transform to Test.
scaler = StandardScaler()

train_st = pd.DataFrame(
    data=scaler.fit_transform(train),
    index=train.index,
    columns=train.columns
)

test_st = pd.DataFrame(
    data=scaler.transform(test),
    index=test.index,
    columns=test.columns
)
# -

train_st.head()

# * ### Creating datasets for ML part

# +
# set 'X' for features of scaled Train dataset 'sc_train'
X = train_st

# set 'y' for the target 'Profit'
y = target

# 'X_Test' for features of scaled Test dataset 'sc_test'
X_test = test_st
# -

# * ### 'Train\Test' split

# Hold out 30% of the Train data for validation.
X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.30, random_state=0)

X_train.shape

X_valid.shape

# ## Part 4: Machine Learning.
# * ### Build, train, evaluate and visualise models

# * #### Linear Regression

# +
# Linear Regression model
lr = LinearRegression()

# Model Training
lr.fit(X_train, y_train)

# Model Prediction (on the held-out validation split)
lr_pred = lr.predict(X_valid)
lr_pred[:5]
# -

# Model R2 score on the validation split
lr_r2_score = lr.score(X_valid, y_valid)
lr_r2_score

# +
# Model Metrics (validation split: y_valid vs lr_pred)
lr_metrics = pd.DataFrame({
    'Model': 'Linear Regression',
    'r2score':r2_score(y_valid, lr_pred),
    'MAE': mean_absolute_error (y_valid, lr_pred),
    'MSE': mean_squared_error(y_valid, lr_pred),
    'RMSE': np.sqrt(mean_squared_error(y_valid, lr_pred)),
    'MSLE': mean_squared_log_error(y_valid, lr_pred),
    'RMSLE':np.sqrt(mean_squared_log_error(y_valid, lr_pred))
}, index=[1])
lr_metrics
# -

# +
# visualisation of Train dataset predictions
# Plot outputs: x-axis carries the actual Profit (y_valid), y-axis the
# predictions — the original labels were swapped.
plt.figure(figsize=(8,5))
pl.plot(y_valid, lr_pred, 'ro')
pl.plot([0,200000], [0,200000], 'b-')
pl.xlabel('Profit')
pl.ylabel('Predicted Profit')
pl.show()
# -

# Test final predictions
lr_pred1 = lr.predict(X_test)
lr_pred1[:5]

# +
# Model Metrics
# NOTE(review): these "final" metrics compare `y` (the Train-set target)
# against predictions made on the *Test* set — the rows are not aligned, so
# the scores are not statistically meaningful; they only run because the two
# happen to have the same length. Proper evaluation needs the true Test target.
lr_metrics1 = pd.DataFrame({
    'Model': 'Linear Regression',
    'r2score':r2_score(y, lr_pred1),
    'MAE': mean_absolute_error (y, lr_pred1),
    'MSE': mean_squared_error(y, lr_pred1),
    'RMSE': np.sqrt(mean_squared_error(y, lr_pred1)),
    'MSLE': mean_squared_log_error(y, lr_pred1),
    'RMSLE':np.sqrt(mean_squared_log_error(y, lr_pred1))
}, index=[1])
lr_metrics1
# -

# +
# visualisation of Test dataset predictions
# Plot outputs (axis labels fixed as above)
plt.figure(figsize=(8,5))
pl.plot(y, lr_pred1, 'ro')
pl.plot([0,200000], [0,200000], 'b-')
pl.xlabel('Profit')
pl.ylabel('Predicted Profit')
pl.show()
# -

# comparison between Actual 'Profit' from Train dataset and Predicted 'Profit' from Test dataset
actualvspredicted = pd.DataFrame({"Actual Profit": y, "LR Predicted Profit": lr_pred1})
actualvspredicted.head(10).style.background_gradient(cmap='Blues')

# * #### Gradient Boosting Regressor

# +
# Gradient Boosting Regressor model
gb = GradientBoostingRegressor(random_state=0)

# Model Training
gb.fit(X_train, y_train)

# Model Prediction (validation split)
gb_pred = gb.predict(X_valid)

# Model R2 score on the validation split
gb_score = gb.score(X_valid, y_valid)
gb_score
# -

# +
# Model Metrics (validation split: y_valid vs gb_pred)
gb_metrics = pd.DataFrame({
    'Model': 'Gradient Boosting Regressor',
    'r2score':r2_score(y_valid, gb_pred),
    'MAE': mean_absolute_error(y_valid, gb_pred),
    'MSE': mean_squared_error(y_valid, gb_pred),
    'RMSE': np.sqrt(mean_squared_error(y_valid, gb_pred)),
    'MSLE': mean_squared_log_error(y_valid, gb_pred),
    'RMSLE':np.sqrt(mean_squared_log_error(y_valid, gb_pred))
}, index=[2])
gb_metrics
# -

# Test final predictions
gb_pred1 = gb.predict(X_test)
gb_pred1.shape

# +
# Model Metrics
# NOTE(review): same caveat as lr_metrics1 — `y` is the Train target while
# gb_pred1 are Test-set predictions; the rows are not aligned.
gb_metrics1 = pd.DataFrame({
    'Model': 'Gradient Boosting Regressor',
    'r2score':r2_score(y, gb_pred1),
    'MAE': mean_absolute_error(y, gb_pred1),
    'MSE': mean_squared_error(y, gb_pred1),
    'RMSE': np.sqrt(mean_squared_error(y, gb_pred1)),
    'MSLE': mean_squared_log_error(y, gb_pred1),
    'RMSLE':np.sqrt(mean_squared_log_error(y, gb_pred1))
}, index=[2])
gb_metrics1
# -

# +
# visualisation of Test dataset predictions
# Plot outputs (axis labels fixed as above)
plt.figure(figsize=(8,5))
pl.plot(y, gb_pred1, 'ro')
pl.plot([0,200000], [0,200000], 'b-')
pl.xlabel('Profit')
pl.ylabel('Predicted Profit')
pl.show()
# -

# ### Model comparison

# score comparison of models
frames = [lr_metrics1, gb_metrics1]
training_result = pd.concat(frames)
training_result

# comparison between Actual 'Profit' from Train dataset and Predicted 'Profit' from Test dataset
actualvspredicted = pd.DataFrame({
    'Actual Profit': y,
    'LR Predicted Profit': lr_pred1,
    'GB Predicted Profit': gb_pred1
})
actualvspredicted.head(10).style.background_gradient(cmap='Blues')

# **Result**: The best model is **Gradient Boosting Regressor** with **R2 score = 0.971998**.

# ## Conclusion.

# submission of .csv file with final predictions
sub = pd.DataFrame()
sub['ID'] = test.index
sub['Profit'] = gb_pred1
sub.to_csv('StartupPredictions.csv', index=False)
ML-101 Modules/Module 02/Lesson 02/Practice/startup-profit-prediction - Practice.ipynb
# ---
# title: "while Statement"
# author: "<NAME>"
# date: 2017-12-20T11:53:49-07:00
# description: "while statement using Python."
# type: technical_note
# draft: false
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ### Import the random module
import random

# ### Create a variable of the true number of deaths of an event
deaths = 6

# ## Create a variable that is denotes if the while loop should keep running
running = True

# ### while running is True
# Repeatedly draw random guesses until one matches `deaths`; the flag is
# cleared on a correct guess, which ends the loop.
while running:
    # Draw a random integer between 0 and 10 (both ends inclusive).
    guess = random.randint(0,10)

    if guess == deaths:
        # Exact match: report success and clear the flag so the loop exits.
        print('Correct!')
        running = False
    else:
        # Wrong guess: hint in which direction the true value lies.
        print('No, it is higher.' if guess < deaths else 'No, it is lower')

# By the output, you can see that the while script keeps generating guesses and checking them until guess matches deaths, in which case the script stops.
docs/python/basics/while_statements.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] deletable=true editable=true # ## Train nodule detector with LUNA16 dataset # + deletable=true editable=true INPUT_DIR = '../../input/' OUTPUT_DIR = '../../output/lung-cancer/01/' IMAGE_DIMS = (50,50,50,1) # + deletable=true editable=true # %matplotlib inline import numpy as np import pandas as pd import h5py import matplotlib.pyplot as plt import sklearn import os import glob from modules.logging import logger import modules.utils as utils from modules.utils import Timer import modules.logging import modules.cnn as cnn import modules.ctscan as ctscan # + [markdown] deletable=true editable=true # ## Analyse input data # + [markdown] deletable=true editable=true # ### Let us import annotations # + deletable=true editable=true annotations = pd.read_csv(INPUT_DIR + 'annotations.csv') candidates = pd.read_csv(INPUT_DIR + 'candidates.csv') # + deletable=true editable=true print(annotations.iloc[1]['seriesuid']) print(str(annotations.head())) annotations.info() # + deletable=true editable=true print(candidates.iloc[1]['seriesuid']) print(str(candidates.head())) candidates.info() # + deletable=true editable=true print(len(candidates[candidates['class'] == 1])) print(len(candidates[candidates['class'] == 0])) # + [markdown] deletable=true editable=true # ### Lets take a look at some images # + deletable=true editable=true scan = ctscan.CTScanMhd(INPUT_DIR, '1.3.6.1.4.1.14519.5.2.1.6279.6001.979083010707182900091062408058') # + deletable=true editable=true pixels = scan.get_image() plt.imshow(pixels[80]) # + deletable=true editable=true pixels = scan.get_subimage((40,40,10), (230,230,230)) plt.imshow(pixels[40]) # + [markdown] deletable=true editable=true # ### Classes are heaviliy unbalanced, hardly 0.2% percent are positive. 
# # The best way to move forward will be to undersample the negative class and then augment the positive class heaviliy to balance out the samples. # # #### Plan of attack: # # 1. Get an initial subsample of negative class and keep all of the positives such that we have a 80/20 class distribution # # 2. Create a training set such that we augment minority class heavilby rotating to get a 50/50 class distribution # + deletable=true editable=true positives = candidates[candidates['class']==1].index negatives = candidates[candidates['class']==0].index # + [markdown] deletable=true editable=true # ### Ok the class to get image data works # # Next thing to do is to undersample negative class drastically. Since the number of positives in the data set of 551065 are 1351 and rest are negatives, I plan to make the dataset less skewed. Like a 70%/30% split. # + deletable=true editable=true positives # + deletable=true editable=true np.random.seed(42) negIndexes = np.random.choice(negatives, len(positives)*5, replace = False) print(len(positives)) print(len(negIndexes)) # + deletable=true editable=true candidatesDf = candidates.iloc[list(positives)+list(negIndexes)] # + [markdown] deletable=true editable=true # ## Prepare input data # + [markdown] deletable=true editable=true # ### Split into test train set # + deletable=true editable=true from sklearn.cross_validation import train_test_split X = candidatesDf.iloc[:,:-1] Y = candidatesDf.iloc[:,-1] X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.20, random_state = 42) # + deletable=true editable=true #print(str(X_test)) #print(str(Y_test)) # + [markdown] deletable=true editable=true # ### Create a validation dataset # + deletable=true editable=true X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size = 0.20, random_state = 42) # + deletable=true editable=true print(len(X_train)) print(len(X_val)) print(len(X_test)) # + deletable=true editable=true print('number of positive cases 
are ' + str(Y_train.sum())) print('total set size is ' + str(len(Y_train))) print('percentage of positive cases are ' + str(Y_train.sum()*1.0/len(Y_train))) # + [markdown] deletable=true editable=true # ### We will need to augment the positive dataset like mad! Add new keys to X_train and Y_train for augmented data # + deletable=true editable=true tempDf = X_train[Y_train == 1] tempDf = tempDf.set_index(X_train[Y_train == 1].index + 1000000) X_train_new = X_train.append(tempDf) tempDf = tempDf.set_index(X_train[Y_train == 1].index + 2000000) X_train_new = X_train_new.append(tempDf) ytemp = Y_train.reindex(X_train[Y_train == 1].index + 1000000) ytemp.loc[:] = 1 Y_train_new = Y_train.append(ytemp) ytemp = Y_train.reindex(X_train[Y_train == 1].index + 2000000) ytemp.loc[:] = 1 Y_train_new = Y_train_new.append(ytemp) X_train = X_train_new Y_train = Y_train_new print(len(X_train), len(Y_train)) # + deletable=true editable=true print('After undersampling') print('number of positive cases are ' + str(Y_train.sum())) print('total set size is ' + str(len(Y_train))) print('percentage of positive cases are ' + str(Y_train.sum()*1.0/len(Y_train))) # + deletable=true editable=true print(len(X_train)) print(len(X_val)) print(len(X_test)) print(X_train.head()) print(Y_train.head()) # + [markdown] deletable=true editable=true # ### Prepare output dir # + deletable=true editable=true utils.mkdirs(OUTPUT_DIR, recreate=True) modules.logging.setup_file_logger(OUTPUT_DIR + 'out.log') logger.info('Dir ' + OUTPUT_DIR + ' created') # + [markdown] deletable=true editable=true # ### Create HDF5 dataset with input data # + deletable=true editable=true def create_dataset(file_path, x_data, y_data): logger.info('Creating dataset ' + file_path + ' size=' + str(len(x_data))) file_path_tmp = file_path + '.tmp' with h5py.File(file_path_tmp, 'w') as h5f: x_ds = h5f.create_dataset('X', (len(x_data), IMAGE_DIMS[0], IMAGE_DIMS[1], IMAGE_DIMS[2], IMAGE_DIMS[3]), chunks=(1, IMAGE_DIMS[0], IMAGE_DIMS[1], 
IMAGE_DIMS[2], IMAGE_DIMS[3]), dtype='f') y_ds = h5f.create_dataset('Y', (len(y_data), 2), dtype='f') valid = [] for c, idx in enumerate(x_data.index): #if(c>3): break d = x_data.loc[idx] filename = d[0] t = Timer('Loading scan ' + str(filename)) scan = ctscan.CTScanMhd(INPUT_DIR, filename) pixels = scan.get_subimage((d[3],d[2],d[1]), IMAGE_DIMS) #add color channel dimension pixels = np.expand_dims(pixels, axis=3) #plt.imshow(pixels[round(np.shape(pixels)[0]/2),:,:,0]) #plt.show() if(np.shape(pixels) == (50,50,50,1)): x_ds[c] = pixels y_ds[c] = [1,0] if(y_data.loc[idx] == 1): y_ds[c] = [0,1] valid.append(c) else: logger.warning('Invalid shape detected in image. Skipping. ' + str(np.shape(pixels))) t.stop() #dump only valid entries to dataset file c = 0 with h5py.File(file_path, 'w') as h5fw: x_dsw = h5fw.create_dataset('X', (len(valid), IMAGE_DIMS[0], IMAGE_DIMS[1], IMAGE_DIMS[2], IMAGE_DIMS[3]), chunks=(1, IMAGE_DIMS[0], IMAGE_DIMS[1], IMAGE_DIMS[2], IMAGE_DIMS[3]), dtype='f') y_dsw = h5fw.create_dataset('Y', (len(valid), 2), dtype='f') with h5py.File(file_path_tmp, 'r') as h5fr: x_dsr = h5fr['X'] y_dsr = h5fr['Y'] for i in range(len(x_dsr)): if(i in valid): x_dsw[c] = x_dsr[i] y_dsw[c] = y_dsr[i] c = c + 1 os.remove(file_path_tmp) utils.validate_xy_dataset(file_path, save_dir=OUTPUT_DIR + 'samples/') # + deletable=true editable=true #create_dataset(OUTPUT_DIR + 'nodules-train.h5', X_train, Y_train) # + deletable=true editable=true #create_dataset(OUTPUT_DIR + 'nodules-validate.h5', X_val, Y_val) # + deletable=true editable=true create_dataset(OUTPUT_DIR + 'nodules-test.h5', X_test, Y_test) # + deletable=true editable=true
kaggle-lung-cancer-approach2/01-nodule-segmentation-prepare.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:sasmodels2] * # language: python # name: conda-env-sasmodels2-py # --- # # 4. fitting algorithms # # The fitting algorithms used to fit your scattering data can have a significant impact on your results. Depending on your system, certain algorithms may lead you to only a local minimum, while others can provide extensive uncertainty analysis of your data. # # There are multiple fitting algorithms available in the `bumps` package and you can read more information about each one at https://bumps.readthedocs.io/en/latest/guide/fitting.html and at https://bumps.readthedocs.io/en/latest/guide/optimizer.html. In this notebook, we provide examples for the Levenberg-Marquardt and DREAM fitting procedures. # + from bumps.names import Parameter, inf from bumps.fitters import fit import numpy as np import os import matplotlib.pyplot as plt import pandas as pd import sasmodels from sasmodels.core import load_model from sasmodels.bumps_model import Model, Experiment import bumps # uncomment and edit line below to add path to the sasview source code # sys.path.append("/path/to/sasview/src") import sas # + data_np = np.loadtxt('../example_data/sphere_smearing.csv',delimiter=',') mask = np.where(data_np[:,1]>0)[0] # removing points that may be below zero due to noise data = sasmodels.data.Data1D(x=data_np[mask,0], y=data_np[mask,1], dy=data_np[mask,2]) kernel = sasmodels.core.load_model("sphere") scale = Parameter(0.5, name='scale').range(0,1) # setting our fitting range to (0,1) background = Parameter(0.01, name='incoherent background').range(0,1) # scattering length densities for our scattering particles and solvent sld = Parameter(0.728, name='sld') sld_solvent = Parameter(5.238, name='sld solvent') radius = Parameter(150, limits=(0,inf), name='radius').range(0,5000) radius_pd = 
Parameter(0.05, limits=(0,inf), name='radius') radius_pd_type = 'lognormal' model = Model(model=kernel, scale=scale, background=background, sld=sld, sld_solvent=sld_solvent, radius=radius, radius_pd=radius_pd, radius_pd_type=radius_pd_type ) experiment = sasmodels.bumps_model.Experiment(data=data, model=model) smearing = sasmodels.resolution.Slit1D(data.x, 0.25) experiment.resolution = smearing problem = bumps.fitproblem.FitProblem(experiment) results = bumps.fitters.fit(problem, method='lm', verbose=True) problem.plot() # - # In this example, when we called the `bumps.fitters.fit` function, we specified the 'lm' method which stands for Levenberg-Marquardt, a gradient descent fitting algorithm. This model can be computationally efficient and provide best possible fitting parameters, but it assumes you are close to the desired minimum, i.e. you have to carefully consider starting parameters. For example, if we set the radius in this example to be 1000, the LM model is unable to find the global minimum and gets trapped locally. 
# + data_np = np.loadtxt('../example_data/sphere_smearing.csv',delimiter=',') mask = np.where(data_np[:,1]>0)[0] # removing points that may be below zero due to noise data = sasmodels.data.Data1D(x=data_np[mask,0], y=data_np[mask,1], dy=data_np[mask,2]) kernel = sasmodels.core.load_model("sphere") scale = Parameter(0.5, name='scale').range(0,1) # setting our fitting range to (0,1) background = Parameter(0.01, name='incoherent background').range(0,1) # scattering length densities for our scattering particles and solvent sld = Parameter(0.728, name='sld') sld_solvent = Parameter(5.238, name='sld solvent') radius = Parameter(1000, limits=(0,inf), name='radius').range(0,5000) radius_pd = Parameter(0.05, limits=(0,inf), name='radius') radius_pd_type = 'lognormal' model = Model(model=kernel, scale=scale, background=background, sld=sld, sld_solvent=sld_solvent, radius=radius, radius_pd=radius_pd, radius_pd_type=radius_pd_type ) experiment = sasmodels.bumps_model.Experiment(data=data, model=model) smearing = sasmodels.resolution.Slit1D(data.x, 0.25) experiment.resolution = smearing problem = bumps.fitproblem.FitProblem(experiment) results = bumps.fitters.fit(problem, method='lm', verbose=True) problem.plot() # - # DREAM is a Markov chain monte carlo fitting algorithm that is less likely to get trapped in local minima, but it is more computationally expensive than LM. 
We can implement the DREAM method by modifying the arguments of the `fit` function: # + data_np = np.loadtxt('../example_data/sphere_smearing.csv',delimiter=',') mask = np.where(data_np[:,1]>0)[0] # removing points that may be below zero due to noise data = sasmodels.data.Data1D(x=data_np[mask,0], y=data_np[mask,1], dy=data_np[mask,2]) kernel = sasmodels.core.load_model("sphere") scale = Parameter(0.5, name='scale').range(0,1) # setting our fitting range to (0,1) background = Parameter(0.01, name='incoherent background').range(0,1) # scattering length densities for our scattering particles and solvent sld = Parameter(0.728, name='sld') sld_solvent = Parameter(5.238, name='sld solvent') radius = Parameter(1000, limits=(0,inf), name='radius').range(0,5000) radius_pd = Parameter(0.05, limits=(0,inf), name='radius') radius_pd_type = 'lognormal' model = Model(model=kernel, scale=scale, background=background, sld=sld, sld_solvent=sld_solvent, radius=radius, radius_pd=radius_pd, radius_pd_type=radius_pd_type ) experiment = sasmodels.bumps_model.Experiment(data=data, model=model) smearing = sasmodels.resolution.Slit1D(data.x, 0.25) experiment.resolution = smearing problem = bumps.fitproblem.FitProblem(experiment) results = bumps.fitters.fit(problem, method='dream', verbose=True) problem.plot() # - # Here we saw that DREAM was able to overcome the local minimum that trapped the LM model with identical starting conditions. Another benefit of using a DREAM approach is that it provides an uncertainty analysis of the parameters. results.state.show() # The analysis provides information about the history of the parameters as the fits proceeds and the distribution of the draws for each of the parameters. Ideally, these histograms and correlation plots will provide information about the confidence in each result. 
Additionally, it could uncover systems where there were perhaps unexpected correlations between two parameters or multi-modal distributions of parameters that each provide a suitable 'fit' to the data. These plots can help researchers narrow down fitting ranges or uncover complexity in their system that may not be well understood using a more common fitting procedure. To read more about the details of these plots, please see the following documentation at https://bumps.readthedocs.io/en/latest/guide/optimizer.html#fit-dream. # # A JSON file of the fitting parameter results and these uncertainty plots can be saved to a local directory by uncommenting the following cell and modifying the file path and base name as needed: # + #results.state.show(figfile='/path/to/save/location/base_file_name') # -
getting_started/5_fitting_algorithms.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Plotting ROC curves with ctaplot # + pycharm={"is_executing": false} import ctaplot import numpy as np import matplotlib.pyplot as plt import astropy.units as u # + pycharm={"is_executing": false} ctaplot.set_style() # - # ## ROC curves # ROC curves are useful to assess the discrimination power of a reconstruction pipeline. # For IACT, we often only care about gamma events in a one vs all fashion. For that purpose, one can use `ctaplot.plot_roc_curve_gammaness` # + pycharm={"is_executing": false} def fake_reco_distri(size, good=True): """ Generate a random distribution between 0 and 1. If `good==True`, the distribution is shifted towards 1. If `good==False`, the distribution is shifted towards 0. """ r0 = np.random.gamma(5, 1, size) if good: return 1 - r0/r0.max() else: return r0/r0.max() # + pycharm={"is_executing": false} # Example of fake distri: plt.hist(fake_reco_distri(10000, good=True), bins=100); plt.show() # + pycharm={"is_executing": false} # Let's simulate some events. Following the CORSIKA convention, 0 are for gammas, 1 for electrons, 101 for protons. 
# Build the simulated event sample (mc_type codes are described above).
nb_events = 10000
particles = [0, 1, 101]

mc_type = np.random.choice(particles, size=nb_events)

# Gammaness score per event: shifted towards 1 for true gammas,
# towards 0 for everything else (see fake_reco_distri above).
gammaness = np.empty(nb_events)
gammaness[mc_type == 0] = fake_reco_distri(len(mc_type[mc_type == 0]), good=True)
gammaness[mc_type != 0] = fake_reco_distri(len(mc_type[mc_type != 0]), good=False)

# + pycharm={"is_executing": false}
plt.figure(figsize=(14, 8))
ax = ctaplot.plot_gammaness_distribution(mc_type, gammaness, bins=100, histtype='step', linewidth=3);
ax.grid('on')
plt.show()

# + pycharm={"is_executing": false}
plt.figure(figsize=(14, 8))
ax = ctaplot.plot_roc_curve_gammaness(mc_type, gammaness, linewidth=4);
ax.legend(fontsize=20);
plt.show()
# -

# ### Multiclass ROC curve
# In this case, one needs a reconstruction probability per class.
# The probability should be between 0 and 1.

# + pycharm={"is_executing": false}
reco_proba = {}
for p in particles:
    # FIX: `np.float` was removed in NumPy 1.24 (deprecated since 1.20);
    # the builtin `float` is the documented, equivalent replacement.
    reco_proba[p] = np.ones_like(mc_type, dtype=float)
    reco_proba[p][mc_type == p] = fake_reco_distri(len(mc_type[mc_type == p]), good=True)
    reco_proba[p][mc_type != p] = fake_reco_distri(len(mc_type[mc_type != p]), good=False)

# + pycharm={"is_executing": false}
plt.figure(figsize=(14, 8))
ax = ctaplot.plot_roc_curve_multiclass(mc_type, reco_proba, linewidth=3, linestyle='--');
ax.legend(fontsize=20);
plt.show()
# -

# ## ROC curves as a function of the gamma energy
#
# One can evaluate the classification performance as a function of the gamma energy.
# In this case, the AUC is computed for gammas in each band vs **all** non-gammas particles (regardless of their energies).

# + pycharm={"is_executing": false}
# Fake energies between 10GeV and 10TeV (log-uniform in that range):
mc_gamma_energies = 10**(4 * np.random.rand(nb_events) - 2) * u.TeV

# + pycharm={"is_executing": false}
plt.figure(figsize=(14, 8))
ax = ctaplot.plot_roc_curve_gammaness_per_energy(mc_type, gammaness, mc_gamma_energies,
                                                 energy_bins=u.Quantity([0.01, 0.1, 1, 3, 10], u.TeV),
                                                 linestyle='--',
                                                 alpha=0.8,
                                                 linewidth=3,
                                                 );
ax.legend(fontsize=20);
plt.show()
examples/notebooks/roc_curves.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # CEN426 - Introduction to Machine Learning Task 9 # --- # fsdfds # # ### Student Name: <NAME> # ### StudentID: 2016556017 import tensorflow as tf from tensorflow.keras import datasets, layers, models from tensorflow.keras.utils import to_categorical import matplotlib.pyplot as plt # ## Dataset 1 # + (X1_train, y1_train), (X1_test, y1_test) = datasets.cifar10.load_data() # Normalize pixel values to be between 0 and 1 X1_train, X1_test = X1_train / 255.0, X1_test / 255.0 # + class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'] plt.figure(figsize=(10,10)) for i in range(25): plt.subplot(5,5,i+1) plt.xticks([]) plt.yticks([]) plt.grid(False) plt.imshow(X1_train[i]) plt.xlabel(class_names[y1_train[i][0]]) plt.show() # - model1 = models.Sequential() model1.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3))) model1.add(layers.MaxPooling2D((2, 2))) model1.add(layers.Conv2D(64, (3, 3), activation='relu')) model1.add(layers.MaxPooling2D((2, 2))) model1.add(layers.Conv2D(64, (3, 3), activation='relu')) model1.add(layers.Flatten()) model1.add(layers.Dense(64, activation='relu')) model1.add(layers.Dense(10)) model1.summary() # + model1.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['accuracy']) history1 = model1.fit(X1_train, y1_train, epochs=10, validation_data=(X1_test, y1_test)) # + plt.plot(history1.history['accuracy'], label='accuracy') plt.plot(history1.history['val_accuracy'], label = 'val_accuracy') plt.xlabel('Epoch') plt.ylabel('Accuracy') plt.ylim([0.5, 1]) plt.legend(loc='lower right') test_loss1, test_acc1 = model1.evaluate(X1_test, y1_test, verbose=2) # - print(test_acc1) # ## Dataset 2 (X2_train, y2_train), 
(X2_test, y2_test) = datasets.mnist.load_data(path="mnist.npz") y2_train plt.figure(figsize=(10,10)) for i in range(25): plt.subplot(5,5,i+1) plt.xticks([]) plt.yticks([]) plt.grid(False) plt.imshow(X2_train[i]) plt.xlabel(y2_train[i]) plt.show() X2_train = X2_train / 255.0 X2_test = X2_test / 255.0 X2_train = X2_train.reshape(X2_train.shape[0],28,28,1) X2_test = X2_test.reshape(X2_test.shape[0],28,28,1) y2_train = to_categorical(y2_train) y2_test = to_categorical(y2_test) model2 = models.Sequential() model2.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1))) model2.add(layers.Conv2D(64, (3, 3), activation='relu')) model2.add(layers.Conv2D(64, (3, 3), activation='relu')) model2.add(layers.Flatten()) model2.add(layers.Dense(10)) model2.summary() # + model2.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy']) history2 = model2.fit(X2_train, y2_train, validation_data=(X2_test, y2_test), epochs=3) # + plt.plot(history2.history['accuracy'], label='accuracy') plt.plot(history2.history['val_accuracy'], label = 'val_accuracy') plt.xlabel('Epoch') plt.ylabel('Accuracy') plt.ylim([0, 1]) plt.legend(loc='lower right') test_loss2, test_acc2 = model2.evaluate(X2_test, y2_test, verbose=2) print(test_acc2) # - # ## Dataset 3 (X3_train, y3_train), (X3_test, y3_test) = datasets.fashion_mnist.load_data() plt.figure(figsize=(10,10)) for i in range(25): plt.subplot(5,5,i+1) plt.xticks([]) plt.yticks([]) plt.grid(False) plt.imshow(X3_train[i]) plt.xlabel(y3_train[i]) plt.show() X3_train = X3_train/255.0 X3_test = X3_test/255.0 y3_train = to_categorical(y3_train) y3_test = to_categorical(y3_test) X3_train.shape X3_train = X3_train.reshape(X3_train.shape[0],28,28,1) X3_test = X3_test.reshape(X3_test.shape[0],28,28,1) model3 = models.Sequential() model3.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1))) model3.add(layers.Conv2D(64, (3, 3), activation='relu')) model3.add(layers.Conv2D(64, (3, 3), activation='relu')) 
model3.add(layers.Flatten())
# FIX: the final layer was `layers.Dense(10)` (raw logits) while the loss below
# is 'categorical_crossentropy' with its default from_logits=False, which
# expects class probabilities. Feeding logits to it computes a wrong loss and
# distorts training (model1 above handles this correctly with
# SparseCategoricalCrossentropy(from_logits=True)). Adding the softmax makes
# the output a proper probability distribution over the 10 classes.
model3.add(layers.Dense(10, activation='softmax'))
model3.summary()

# +
# Labels y3_train / y3_test are one-hot encoded (to_categorical above),
# hence the non-sparse categorical crossentropy.
model3.compile(optimizer='adam',
               loss='categorical_crossentropy',
               metrics=['accuracy'])

history3 = model3.fit(X3_train, y3_train, validation_data=(X3_test, y3_test), epochs=3)

# +
# Learning curves: training vs validation accuracy per epoch.
plt.plot(history3.history['accuracy'], label='accuracy')
plt.plot(history3.history['val_accuracy'], label = 'val_accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.ylim([0, 1])
plt.legend(loc='lower right')

test_loss3, test_acc3 = model3.evaluate(X3_test, y3_test, verbose=2)
print(test_acc3)
# -
assn09/assn09.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="3_tyK4IFGI3Y" # ## Packages and Libraries # + id="kTuZc90mGI3Z" from __future__ import print_function import numpy as np # %matplotlib inline import pandas as pd import matplotlib.pyplot as plt from sklearn.model_selection import KFold , cross_val_score, GridSearchCV from classification_utilities import display_cm, display_adj_cm from sklearn.metrics import confusion_matrix, f1_score, r2_score from sklearn import preprocessing from sklearn.model_selection import LeavePGroupsOut from sklearn.multiclass import OneVsOneClassifier from scipy.signal import medfilt from sklearn.model_selection import train_test_split from sklearn.inspection import permutation_importance from sklearn.model_selection import ShuffleSplit from sklearn.model_selection import learning_curve import matplotlib.colors as colors from sklearn.metrics import plot_confusion_matrix import xgboost as xgb from xgboost.sklearn import XGBClassifier from hyperopt import STATUS_OK, fmin, hp, tpe from sklearn.ensemble import RandomForestRegressor import data_augmentation as dtaug import plot_faceis as pltf import classification_utilities as clf_util # + [markdown] id="1-sZGEs2Rbe0" # ## Input data # + id="tzsTBaM3GI3a" well0 = 'CHURCHMAN BIBLE' #Load Data input_data = pd.read_csv('facies_vectors.csv') test = input_data[input_data['Well Name'] == well0] data = input_data[input_data['Well Name'] != well0] input_data = input_data[input_data['Well Name'] != well0] out1 = data[data['Well Name'] == 'ALEXANDER D'] data = data[data['Well Name'] != 'ALEXANDER D'] out2 = data[data['Well Name'] == 'KIMZEY A'] data = data[data['Well Name'] != 'KIMZEY A'] mean_PE=np.nanmean(input_data[input_data['Well Name'] == 'Recruit F9'].PE.values) data=data.replace(np.nan,mean_PE) 
input_data=input_data.replace(np.nan,mean_PE) # Parameters feature_names = ['GR', 'ILD_log10', 'DeltaPHI', 'PHIND', 'NM_M', 'RELPOS'] feature_names2 = ['GR', 'ILD_log10', 'DeltaPHI', 'PHIND', 'PE', 'NM_M', 'RELPOS'] # Store features and labels X = data[feature_names].values y = data['PE'].values # Store well labels and depths well = data['Well Name'].values depth = data['Depth'].values #facies facies_colors = ['#F4D03F', '#F5B041','#DC7633','#6E2C00', '#1B4F72','#2E86C1', '#AED6F1', '#A569BD', '#196F3D'] facies_names = ['SS', 'CSiS', 'FSiS', 'SiSh', 'MS', 'WS', 'D', 'PS', 'BS'] facies_color_map = {} for ind, label in enumerate(facies_names): facies_color_map[label] = facies_colors[ind] # + [markdown] id="xHi70X2yRkKD" # ## Data augmentation # + id="m5h3wSs25ugO" X_aug,feature_names_aug=dtaug.augment_regre(X, well, depth,feature_names) data_aug=pd.DataFrame(X_aug,columns=feature_names_aug) data_aug['Well Name'],data_aug['Depth'],data_aug['Facies'] = [well, depth,data['Facies'].values] # + colab={"base_uri": "https://localhost:8080/", "height": 441} executionInfo={"elapsed": 4130, "status": "ok", "timestamp": 1607801715760, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgxwjfNpgU8fibw1Wk63SHkC5MVbU435Iy_73QOCg=s64", "userId": "18072148807279056656"}, "user_tz": 180} id="sS2dwjNEW3zO" outputId="2457cde0-01c9-4a07-e81f-8f3be9c3ee00" logs = data_aug.copy() logs['PE']=data['PE'].values pltf.plot1(logs[logs['Well Name'] == 'NOLAN'],facies_colors) plt.savefig('figures/figure4.png',dpi=500, bbox_inches='tight') # + [markdown] id="-v2gC2fTRqc-" # ## Random Forest # + colab={"base_uri": "https://localhost:8080/", "height": 587} executionInfo={"elapsed": 27168, "status": "ok", "timestamp": 1607801738815, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgxwjfNpgU8fibw1Wk63SHkC5MVbU435Iy_73QOCg=s64", "userId": "18072148807279056656"}, "user_tz": 180} id="B4-qip17n6Tv" 
outputId="f27cc74a-63ec-40a4-b9a3-b1de21c22899" # configure the cross-validation procedure cv_inner = KFold(n_splits=2, shuffle=True, random_state=1) # define the model est_reg = RandomForestRegressor(random_state=1,max_features='sqrt') # define search space param_grid = {'n_estimators':np.arange(80,101,2), 'max_depth':np.arange(15,25,1)} # define search search = GridSearchCV(estimator=est_reg, param_grid=param_grid, scoring='r2', n_jobs=-1, cv=cv_inner, verbose=1, refit=True) # configure the cross-validation procedure groups = data['Well Name'] #cv_outer = LeaveOneGroupOut() cv_outer = KFold(n_splits=7, shuffle=True, random_state=1) # execute the nested cross-validation scores = cross_val_score(search, X_aug, y, groups=groups, scoring='r2', cv=cv_outer, n_jobs=-1,verbose=1) # report performance print('Scores:',(scores),'\n') print('R2: mean: %.2f (+/- %.2f)' % (np.mean(scores), np.std(scores))) search.fit(X_aug,y) best_parameters = search.best_params_ print(best_parameters) # + [markdown] id="VAjHQud_3vg3" # ## Prediction # + id="yvI6OcnM3vg6" # Prepare training data X_tr = X_aug y_tr = y # Prepare test data well_ts = test['Well Name'].values depth_ts = test['Depth'].values X_ts = test[feature_names].values # Augment features X_ts,_=dtaug.augment_regre(X_ts, well_ts, depth_ts,feature_names) y_ts_hat=search.predict(X_ts) test['Predicted_PE'] = y_ts_hat # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 27158, "status": "ok", "timestamp": 1607801738824, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgxwjfNpgU8fibw1Wk63SHkC5MVbU435Iy_73QOCg=s64", "userId": "18072148807279056656"}, "user_tz": 180} id="_kSgkPL13vg6" outputId="489c8168-58a5-4763-9b12-4340d02f9509" score = r2_score (test['PE'].values, test['Predicted_PE'].values) print("Score: %0.2f"%(score)) # + colab={"base_uri": "https://localhost:8080/", "height": 517} executionInfo={"elapsed": 28102, "status": "ok", "timestamp": 1607801739789, 
"user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgxwjfNpgU8fibw1Wk63SHkC5MVbU435Iy_73QOCg=s64", "userId": "18072148807279056656"}, "user_tz": 180} id="zjE1jtSMvSTP" outputId="c3098771-3d10-41e1-d63b-77eaf579093e" well_name_plot = 'NOLAN' pltf.regression(data_aug,data,well_name_plot,search,test) # + colab={"base_uri": "https://localhost:8080/", "height": 729} executionInfo={"elapsed": 53581, "status": "ok", "timestamp": 1607801765288, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgxwjfNpgU8fibw1Wk63SHkC5MVbU435Iy_73QOCg=s64", "userId": "18072148807279056656"}, "user_tz": 180} id="e1oIXXMh4Ye9" outputId="40fc1178-0e57-4886-acb4-e6ede363fc21" est_reg = RandomForestRegressor(random_state=1,max_features='sqrt',max_depth=17,n_estimators=100) est_reg.fit(X_aug,y) y_ts_hat2=est_reg.predict(X_ts) feature_importance = est_reg.feature_importances_ result = permutation_importance(est_reg, X_ts, y_ts_hat2, n_repeats=10, random_state=1, n_jobs=1) # + id="H0rIqRY50o-8" outputId="33288338-87c2-4db3-84ef-71eb4e36dc17" well_name_plot = 'NOLAN' pltf.plot_figure5(data_aug,data,well_name_plot,est_reg,test,result) plt.savefig('figures/figure5.png',dpi=500, bbox_inches='tight') # + [markdown] id="W3mxkw-QR-T9" # ## Predict and attribute PE into missing wells # + id="BoK4kXgw-cLF" X_ts,_=dtaug.augment_regre(out1[feature_names].values, out1['Well Name'], \ out1['Depth'].values,feature_names) input_data.loc[input_data.index[input_data['Well Name']=='ALEXANDER D'], \ 'PE'] = search.predict(X_ts) X_ts,_=dtaug.augment_regre(out2[feature_names].values, out2['Well Name'], \ out2['Depth'].values,feature_names) input_data.loc[input_data.index[input_data['Well Name'] =='KIMZEY A'], \ 'PE'] = search.predict(X_ts) # + [markdown] id="lusmPD2ZSMJD" # # Facies Classification # + id="qzxfmCIY3uEh" X = input_data[feature_names2].values y = input_data['Facies'].values well = input_data['Well Name'].values depth = 
input_data['Depth'].values # + [markdown] id="Nwh-WR6ISUPV" # ### Augment features # + id="yRazQKq6GI3a" X_aug, padded_rows = dtaug.augment_features(X, well, depth) # + [markdown] id="6VDVFiVHSmj8" # ## Classifier # + id="6ps2f58DGI3b" #modified from https://github.com/seg/2016-ml-contest/blob/master/LA_Team/Facies_classification_LA_TEAM_08.ipynb # Train and test a classifier def train_and_test(X_tr, y_tr, X_v, well_v): SEED = 42 clf = XGBClassifier(colsample_bytree = 0.65, learning_rate=0.325, gamma = 0.6, max_depth=9, min_child_weight=2, n_estimators=149, seed = SEED, subsample = 0.85) clf.fit(X_tr, y_tr) y_train_pred = clf.predict(X_tr) # Test classifier y_v_hat = clf.predict(X_v) # Clean isolated facies for each well for w in np.unique(well_v): y_v_hat[well_v==w] = medfilt(y_v_hat[well_v==w], kernel_size=5) return y_v_hat,y_train_pred,clf # + [markdown] id="LovQ-OT5GI3b" # ## Prediction # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 63210, "status": "ok", "timestamp": 1607801774955, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgxwjfNpgU8fibw1Wk63SHkC5MVbU435Iy_73QOCg=s64", "userId": "18072148807279056656"}, "user_tz": 180} id="HSdFeNCdGI3b" outputId="565b40c1-8c39-4126-96a3-395c98ddc545" # Prepare training data X_tr = X y_tr = y # Augment features X_tr, padded_rows = dtaug.augment_features(X_tr, well, depth) # Removed padded rows X_tr = np.delete(X_tr, padded_rows, axis=0) y_tr = np.delete(y_tr, padded_rows, axis=0) - 1 # Prepare test data well_ts = test['Well Name'].values depth_ts = test['Depth'].values X_ts = test[feature_names2].values # Augment features X_ts, padded_rows = dtaug.augment_features(X_ts, well_ts, depth_ts) # Predict test labels y_ts_hat,y_train_pred,clf = train_and_test(X_tr, y_tr, X_ts, well_ts) # Save predicted labels test['Predicted'] = y_ts_hat + 1 score_train = f1_score (y_tr, y_train_pred, average ='micro') print("Score train: %0.2f"%(score_train)) score_test = 
f1_score (test['Facies'].values, test['Predicted'].values, average ='micro') print("Score test: %0.2f"%(score_test)) # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 64358, "status": "ok", "timestamp": 1607801776121, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgxwjfNpgU8fibw1Wk63SHkC5MVbU435Iy_73QOCg=s64", "userId": "18072148807279056656"}, "user_tz": 180} id="mt5BblElYV0t" outputId="95358f30-27e7-4933-c7fb-e2bb2fdbfcb3" conf = confusion_matrix(test['Facies'].values, test['Predicted'].values) adjacent_facies = np.array([[1], [0,2], [1], [4], [3,5], [4,6,7], [5,7], [5,6,8], [6,7]]) print('Facies classification accuracy = %0.2f' % clf_util.accuracy(conf)) print('Adjacent facies classification accuracy = %.2f' % clf_util.accuracy_adjacent(conf)) # + id="jpjGIdh50o_H" outputId="6ad647da-5567-4157-b848-9cd85c9b3d4b" disp=plot_confusion_matrix(clf, X_ts, test['Facies'].values-1, display_labels=facies_names, cmap=plt.cm.Blues) plt.savefig('figures/figure7.png',dpi=500, bbox_inches='tight') # + [markdown] id="f3EaF8EQSzd2" # ## Facies classification in test well # + colab={"base_uri": "https://localhost:8080/", "height": 779} executionInfo={"elapsed": 64360, "status": "ok", "timestamp": 1607801776119, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgxwjfNpgU8fibw1Wk63SHkC5MVbU435Iy_73QOCg=s64", "userId": "18072148807279056656"}, "user_tz": 180} id="ZizGhRjAGjSi" outputId="873c54ed-5e54-462b-a38e-697c0c7d6620" pltf.compare_facies_plot(test, 'Predicted', facies_colors) plt.savefig('figures/figure6.png',dpi=500, bbox_inches='tight')
2_predict_PE_Facies_Classification.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# +
# load library
import argparse
import os
import numpy as np
from tqdm import tqdm
from mypath import Path
from dataloaders import make_data_loader
from modeling.sync_batchnorm.replicate import patch_replication_callback
from modeling.deeplab import *
from utils.loss import SegmentationLosses
from utils.calculate_weights import calculate_weigths_labels
from utils.lr_scheduler import LR_Scheduler
from utils.saver import Saver
# from utils.summaries import TensorboardSummary
from utils.metrics import Evaluator
from dataloaders.datasets.lits import LiverSegmentation, TumorSegmentation
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import argparse
from PIL import Image
import cv2
import time
import torch

# +
# FIX: `scipy.ndimage.morphology` is a deprecated alias submodule (removed in
# recent SciPy releases); the same functions are exposed directly on
# `scipy.ndimage`, so bind that module under the old local name.
from scipy import ndimage as morphology


def surfd(input1, input2, sampling=1, connectivity=1):
    """Compute the symmetric surface distances between two binary masks.

    Parameters
    ----------
    input1, input2 : array-like
        Segmentation masks; any non-zero value counts as foreground.
    sampling : scalar or sequence, optional
        Voxel spacing passed to the Euclidean distance transform.
    connectivity : int, optional
        Connectivity used to build the erosion structuring element.

    Returns
    -------
    numpy.ndarray
        1-D array of distances from each surface voxel of one mask to the
        nearest surface voxel of the other (both directions concatenated);
        callers typically take ``.mean()`` of this for the ASSD metric.
    """
    # FIX: `np.bool` was a deprecated alias for the builtin `bool` and was
    # removed in NumPy 1.24; use `bool` directly (identical behavior).
    input_1 = np.atleast_1d(input1.astype(bool))
    input_2 = np.atleast_1d(input2.astype(bool))

    conn = morphology.generate_binary_structure(input_1.ndim, connectivity)

    # Surface = mask XOR its erosion (the one-voxel-thick boundary).
    S = input_1 ^ morphology.binary_erosion(input_1, conn)
    Sprime = input_2 ^ morphology.binary_erosion(input_2, conn)

    # Distance-to-surface fields (distance transform of the complement).
    dta = morphology.distance_transform_edt(~S, sampling)
    dtb = morphology.distance_transform_edt(~Sprime, sampling)

    # Distances sampled at the other mask's surface voxels, both directions.
    sds = np.concatenate([np.ravel(dta[Sprime != 0]), np.ravel(dtb[S != 0])])

    return sds


# +
# Command-line configuration. The dummy '-f' option absorbs the kernel
# connection-file argument Jupyter passes, so parse_args() also works when
# this script runs inside a notebook.
parser = argparse.ArgumentParser()
parser.add_argument('-f')
parser.add_argument('--batch-size', type=int, default=200)
parser.add_argument('--base-size', type=int, default=256)
parser.add_argument('--crop-size', type=int, default=256)
parser.add_argument('--mode', type=str, default='val')   # 'val' = score, 'vis' = save figures
parser.add_argument('--kind', type=str, default='liver') # 'liver' or 'tumor' segmentation
parser.add_argument('--model-path', type=str, default='models/95_liver33.pth.tar')
parser.add_argument('--backbone', type=str, default='xception')
# parser.add_argument('--model-path', type=str, default='models/95_liver33.pth.tar')
# parser.add_argument('--backbone', type=str, default='xception')
args = parser.parse_args()

# +
# load model
# DeepLab with two output channels: background vs. the target class
# (liver or tumor, selected by --kind when the dataset is built below).
model = DeepLab(num_classes=2, backbone=args.backbone, output_stride=16, sync_bn=False, freeze_bn=False)
# ckpt = torch.load('run/lits_tumor/resume-tumor-bce-crop/experiment_0/_checkpoint37.pth.tar')#67 0.8809 0.8809
ckpt = torch.load(args.model_path)#72
state_dict = ckpt['state_dict']
model.load_state_dict(state_dict)
# -

args.mode = 'val'
args.mode

# + tags=[]
# load data
# Evaluate one study (volume) at a time; studies 111-130 are held out here.
for sn in range(111, 131):
    if args.kind == 'liver':
        dataset_test = LiverSegmentation(args, split=args.mode, study_num=sn)
    if args.kind == 'tumor':
        dataset_test = TumorSegmentation(args, split=args.mode, study_num=sn)
    print("num test img: ", len(dataset_test))
    if len(dataset_test) == 0:
        continue
    dataloader = DataLoader(dataset_test, batch_size=args.batch_size, shuffle=False, num_workers=0)
    # gpu use
    device = 'cuda'
    model.to(device)
    model.eval()
    # initialize scores
    # Running sums are reset per study, so the printout below is per-study.
    cnt = 0
    total_precision = 0
    total_recall = 0
    total_time = 0
    total_cos = 0
    total_voe = 0
    total_assd = 0
    total_vd = 0
    # Dice, jaccard, VOE, ASSD, RVD, MSSD
    # run inference
    for i, sample in enumerate(dataloader):
        image, target = sample['image'], sample['label']
        image = image.to(device)
        start_time = time.time()
        with torch.no_grad():
            output = model(image)
        # Wall-clock forward-pass time for this batch.
        timedelta = time.time() - start_time
        total_time += timedelta
        pred = output.data.cpu().numpy()
        target = target.cpu().numpy()
        # Per-pixel class = argmax over the 2 output channels.
        pred = np.argmax(pred, axis=1)
        # print(np.unique(pred))
        # print(np.unique(target))
        image = image.cpu().numpy()
        for idx in range(len(pred)):
            if args.mode == 'val':
                ## scoring
                pred_ = pred[idx].astype(np.uint8)
                target_ = target[idx].astype(np.uint8)
                intersection = np.logical_and(target_, pred_)
                union = np.logical_or(target_, pred_)
                # VOE = 1 - IoU (volumetric overlap error).
                voe = 1.0 - np.sum(intersection)/np.sum(union)
                # Symmetric surface distances -> ASSD when averaged.
                sds = surfd(target_, pred_)
                if len(sds) == 0:
                    assd = 0
                else:
                    assd = sds.mean()
                if np.sum(target_) == 0:
                    vd = 1.0
                else:
                    # Volume difference normalized by the configured crop area.
                    vd = abs((int(np.sum(pred_)) - int(np.sum(target_))) / args.crop_size**2)
                # iou_score = np.sum(intersection) / np.sum(union)
                # NOTE(review): these normalizers hard-code 256**2 while the
                # crop size is configurable via --crop-size — confirm they stay
                # in sync. `tn` is computed but never used below.
                tp = np.sum(np.logical_and(target_ == 1, pred_ == 1))/256**2
                fp = np.sum(np.logical_and(target_ == 0, pred_ == 1))/256**2
                tn = np.sum(np.logical_and(target_ == 0, pred_ == 0))/256**2
                fn = np.sum(np.logical_and(target_ == 1, pred_ == 0))/256**2
                target_ = target_.ravel()
                pred_ = pred_.ravel()
                # Cosine similarity between flattened masks (reported under
                # the "jaccard" label in the summary print below).
                cos_sim = np.dot(target_, pred_)/(np.linalg.norm(target_)*np.linalg.norm(pred_))
                precision = tp/(tp+fp)
                recall = tp/(tp+fn)
                # Empty-mask slices produce NaNs (0/0); score them as 1.0,
                # i.e. a correct "nothing to segment" prediction.
                voe = np.nan_to_num(voe, nan=1.0)
                cos_sim = np.nan_to_num(cos_sim, nan=1.0)
                precision = np.nan_to_num(precision, nan=1.0)
                recall = np.nan_to_num(recall, nan=1.0)
                total_cos += cos_sim
                total_precision+=precision
                total_recall+=recall
                total_voe += voe
                total_assd+=assd
                total_vd+=vd
            elif args.mode == 'vis':
                ##visualize(save)
                # Save a side-by-side figure: label | input slice | prediction.
                pred_ = pred[idx].astype(np.uint8)
                target_ = target[idx].astype(np.uint8)
                pred_[pred_ != 0] = 255
                target_[target_ != 0] = 255
                # Undo the ImageNet normalization (CHW -> HWC, de-standardize,
                # rescale to 0-255) to recover a displayable image.
                img_tmp = np.transpose(image[idx], axes=[1, 2, 0])
                img_tmp *= (0.229, 0.224, 0.225)
                img_tmp += (0.485, 0.456, 0.406)
                img_tmp *= 255.0
                img_tmp = img_tmp.astype(np.uint8)
                fig = plt.figure()
                fig.tight_layout()
                ax1 = fig.add_subplot(1, 3, 1)
                ax1.imshow(target_, cmap='gray')
                # ax1.set_title('Label')
                ax1.axes.xaxis.set_visible(False)
                ax1.axes.yaxis.set_visible(False)
                ax2 = fig.add_subplot(1, 3, 2)
                ax2.imshow(img_tmp, cmap=plt.cm.bone)
                # ax2.set_title('Original')
                ax2.axes.xaxis.set_visible(False)
                ax2.axes.yaxis.set_visible(False)
                ax3 = fig.add_subplot(1, 3, 3)
                ax3.imshow(pred_, cmap='gray')
                # ax3.set_title('Predict')
                ax3.axes.xaxis.set_visible(False)
                ax3.axes.yaxis.set_visible(False)
                # plt.show()
                os.makedirs('val/'+args.kind+f'/{str(sn)}/', exist_ok=True)
                plt.savefig('val/'+args.kind+f'/{str(sn)}/'+str(cnt)+'.png')
                plt.close(fig)
            # Slice counter shared by both modes (also the divisor for the
            # per-study averages printed below).
            cnt+=1
            print(cnt, end='\r')
    if args.mode == 'val':
        # print scores
        # Per-study averages; percentages except avg_time (s) and assd (px).
        avg_time = total_time/cnt
        p = total_precision/cnt*100
        r = total_recall/cnt*100
        cos = total_cos/cnt*100
        # Dice score derived from averaged precision/recall (F1).
        f1 = 2*p*r/(p+r)
        voe = total_voe/cnt*100
        assd = total_assd/cnt
        vd = total_vd/cnt*100
        print(f"avg_time:{round(avg_time,4)} precision:{round(p,4)} recall:{round(r,4)} dice:{round(f1,4)} jaccard:{round(cos,4)} voe:{round(voe,4)} assd:{round(assd,4)} vd:{round(vd,4)}")
# -

# Recorded results from previous runs (kept for reference):
#liver encoder avg_time:0.0058 precision:82.0091 recall:96.3349 dice:88.5966 jaccard:77.34 voe:79.59 assd:33.9379 vd:74.6414
#liver decoder avg_time:0.0002 precision:43.4459 recall:78.314 dice:55.8874 jaccard:65.1172 voe:83.3158 assd:94.6391 vd:62.6889
#liver aspp avg_time:0.0002 precision:54.8324 recall:94.1485 dice:69.3027 jaccard:78.0577 voe:81.2244 assd:96.2175 vd:74.6727

# Re-print the metrics of the last study processed (relies on the loop
# variables leaking out of the cell above).
print(f"avg_time:{round(avg_time,4)} precision:{round(p,4)} recall:{round(r,4)} dice:{round(f1,4)} jaccard:{round(cos,4)} voe:{round(voe,4)} assd:{round(assd,4)} vd:{round(vd,4)}")
inference.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:571]
#     language: python
#     name: python3
# ---

# +
import pandas as pd
import numpy as np
import altair as alt
import matplotlib.pyplot as plt
import seaborn as sns
from selenium import webdriver

# Altair needs a browser driver to rasterize charts to PNG.
executable_path='/opt/homebrew/bin/geckodriver'
driver = webdriver.Firefox(executable_path=executable_path)
# -

full_df = pd.read_csv('../data/processed/full_df.csv')
train_df = pd.read_csv('../data/processed/train_df.csv')

# +
# Histogram Plot
histogram_plot = alt.Chart(train_df, title = "Target variable histogram").mark_bar().encode(
    x = alt.X("total_cup_points:Q", bin=True),
    y ='count()',
)
histogram_plot.save('../reports/images/target_histogram.png', webdriver=driver)
histogram_plot
# -

# Scatter-plot matrix of the target against each explanatory variable.
splom = alt.Chart(train_df, title="Explanatory variables pair plot").mark_point(opacity=0.3).encode(
    x=alt.X(alt.repeat("column"), type='quantitative', scale = alt.Scale(zero=False)),
    y=alt.Y(alt.repeat("row"), type='quantitative', scale = alt.Scale(zero=False)),
).properties(
    width=200,
    height=200
).repeat(
    row=['total_cup_points'],
    column=['moisture', 'category_one_defects', 'quakers',
            'category_two_defects', 'altitude_mean_meters']
)
# FIX: was saved as 'target_histogram.png', silently overwriting the
# histogram written by the cell above.
splom.save('../reports/images/pairwise_plot.png', webdriver=driver)
splom

# +
# Correlation Plot
plt.figure(figsize=(16, 6))
correlation_matrix = train_df.corr()
# Mask the upper triangle so each correlation appears only once.
mask = np.triu(np.ones_like(correlation_matrix, dtype=np.bool_))
heatmap = sns.heatmap(correlation_matrix, mask = mask, vmin=-1, vmax=1, annot=True, cmap='BrBG')
heatmap.set_title('Correlation Heatmap', fontdict={'fontsize':12}, pad=12)
fig = heatmap.get_figure()
fig.savefig('../reports/images/correlation_matrix_heatmap.png')
#plt.savefig('../reports/images/correlation_matrix_heatmap.png')

# +
import os
import pandas as pd
import numpy as np
import altair as alt
import matplotlib.pyplot as plt
import seaborn as sns
from docopt import docopt

#opt = docopt(__doc__)


def plot_target_histogram(dataframe:pd.DataFrame, target_feature:str,
                          output_dir:str="../reports/images/"):
    """Save a binned histogram of `target_feature` as HTML in `output_dir`."""
    # Histogram Plot of Target Variable
    # FIX: use the `dataframe` argument (was the global `train_df`) and an
    # f-string for the field shorthand (was the literal '{target_feature}:Q').
    histogram_plot = alt.Chart(dataframe, title = "Target variable histogram").mark_bar().encode(
        x = alt.X(f"{target_feature}:Q", bin=True),
        y ='count()',
    )
    histogram_plot.save(output_dir+'target_histogram.html')


def plot_correlation_matrix(dataframe:pd.DataFrame, output_dir:str="../reports/images/"):
    """Save a lower-triangle correlation heatmap of `dataframe` as PNG."""
    # Correlation Plot - Diagonal Removed
    plt.figure(figsize=(16, 6))
    # FIX: correlate the `dataframe` argument (was the global `train_df`).
    correlation_matrix = dataframe.corr()
    mask = np.triu(np.ones_like(correlation_matrix, dtype=np.bool_))
    heatmap = sns.heatmap(correlation_matrix, mask = mask, vmin=-1, vmax=1, annot=True, cmap='BrBG')
    heatmap.set_title('Correlation Heatmap', fontdict={'fontsize':12}, pad=12)
    fig = heatmap.get_figure()
    fig.savefig(output_dir+'correlation_matrix_heatmap.png')
    print("Correlation Matrix Plotted")


def plot_pairwise(dataframe:pd.DataFrame, target_feature:str,
                  output_dir:str="../reports/images/"):
    """Save a pair plot of `target_feature` vs. explanatory variables as HTML."""
    # FIX: use the `dataframe` argument and the actual `target_feature` value
    # (was the global `train_df` and the literal string '{target_feature}').
    splom = alt.Chart(dataframe, title="Explanatory variables pair plot").mark_point(opacity=0.3).encode(
        x=alt.X(alt.repeat("column"), type='quantitative', scale = alt.Scale(zero=False)),
        y=alt.Y(alt.repeat("row"), type='quantitative', scale = alt.Scale(zero=False)),
    ).properties(
        width=200,
        height=200
    ).repeat(
        row=[target_feature],
        column=['moisture', 'category_one_defects', 'quakers',
                'category_two_defects', 'altitude_mean_meters'])
    # FIX: was saved as 'target_histogram.html', clobbering the output of
    # plot_target_histogram.
    splom.save(output_dir+'pairwise_plot.html')


def plot_visualisations():
    """Read the processed training split and regenerate every report image."""
    input_dir = "../data/processed/"
    output_dir = "../reports/images/"
    # Read Dataframes
    train_df = pd.read_csv(input_dir + 'train_df.csv')
    plot_target_histogram(train_df, "total_cup_points", output_dir)
    plot_pairwise(train_df, "total_cup_points", output_dir)
    plot_correlation_matrix(train_df, output_dir)


plot_visualisations()
# -
notebooks/visualisations.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.6.9 64-bit # name: python_defaultSpec_1597045698335 # --- # - 2020-08-10 # - OS: Windows 10 # - yolov4: 1.1.0 # + from yolov4.tf import YOLOv4 yolo = YOLOv4() yolo.classes = "coco.names" yolo.input_size = 608 yolo.batch_size = 2 # + tags=[] import time start_t = time.time() dataset = yolo.load_dataset("train2017.txt", image_path_prefix="D:/coco/train2017") print(time.time() - start_t) # + tags=[] import tensorflow as tf import numpy as np import cv2 for i, (images, gt) in enumerate(dataset): for j in range(len(images)): _candidates = [] for candidate in gt: grid_size = candidate.shape[1] _candidates.append( tf.reshape(candidate[j], shape=(1, grid_size * grid_size * 3, -1)) ) candidates = np.concatenate(_candidates, axis=1) frame = images[j, ...] * 255 frame = frame.astype(np.uint8) pred_bboxes = yolo.candidates_to_pred_bboxes(candidates[0]) pred_bboxes = yolo.fit_pred_bboxes_to_original(pred_bboxes, frame.shape) frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR) image = yolo.draw_bboxes(frame, pred_bboxes) cv2.namedWindow("result", cv2.WINDOW_AUTOSIZE) cv2.imshow("result", image) while cv2.waitKey(10) & 0xFF != ord("q"): pass if i == 10: break cv2.destroyWindow("result")
test/dataset/yolo_load_dataset_test.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # GPU configuration

# +
import os
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"; # GPU ID to use, from 0 up to N GPUs. -1 means run on the CPU
os.environ["CUDA_VISIBLE_DEVICES"]="-1";
# -

# # Library imports

# +
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
from tensorflow import keras
from IPython.display import display, clear_output
from ipywidgets import interact, IntSlider
import h5py
import numpy as np
import random
# %matplotlib inline
import matplotlib.pyplot as plt
from Models.ltc_models import *
# !wget -q https://raw.githubusercontent.com/JefeLitman/VideoDataGenerator/master/DatasetsLoaderUtils.py -O DatasetsLoaderUtils.py
from DatasetsLoaderUtils import load_videoFrames_from_path
# -

# Stray cell: just displays the module repr, has no effect on the run.
tf.keras.activations

# # TensorFlow and Keras configuration

print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))
gpus = tf.config.experimental.list_physical_devices('GPU')
# NOTE(review): with CUDA_VISIBLE_DEVICES="-1" above, `gpus` is empty and
# `gpus[0]` raises IndexError — confirm the env var is toggled when a GPU
# is actually used.
tf.config.experimental.set_memory_growth(gpus[0], True)
tf.debugging.set_log_device_placement(False)

# Fix all RNG seeds for reproducibility.
random.seed(8128)
np.random.seed(8128)
tf.random.set_seed(8128)

# # Data loading

root_path = "/home/jefelitman/DataSets/ucf101/split_1"
root_path

# Training hyper-parameters and clip geometry.
epoch = 20
batch_size = 30
size = [112, 112]   # frame width x height
frames = 16         # frames per clip
canales = 3         # channels (3 = RGB)
video_shape = tuple([frames]+size[::-1]+[canales])

# + active=""
# def time_sampling(video):
#     if len(video) < 60:
#         new_video = np.concatenate([video, np.zeros([60 - len(video), 58, 58, 3])], axis=0)
#     else:
#         new_video = video
#     mitad = len(new_video)//2
#     return new_video[mitad-30:mitad+30]
# -

dataset = load_videoFrames_from_path(root_path, lambda x: x, size)
train_gen = dataset.data_generator(1, canales)
test_gen = dataset.data_generator(2, canales)


# Temporal sampling: for each video, yield `paso` strided 16-frame clips.
# NOTE(review): when paso == 0 a zero-padded clip is built but range(0)
# yields nothing, so videos shorter than 16 frames are silently dropped —
# confirm this is intended.
def train_gen_sampling():
    for v, l in train_gen:
        paso = len(v)//16
        if paso == 0:
            video = np.concatenate([v, np.zeros([16 - len(v), 112, 112, 3])], axis=0)
        else:
            video = v
        for j in range(paso):
            yield video[j::paso][:16], l


# Same strided sampling for the test split.
def test_gen_sampling():
    for v, l in test_gen:
        paso = len(v)//16
        if paso == 0:
            video = np.concatenate([v, np.zeros([16 - len(v), 112, 112, 3])], axis=0)
        else:
            video = v
        for j in range(paso):
            yield video[j::paso][:16], l


107039/30 # number of batches


# Normalize pixel values to [0, 1].
def scale(video, label):
    return video/255., label


# Horizontal-flip augmentation (mirror along the width axis).
def video_flip_horizontal(video, label):
    return tf.reverse(video, [2]), label


# +
# Train dataset
train_data = tf.data.Dataset.from_generator(train_gen_sampling, (tf.float32, tf.int64), (video_shape, []))
train_data = train_data.cache('/home/jefelitman/DataSets/temporal_cache_data/train').map(scale, 24)
#train_data = train_data.concatenate(train_data.map(video_flip_horizontal, 24))

# Test dataset
test_data = tf.data.Dataset.from_generator(test_gen_sampling, (tf.float32, tf.int64), (video_shape, []))
test_data = test_data.cache("/home/jefelitman/DataSets/temporal_cache_data/test").map(scale, 24)
# -

# # LTC neural network

# ### Model construction

# +
# Network input / training hyper-parameters.
dropout = 0.5
lr = 1e-3
weigh_decay = 5e-3
# Build a save-directory name that encodes the run configuration.
ltc_save_path = '/home/jefelitman/Saved_Models/trained_ucf/No_Encoder/no_inception/LTC-ori_split1_{w}x{h}x{f}_SGD_'.format(
    w=size[0], h=size[1],f=frames)
if canales == 3:
    ltc_save_path += 'RGB_'
else:
    ltc_save_path += 'B&N_'
ltc_save_path += 'lr={l}_DO_IC_TDM_S255_E{e}'.format(l = lr, e = epoch)

# Create the folder where the model will be saved.
if not os.path.isdir(ltc_save_path):
    os.mkdir(ltc_save_path)
model_saves_path = os.path.join(ltc_save_path,'model_saves')
if not os.path.isdir(model_saves_path):
    os.mkdir(model_saves_path)
ltc_save_path
# -

# Parameters for model compilation.
optimizador = keras.optimizers.SGD(learning_rate=lr, momentum=0.9)
#optimizador = keras.optimizers.Adam(learning_rate=lr)
perdida = keras.losses.SparseCategoricalCrossentropy(name="loss")
precision = keras.metrics.SparseCategoricalAccuracy(name="acc")
ltc = get_LTC_original(video_shape, len(dataset.to_class), dropout, weigh_decay)

# Model compilation.
ltc.compile(optimizer = optimizador,
            loss = perdida,
            metrics = [precision])

# +
#keras.utils.plot_model(ltc, 'LTC.png', show_shapes=True)

# +
#ltc = keras.models.load_model(os.path.join(ltc_save_path,'ltc_final_1000.h5'))
# -

ltc.summary()

# ### Loading pre-trained weights

# ##### C3D weights

# + active=""
# c3d_weights = h5py.File('/home/jefelitman/Saved_Models/c3d-sports1M_weights.h5', 'r')
# print(c3d_weights.keys())

# + active=""
# c3d_weights['layer_0'].keys()

# + active=""
# weights = []
# for capa in ['layer_0','layer_2','layer_4','layer_5','layer_5']:
#     weights.append([
#         np.moveaxis(np.r_[c3d_weights[capa]['param_0']], (0,1),(4,3)), # Swap axes because c3d stores channels first
#         np.r_[c3d_weights[capa]['param_1']]
#     ])
# for index, capa in enumerate(['conv3d_1','conv3d_2','conv3d_3','conv3d_4','conv3d_5']):
#     ltc.get_layer(capa).set_weights(weights[index])
# -

# ##### InceptionV3 weights

# + active=""
# inceptionv3 = keras.applications.InceptionV3(weights="imagenet")

# + active=""
# for layer, index in [('conv3d_1',1),('conv3d_2',4),('conv3d_3',7),('conv3d_4',11),('conv3d_5',14)]:
#     old_weights, old_bias = ltc.get_layer(layer).get_weights()
#     new_weight = np.zeros(old_weights.shape)
#     new_bias = np.zeros(old_bias.shape)
#     pesos = inceptionv3.layers[index].get_weights()[0]
#     for entrada in range(old_weights.shape[3]):
#         for salida in range(old_weights.shape[4]):
#             new_weight[:,:,:,entrada,salida] = np.stack([pesos[:,:,entrada%pesos.shape[2],salida%pesos.shape[3]],
#                                                          pesos[:,:,entrada%pesos.shape[2],salida%pesos.shape[3]],
#                                                          pesos[:,:,entrada%pesos.shape[2],salida%pesos.shape[3]]
#                                                         ]
#                                                        )/3
#     ltc.get_layer(layer).set_weights([new_weight, new_bias])
# -

# ### Training the network with the generator

# +
# Custom callbacks for model training.
# At epochs 10 and 16: decay both the L2 weight regularization and the
# learning rate by a factor of 10.
def cambio_lr(epoch, lr):
    if epoch == 10 or epoch == 16 :
        for i in ['conv3d_1','conv3d_2','conv3d_3','conv3d_4', 'conv3d_5','dense_6','dense_7','dense_8']:
            weigh_decay = ltc.get_layer(i).kernel_regularizer.get_config()['l2'] * 0.1
            ltc.get_layer(i).kernel_regularizer = keras.regularizers.l2(weigh_decay)
        return optimizador.learning_rate.numpy() * 0.1
    else:
        return optimizador.learning_rate.numpy()


funciones = [
    keras.callbacks.ModelCheckpoint(
        filepath=os.path.join(model_saves_path,'ltc_epoch_{epoch}.h5'),
        save_best_only=True,
        monitor='val_acc',
        verbose=1),
    keras.callbacks.LearningRateScheduler(cambio_lr, verbose=1),
    keras.callbacks.CSVLogger(os.path.join(ltc_save_path,'output.csv'))
]

# +
# NOTE(review): `dataset.__videos_train_path__` is accessed from outside the
# class, so Python name mangling applies only if the attribute was defined
# with this exact name — verify against DatasetsLoaderUtils.
train_data = train_data.shuffle(len(dataset.__videos_train_path__), reshuffle_each_iteration=True).batch(batch_size).prefetch(1)
test_data = test_data.shuffle(len(dataset.__videos_test_path__), reshuffle_each_iteration=True).batch(batch_size).prefetch(1)

historial = ltc.fit(x = train_data,
                    epochs=epoch,
                    callbacks=funciones,
                    validation_data=test_data)
# -

# ### Saving the model

# Final save of the model once training stops.
ltc.save(os.path.join(ltc_save_path,"ltc_final_{e}.h5".format(e=epoch)))

# ### Training result plots

fig = plt.figure()
plt.plot(historial.history["loss"],'k--')
plt.plot(historial.history["val_loss"],'b--')
plt.title('Loss over epochs')
plt.legend(labels=["Loss","Val_Loss"])
plt.show()
fig.savefig(os.path.join(ltc_save_path,'train_loss_epochs_{e}.png'.format(e=epoch)))

fig = plt.figure()
plt.plot(historial.history["acc"],'k--')
plt.plot(historial.history["val_acc"],'b--')
plt.title('Accuracy over epochs')
plt.legend(labels=["Accuracy","Val_Accuracy"])
plt.show()
fig.savefig(os.path.join(ltc_save_path,'train_accuracy_epochs_{e}.png'.format(e=epoch)))

# ### Training evaluation

# + active=""
# resultados = ltc.evaluate_generator(generator=dataset.get_test_generator(canales),
#                                     steps=dataset.test_batches,
#                                     max_queue_size=batch_size)
# print("""Los resultados de la evaluacion del modelo fueron:
#     Perdida: {l}
#     Precision: {a}""".format(l=resultados[0],a=resultados[1]))
Running_LTC.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# %matplotlib inline
from pyvista import set_plot_theme
set_plot_theme('document')

# Drape 2D Surface From Line {#create_draped_surf_example}
# ==========================
#
# Drape a surface (2D array) from a line in 3D space.
#
# This is a common task to create a 2.5D image/sectional mesh of data like
# GPR or airborne EM profiles (geophysics applications). This example
# provides a look into how to create a 2.5D sectional mesh from typical
# data in those use cases.
#
# For this example, we have an instrument path on the ground surface (the
# line) and a 2D array of the collected image under that line.
#
# Originally posted in [this support
# issue](https://github.com/pyvista/pyvista-support/issues/135).
#
# Suppose you have some GPR data (or anything that produces a line of data
# with values at depth). With these data, you'll have a 2D image/array of
# your data values and 3D coordinates of where that line/profile is in 3D
# space (often where you collected the data on the surface of topography).
# Attached below are some example data for this: 1) XYZ coordinates of a
# GPR path and 2) a 2D array of data values produced from the GPR.
#
# The data here are wacky (it's difficult to get shareable data of decent
# quality), so ignore them but pay attention to the structure. The
# coordinates we have are technically shifted up and we have some NaN
# filler above the surface - it's weird, just ignore it. You'll
# typically have a more uniform looking profile in 2D with the coordinates
# associated to the top of each column in your 2D array.
#

# +
import matplotlib.pyplot as plt
import numpy as np

# sphinx_gallery_thumbnail_number = 3
import pyvista as pv
from pyvista import examples

# Extract the data archive and load these files
# 2D array of XYZ coordinates
path = examples.download_gpr_path().points
# 2D array of the data values from the imaging equipment
data = examples.download_gpr_data_array()
# -

# Preview the raw GPR image (rows = depth samples, columns = traces).
plt.figure(figsize=(15,3))
plt.pcolormesh(data, cmap="seismic", clim=[-1,1])
plt.gca().invert_yaxis()

# View the path of the GPR profile from a top-down perspective. Since
# we have the full coordinates (XY and Z), we can create a structured mesh
# "draping" down from those coordinates to hold the GPR image data.
#

plt.scatter(path[:,1], path[:,0])
plt.axis("image")
plt.xlabel("Northing")
plt.ylabel("Easting")

# +
assert len(path) in data.shape, "Make sure coordinates are present for every trace."
# If not, you'll need to interpolate the path!

# Grab the number of samples (in Z dir) and number of traces/soundings
nsamples, ntraces = data.shape # Might be opposite for your data, pay attention here

# Define the Z spacing of your 2D section
z_spacing = 0.12

# Create structured points draping down from the path
points = np.repeat(path, nsamples, axis=0)
# repeat the Z locations across: each trace's column of samples descends
# from the surface elevation in z_spacing steps.
tp = np.arange(0, z_spacing*nsamples, z_spacing)
tp = path[:,2][:,None] - tp
points[:,-1] = tp.ravel()
# -

# Make a StructuredGrid from the structured points
#

# +
grid = pv.StructuredGrid()
grid.points = points
grid.dimensions = nsamples, ntraces, 1

# Add the data array - note the ordering!
# Fortran (column-major) order matches how the points were stacked above.
grid["values"] = data.ravel(order="F")
# -

# And now we can plot it! Or process it, or do anything, because it is a
# PyVista mesh and the possibilities are endless with PyVista.
#

# +
cpos = [(1217002.366883762, 345363.80666238244, 3816.828857791056),
        (1216322.4753436751, 344033.0310674846, 3331.052985309526),
        (-0.17716571330686096, -0.25634368781817973, 0.9502106207279767)]

p = pv.Plotter()
p.add_mesh(grid, cmap="seismic", clim=[-1,1])
# Overlay the original surface path for reference.
p.add_mesh(pv.PolyData(path), color='orange')
p.show(cpos=cpos)
create-surface-draped.ipynb