code
stringlengths
38
801k
repo_path
stringlengths
6
263
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Consolidated imports: the original cell imported several modules twice
# (keras.models, keras backend, datahandler, kfold_data_loader, params,
# skimage.io, os).  Nothing was removed, only de-duplicated and grouped.
import argparse
import math
import os
import random
import sys
import warnings
from math import ceil, floor

import cv2
import numpy as np
import skimage.io as io
import tensorflow as tf
from keras import backend as K
from keras.layers import (
    Input, MaxPooling2D, Dropout, Conv2D, Conv2DTranspose, add, Lambda,
    TimeDistributed, Bidirectional, ConvLSTM2D,
)
from keras.losses import binary_crossentropy
from keras.models import *
from keras.optimizers import RMSprop, Adam, SGD
from keras.preprocessing.image import ImageDataGenerator
from matplotlib import pyplot as plt
from medpy.io import save
from scipy.ndimage import _ni_support
from scipy.ndimage.morphology import (
    distance_transform_edt, binary_erosion, generate_binary_structure,
)
from sklearn.metrics import f1_score, jaccard_similarity_score
from tqdm import tqdm

from callbacks import getCallbacks
from datahandler import DataHandler
from generator import *
from kfold_data_loader import *
from losses import *
from models import *
from models.unet import *
from models.unet_se import *
from params import *

warnings.filterwarnings("ignore")

# Graph that owns the BiConvLSTM refinement model (TF1-style graph handling).
lstm_graph = tf.get_default_graph()


# +
def lstmGenerator(images, batch_size, pre_model, pre_graph):
    """Yield (1, 3, H, W, C) stacks of binarized U-Net predictions.

    For slice j the stack is [pred(j-1), pred(j), pred(j+1)], zero-padded at
    the ends of the volume; predictions are thresholded at 0.5.

    Side effect: ``images[j]`` is overwritten with the raw U-Net prediction
    for slice j, so the "previous slice" seen on the next step is the already
    refined mask rather than the original image.  This matches the original
    implementation and ``predictMask`` relies on it.

    ``batch_size`` is accepted for interface compatibility but the generator
    always yields batches of size 1 (the original behaved the same way: the
    value was copied into an unused local and ignored).
    """
    i = 0
    n = images.shape[0]
    while True:
        with pre_graph.as_default():
            # BUG FIX: Keras' predict_generator may prefetch one batch beyond
            # ``steps``; wrapping the index prevents an IndexError at the end
            # of the volume.  Within the ``steps`` batches actually consumed
            # the behaviour is unchanged.
            j = i % n

            if j == 0:
                # No previous slice: substitute an all-zero mask.
                prev_pred = np.expand_dims(np.zeros(images[j].shape), axis=0)
            else:
                prev_pred = pre_model.predict(np.expand_dims(images[j - 1], axis=0))

            cur_pred = pre_model.predict(np.expand_dims(images[j], axis=0))

            if j == n - 1:
                # No next slice: substitute an all-zero mask.
                next_pred = np.expand_dims(np.zeros(images[j].shape), axis=0)
            else:
                next_pred = pre_model.predict(np.expand_dims(images[j + 1], axis=0))

            stack = np.concatenate((prev_pred, cur_pred, next_pred), axis=0)

            # Feed the refined current slice forward (see docstring).
            images[j] = cur_pred[0]

            # Binarize the U-Net probabilities.
            stack[stack >= 0.5] = 1
            stack[stack < 0.5] = 0

            i += 1
            yield np.array([stack])


def lstmModel():
    """Build and compile the bidirectional ConvLSTM refinement network.

    Input: (3, 256, 256, 1) stack of neighbouring U-Net predictions.
    Output: sigmoid mask averaged 50/50 with the centre U-Net prediction.
    """
    with lstm_graph.as_default():
        inputs = Input((3, 256, 256, 1))
        # Residual path: keep half of the centre (current-slice) prediction.
        original = Lambda(lambda x: x[:, 1, :, :, :] * 0.5)(inputs)
        pool = TimeDistributed(MaxPooling2D(pool_size=2))(inputs)
        bclstm = Bidirectional(ConvLSTM2D(64, 3, return_sequences=True,
                                          padding='same', activation='relu'))(pool)
        bclstm = Bidirectional(ConvLSTM2D(64, 3, padding='same',
                                          activation='relu'))(bclstm)
        up = Conv2DTranspose(64, 3, strides=2, padding='same',
                             activation='relu')(bclstm)
        drop = Dropout(0.5)(up)
        outputs = Conv2D(1, (1, 1), activation='sigmoid')(drop)
        # Average the LSTM refinement with the original U-Net prediction.
        outputs = Lambda(lambda x: x * 0.5)(outputs)
        outputs = add([outputs, original])
        model = Model(inputs=inputs, outputs=outputs)
        model.compile(optimizer=Adam(lr=1e-4), loss=binary_crossentropy,
                      metrics=[dice_coef])
        return model
# -


def getDiceScore(ground_truth, prediction):
    """Dice coefficient, computed as F1 on the flattened boolean masks."""
    # ``bool`` instead of the deprecated ``np.bool`` alias.
    ground_truth = np.asarray(ground_truth, dtype=bool).flatten()
    prediction = np.asarray(prediction, dtype=bool).flatten()
    return f1_score(ground_truth, prediction)


# +
def hd(result, reference, voxelspacing=None, connectivity=1):
    """Symmetric Hausdorff distance between two binary masks."""
    hd1 = __surface_distances(result, reference, voxelspacing, connectivity).max()
    hd2 = __surface_distances(reference, result, voxelspacing, connectivity).max()
    # Avoid shadowing this function's own name with a local variable.
    return max(hd1, hd2)


def hd95(result, reference, voxelspacing=None, connectivity=1):
    """95th-percentile (robust) symmetric Hausdorff distance."""
    d1 = __surface_distances(result, reference, voxelspacing, connectivity)
    d2 = __surface_distances(reference, result, voxelspacing, connectivity)
    return np.percentile(np.hstack((d1, d2)), 95)


def __surface_distances(result, reference, voxelspacing=None, connectivity=1):
    """Distances from each border voxel of ``result`` to ``reference``'s border.

    Adapted from medpy; raises RuntimeError if either mask is empty.
    """
    result = np.atleast_1d(result.astype(bool))
    reference = np.atleast_1d(reference.astype(bool))
    if voxelspacing is not None:
        voxelspacing = _ni_support._normalize_sequence(voxelspacing, result.ndim)
        voxelspacing = np.asarray(voxelspacing, dtype=np.float64)
        if not voxelspacing.flags.contiguous:
            voxelspacing = voxelspacing.copy()
    footprint = generate_binary_structure(result.ndim, connectivity)
    if 0 == np.count_nonzero(result):
        raise RuntimeError('The first supplied array does not contain any binary object.')
    if 0 == np.count_nonzero(reference):
        raise RuntimeError('The second supplied array does not contain any binary object.')
    # Object border = mask XOR its erosion.
    result_border = result ^ binary_erosion(result, structure=footprint, iterations=1)
    reference_border = reference ^ binary_erosion(reference, structure=footprint, iterations=1)
    dt = distance_transform_edt(~reference_border, sampling=voxelspacing)
    return dt[result_border]
# -

# +
# Build the 10-fold train/validation index split.
image_files, mask_files = load_data_files('data/kfold_data/')
skf = getKFolds(image_files, mask_files, n=10)

kfold_indices = []
for train_index, val_index in skf.split(image_files, mask_files):
    kfold_indices.append({'train': train_index, 'val': val_index})
# -


def predictMask(model, cur_graph, pre_model, pre_graph, image):
    """Run the LSTM refinement model over one volume, slice by slice."""
    image_gen = lstmGenerator(image, 1, pre_model, pre_graph)
    return model.predict_generator(image_gen, steps=len(image))


def predictAll(model, cur_graph, data, num_data=0, fold=None):
    """Evaluate ``model`` on ``data`` (iterable of (image_file, mask_file)).

    ``fold`` selects which pre-trained U-Net weights to load.  It defaults to
    the module-level loop variable ``i`` for backward compatibility: the
    original implementation read that global implicitly.

    Returns (dice_scores, hd_scores, hd95_scores).  Volumes whose width is
    not 256, or whose dice score is exactly 0, are skipped (as before).
    """
    if fold is None:
        fold = i  # original behaviour: rely on the enclosing loop's index

    dice_scores = []
    hd_scores = []
    hd95_scores = []

    pre_graph = tf.get_default_graph()
    with pre_graph.as_default():
        pre_model = getUnet()
        print('loading pre weights %d' % fold)
        pre_model.load_weights(
            'logs/unet/kfold_unet/kfold_unet_dice_DA_K%d/kfold_unet_dice_DA_K%d_weights.h5'
            % (fold, fold))

    for image_file, mask_file in tqdm(data, total=num_data):
        image, hdr = dh.getImageData(image_file)
        gt_mask, _ = dh.getImageData(mask_file, is_mask=True)
        assert image.shape == gt_mask.shape
        if image.shape[1] != 256:
            continue

        pred_mask = predictMask(model, cur_graph, pre_model, pre_graph, image)
        pred_mask[pred_mask >= 0.5] = 1
        pred_mask[pred_mask < 0.5] = 0

        dice_score = getDiceScore(gt_mask, pred_mask)
        if dice_score == 0:
            continue
        dice_scores.append(dice_score)
        hd_scores.append(hd(gt_mask, pred_mask))
        hd95_scores.append(hd95(gt_mask, pred_mask))

    return dice_scores, hd_scores, hd95_scores


# +
# Get data and generators, then evaluate every fold.
unet_type = 'unet'
dh = DataHandler()

all_dice = []
all_hd = []
all_hd95 = []

for i in range(10):
    exp_name = 'kfold_%s_BiCLSTM_K%d' % (unet_type, i)

    # get parameters
    params = getParams(exp_name, unet_type=unet_type, is_lstm=True)

    val_img_files = np.take(image_files, kfold_indices[i]['val'])
    val_mask_files = np.take(mask_files, kfold_indices[i]['val'])

    with lstm_graph.as_default():
        model = lstmModel()
        print('loading weights from %s' % params['checkpoint']['name'])
        model.load_weights(params['checkpoint']['name'])

    data = zip(val_img_files, val_mask_files)
    dice_score, hd_score, hd95_score = predictAll(
        model, lstm_graph, data, num_data=len(val_mask_files), fold=i)

    print('Finished K%d' % i)
    all_dice += dice_score
    all_hd += hd_score
    all_hd95 += hd95_score

for title, scores in (('dice', all_dice), ('hd', all_hd), ('hd95', all_hd95)):
    print(title)
    for score in scores:
        print(score)
    print()

print('Final results for %s' % unet_type)
print('dice %f' % np.mean(all_dice))
print('hd %f' % np.mean(all_hd))
print('hd95 %f' % np.mean(all_hd95))
# -
LSTM EVAL.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Concatenate the per-ship CSV files of the competition data into two HDF5
# stores: input/train.h5 and input/test.h5.

import os

import numpy as np
import pandas as pd
from tqdm import tqdm

# +
train_path = 'hy_round1_train_20200102'
test_path = 'hy_round1_testA_20200102'
train_files = os.listdir(train_path)
test_files = os.listdir(test_path)
print(len(train_files), len(test_files))
# -

# BUG FIX: ``ret`` was appended to before ever being initialized -- the
# ``ret = []`` line only existed before the *test* loop, so the train loop
# raised NameError (or silently mixed data if ``ret`` leaked from elsewhere).
ret = []
for file in tqdm(train_files):
    df = pd.read_csv(f'{train_path}/{file}')
    ret.append(df)
df = pd.concat(ret)
df.columns = ['ship', 'x', 'y', 'v', 'd', 'time', 'type']
df.to_hdf('input/train.h5', 'df', mode='w')

ret = []
for file in tqdm(test_files):
    df = pd.read_csv(f'{test_path}/{file}')
    ret.append(df)
df = pd.concat(ret)
df.columns = ['ship', 'x', 'y', 'v', 'd', 'time']
df.to_hdf('input/test.h5', 'df', mode='w')
Data_Prep.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="QOgkcaFdNLt5" colab_type="text"
# ## Exercise 2: Reading Data in PySpark and Carrying Out SQL Operations

# + id="NrI-vZMS65Ho" colab_type="code" colab={}
import os
import pandas as pd
import numpy as np

# + id="rwxUwACe663N" colab_type="code" colab={}
from pyspark.sql import SparkSession

# Create (or reuse) a local Spark session for this exercise.
spark = SparkSession.builder.appName('ml-bank').getOrCreate()

# + id="coACgMe37Rst" colab_type="code" colab={}
# NOTE(review): Spark's csv reader does not fetch http(s) URLs; this likely
# needs the file downloaded locally first -- confirm before running.
# Left as-is to preserve the notebook's original behaviour.
spark_df = spark.read.csv(
    'https://raw.githubusercontent.com/TrainingByPackt/Big-Data-Analysis-with-Python/master/Lesson08/bank.csv',
    sep=';', header=True, inferSchema=True)

# + id="I5cIY-pI7T63" colab_type="code"
# Peek at the first rows.
spark_df.head(5)

# + id="kuQH444f7ZTd" colab_type="code"
spark_df.printSchema()

# + id="Fk4EimfO7jiV" colab_type="code"
spark_df.count()

# + id="SbtFW9k37ldy" colab_type="code"
len(spark_df.columns), spark_df.columns

# + id="JYZx8t2u7nnN" colab_type="code"
spark_df.describe().show()

# + id="sYMbF6Qn7pbd" colab_type="code"
spark_df.select('balance', 'y').show(5)

# + id="Lh6qa7PN7tnG" colab_type="code"
spark_df.crosstab('y', 'marital').show()

# + id="4ucDHuYu7v2u" colab_type="code"
# Draw two 20% samples and one 80% "train" sample (without replacement,
# fixed seeds), then derive a halved-balance column from the train sample.
sample1 = spark_df.sample(False, 0.2, 42)
sample2 = spark_df.sample(False, 0.2, 43)
train = spark_df.sample(False, 0.8, 44)
train.withColumn('balance_new', train.balance / 2.0).select('balance', 'balance_new').show(5)

# + id="8JzZ0UwG846_" colab_type="code"
# Returns a new DataFrame without the column; ``train`` itself is unchanged.
train.drop('balance_new')
Lesson08/Exercise02.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <table> <tr> # <td style="background-color:#ffffff;"> # <a href="http://qworld.lu.lv" target="_blank"><img src="..\images\qworld.jpg" width="25%" align="left"> </a></td> # <td style="background-color:#ffffff;vertical-align:bottom;text-align:right;"> # prepared by <a href="http://abu.lu.lv" target="_blank"><NAME></a> (<a href="http://qworld.lu.lv/index.php/qlatvia/" target="_blank">QLatvia</a>) # </td> # </tr></table> # <table width="100%"><tr><td style="color:#bbbbbb;background-color:#ffffff;font-size:11px;font-style:italic;text-align:right;">This cell contains some macros. If there is a problem with displaying mathematical formulas, please run this cell to load these macros. </td></tr></table> # $ \newcommand{\bra}[1]{\langle #1|} $ # $ \newcommand{\ket}[1]{|#1\rangle} $ # $ \newcommand{\braket}[2]{\langle #1|#2\rangle} $ # $ \newcommand{\dot}[2]{ #1 \cdot #2} $ # $ \newcommand{\biginner}[2]{\left\langle #1,#2\right\rangle} $ # $ \newcommand{\mymatrix}[2]{\left( \begin{array}{#1} #2\end{array} \right)} $ # $ \newcommand{\myvector}[1]{\mymatrix{c}{#1}} $ # $ \newcommand{\myrvector}[1]{\mymatrix{r}{#1}} $ # $ \newcommand{\mypar}[1]{\left( #1 \right)} $ # $ \newcommand{\mybigpar}[1]{ \Big( #1 \Big)} $ # $ \newcommand{\sqrttwo}{\frac{1}{\sqrt{2}}} $ # $ \newcommand{\dsqrttwo}{\dfrac{1}{\sqrt{2}}} $ # $ \newcommand{\onehalf}{\frac{1}{2}} $ # $ \newcommand{\donehalf}{\dfrac{1}{2}} $ # $ \newcommand{\hadamard}{ \mymatrix{rr}{ \sqrttwo & \sqrttwo \\ \sqrttwo & -\sqrttwo }} $ # $ \newcommand{\vzero}{\myvector{1\\0}} $ # $ \newcommand{\vone}{\myvector{0\\1}} $ # $ \newcommand{\stateplus}{\myvector{ \sqrttwo \\ \sqrttwo } } $ # $ \newcommand{\stateminus}{ \myrvector{ \sqrttwo \\ -\sqrttwo } } $ # $ \newcommand{\myarray}[2]{ \begin{array}{#1}#2\end{array}} 
$ # $ \newcommand{\X}{ \mymatrix{cc}{0 & 1 \\ 1 & 0} } $ # $ \newcommand{\I}{ \mymatrix{rr}{1 & 0 \\ 0 & 1} } $ # $ \newcommand{\Z}{ \mymatrix{rr}{1 & 0 \\ 0 & -1} } $ # $ \newcommand{\Htwo}{ \mymatrix{rrrr}{ \frac{1}{2} & \frac{1}{2} & \frac{1}{2} & \frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & \frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} & \frac{1}{2} } } $ # $ \newcommand{\CNOT}{ \mymatrix{cccc}{1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \\ 0 & 0 & 1 & 0} } $ # $ \newcommand{\norm}[1]{ \left\lVert #1 \right\rVert } $ # $ \newcommand{\pstate}[1]{ \lceil \mspace{-1mu} #1 \mspace{-1.5mu} \rfloor } $ # <h2> Two Probabilistic Bits</h2> # # Suppose that we have two probabilistic bits, and our probabilistic states respectively are # # $$ \myvector{0.2 \\ 0.8} \mbox{ and } \myvector{0.6 \\ 0.4 }. $$ # # If we combine both bits as a single system, then what is the state of the combined system? # In total, we have four different states. We can name them as follows: # <ul> # <li>00: both bits are in states 0</li> # <li>01: the first bit is in state 0 and the second bit is in state 1</li> # <li>10: the first bit is in state 1 and the second bit is in state 0</li> # <li>11: both bits are in states 1</li> # </ul> # <h3> Task 1 </h3> # # <b>Discussion and analysis:</b> # # What are the probabilities of being in states $ 00 $, $ 01 $, $ 10 $, and $11$? # # How can we represent these probabilities as a column vector? # <h3> Representation for states 0 and 1</h3> # # The vector representation of state 0 is $ \myvector{1 \\ 0} $. Similarly, the vector representation of state 1 is $ \myvector{0 \\ 1} $. # # We use $ \pstate{0} $ to represent $ \myvector{1 \\ 0} $ and $ \pstate{1} $ to represent $ \myvector{0 \\ 1} $. # # Then, the probabilistic state $ \myvector{0.2 \\ 0.8} $ is also represented as $ 0.2 \pstate{0} + 0.8 \pstate{1} $. 
# # Similarly, the probabilistic state $ \myvector{0.6 \\ 0.4} $ is also represented as $ 0.6 \pstate{0} + 0.4 \pstate{1} $. # <h3> Composite system </h3> # # When two systems are composed, then their states are tensored to calculate the state of composite system. # # The probabilistic state of the first bit is $ \myvector{0.2 \\ 0.8} = 0.2 \pstate{0} + 0.8 \pstate{1} $. # # The probabilistic state of the second bit is $ \myvector{0.6 \\ 0.4} = 0.6 \pstate{0} + 0.4 \pstate{1} $. # # Then, the probabilistic state of the composite system is $ \big( 0.2 \pstate{0} + 0.8 \pstate{1} \big) \otimes \big( 0.6 \pstate{0} + 0.4 \pstate{1} \big) $. # # <h3> Task 2 </h3> # # Find the probabilistic state of the composite system. # # <i> # Rule 1: Tensor product distributes over addition in the same way as the distribution of multiplication over addition. # # Rule 2: $ \big( 0.3 \pstate{1} \big) \otimes \big( 0.7 \pstate{0} \big) = (0.3 \cdot 0.7) \big( \pstate{1} \otimes \pstate{0} \big) = 0.21 \pstate{10} $. # </i> # $$ \big( 0.2 \pstate{0} + 0.8 \pstate{1} \big) \otimes \big( 0.6 \pstate{0} + 0.4 \pstate{1} \big) = $$ # # $$ 0.12 \big( \pstate{0} \otimes \pstate{0} \big) + 0.08 \big( \pstate{0} \otimes \pstate{1} \big) + 0.48 \big( \pstate{1} \otimes \pstate{0} \big) + 0.32 \big( \pstate{1} \otimes \pstate{1} \big) = $$ # # $$ 0.12 \pstate{00} + 0.08 \pstate{01} + 0.48 \pstate{10} + 0.32 \pstate{11}. $$ # <a href="B17_Two_Probabilistic_Bits_Solutions.ipynb#task2">click for our solution</a> # The probabilistic state of the composite system is $ \big( 0.2 \pstate{0} + 0.8 \pstate{1} \big) \otimes \big( 0.6 \pstate{0} + 0.4 \pstate{1} \big) $. 
# # $$ \big( 0.2 \pstate{0} + 0.8 \pstate{1} \big) \otimes \big( 0.6 \pstate{0} + 0.4 \pstate{1} \big) = $$ # # $$ 0.12 \big( \pstate{0} \otimes \pstate{0} \big) + 0.08 \big( \pstate{0} \otimes \pstate{1} \big) + 0.48 \big( \pstate{1} \otimes \pstate{0} \big) + 0.32 \big( \pstate{1} \otimes \pstate{1} \big) = $$ # # $$ 0.12 \pstate{00} + 0.08 \pstate{01} + 0.48 \pstate{10} + 0.32 \pstate{11}. $$ # <h3> Task 3</h3> # # Find the probabilistic state of the composite system by calculating this tensor product $ \myvector{0.2 \\ 0.8} \otimes \myvector{0.6 \\ 0.4 } $. # $ \myvector{ 0.2 \myvector{0.6 \\ 0.4} \\ 0.8 \myvector{0.6 \\ 0.4} } = \myvector{0.12 \\ 0.08 \\ 0.48 \\ 0.32} $. # <a href="B17_Two_Probabilistic_Bits_Solutions.ipynb#task3">click for our solution</a> # <h3> Task 4</h3> # # Find the vector representations of $ \pstate{00} $, $ \pstate{01} $, $\pstate{10}$, and $ \pstate{11} $. # # <i>The vector representation of $ \pstate{ab} $ is $ \pstate{a} \otimes \pstate{b} $ for $ a,b \in \{0,1\} $.</i> # $ \pstate{00} = \pstate{0} \otimes \pstate{0} = \myvector{1 \\ 0} \otimes \myvector{1 \\ 0} = \myvector{1 \myvector{1 \\ 0} \\ 0 \myvector{1 \\ 0} } = \myvector{1 \\ 0 \\ 0 \\ 0} $. # # $ \pstate{01} = \pstate{0} \otimes \pstate{1} = \myvector{1 \\ 0} \otimes \myvector{0 \\ 1} = \myvector{1 \myvector{0 \\ 1} \\ 0 \myvector{0 \\ 1} } = \myvector{0 \\ 1 \\ 0 \\ 0} $. # # $ \pstate{10} = \pstate{1} \otimes \pstate{0} = \myvector{0 \\ 1} \otimes \myvector{1 \\ 0} = \myvector{0 \myvector{1 \\ 0} \\ 1 \myvector{1 \\ 0} } = \myvector{0 \\ 0 \\ 1 \\ 0} $. # # $ \pstate{11} = \pstate{1} \otimes \pstate{1} = \myvector{0 \\ 1} \otimes \myvector{0 \\ 1} = \myvector{0 \myvector{0 \\ 1} \\ 1 \myvector{0 \\ 1} } = \myvector{0 \\ 0 \\ 0 \\ 1} $. # <a href="B17_Two_Probabilistic_Bits_Solutions.ipynb#task4">click for our solution</a> # <h3> Task 5 [extra] </h3> # # Suppose that we have three bits. 
# # Find the vector representations of $ \pstate{abc} $ for each $ a,b,c \in \{0,1\} $. # # <h3> Task 6 [extra] </h3> # # <i>This is a challenging task.</i> # # Suppose that we have four bits. # # Number 9 is represented as $ 1001 $ in binary. Verify that the vector representation of $ \pstate{1001} $ is the zero vector except its $10$th entry, which is 1. # # Number 7 is represented as $ 0111 $ in binary. Verify that the vector representation of $ \pstate{0111} $ is the zero vector except its $8$th entry, which is 1. # # Generalize this idea for any number between 0 and 15. # # Generalize this idea for any number of bits.
bronze/B17_Two_Probabilistic_Bits.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Working with Texts in Python # Adapted from a lesson by <NAME> # # With only the tools we learned in the last tutorial we can do a good amount of text analysis. No special libraries or functions, just counting. # # ### Lesson Goals # * Get comfortable reading a text into Python and manipulating it # * Apply Wednesday's lesson and do simple counts on a text # * Start building more comfort with the Python programming language # # ### Outline # * On your own, use the tools we have already learned to answer a few questions about two novels # * In small groups, compare your solutions and discuss any differences # * Discuss in the larger group # # ## Exploratory Natural Language Processing Tasks # # Now that we have some of Python's basics in our toolkit, we can immediately perform the kinds of tasks that are the bread and butter of text analysis: counting. When we first meet a text in the wild, we often wish to find out a little about it before digging in deeply, so we start with simple questions like "How many words are in this text?" or "What is the average word length?" # Run the cell below to read in the text of "Pride and Prejudice" and assign it to the variable "austen_string", and read in the text of <NAME>'s "A Garland for Girls," a children's book, and assign it to the variable "alcott_string". With these variables, print the answer to the following questions: # # 1. How many words are in each novel? # 2. How many words in each novel appear in title case? # 3. What is the approximate average word length in each novel? (don't worry about punctuation for now) # 4. How many words longer than 7 characters are in each novel? (don't worry about punctuation for now) # 5. What proportion of the total words are the long words in each novel? 
# +
# Read in the texts.  Context managers close the file handles promptly
# instead of leaking them until garbage collection (the original used bare
# ``open(...).read()``).
with open('../Data/Austen_PrideAndPrejudice.txt', encoding='utf-8') as f:
    austen_string = f.read()
with open('../Data/Alcott_GarlandForGirls.txt', encoding='utf-8') as f:
    alcott_string = f.read()

# Print the first 100 characters of each text to make sure everything is in order.
print(austen_string[:100])
print(alcott_string[:100])

# +
# Start writing code here
## Click the "+" button above to add new cells.
01-IntroToPython/01-WorkingWithTexts.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] deletable=false editable=false nbgrader={"checksum": "91bab923ff51b8c4cc2db62df96dceba", "grade": false, "grade_id": "cell-728287ea719cc025", "locked": true, "schema_version": 1, "solution": false} # # MoonShot Technologies # + [markdown] deletable=false editable=false nbgrader={"checksum": "e7495fe65afa0db87987185a1cfd0237", "grade": false, "grade_id": "cell-e86bb7c59ff0a4a5", "locked": true, "schema_version": 1, "solution": false} # Congratulations! Due to your strong performance in the first three courses, you landed a job as a reinforcement learning engineer at the hottest new non-revenue generating unicorn, MoonShot Technologies (MST). Times are busy at MST, which is preparing for its initial public offering (IPO) at the end of the fiscal year, and your labor is much needed. # # Like many successful startups, MST is exceedingly concerned with the valuation that it will receive at its IPO (as this valuation determines the price at which its existing venture capitalist shareholders will be able to sell their shares). Accordingly, to whet the appetites of potential investors, MST has set its sights on accomplishing a technological tour de force --- a lunar landing --- before the year is out. But it is not just any mundane lunar landing that MST aspires toward. Rather than the more sensible approach of employing techniques from aerospace engineering to pilot its spacecraft, MST endeavors to wow investors by training an agent to do the job via reinforcement learning. # # However, it is clearly not practical for a reinforcement learning agent to be trained tabula rasa with real rockets --- even the pockets of venture capitalists have their limits. 
Instead, MST aims to build a simulator that is realistic enough to train an agent that can be deployed in the real world. This will be a difficult project, and will require building a realistic simulator, choosing the right reinforcement learning algorithm, implementing this algorithm, and optimizing the hyperparameters for this algorithm. # # Naturally, as the newly hired reinforcement learning engineer, you have been staffed to lead the project. In this notebook, you will take the first steps by building a lunar lander environment. # + [markdown] deletable=false editable=false nbgrader={"checksum": "3db7b2ccab1e6e2ab40e8e232bffe820", "grade": false, "grade_id": "cell-62c5c402edcd8ae5", "locked": true, "schema_version": 1, "solution": false} # ## Creating an Environment # The software engineering team at MST has already set up some infrastructure for your convenience. Specifically they have provided you with the following functions (note - these are dummy functions just for this assignment): # # &nbsp;&nbsp;&nbsp;&nbsp;get_velocity() - returns an array representing the x, y velocity of the lander. Both the x and y velocity are in range [0, 60]. # # &nbsp;&nbsp;&nbsp;&nbsp;get_angle() - returns a scalar representing the angle of the lander. The angle is in range [0, 359]. # # &nbsp;&nbsp;&nbsp;&nbsp;get_position() - returns an array representing the x, y position of the lander. Both the x and y position of the agent is in range [0, 100]. # # &nbsp;&nbsp;&nbsp;&nbsp;get_landing_zone() - returns an array representing the x, y position of the landing zone. Both the x, y coordinates are in range [1, 100]. # # &nbsp;&nbsp;&nbsp;&nbsp;get_fuel() - returns a scalar representing the remaining amount of fuel. Fuel starts at 100, and is in range [0, 100]. # # ![Lunar Landar](lunar_landar.png) # # The main issue that you will need to consider is how to apply these methods to __structure the reward function__. 
The landing site, lander velocity, lander angle, lander position, and fuel level are retrieved for you. Your job is to create the reward function based on the criteria below. # # Some things to keep in mind with the reward function: # # &nbsp;&nbsp;&nbsp;&nbsp;1) The lander will crash if it touches the ground when y_velocity < -3 (the downward velocity is greater than three). # # &nbsp;&nbsp;&nbsp;&nbsp;2) The lander will crash if it touches the ground when x_velocity < -10 or 10 < x_velocity (horizontal speed is greater than 10). # # &nbsp;&nbsp;&nbsp;&nbsp;3) The lander's angle takes values in [0, 359]. It is completely vertical at 0 degrees. The lander will crash if it touches the ground when 5 < angle < 355 (angle differs from vertical by more than 5 degrees). # # &nbsp;&nbsp;&nbsp;&nbsp;4) The lander will crash if it has yet to land and fuel <= 0 (it runs out of fuel). # # &nbsp;&nbsp;&nbsp;&nbsp;5) MST would like to save money on fuel when it is possible (using less fuel is preferred). # # &nbsp;&nbsp;&nbsp;&nbsp;6) The lander must land with its x position within 10 of the landing zone (i.e. pos_x must be within land_x +- 10). # # Fill in the methods below to create an environment for the lunar lander. 
# + deletable=false nbgrader={"checksum": "e3ee2fb8f0c76c238531ec8c61e9f799", "grade": false, "grade_id": "cell-b5475cc072c387ff", "locked": false, "schema_version": 1, "solution": true}
import environment
# BUG FIX: numpy is used below (np.zeros in env_init) but was never imported
# in this cell, which raised NameError on env_init.
import numpy as np
from utils import get_landing_zone, get_angle, get_velocity, get_position, get_fuel, tests

get_landing_zone()


# Lunar Lander Environment
class LunarLanderEnvironment(environment.BaseEnvironment):
    """Environment skeleton for the MST lunar lander (reward logic TBD)."""

    def __init__(self):
        self.current_state = None
        self.count = 0

    def env_init(self, env_info):
        # users set this up
        # NOTE(review): this 6-element array does not match the 8-tuple used
        # for current_state below (vel_x, vel_y, angle, pos_x, pos_y, land_x,
        # land_y, fuel) -- confirm which layout is intended.
        self.state = np.zeros(6)  # velocity x, y, angle, distance to ground, landing zone x, y

    def env_start(self):
        land_x, land_y = get_landing_zone()  # gets the x, y coordinate of the landing zone
        # At the start we initialize the agent to the top left hand corner (100, 20) with 0 velocity
        # in any direction. The agent's angle is set to 0 and the landing zone is retrieved and set.
        # The lander starts with fuel of 100.
        # (vel_x, vel_y, angle, pos_x, pos_y, land_x, land_y, fuel)
        self.current_state = (0, 0, 0, 100, 20, land_x, land_y, 100)
        return self.current_state

    def env_step(self, action):
        land_x, land_y = get_landing_zone()  # gets the x, y coordinate of the landing zone
        vel_x, vel_y = get_velocity(action)  # gets the x, y velocity of the lander
        angle = get_angle(action)            # gets the angle the lander is positioned in
        pos_x, pos_y = get_position(action)  # gets the x, y position of the lander
        fuel = get_fuel(action)              # get the amount of fuel remaining for the lander

        terminal = False
        reward = 0.0
        observation = (vel_x, vel_y, angle, pos_x, pos_y, land_x, land_y, fuel)

        # Use the above observations to decide what the reward will be, and if
        # the agent is in a terminal state.
        # Recall - if the agent crashes or lands, terminal needs to be set to True.
        # YOUR CODE HERE
        raise NotImplementedError()

        self.reward_obs_term = (reward, observation, terminal)
        return self.reward_obs_term

    def env_cleanup(self):
        return None

    def env_message(self):
        return None


# + [markdown] deletable=false editable=false nbgrader={"checksum": "6f960ff843630e06b23aa258113d6e6b", "grade": false, "grade_id": "cell-9c57ff0d2b96ac51", "locked": true, "schema_version": 1, "solution": false}
# ## Evaluating your reward function
#
# Designing the best reward function for an objective is a challenging task - it is not clear what the term “best reward function” even means, let alone how to find it. Consequently, rather than evaluating your reward function by quantitative metrics, we merely ask that you check that its behavior is qualitatively reasonable. For this purpose, we provide a series of test cases below. In each case we show a transition and explain how a reward function that we implemented behaves. As you read, check how your own reward behaves in each scenario and judge for yourself whether it acts appropriately. (For the latter parts of the capstone you will use our implementation of the lunar lander environment, so don’t worry if your reward function isn’t exactly the same as ours. The purpose of this notebook is to gain experience implementing environments and reward functions.)

# + [markdown] deletable=false editable=false nbgrader={"checksum": "0c160361a12587f88a0fb1f76df133d6", "grade": false, "grade_id": "cell-2d30dbf6446d5afc", "locked": true, "schema_version": 1, "solution": false}
# ### Case 1: Uncertain Future
# The lander is in the top left corner of the screen moving at a velocity of (12, 15) with 10 units of fuel &mdash; whether this landing will be successful remains to be seen.
# # ![Lunar Landar](lunar_landar_1.png) # + deletable=false editable=false nbgrader={"checksum": "df58fb55b3df83a3a12e6a16a5016887", "grade": false, "grade_id": "cell-99abd81376335339", "locked": true, "schema_version": 1, "solution": false} tests(LunarLanderEnvironment, 1) # + [markdown] deletable=false editable=false nbgrader={"checksum": "d7bf8948c265542c261a643c1ace1284", "grade": false, "grade_id": "cell-89911113f764c447", "locked": true, "schema_version": 1, "solution": false} # In this case we gave the agent no reward, as it neither achieved the objective nor crashed. One alternative is giving the agent a positive reward for moving closer to the goal. Another is to give a negative reward for fuel consumption. What did your reward function do? # # Also check to make sure that Terminal is set to False. Your agent has not landed, crashed, or ran out of fuel. The episode is note over. # + [markdown] deletable=false editable=false nbgrader={"checksum": "8b1017533dfb5dd8318e7a2dd65af5a8", "grade": false, "grade_id": "cell-b19c487e9da05800", "locked": true, "schema_version": 1, "solution": false} # ### Case 2: Imminent Crash! # # The lander is positioned in the target landing zone at a 45 degree angle, but its landing gear can only handle an angular offset of five degrees &mdash it is about to crash! # # ![Lunar Landar](lunar_landar_2.png) # + deletable=false editable=false nbgrader={"checksum": "d16b6f3b9c7415830ef1b096fd5a36f7", "grade": false, "grade_id": "cell-9b3900153803f78e", "locked": true, "schema_version": 1, "solution": false} tests(LunarLanderEnvironment, 2) # + [markdown] deletable=false editable=false nbgrader={"checksum": "7c09b1d677c36d058a5c510a484f91ae", "grade": false, "grade_id": "cell-4731b02a8f54214b", "locked": true, "schema_version": 1, "solution": false} # We gave the agent a reward of -10000 to punish it for crashing. How did your reward function handle the crash? # # Also check to make sure that Terminal is set to True. 
Your agent has crashed and the episode is over. # + [markdown] deletable=false editable=false nbgrader={"checksum": "a06bb6be22b15cb43cad3378583b188c", "grade": false, "grade_id": "cell-af000f1895c6bd69", "locked": true, "schema_version": 1, "solution": false} # ### Case 3: Nice Landing! # The lander is vertically oriented and positioned in the target landing zone with five units of remaining fuel. The landing is being completed successfully! # # ![Lunar Landar](lunar_landar_3.png) # + deletable=false editable=false nbgrader={"checksum": "dd0b04ee97dbc1ba8267d05a81c6f393", "grade": false, "grade_id": "cell-6a53769313d85b0b", "locked": true, "schema_version": 1, "solution": false} tests(LunarLanderEnvironment, 3) # + [markdown] deletable=false editable=false nbgrader={"checksum": "412c87970ad8b201357e84a701ae798a", "grade": false, "grade_id": "cell-e23d284c1e7c6865", "locked": true, "schema_version": 1, "solution": false} # To encourage the agent to conserve as much fuel as possible, we reward successful landings proportionally to the amount of fuel remaining. Here, we gave the agent a reward of five since it landed with five units of fuel remaining. How did you incentivize the agent to be fuel efficient? # # Also check to make sure that Terminal is set to True. Your agent has landed and the episode is over. # + [markdown] deletable=false editable=false nbgrader={"checksum": "ea330ee9d59b80dcf40f89d4cb5d0815", "grade": false, "grade_id": "cell-21cc28d788b4455d", "locked": true, "schema_version": 1, "solution": false} # ### Case 4: Dark Times Ahead! # The lander is directly above the target landing zone but has no fuel left. The future does not look good for the agent &mdash without fuel there is no way for it to avoid crashing! 
# # ![Lunar Landar](lunar_landar_4.png) # + deletable=false editable=false nbgrader={"checksum": "dff5eca159850a13f9e20aedb800fa8c", "grade": false, "grade_id": "cell-86ece2998b73491a", "locked": true, "schema_version": 1, "solution": false} tests(LunarLanderEnvironment, 4) # + [markdown] deletable=false editable=false nbgrader={"checksum": "04c0c00fd7f2a6afa421b9db558786a2", "grade": false, "grade_id": "cell-e1cc048da1ecc920", "locked": true, "schema_version": 1, "solution": false} # We gave the agent a reward of -10000 to punish it for crashing. Did your reward function treat all crashes equally, as ours did? Or did you penalize some crashes more than others? What reasoning did you use to make this decision? # # Also check to make sure that Terminal is set to True. Your agent has crashed and the episode is over. # - # ### Case 5: Wrong Location # The lander has landed and still has fuel in the tank, but not inside the target zone. # # ![Lunar Landar](lunar_landar_5.png) tests(LunarLanderEnvironment, 4) # We gave the agent a reward of -10000 to punish it for not landing in the correct area. Did your reward function treat all landing areas equally? What reasoning did you use to make this decision? # # Also check to make sure that Terminal is set to True. Your agent has landed and the episode is over. # + [markdown] deletable=false editable=false nbgrader={"checksum": "9d8b54646c8fda459e3651ab4771d437", "grade": false, "grade_id": "cell-228ffe4d4fd5d55f", "locked": true, "schema_version": 1, "solution": false} # ## Wrapping Up # Excellent! The lunar lander simulator is complete and the project can commence. In the next module, you will build upon your work here by implementing an agent to train in the environment. Don’t dally! The team at MST is eagerly awaiting your solution. # # NOTE - The lunar lander used in this notebook is not the same as what you will use for the following lessons. We simplified it somewhat to make it clear. 
You will be given the lunar lander environment for the following notebooks. # + deletable=false editable=false nbgrader={"checksum": "b9ebde9183c3ea1fc8952e716b472310", "grade": false, "grade_id": "cell-4d144f57ec40d805", "locked": true, "schema_version": 1, "solution": false}
A Complete Reinforcement Learning System (Capstone)/Week 2/Notebook_ MoonShot Technologies/Assignment1-v1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import glob import cv2 import numpy as np import torch import torch.utils.data import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import torch.autograd as autograd from torch.autograd import Variable import matplotlib.pyplot as plt import random from tqdm import tqdm_notebook as tqdm import os.path import csv import joblib import PIL import pandas as pd import math from sklearn.model_selection import train_test_split from functools import reduce from Augmentor.Pipeline import Pipeline from Augmentor.ImageUtilities import AugmentorImage # #### Some useful utilities # - cache is a decorator that will store the output of expensive functions on disk # - w is a wrapper function that sends (or does not send) Pytorch objects to gpu depending on whether a USE_CUDA variable is set or not # #### Tip 1 # - You can execute bash statements in ipython by prepending your syntax with the ! keyword # - The below cell creates a directory called **_cache_focal** in the cwd to store output of expensive functions on disk as mentioned above using the **cache** utility # !mkdir -p _cache_focal # + cache = joblib.Memory(cachedir='_cache_focal', verbose=0) USE_CUDA = True def w(v): if USE_CUDA: return v.cuda() return v # - # # Utils # ### Evaluation # - These are the various functions that Adrien has been using, they allow us to compute **iou** => [Intersection Over Union](https://www.pyimagesearch.com/2016/11/07/intersection-over-union-iou-for-object-detection/) and also generate the csv file # - Note:These are just copied form my main notebook for this competition. 
# They aren't necessarily used in this notebook

# +
def uncombine(mask):
    """Split a combined integer-labelled mask into a list of boolean masks."""
    # BUG FIX: the original assigned `mask_val` but iterated over an
    # undefined `max_val`, raising NameError on the first call.
    max_val = np.max(mask) + 1
    results = []
    for i in range(1, max_val):
        results.append(mask == i)
    return results


def iou(mask1, mask2):
    """Intersection-over-union of two boolean masks."""
    return np.sum(mask1 & mask2) / np.sum(mask1 | mask2)


def evaluate_split(labels, y_pred):
    """Mean average precision of labelled prediction vs. ground truth.

    Follows the DSB2018 Kaggle metric: average precision over IoU
    thresholds 0.5 .. 0.95 in steps of 0.05, background excluded.
    """
    true_objects = len(np.unique(labels))
    pred_objects = len(np.unique(y_pred))
    # print("Number of true objects:", true_objects)
    # print("Number of predicted objects:", pred_objects)

    # Compute intersection between all objects
    intersection = np.histogram2d(labels.flatten(), y_pred.flatten(),
                                  bins=(true_objects, pred_objects))[0]

    # Compute areas (needed for finding the union between all objects)
    area_true = np.histogram(labels, bins=true_objects)[0]
    area_pred = np.histogram(y_pred, bins=pred_objects)[0]
    area_true = np.expand_dims(area_true, -1)
    area_pred = np.expand_dims(area_pred, 0)

    # Compute Union
    union = area_true + area_pred - intersection

    # Exclude background from the analysis
    intersection = intersection[1:, 1:]
    union = union[1:, 1:]
    union[union == 0] = 1e-9

    # Compute the intersection over union
    iou = intersection / union

    # Precision helper function
    def precision_at(threshold, iou):
        matches = iou > threshold
        true_positives = np.sum(matches, axis=1) == 1   # Correct Objects
        false_positives = np.sum(matches, axis=0) == 0  # Missed Objects
        false_negatives = np.sum(matches, axis=1) == 0  # Extra Objects
        tp, fp, fn = np.sum(true_positives), np.sum(false_positives), np.sum(false_negatives)
        return tp, fp, fn

    # Loop over IoU thresholds
    prec = []
    # print("Thresh\tTP\tFP\tFN\tPrec.")
    for t in np.arange(0.5, 1.0, 0.05):
        tp, fp, fn = precision_at(t, iou)
        if tp + fp + fn == 0:
            p = 1.0
        else:
            p = tp / (tp + fp + fn)
        # print("{:1.3f}\t{}\t{}\t{}\t{:1.3f}".format(t, tp, fp, fn, p))
        prec.append(p)
    # print("AP\t-\t-\t-\t{:1.3f}".format(np.mean(prec)))
    return np.mean(prec)


def evaluate_combined(combined_mask_true, combined_mask_pred):
    """Evaluate two combined (integer-labelled) masks against each other."""
    return evaluate_split(combined_mask_true, combined_mask_pred)


def evaluate_naive_tuple(tup):
    """Tuple-unpacking wrapper so evaluate_naive can be used with Pool.map."""
    return evaluate_naive(*tup)


def classify_naive(image, factor, kernel_sz):
    """Baseline segmentation: threshold, morphological open/close, CC-label."""
    # Choose thresholding direction from the background polarity.
    if np.median(image) < 127:
        thresholded = (image > np.mean(image) + np.std(image) * factor).astype(np.uint8) * 255
    else:
        thresholded = (image < np.mean(image) - np.std(image) * factor).astype(np.uint8) * 255
    kernel = np.ones((kernel_sz, kernel_sz))
    # BUG FIX: cv2 has no function `morphology`; the correct entry point
    # for MORPH_OPEN / MORPH_CLOSE is cv2.morphologyEx.
    thresholded = cv2.morphologyEx(thresholded, cv2.MORPH_OPEN, kernel)
    thresholded = cv2.morphologyEx(thresholded, cv2.MORPH_CLOSE, kernel)
    _, connected = cv2.connectedComponents(thresholded)
    return connected


def evaluate_naive(folder, factor, kernel_sz):
    """Score the naive classifier on one training folder."""
    image = glob.glob(folder + '/images/*')[0]
    # BUG FIX: bare `imread` is undefined in this notebook; read with cv2
    # in grayscale so classify_naive sees a single-channel image.
    image = cv2.imread(image, cv2.IMREAD_GRAYSCALE)
    masks = glob.glob(folder + '/masks/*')
    total_mask = None
    for i, m in enumerate(masks):
        m = (cv2.imread(m, cv2.IMREAD_GRAYSCALE) // 255).astype(np.int32)
        if total_mask is None:
            total_mask = m
        else:
            total_mask += m * (i + 1)
    connected = classify_naive(image, factor, kernel_sz)
    return evaluate_combined(total_mask, connected)


def rle(x):
    """Run-length encode a boolean mask (Kaggle submission format)."""
    dots = np.where(x.T.flatten() == 1)[0]
    # BUG FIX: the accumulator was declared as `run_length` but used as
    # `run_lengths`, raising NameError on any non-empty mask.
    run_lengths = []
    prev = -2
    for b in dots:
        if (b > prev + 1):
            run_lengths.extend((b + 1, 0))
        run_lengths[-1] += 1
        prev = b
    return run_lengths


def rle_combine(combined):
    """RLE-encode every labelled object of a combined mask."""
    all_rle = []
    if np.max(combined) == 0:
        combined[0, 0] = 1  # ensure at least one prediction per image
    max_val = np.max(combined) + 1
    for i in range(1, max_val):
        all_rle.append(rle(combined == i))
    return all_rle


# CONSISTENCY FIX: the submission loop later in this notebook calls this
# function as `rle_combined`; keep both names valid.
rle_combined = rle_combine


# TODO: test rle by encoding and decoding and figuring out if it matches
def rle_encoding(x):
    """
    x: numpy array of shape (height, width), 1 - mask, 0 - background
    Returns run length as list
    """
    dots = np.where(x.T.flatten() == 1)[0]  # .T sets Fortran order down the right
    run_lengths = []
    prev = -2
    for b in dots:
        if (b > prev + 1):
            run_lengths.extend((b + 1, 0))
        run_lengths[-1] += 1
        prev = b
    return run_lengths


def prob_to_rles(lab_img, cut_off=0.5):
    """Yield one RLE per labelled object, guaranteeing at least one object."""
    if lab_img.max() < 1:
        lab_img[0, 0] = 1  # ensure at least one prediction per image
    for i in range(1, lab_img.max() + 1):
        yield rle_encoding(lab_img == i)


def open_res_csv(key=''):
    """Open the first not-yet-existing '_<key>_submit_NNN.csv' for writing."""
    cur = 0
    while True:
        path = '_{}_submit_{:.03d}.csv'.format(key, cur)
        if not os.path.exists(path):
            return open(path, 'w')
        cur += 1


def find_clusters(img):
    """Label connected components of a soft mask thresholded at 0.5."""
    return cv2.connectedComponents((img > 0.5).astype(np.uint8))[1]
# -

# ### Loading
# - **Note** - Adrien is crucially eroding the different masks so that no 2 masks can touch each other
#     - Some masks completely touch each other. Because our approach for finding individual cells once the pixels are labeled will be based on finding connected components we would merge these 2 nuclei into a single one in our submission and thus hurt our iou.
#     - The simplest way to fix this is to teach our network to label pixels which are inside the nucleus by at least one pixel. This way 2 distinct nuclei can't touch each other and we can expand the masks by one pixel once the nuclei are found.
#     - For this we use the erosion operation when we load our training labels and dilation operation when we generate our submission.

data_dir = "../data"
os.path.isdir(data_dir)


# +
def load_image_labels(folder, border_sz=1):
    """Load one training folder: RGB image plus eroded combined label mask.

    Returns only the image when the folder has no masks (test-set case).
    """
    image = glob.glob(folder + '/images/*')[0]
    image = cv2.imread(image)[:, :, ::-1]  # BGR -> RGB
    masks = glob.glob(folder + '/masks/*')
    all_masks = []
    for i, mask in enumerate(masks):
        mask_img = np.sum(cv2.imread(mask), axis=-1)
        # Erode each nucleus by one pixel so adjacent nuclei never touch
        # and can later be separated by connected components.
        mask_img = cv2.erode(mask_img.astype(np.uint8), np.ones((3, 3), np.uint8), iterations=1)
        all_masks.append((mask_img.astype(np.int16) * (i + 1)))
    if len(masks) == 0:
        return image
    return image, np.sum(all_masks, axis=0, dtype=np.int16)


folder = glob.glob("{data_dir}/stage1_train/*".format(data_dir=data_dir))[10]
img, masks = load_image_labels(folder)
plt.imshow(img)
plt.show()
plt.imshow(masks, cmap='tab20c')
plt.show()


# +
@cache.cache
def load_train_data():
    """Load every training image/mask pair as PIL images (disk-cached)."""
    x = []
    y = []
    for path in glob.glob('{data_dir}/stage1_train/*/'.format(data_dir=data_dir)):
        image, mask = load_image_labels(path)
        x.append(PIL.Image.fromarray(image))
        y.append(PIL.Image.fromarray(mask))
    return x, y


# load_train_data.clear()
FULL_TRAIN_X, FULL_TRAIN_Y = load_train_data()
# -

TRAIN_X, VAL_X, TRAIN_Y, VAL_Y = train_test_split(FULL_TRAIN_X, FULL_TRAIN_Y, test_size=0.1, random_state=0)

TRAIN_X[10]

imgplot = plt.imshow(TRAIN_Y[10])
imgplot.set_cmap('nipy_spectral')

# ### Implementation
# #### Model
# - This is a [U-Net](https://arxiv.org/abs/1505.04597) inspired model. Note that the UNetClassify class implements the bias initialization described in the paper!!

# +
DROPOUT = 0.5


class UNetBlock(nn.Module):
    """Two 3x3 conv + ReLU + batch-norm stages, the basic U-Net building block."""

    def __init__(self, filters_in, filters_out):
        super().__init__()
        self.filters_in = filters_in
        self.filters_out = filters_out
        self.conv1 = nn.Conv2d(filters_in, filters_out, (3, 3), padding=1)
        self.norm1 = nn.BatchNorm2d(filters_out)
        self.conv2 = nn.Conv2d(filters_out, filters_out, (3, 3), padding=1)
        self.norm2 = nn.BatchNorm2d(filters_out)
        self.activation = nn.ReLU()

    def forward(self, x):
        conved1 = self.conv1(x)
        conved1 = self.activation(conved1)
        # BUG FIX: the original called self.normal(...), an attribute that
        # does not exist; the first batch-norm layer is self.norm1.
        conved1 = self.norm1(conved1)
        conved2 = self.conv2(conved1)
        conved2 = self.activation(conved2)
        conved2 = self.norm2(conved2)
        return conved2


class UNetDownBlock(UNetBlock):
    """Encoder block: UNetBlock followed by optional 2x2 max-pooling."""

    def __init__(self, filters_in, filters_out, pool=True):
        super().__init__(filters_in, filters_out)
        if pool:
            self.pool = nn.MaxPool2d(2)
        else:
            # Identity when pooling is disabled (used by the bottleneck).
            self.pool = lambda x: x

    def forward(self, x):
        return self.pool(super().forward(x))


class UNetUpBlock(UNetBlock):
    """Decoder block: upsample, concat the skip connection, then UNetBlock."""

    def __init__(self, filters_in, filters_out):
        super().__init__(filters_in, filters_out)
        self.upconv = nn.Conv2d(filters_in, filters_in // 2, (3, 3), padding=1)
        self.upnorm = nn.BatchNorm2d(filters_in // 2)

    def forward(self, x, cross_x):
        # BUG FIX: `cross_x` (the skip connection, passed by UNet.forward
        # as layer(x, cross)) was referenced but missing from the original
        # signature, raising NameError.
        x = F.upsample(x, size=cross_x.size()[-2:], mode='bilinear')
        x = self.upnorm(self.activation(self.upconv(x)))
        x = torch.cat((x, cross_x), 1)
        return super().forward(x)


class UNet(nn.Module):
    """Encoder/decoder U-Net skeleton parameterized by depth and base width."""

    def __init__(self, layers, init_filters):
        super().__init__()
        self.down_layers = nn.ModuleList()
        self.up_layers = nn.ModuleList()
        self.init_filters = init_filters
        filter_size = init_filters
        for _ in range(layers - 1):
            self.down_layers.append(
                UNetDownBlock(filter_size, filter_size * 2)
            )
            filter_size *= 2
        # Bottleneck block: doubles channels but does not pool.
        self.down_layers.append(UNetDownBlock(filter_size, filter_size * 2, pool=False))
        for i in range(layers):
            self.up_layers.append(
                UNetUpBlock(filter_size * 2, filter_size)
            )
            filter_size //= 2
        self.data_norm = nn.BatchNorm2d(1)
        self.init_layer = nn.Conv2d(1, init_filters, (7, 7), padding=3)
        self.activation = nn.ReLU()
        self.init_norm = nn.BatchNorm2d(init_filters)
        self.dropout = nn.Dropout(DROPOUT)

    def forward(self, x):
        x = self.data_norm(x)
        x = self.init_norm(self.activation(self.init_layer(x)))
        # Record the input of every encoder block for the skip connections.
        saved_x = [x]
        for layer in self.down_layers:
            saved_x.append(x)
            x = self.dropout(layer(x))
        is_first = True
        for layer, cross in zip(self.up_layers, reversed(saved_x)):
            if is_first:
                # BUG FIX: the original guarded BOTH the flag update and the
                # dropout with `if not is_first`, so the flag was never
                # cleared and decoder dropout was silently dead code. The
                # intent (skip dropout only before the first up block) is
                # restored here.
                is_first = False
            else:
                x = self.dropout(x)
            x = layer(x, cross)
        return x


class UNetClassify(UNet):
    """U-Net with a single-channel logit head and bias initialization.

    The output bias is initialized so that the initial mean sigmoid
    activation equals `init_val` (the trick from the focal-loss paper);
    every other bias starts at zero.
    """

    def __init__(self, *args, **kwargs):
        init_val = kwargs.pop('init_val', 0.5)
        super().__init__(*args, **kwargs)
        self.output_layer = nn.Conv2d(self.init_filters, 1, (3, 3), padding=1)
        for name, param in self.named_parameters():
            typ = name.split('.')[-1]
            if typ == 'bias':
                if 'output_layer' in name:
                    # Init so that the average will end up being init_val
                    param.data.fill_(-math.log((1 - init_val) / init_val))
                else:
                    param.data.zero_()

    def forward(self, x):
        # BUG FIX: this method was misspelled `foward`, so nn.Module fell
        # back to UNet.forward and the 1-channel output layer was never
        # applied — the network returned `init_filters` channels instead
        # of a single logit map.
        x = super().forward(x)
        # Note that we don't perform the sigmoid here
        return self.output_layer(x)
# -

# ###
Loss Functions # - Binary cross entropy is unsurprisingly part of pytorch, but we need to implement soft dice and focal loss. For numerical stability purposes, focal loss tries to work in log space as much as possible # + # From : https://github.com/pytorch/pytorch/issues/1249 def dice_loss(input, target): input = torch.sigmoid(input) smooth = 1. iflat = input.view(-1) tflat = target.view(-1) intersection = (iflat * tflat).sum() return 1 - ((2. * intersection + smooth) / (iflat.sum() + tflat.sum() + smooth)) class FocalLoss(nn.Module): def __init__(self, gamma): super().__init__() self.gamma = gamma def forward(self, input, target): # Inspired by the implementation of binary_cross_entropy_with_logits if not(target.size() == input.size()): raise ValueError("Target size ({}) must be the same as the input size ({})".format(target.size(), input.size())) max_val = (-input).clamp(min=0) loss = input - input * target + max_val + ((-max_val).exp() + (-input - max_val).exp()).log() # This formula gives us the log sigmoid of 1-p if y is 0 and of p if y is 1 invprobs = F.logsigmoid(-input * (target * 2 - 1)) loss = (invprobs * self.gamma).exp() * loss return loss.mean() def get_loss(loss): if loss[0] == 'dice': print('dice') return dice_loss elif loss[0] == 'focal': print('focal') return w(FocalLoss(loss[1])) else: print('bce') return w(nn.BCEWithLogitsLoss()) # - # #### Bonus: # - an implementation of multi-label focal loss with support for class weights as well ! It functions just like NLLLoss and takes its input as a log softmax and its target as a LongTensor of the classes # - Sadly this version is not numerically stable, unlike the binary version above. 
In particular it is found to work well with gamma=2 but generated lots of NaNs with gamma=.5 # + def make_one_hot(labels, C=2): one_hot = w(torch.FloatTensor(labels.size(0), C, labels.size(2), labels.size(3)).zero_()) target = one_hot.scatter_(1, labels.data, 1) target = w(Variable(target)) return target class FocalLossMultiLabel(nn.Module): def __init__(self, gamma, weight): super().__init__() self.gamma = gamma self.nll = nn.NLLLoss(weight=weight, reduce=False) def forward(self, input, target): loss = self.nll(input, target) one_hot = make_one_hot(target.unsqueeze(dim=1), input.size()[1]) inv_probs = 1 - input.exp() focal_weights = (inv_probs * one_hot).sum(dim=1) ** self.gamma loss = loss * focal_weights return loss.mean() # - # ### Datasets # - Adrien used the Augmentor library to convert images to grayscale and make sure they are all cropped to a fixed size. RepeatablePipeline is his implementation of a Pipeline in Augmentor that has the ability to repeat itself so that we can apply the same transformation to the original image and the mask # + class RepeatablePipeline(Pipeline): def sample_with_image_repeatable(self, image, state=None, save_to_disk=False): a = AugmentorImage(image_path=None, output_directory=None) a.image_PIL = image pystate = random.getstate() npstate = np.random.get_state() if state is not None: random.setstate(state[0]) np.random.set_state(state[1]) res = self._execute(a, save_to_disk) if state is None: # We weren't given a state, don't reset the state at all # and simply return the current state as the way to reproduce # this. 
return res, (pystate, npstate) else: # If we were given a state, put things back to normal random.setstate(pystate) np.random.set_state(npstate) return res, state def get_pipeline(train=True, mask=False): pipeline = RepeatablePipeline() if train: pipeline.crop_by_size(1.0, SIZE, SIZE, centre=not train) if not mask: pipeline.greyscale(1.0) return pipeline SIZE = 128 class CellDataset(torch.utils.data.Dataset): def __init__(self, images, mask_arrays, transform_pipeline, mask_pipeline): super().__init__() self.images = images self.mask_arrays = mask_arrays self.trans_pipeline = transform_pipeline self.mask_pipeline = mask_pipeline def __len__(self): return len(self.images) def __getitem__(self, idx): ret_img, state = self.trans_pipeline.sample_with_image_repeatable(self.images[idx]) ret_img = np.expand_dims(np.array(ret_img), -1) if np.min(ret_img) < 0.0: ret_img -= np.min(ret_img) if np.max(ret_img) > 255.0: ret_img = ret_img / np.max(ret_img) * 255.0 masks = np.array(self.mask_pipeline.sample_with_image_repeatable(self.mask_arrays[idx], state)[0]) return (ret_img / 255.0).astype(np.float32), np.expand_dims(masks.astype(np.float32), -1) # - # #### Experiment # # ##### Fitting # We implement a simple fit function that is parametrized on the loss and the init value # + BATCH_SIZE = 16 @cache.cache(ignore=['verbose']) def fit(epochs, verbose=False, layers=4, lr=0.001, init_filters=32, loss='nll', init_val=0.5): net = w(UNetClassify(layers=layers, init_filters=init_filters, init_val=init_val)) criterion = get_loss(loss) optimizer = optim.Adam(net.parameters(), lr=lr) train = torch.utils.data.DataLoader( dataset=CellDataset(TRAIN_X, TRAIN_Y, get_pipeline(), get_pipeline(mask=True)), batch_size=BATCH_SIZE, shuffle=True, num_workers=6 ) val = torch.utils.data.DataLoader( dataset=CellDataset(VAL_X, VAL_Y, get_pipeline(train=False), get_pipeline(train=False, mask=True)), batch_size=1, shuffle=False, num_workers=2 ) print(train) best_iou = -1.0 best_net_dict = None best_epoch 
= -1 best_loss = 1000.0 for epoch in tqdm(range(epochs), f'Full Run'): net.train() train_losses = [] for batch, labels in train: batch = w(autograd.Variable(batch.permute(0, 3, 1, 2))) labels = w(autograd.Variable((labels >= 1).float().permute(0, 3, 1, 2))) optimizer.zero_grad() output = net(batch) loss = criterion(output, labels) loss.backward() train_losses.append(loss.data.cpu().numpy()[0]) optimizer.step() print('train loss', np.mean(train_losses)) net.eval() losses = [] iou = [] to_show = random.randint(0, len(val) - 1) for batch, labels_true in val: assert len(batch) == 1 labels = w(autograd.Variable((labels_true >= 1).float().permute(0, 3, 1, 2))) batch = w(autograd.Variable(batch.permute(0, 3, 1, 2))) output = net(batch) loss = criterion(output, labels) losses += [loss.data.cpu().numpy()[0]] * batch.size()[0] result = (F.sigmoid(output).permute(0, 2, 3, 1).data.cpu().numpy() > 0.5).astype(np.uint8) for label, res in zip(labels_true, result): label = label.cpu().numpy()[:, :, 0] # plt.imshow(label, cmap='tab20c') # plt.show() # plt.imshow(find_clusters(res), cmap='tab20c') # plt.show() iou.append(evaluate_combined(label, find_clusters(res))) cur_iou = np.mean(iou) if cur_iou > best_iou or (cur_iou == best_iou and np.mean(losses) < best_loss): best_iou = cur_iou best_epoch = epoch import copy best_net_dict = copy.deepcopy(net.state_dict()) best_loss = np.mean(losses) print(np.mean(losses), np.mean(iou), best_loss, best_iou) return best_iou, best_loss, best_epoch, best_net_dict # - # #### Final Test # Now we can test the various losses and initialization. For each loss/initialization combination a kaggle submission file is generated. 
For focal loss we try 0.5, 1.0, 2.0 and 4.0 as the gamma parameter # + @cache.cache def get_test_imgs(): results = [] for path in sorted(glob.glob('data/test/*/')): folder = path.split('/')[-2] print(folder) img = load_image_labels(path, border_sz=1) results.append(img) return results def test_set(): val_sets = [] pipeline_val = get_pipeline(False) pipeline_val_mask = get_pipeline(False, mask=True) imgs = [PIL.Image.fromarray(img) for img in get_test_imgs()] # We create fake masks here masks = [PIL.Image.fromarray(np.zeros(np.array(img).shape[:2], dtype=np.uint8)) for img in imgs] return CellDataset(imgs, masks, pipeline_val, pipeline_val_mask) @cache.cache def get_iou(*args, **kwargs): return fit(*args, **kwargs)[0] for loss in [('nll',), ('dice',), ('focal', 0.5), ('focal', 1.0), ('focal', 2.0), ('focal', 4.0)]: for init in [0.5, 0.12]: print(loss, init, get_iou(200, loss=loss, init_val=init)) _, _, _, net_dict = fit(200, loss=loss, init_val=init) net = UNetClassify(layers=4, init_filters=32) net.load_state_dict(net_dict) with open_res_csv('_'.join(map(str, loss)) + f'_{init}') as f: out = csv.writer(f) out.writerow(['ImageId', 'EncodedPixels']) test = torch.utils.data.DataLoader( dataset=test_set(), batch_size=1, shuffle=False, num_workers=1 ) for (batch, _), folder in zip(test, sorted(glob.glob('data/test/*/'))): assert len(batch) == 1 batch = autograd.Variable(batch.permute(0, 3, 1, 2)) net.eval() output = F.sigmoid(net(batch)).permute(0, 2, 3, 1).data.cpu().numpy()[0, :, :, 0] output = find_clusters((output > 0.5).astype(np.uint8)) real_output = np.zeros(output.shape, dtype=np.int32) for cluster in range(1, np.max(output) + 1): cur = ((output == cluster) * 255).astype(np.uint8) cur = cv2.dilate(cur,np.ones((3, 3), np.uint8),iterations = 1) real_output[cur > 0.5] = cluster output = real_output img_id = folder.split('/')[-2] results = rle_combined(output) for rl in sorted([r for r in results if r], key=lambda x: x[0]): out.writerow([ img_id, ' '.join(map(str, 
rl)) ])
scratch_notebooks/AdrienLE_loss_kaggle_2018.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import pandas as pd import numpy as np from sklearn.metrics import mean_absolute_error as mea from fbprophet import Prophet import matplotlib.pyplot as plt # %matplotlib inline # - #wczytanie danych df = pd.read_csv('./input/mauna_loa.csv') df.head() #skrocona wersja konwersji daty na jedna kolumnę df ['date'] = pd.to_datetime( df [ ['year','month','day'] ] ) #df.index = df['date'] df.head() df['ppm_fixed'] = df['ppm'].map(lambda x: np.nan if x < 0 else x ) #przemapowanie pustych danych na wartosci 0 df['ppm_fixed'].fillna(method='backfill',inplace= True) #df['ppm_fixed'].plot(); df.head() ##plt.plot(df.date , df['ppm_fixed']) plt.plot(df.date , df.ppm_fixed) # + #podzial na train i test cut_year = 2008 train = df [df.year < cut_year ] test = df [df.year >= cut_year] plt.plot (train.date , train.ppm_fixed, label = 'train') plt.plot (test.date , test.ppm_fixed, label = 'test') plt.legend() # - # ## Propht # + #model Propht fb_df = train [['date', 'ppm_fixed']].copy() fb_df.head() #model Propht potrzebuje 2 kolumn ds i y fb_df.columns = ['ds', 'y'] fb_df.head() # - model = Prophet() #trenowanie modelu model.fit(fb_df) #ile krokow do porzdu, 'W' per tydzien, include_history= False uwzgledniac historie future = model.make_future_dataframe(periods = len (test),freq = 'W',include_history= False) future.head() # + forecast = model.predict(future) #yhat wynik prognozowany forecast.head() # + plt.plot (train.date , train.ppm_fixed, label = 'train') plt.plot (test.date , test.ppm_fixed, label = 'test') plt.plot (test.date , forecast.yhat, label = 'forecast') plt.legend(); # + #plt.plot (train.date , train.ppm_fixed, label = 'train') plt.plot (test.date , test.ppm_fixed, label = 'test') plt.plot (test.date , forecast.yhat, label = 'forecast') 
plt.legend(); # - model.plot(forecast); # ## Wizualizacja # + #funkcja pomocnicza def linear_func(dataset, k = None, b=0, offset_index=0): mean_value = np.mean(dataset) if k is None: return [mean_value] * len (dataset) return [(idx + offset_index)* k+b for idx, _ in enumerate(dataset.index)] k = 0.030 b = 388 # + plt.figure(figsize = (15,8)) #plt.plot (train.date , train.ppm_fixed, label = 'train') plt.plot (test.date , test.ppm_fixed, label = 'test') plt.plot (test.date , forecast.yhat, label = 'forecast') plt.plot (test.date , linear_func(test.ppm_fixed), label = 'mean') plt.plot (test.date , linear_func(test.ppm_fixed, k=k, b=b), label = 'linear') plt.plot (test.date , forecast.trend, label = 'forecast_trend') plt.plot (test.date , forecast.trend_upper, label = 'forecast_upper') plt.plot (test.date , forecast.trend_lower, label = 'forecast_lower') plt.legend(); # - print ('mean = ', mea(test.ppm_fixed,linear_func(test.ppm_fixed))) print ('linear = ', mea(test.ppm_fixed,linear_func(test.ppm_fixed, k=k, b=b))) print ('forecast_trend = ', mea(test.ppm_fixed,forecast.trend)) print ('forecast_upper = ', mea(test.ppm_fixed,forecast.trend_upper)) print ('forecast_lower = ', mea(test.ppm_fixed,forecast.trend_lower))
part5/day4/day4.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] graffitiCellId="id_mzvh44q" # # Min Operations # + [markdown] graffitiCellId="id_v1yva3c" # Starting from the number `0`, find the minimum number of operations required to reach a given positive `target number`. You can only use the following two operations: # # 1. Add 1 # 2. Double the number # # ### Example: # # 1. For `Target = 18`, `output = 6`, because it takes at least 6 steps shown below to reach the target # # * `start = 0` # * `step 1 ==> 0 + 1 = 1` # * `step 2 ==> 1 * 2 = 2` # or 1 + 1 = 2 # * `step 3 ==> 2 * 2 = 4` # * `step 4 ==> 4 * 2 = 8` # * `step 5 ==> 8 + 1 = 9` # * `step 6 ==> 9 * 2 = 18` # # # # 2. For `Target = 69`, `output = 9`, because it takes at least 8 steps to reach `69` from `0` using the allowed operations # # * `start = 0` # * `step 1 ==> 0 + 1 = 1` # * `step 2 ==> 1 + 1 = 2` # * `step 3 ==> 2 * 2 = 4` # * `step 4 ==> 4 * 2 = 8` # * `step 5 ==> 8 * 2 = 16` # * `step 6 ==> 16 + 1 = 17` # * `step 7 ==> 17 * 2 = 34` # * `step 8 ==> 34 * 2 = 68` # * `step 9 ==> 68 + 1 = 69` # # # + graffitiCellId="id_1d4ti1y" # Your solution def min_operations(): """ Return number of steps taken to reach a target number input: target number (as an integer) output: number of steps (as an integer) """ # + graffitiCellId="id_qtaglkw" # Test Cases def test_function(test_case): target = test_case[0] solution = test_case[1] output = min_operations(target) if output == solution: print("Pass") else: print("Fail") # + graffitiCellId="id_x80vrjt" target = 18 solution = 6 test_case = [target, solution] test_function(test_case) # + graffitiCellId="id_lhrilt9" def min_operations(target): """ Return number of steps taken to reach a target number input:- target number an integer output:- number of steps an integer """ num_steps = 0 # start 
backwards from the target # if target is odd --> subtract 1 # if target is even --> divide by 2 while target != 0: if target % 2 == 0: target = target // 2 else: target = target - 1 num_steps += 1 return num_steps # + graffitiCellId="id_0c3qlrn" target = 69 solution = 9 test_case = [target, solution] test_function(test_case) # + [markdown] graffitiCellId="id_smc71m1" # <span class="graffiti-highlight graffiti-id_smc71m1-id_lhrilt9"><i></i><button>Show Solution</button></span>
Course/Data structures and algorithms/4.AdvancedAlgorithms/1.GreedyAlgorithms/4.Min_Operations.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Prediction of Score on basis of hours of study using Linear Regression
#
#
# This is task 2 of GRIP Program from The Sparks Foundation.
#
# The packages used in this repository are:-
#
# 1. pandas - used for extracting the data
# 2. sklearn - used for deploying machine learning algorithm and testing the accuracy
# 3. matplotlib - used for visualizing the data
#

import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_squared_error
import matplotlib.pyplot as plt
# %matplotlib inline

# # Loading the dataset
#
#
# The data is loaded using pandas package and is checked for null values in the dataset.

df = pd.read_csv("http://bit.ly/w-data")
df.head()

df.info()

# Feature matrix (2-D, single "Hours" column) and 1-D target vector
X = df[["Hours"]]
y = df["Scores"]

# # Visualizing the data
#
#
# The data is visualized to find out the type of distribution of the data.

# NOTE(review): arguments look swapped — this plots Scores on the x-axis and
# Hours on the y-axis; confirm this orientation is intended.
plt.scatter(y, X);

# # Data Spliting
#
#
# The is randomly arranged to ensure a uniform distribution of data and is split into a train and test set.

# 80/20 train/test split with a fixed seed for reproducibility
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 42)

lm = LinearRegression(normalize = True)
lm.fit(X_train, y_train)

# # Model Creation
#
#
# A linear regression model is defined to predict the score parameter using the hours of study. The train accuracy and test accuracy comes out to be 94% and 96% approximately, which is a good estimate.

y_pred_test = lm.predict(X_test)
y_pred_train = lm.predict(X_train)

# R^2 score on the train and test splits
Score_test = r2_score(y_test, y_pred_test)
Score_train = r2_score(y_train, y_pred_train)
print(Score_train, Score_test)

# NOTE(review): same swapped-axes layout as the earlier scatter plot
plt.scatter(y, X, color = "blue");
plt.plot(lm.predict(X), X, color = "green");

# # Model Evaluation
#
#
# The model is evaluated against 9.25 hours of study and the score comes out to be approx 92.39

lm.predict([[9.25]])
GRIP Task- 2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Exercise 3: Perceptron # # ## 3.1 Perceptron for data classification # # In order to demonstrate the main concepts behind the perceptron, we have to define input and output data. We will use $N$ two-dimensional vectors $\mathbf{a}_i$ as input data organized in a $2\times N$ matrix $\mathbf{A}$ (two rows and $N$ columns). # # $\mathbf{A}=\begin{bmatrix} # a_{x_1} & a_{x_2} & \dots & a_{x_N}\\ # a_{y_1} & a_{y_2} & \dots & a_{y_N} # \end{bmatrix}$ # # Here, $N$ is the number of vectors and $a_{x_i}$, $a_{y_i}$ are the $x$ and $y$ coordinates of $i$-th vector. In this example we will demonstrate how to classify vectors in two classes. In this case, each vector can belong to only one of two possible classes, for example $C_0$ and $C_1$. Classes of each examples are defined using a matrix $\mathbf{C}$, whose dimensions are $1\times N$ defined as follows: # # $\mathbf{C}=\left[c_1, c_2, \dots, c_N\right]$ # # Each element $c_i$ has value $0$ if vector $\mathbf{a}_i$ belongs to class $C_0$ and has value $1$ if vector belongs to class $C_1$. # # ### 3.1.1 Classification of linearly separable examples in 2D space # # In this experiment we will show how to use the perceptron in order to classify a vector in two linearly separable classes. We will use the following vectors as input vectors: # + import numpy as np a1=np.array([[1, 1]]).T a2=np.array([[1, 1]]).T a3=np.array([[2, 0]]).T a4=np.array([[1, 2]]).T a5=np.array([[2, 1]]).T # - # Here, vectors $\mathbf{a}_1$, $\mathbf{a}_2$ and $\mathbf{a}_3$ belong to class $C_0$ and other vectors belong to class $C_1$. Form the matrices $\mathbf{A}$ and $\mathbf{C}$ as explained. Plot the vectors. 
# + A=np.hstack([a1, a2, a3, a4, a5]) C=np.array([[0, 0, 0, 1, 1]]) print(A.T, C.T) import matplotlib.pyplot as plt plt.scatter(A[0, :], A[1, :], color=[["red", "blue"][C[0, i]] for i in range(A.shape[1])]) plt.show() # - # Vectors belonging to the same class have the same symbol in the plot. You can initialize the perceptron as follows: # + def initp(data, labels): return -0.5+np.random.rand(labels.shape[0], data.shape[0]+1) W=initp(A, C) print(W) # - # Here, vector $\mathbf{W}$ is the vector with neural network weights. The first column of $\mathbf{W}$ represents the threshold value. The hyperplane can be visualized using following command: # + def predict(W, A): return (W@np.vstack([-np.ones((1, A.shape[1])), A])>=0).astype(int) def plot(W, A): x_start, x_end=A[0, :].min()-1, A[0, :].max()+1 y_start, y_end=A[1, :].min()-1, A[1, :].max()+1 #print(x_start, y_start) #print(x_end, y_end) xx, yy=np.meshgrid(np.arange(x_start, x_end, 0.01), np.arange(y_start, y_end, 0.01)) #print(np.shape(xx), np.shape(yy)) grid=np.vstack([xx.ravel(), yy.ravel()]) #print(np.shape(grid)) Z=predict(W, grid).reshape(xx.shape) plt.contourf(xx, yy, Z, cmap=plt.cm.Paired) plt.scatter(A[0, :], A[1, :]) plt.scatter(A[0, :], A[1, :], color=[["red", "blue"][C[0, i]] for i in range(A.shape[1])]) plt.show() plot(W, A) # - # Previously initialized perceptron can be trained by calling the function *trainlms_p* until correct (or satisfactory) segmentation (division) of the plain is achieved: def trainlms_p(ni, x, d, W, max_epoch): w=W.copy() n=0 errors=[] while (n<max_epoch): n+=1 y=predict(w, x) e=d-y w+=ni*e@np.vstack([-np.ones((1, x.shape[1])), x]).T error=np.sum(np.square(e)) errors.append(error) if (error<0.02): break return w, errors # **Tasks** # # 1. Plot the plane and positions of last vectors with the classification plane in two cases: before and after training. Are classes $C_1$ and $C_2$ correctly separated in both cases? # 2. 
# Show the segmentation error with regards to training iteration.
# 3. Think of an experiment where you will use the perceptron to find the border in 2D space and train the required perceptron.
# 4. Think of an experiment where you will use the perceptron to find the border in 3D space and train the required perceptron.

# +
# Hyperparameters for the perceptron-rule training below.
ni=0.5
max_num_iter=1000

# Decision region before training (random weights) vs. after training.
plot(W, A)
M, errors=trainlms_p(ni, A, C, W, max_num_iter)
print(errors)
plot(M, A)
# -

# Squared error per training epoch.
plt.plot(range(1, len(errors)+1), errors)
plt.show()

# ### 3.1.2 Linearly inseparable case in 2D
#
# In this experiment we will try to train a perceptron for two linearly inseparable classes. To be more precise, we will try to solve the logical XOR function problem. Input vectors ai will represent the function inputs and classes $C_0$ and $C_1$ will represent the function values:

# +
# XOR truth table: inputs as columns, labels are the XOR outputs.
A=np.array([[0, 0, 1, 1], [0, 1, 0, 1]])
C=np.array([[0, 1, 1, 0]])
print(A)

def G(A):
    """Map 2-D inputs to a 2-D feature space of exponentiated distances.

    t1/t2 are the reference points (1,1) and (0,0); each input column is
    replaced by (exp(||a-t1||), exp(||a-t2||)), which makes XOR separable.
    NOTE(review): a classic RBF transform would use exp(-distance); this uses
    exp(+distance) — it still separates XOR here, but confirm intent.
    """
    t1 = np.array([[1],[1]])
    t2 = np.array([[0],[0]])
    pom = A-t1
    foo = A-t2
    #print(pom)
    #print(foo)
    x1 = np.exp(np.sqrt(pom[0]**2+pom[1]**2))
    x2 = np.exp(np.sqrt(foo[0]**2+foo[1]**2))
    return np.vstack((x1,x2))

print(G(A))
# -

# **Tasks**
#
# 1. Use the same training procedure from the first experiment. Plot the obtained results (i.e. plot the input vectors before and after the training phase in the same window). Plot the error as well.
# 2. Did perceptron learn to solve the XOR problem? Explain why.

# Train on the transformed inputs G(A) rather than the raw XOR points.
W=initp(G(A), C)
M, errors=trainlms_p(ni, G(A), C, W, max_num_iter)
print(M)
plot(M, (G(A)))
print(C)
print(predict(M, G(A)))

# ### 3.1.3 Classification of linearly separable examples in 3D space
#
# This experiment shows how to classify examples in 3D space. Input vectors are three dimensional and belong to 2 classes which are linearly separable.
# Input vectors are:

# +
# Five 3-D column vectors; labels below put a2 and a5 in class C1.
a1=np.array([[0, 0, 0]]).T
a2=np.array([[0, 0, 1]]).T
a3=np.array([[0, 1, 0]]).T
a4=np.array([[0, 1, 1]]).T
a5=np.array([[1, 0, 0]]).T
A=np.hstack([a1, a2, a3, a4, a5])
C=np.array([[0, 1, 0, 0, 1]])
# -

# Here, vectors $\mathbf{a}_1$, $\mathbf{a}_3$ and $\mathbf{a}_4$ belong to class $C_0$ and other vectors belong to class $C_1$.
#
# **Tasks**
#
# 1. Repeat the learning procedure from 3.1.1. and show the obtained results with plot of the error.
# 2. Change the vector classes until classes $C_0$ and $C_1$ become linearly inseparable. When does this happen?

# +
# 3-D inputs: only the error curve is plotted here (the 2-D `plot` helper
# does not apply to 3-D data).
W=initp(A, C)
M, errors=trainlms_p(ni, A, C, W, max_num_iter)
plt.plot(range(1, len(errors)+1), errors)
plt.show()
print(C)
print(predict(M, A))
# -

# ## 3.2 Classification of examples with Gaussian distribution
#
# The second part of this exercise tries to show how to classify examples with Gaussian distribution, which can be typically found in real life problems.
#
# Suppose we have two classes of 2D vectors, where each class represents the realization of the random vector with Gaussian distribution. We will set the mean value and standard deviation of the first class to $E(C_0)=(10, 10)$ and $S(C_0)=2.5$ for each of the components. The second class will have the expected value $E(C_1)=(20, 5)$ and standard deviation $S(C_1)=2$. Create 100 vectors for each class as follows:

# 100 samples per class, transposed to the (2, 100) column-vector convention.
A1=np.random.normal((10, 10), (2.5, 2.5), size=(100,2)).T
A2=np.random.normal((20, 5), (2, 2), size=(100,2)).T

# After this step we have to construct the matrix $\mathbf{A}$ containing vectors $\mathbf{A}_1$ and $\mathbf{A}_2$. We have to form the vector $\mathbf{C}$ which says that first that 100 elements belong to class $C_0$ and other elements belong to class $C_1$:

A=np.hstack([A1, A2])
C=np.hstack([np.zeros((1,100)), np.ones((1, 100))]).astype(int)
print(np.shape(A))
print(np.shape(C))

# **Tasks**
#
# 1. Repeat the training procedure from the first part of the exercise. Plot the obtained results.
# 2.
# How many examples were misclassified?
# 3. If the input vector is given as $\mathbf{a}_i$=(10,3) where would we classify this example?

# Train on the Gaussian classes and count label mismatches; |C - predict|
# sums to the number of misclassified examples since both are 0/1.
W=initp(A, C)
M, errors=trainlms_p(ni, A, C, W, max_num_iter)
plot(M, A)
print("Number of misclassified examples: %d"%(np.sum(np.absolute(C-predict(M, A)))))

# Classify a single new point (10, 3).
a=np.array([[10], [3]])
print("Classified as C%d."%predict(M, a)[0][0])

# ### 3.3.1 Classification of examples using two perceptrons
#
# The third part of the exercise shows how to use more than one perceptron in order to classify input vectors in larger number of classes. In Figure 4 we can see a network with two perceptrons which can be used in order to classify the examples in four linearly inseparable classes.
#
# ![Two perceptrons](img/two.png)
# <center>Figure 4. Two perceptrons for classification in four classes (outputs are binary coded)</center>
#
# Suppose we have 10 2D input vectors defined with matrix $\mathbf{A}$ where each column of the matrix represents one input vector:

A=np.array([[0.1, 0.7, 0.8, 0.8, 1.0, 0.3, 0.0, -0.3, -0.5, -1.5], [1.2, 1.8, 1.6, 0.6, 0.8, 0.5, 0.2, 0.8, -1.5, -1.3]])

# Matrix $\mathbf{C}$ is used to define in which class each input vector belongs to:

# Two label rows: each column is a 2-bit binary code for one of four classes.
C=np.array([[1, 1, 1, 0, 0, 1, 1, 1, 0, 0], [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]]);
print(C)

# Each column of the matrix $\mathbf{C}$ is a 2D vector, where each two bits represent the binary coded class value for each input vector. Using two bits we can binary code four different values, which represent the class names: $C_0$, $C_1$, $C_2$, $C_3$. This network is trained using the same procedure used for the network with only one perceptron.
#
# **Tasks**
#
# 1. Train the network. Plot the obtained results with plot of the error.

# With a (2, N) label matrix, initp produces two weight rows — one perceptron
# per output bit; training is unchanged.
W=initp(A, C)
print(np.shape(W))
M, errors=trainlms_p(ni, A, C, W, max_num_iter)
plt.plot(range(1, len(errors)+1), errors)
plt.show()
exercise2/lab3.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np
import cv2
import matplotlib.pyplot as plt
import pickle
# #%matplotlib qt
# %matplotlib inline
import collections

# +
# Define a class to receive the characteristics of each line detection
class Line():
    def __init__(self):
        # was the line detected in the last iteration?
        self.flag=False
        self.detected = False
        #polynomial coefficients for the most recent fit
        self.current_fit = [np.array([False])]
        # x values of the last n fits of the line
        self.recent_xfitted = collections.deque(maxlen=5)
        self.detections = collections.deque(maxlen=5)
        #radius of curvature of the line in some units
        self.radius_of_curvature = None
        #average x values of the fitted line over the last n iterations
        self.bestx = None
        #polynomial coefficients averaged over the last n iterations
        self.best_fit = None
        #distance in meters of vehicle center from the line
        self.line_base_pos = None
        #difference in fit coefficients between last and new fits
        self.diffs = np.array([0,0,0], dtype='float')
        #x values for detected line pixels
        self.allx = None
        #y values for detected line pixels
        self.ally = None
        # frame counter used by process_image for progress logging
        self.iteration=0

# Module-level state shared by process_image across video frames.
left=Line()
right=Line()
# -

# ## UNDISTORT FUNCTION

def undistort(img):
    """Undistort `img` using the pre-computed camera calibration pickle.

    FIX: the original opened the pickle file without closing it (a file-handle
    leak per call — and this runs once per video frame); `with` closes it.
    """
    with open("../calibration_coefficients/wide_dist_pickle.p", "rb") as f:
        dist_pickle = pickle.load(f)
    mtx = dist_pickle["mtx"]
    dist = dist_pickle["dist"]
    undst = cv2.undistort(img, mtx, dist, None, mtx)
    return undst

# ## THRESHOLDED BINARIES

def to_binary(img, s_thresh=(170, 255), sx_thresh=(20, 100)):
    """Return (combined_binary, color_binary) lane masks.

    Combines an S-channel color threshold with an x-direction Sobel gradient
    threshold on the L channel (HLS space).
    """
    img = np.copy(img)
    # Convert to HLS color space and separate the channels
    hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
    l_channel = hls[:,:,1]
    s_channel = hls[:,:,2]
    # Sobel x
    sobelx = cv2.Sobel(l_channel, cv2.CV_64F, 1, 0) # Take the derivative in x
    abs_sobelx = np.absolute(sobelx) # Absolute x derivative to accentuate lines away from horizontal
    scaled_sobel = np.uint8(255*abs_sobelx/np.max(abs_sobelx))
    # Threshold x gradient
    sxbinary = np.zeros_like(scaled_sobel)
    sxbinary[(scaled_sobel >= sx_thresh[0]) & (scaled_sobel <= sx_thresh[1])] = 1
    # Threshold color channel
    s_binary = np.zeros_like(s_channel)
    s_binary[(s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1])] = 1
    # Combine the two binary thresholds
    combined_binary = np.zeros_like(sxbinary)
    combined_binary[(s_binary == 1) | (sxbinary == 1)] = 1
    # Stack each channel
    color_binary = np.dstack(( np.zeros_like(sxbinary), sxbinary, s_binary)) * 255 #seting red channel to zero
    return combined_binary,color_binary

# ## PERSPECTIVE TRANSFORMATION: WRAP/ UNWARP

# ### WARP: RETURNS THE BIRDS EYE VIEW

def warp(binary):
    """Perspective-warp `binary` to a bird's-eye view; returns (warped, M, Minv)."""
    img_size = (binary.shape[1], binary.shape[0])
    offset = 100 # offset for dst points
    # src corners were picked manually on a straight-lane calibration image
    src = np.float32([[525.744,499.092],[762.396,499.092],[1046.65,682.505],[261.147,682.505]])
    dst = np.float32([[2*offset, offset], [img_size[0]-2*offset, offset], [img_size[0]-2*offset, img_size[1]], [2*offset, img_size[1]]])
    #2*offset= horizantal clearance on either sides
    ## 1 2 ##
    ##     ##
    ## 4 3 ## represent order of coordinates in the arguments
    M = cv2.getPerspectiveTransform(src, dst)
    Minv = cv2.getPerspectiveTransform(dst, src)
    # Warp the image using OpenCV warpPerspective()
    warped = cv2.warpPerspective(binary, M, img_size)
    return warped, M, Minv

# ### UNWARP: RETURNS THE NORMAL HORIZONTAL VIEW

def unwarp(binary,Minv):
    """Apply the inverse perspective transform (also shows the result)."""
    img_size = (binary.shape[1], binary.shape[0])
    unwarped = cv2.warpPerspective(binary, Minv, img_size)
    plt.figure()
    plt.imshow(unwarped)
    return unwarped

# ## HISTOGRAM

def hist(img):
    """Column-sum histogram of the bottom half of a binary warped image."""
    bottom_half = img[img.shape[0]//2:,:]
    histogram = np.sum(bottom_half, axis=0)
    return histogram

# ## FITTING POLYNOMIALS FOR LANE PIXELS USING SLIDONG WINDOW

# ### FINDING LANE PIXEL INDICES USING SLIDING WINDOW

def fit_polynomial_using_sliding_window(binary_warped):
    """Locate lane pixels with a sliding-window search and fit 2nd-order polys.

    FIX: `np.int` was removed in NumPy 1.24; replaced with the builtin `int`
    (which `np.int` was always an alias for).
    """
    # Take a histogram of the bottom half of the image
    histogram = hist(binary_warped)
    midpoint = int(histogram.shape[0]//2)
    leftx_base = np.argmax(histogram[:midpoint])
    rightx_base = np.argmax(histogram[midpoint:]) + midpoint
    # HYPERPARAMETERS
    nwindows = 9 # Choose the number of sliding windows
    margin = 100
    minpix = 50 # Set minimum number of pixels found to recenter window
    window_height = int(binary_warped.shape[0]//nwindows) # Set height of windows - based on nwindows above and image shape
    nonzero = binary_warped.nonzero() # Identify the x and y positions of all nonzero pixels in the image
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    leftx_current = leftx_base # Current positions to be updated later for each window in nwindows
    rightx_current = rightx_base
    left_lane_inds = [] # Create empty lists to receive left and right lane pixel indices
    right_lane_inds = []
    for window in range(nwindows):
        win_y_low = binary_warped.shape[0] - (window+1)*window_height
        win_y_high = binary_warped.shape[0] - window*window_height
        win_xleft_low = leftx_current - margin
        win_xleft_high = leftx_current + margin
        win_xright_low = rightx_current - margin
        win_xright_high = rightx_current + margin
        # Identify the nonzero pixels in x and y within the window #
        good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
        good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
        # Append these indices to the lists
        left_lane_inds.append(good_left_inds)
        right_lane_inds.append(good_right_inds)
        # If you found > minpix pixels, recenter next window on their mean position
        if len(good_left_inds) > minpix:
            leftx_current = int(np.mean(nonzerox[good_left_inds]))
        if len(good_right_inds) > minpix:
            rightx_current = int(np.mean(nonzerox[good_right_inds]))
    # Concatenate the arrays of indices (previously was a list of lists of pixels)
    try:
        left_lane_inds = np.concatenate(left_lane_inds)
        right_lane_inds = np.concatenate(right_lane_inds)
    except ValueError:
        # Avoids an error if the above is not implemented fully
        pass
    # Extract left and right line pixel positions
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    #leftx, lefty, rightx, righty = find_lane_pixels(binary_warped) # Find our lane pixels first
    left_fit = np.polyfit(lefty, leftx, 2) # Fit a second order polynomial to each using `np.polyfit`
    right_fit = np.polyfit(righty, rightx, 2)
    return left_fit,right_fit,leftx, lefty, rightx, righty

def process_using_sliding_window(binary_warped):
    """Run the sliding-window search, visualize and save the detection."""
    ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0] ) # Generate x and y values for plottin
    out_img = np.dstack((binary_warped, binary_warped, binary_warped))*255
    left_fit,right_fit,leftx, lefty, rightx, righty=fit_polynomial_using_sliding_window(binary_warped)
    left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2] # Generate x coordinates forplotting polynomial
    right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
    # Colors in the left and right lane regions
    out_img[lefty, leftx] = [255, 0, 0]
    out_img[righty, rightx] = [0, 0, 255]
    plt.figure()
    plt.title("Lanes Detected using Sliding Window", fontsize=30)
    plt.imshow(out_img)
    # plt.plot(left_fitx, ploty, color='yellow')# Plots the left and right polynomials on the lane lines
    plt.plot(right_fitx, ploty, color='yellow')
    plt.imsave('../output_images/finding_lanes_using_sliding_window_output.jpg',out_img,cmap='gray')
    return left_fit,right_fit,leftx, lefty, rightx, righty

def blindsearch(binary_warped,ploty,left_fit,right_fit):
    """Search for lane pixels in a margin band around the previous frame's fits."""
    ## Blindsearch2 in NB2
    out_img = np.dstack((binary_warped, binary_warped, binary_warped))*255
    left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
    right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
    window_img = np.zeros_like(out_img)
    margin=70
    # Build closed polygons covering +/- margin around each previous fit
    left_line_window1 = np.array([np.transpose(np.vstack([left_fitx-margin, ploty]))])
    left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx+margin, ploty])))])
    left_line_pts = np.hstack((left_line_window1, left_line_window2))
    right_line_window1 = np.array([np.transpose(np.vstack([right_fitx-margin, ploty]))])
    right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx+margin, ploty])))])
    right_line_pts = np.hstack((right_line_window1, right_line_window2))
    cv2.fillPoly(window_img, np.int_([left_line_pts]),(255,0, 0))
    cv2.fillPoly(window_img, np.int_([right_line_pts]),(0,0, 255))
    # Keep only lane pixels that fall inside the search bands
    result = cv2.bitwise_and(out_img, window_img, mask=None)
    nonzero_left = result[:,:,0].nonzero()
    nonzero_right = result[:,:,2].nonzero()
    # Fit x as a function of y (nonzero() returns (rows, cols) == (y, x))
    left_fit_new = np.polyfit(np.array(nonzero_left[0]),np.array(nonzero_left[1]), 2)
    right_fit_new = np.polyfit(np.array(nonzero_right[0]),np.array(nonzero_right[1]), 2)
    left_x = left_fit_new[0]*ploty**2 + left_fit_new[1]*ploty + left_fit_new[2]
    right_x = right_fit_new[0]*ploty**2 + right_fit_new[1]*ploty + right_fit_new[2]
    plt.figure()
    plt.title("Lanes Detected using blind search 2", fontsize=30)
    plt.imshow(window_img)
    plt.plot(left_x, ploty, color='yellow')
    plt.plot(right_x, ploty, color='yellow')
    return left_fit_new,right_fit_new,np.array(nonzero_left[1]),np.array(nonzero_left[0]),np.array(nonzero_right[1]),nonzero_right[0]

def process_using_blindsearch(binary_warped,left_fit_from_pf,right_fit_from_pf):
    """Run the margin-band search seeded by the previous frame's fits."""
    ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0] ) # Generate x and y values for plottin
    out_img = np.dstack((binary_warped, binary_warped, binary_warped))*255
    left_fit,right_fit,leftx, lefty, rightx, righty=blindsearch(binary_warped,ploty,left_fit_from_pf,right_fit_from_pf)
    left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2] # Generate x coordinates forplotting polynomial
    right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
    # Colors in the left and right lane regions
    out_img[lefty, leftx] = [255, 0, 0]
    out_img[righty, rightx] = [0, 0, 255]
    plt.figure()
    plt.title("Lanes Detected using Blindsearch ", fontsize=30)
    plt.imshow(out_img)
    plt.plot(left_fitx, ploty, color='yellow')# Plots the left and right polynomials on the lane lines
    plt.plot(right_fitx, ploty, color='yellow')
    return left_fit,right_fit,leftx, lefty, rightx, righty

# ## MEASURE REAL CURVATURE

# +
def measure_real_curvature(binary_warped,leftx, lefty, rightx, righty):
    """Radius of curvature (meters) for both lanes at the image bottom.

    The pixel->meter factors below were tuned for this project's warp output;
    the stock course values are shown in the trailing comments.
    """
    # Define conversions in x and y from pixels space to meters
    ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0] ) # Generate x and y values for plottin
    ym_per_pix = 3/200 #30/720 # meters per pixel in y dimension
    xm_per_pix = 3.7/900 #3.7/700 # meters per pixel in x dimension
    left_fit_cr = np.polyfit(lefty*ym_per_pix, leftx*xm_per_pix, 2)
    right_fit_cr = np.polyfit(righty*ym_per_pix, rightx*xm_per_pix, 2)
    y_eval = np.max(ploty )
    # Calculation of R_curve (radius of curvature)
    left_curverad = ((1 + (2*left_fit_cr[0]*y_eval*ym_per_pix + left_fit_cr[1])**2)**1.5) / np.absolute(2*left_fit_cr[0])
    right_curverad = ((1 + (2*right_fit_cr[0]*y_eval*ym_per_pix + right_fit_cr[1])**2)**1.5) / np.absolute(2*right_fit_cr[0])
    return left_curverad, right_curverad
# -

def plot_roi(binary_warped,ploty,left_fitx,right_fitx):
    """Fill the polygon between the two fitted lane lines in green."""
    out_img = np.dstack((binary_warped, binary_warped, binary_warped))
    #left_fitx = (left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2])
    #right_fitx = (right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2])
    pts_left = np.array([np.transpose(np.vstack([list(left_fitx), ploty]))])
    pts_right = np.array([np.flipud(np.transpose(np.vstack([list(right_fitx), ploty])))])
    pts = np.hstack((pts_left, pts_right))
    cv2.fillPoly(out_img, np.int_([pts]), (0,255, 0))
    return out_img

# +
# Demo: run the single-image pipeline over the straight-lane test images.
import glob
images = glob.glob("../test_images/straight_lines1*.jpg")
import matplotlib.image as mpimg
for idx, fname in enumerate(images):
    img = mpimg.imread(fname)
    undst=undistort(img)
    f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20,10))
    ax1.imshow(img)
    ax1.set_title('Original Image', fontsize=30)
    ax2.imshow(undst)
    ax2.set_title('Undistorted Image', fontsize=30)
    plt.imsave('../output_images/undistorted.jpg',undst,cmap='gray')
    binary,color_binary=to_binary(undst)
    f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20,10))
    ax1.imshow(binary,cmap='gray')
    ax1.set_title('Binary Image (binary)', fontsize=30)
    ax2.imshow(color_binary)
    ax2.set_title('Thresholded Color map Image', fontsize=30)
    plt.imsave('../output_images/binary_output.jpg',binary,cmap='gray')
    binary_warped, M, Minv=warp(binary)
    plt.figure()
    plt.imshow(binary_warped,cmap='gray')
    #histogram = hist(binary_warped) # This is only for visulaization
    #plt.plot(histogram,'r', linewidth=3.5)
    plt.title("Warped image (binary_warped)", fontsize=30)
    plt.imsave('../output_images/warped_birds_eye_view.jpg',binary_warped,cmap='gray')
# -

left_fit,right_fit,leftx, lefty, rightx, righty=process_using_sliding_window(binary_warped)

# Import everything needed to edit/save/watch video clips
from moviepy.editor import VideoFileClip
from IPython.display import HTML

def process_image(img):
    """Full per-frame pipeline for video: detect, sanity-check, smooth, overlay.

    Uses the module-level `left`/`right` Line objects to carry state between
    frames (last good fits, 5-frame smoothing deques, failure streaks).
    """
    print(left.iteration) # shows the frame number being iterated
    left.iteration+=1
    undst=undistort(img) # undistort the image
    binary,color_binary=to_binary(undst) #convert to binary
    binary_warped, M, Minv=warp(binary) # convert to birds eye view
    if left.detected==False: # means last frame was not detected correctly, start from first using sliding window
        left_fit,right_fit,leftx, lefty, rightx, righty=process_using_sliding_window(binary_warped)
        left_curverad, right_curverad=measure_real_curvature(binary_warped,leftx, lefty, rightx, righty)
        print("Detected using sliding window")
    if left.detected == True: # means last frame was detected right, using sliding window or blind search, can continue blind search for this frame
        left_fit,right_fit,leftx, lefty, rightx, righty=process_using_blindsearch(binary_warped,left.current_fit,right.current_fit)
        left_curverad, right_curverad=measure_real_curvature(binary_warped,leftx, lefty, rightx, righty)
        print("Detected using blind serch window")
    ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0] )# Generate y coordinates forplotting polynomial
    left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2] # Generate x coordinates forplotting polynomial
    right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
    # Sanity metrics: curvature agreement and lane width at top/middle/bottom
    change_percentage=abs(right_curverad-left_curverad)/min(left_curverad,right_curverad)*100
    lane_width_1=abs((left_fit[0]*ploty[0]**2 + left_fit[1]*ploty[0] + left_fit[2])- (right_fit[0]*ploty[0]**2 + right_fit[1]*ploty[0] + right_fit[2]))*3.7/900
    lane_width_2=abs((left_fit[0]*(img.shape[0]/2)**2 + left_fit[1]*img.shape[0]/2 + left_fit[2])- (right_fit[0]*(img.shape[0]/2)**2 + right_fit[1]*img.shape[0]/2 + right_fit[2]))*3.7/900
    lane_width_3=abs((left_fit[0]*img.shape[0]**2 + left_fit[1]*img.shape[0] + left_fit[2])- (right_fit[0]*img.shape[0]**2 + right_fit[1]*img.shape[0] + right_fit[2]))*3.7/900
    if change_percentage>300 or lane_width_1>4 or lane_width_1<3 or lane_width_2>4 or lane_width_2<3 or lane_width_3>4 or lane_width_3<3 or left_curverad>3000 or right_curverad>3000:
        # false detection
        print("Detected = False")
        if(sum(left.detections)==0):
            left.flag=True
        left.detected=False # store that this frame failed
        left.detections.append(0) # for detecting continuous 5 failed frames
    else :
        if left.flag==True:
            left.recent_xfitted.clear()
            right.recent_xfitted.clear()
            left.flag=False
        print("Detected = True")
        left.detected=True
        left.detections.append(1)
        left.current_fit=left_fit
        right.current_fit=right_fit
        left.recent_xfitted.append(left_fitx)
        #left.recent_yfitted.append(lefty)
        right.recent_xfitted.append(right_fitx)
        #right.recent_yfitted.append(righty)
    #averaging last 5 frames
    X_left=np.asarray(left.recent_xfitted)
    X_mean_left=np.mean(X_left,axis=0)
    X_right=np.asarray(right.recent_xfitted)
    X_mean_right=np.mean(X_right,axis=0)
    roi=plot_roi(binary_warped,ploty,X_mean_left,X_mean_right)
    roi_unwarped=unwarp(roi,Minv)
    image_center=img.shape[1]//2
    lane_center=(left_fit[0]*img.shape[0]**2 + left_fit[1]*img.shape[0] + left_fit[2]+ right_fit[0]*img.shape[0]**2 + right_fit[1]*img.shape[0] + right_fit[2])//2
    offset=np.abs(image_center-lane_center)*3.7/900
    string1=("Offset="+ "{:.2f}".format(offset)+'m')
    #string1=("{:.2f}".format(lane_width_3)+" {:.2f}".format(lane_width_1))
    string2=("Curvature="+ "{:.1f}".format((left_curverad+ right_curverad)/2)+'m')
    #string2=("L.Curvature="+ "{:.1f}".format(left_curverad)+'m')
    #string3=("R.Curvature="+ "{:.1f}".format( right_curverad)+'m')
    print("lane widths:",lane_width_3,lane_width_1,lane_width_2)
    print('Real world left_curverad,right_curverad=',left_curverad,right_curverad)
    print("Percentage change:",change_percentage)
    result = cv2.addWeighted(img, 1, roi_unwarped, 0.3, 0)
    cv2.putText(result, string1,(300,150),cv2.FONT_HERSHEY_SIMPLEX,3,(255, 0, 0) ,7 )
    cv2.putText(result, string2,(200,250),cv2.FONT_HERSHEY_SIMPLEX,3,(255, 0, 0) ,7 )
    plt.imsave('../output_images/final_img.jpg',result,cmap='gray')
    return result

#attrs = vars(left)
# {'kids': 0, 'name': 'Dog', 'color': 'Spotted', 'age': 10, 'legs': 2, 'smell': 'Alot'}
# now dump this in some way or another
##print(',\n '.join("%s: %s" % item for item in attrs.items()))

# +
#left.iteration=0
video_output = '../output_Videos/project_video_output1.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4").subclip(0,5)
clip1 = VideoFileClip("../project_video.mp4").subclip(10,10.1)
video_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!
# %time video_clip.write_videofile(video_output, audio=False)
# -
Notebooks/NB2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import numpy as np import pandas as pd import matplotlib.pyplot as plt # %matplotlib inline import multiprocessing import os import itertools from collections import OrderedDict import dask from dask import compute, delayed import statsmodels.tsa.stattools as ts import math import warnings warnings.filterwarnings('ignore') from helper import * from stats import * from product_info import * from imp import reload import helper import stats reload(helper) reload(stats) # + CORE_NUM = multiprocessing.cpu_count() product_list = ['bu', 'ru', 'v', 'pp', 'l', 'jd'] HEAD_PATH = '/Users/sean/Desktop/Plan B/Quant/week1' DATA_PATH = HEAD_PATH + '/energy pkl tick/' all_dates = list(map(lambda x: x,os.listdir(DATA_PATH + product_list[0]))) dates = [] for i in range(len(all_dates)): dates.append(int(all_dates[i][0:8])) dates = np.sort(dates) all_dates = [] for i in dates: all_dates.append(str(i) + '.pkl') n_days = len(all_dates) # + data = load(DATA_PATH+product_list[0]+"/"+"20190828.pkl") # data.reset_index()[data.reset_index()['good']]['wpr'] plt.figure(1, figsize=(16, 10)) plt.subplot(2,1,1) plt.plot(data.reset_index()["wpr"]); plt.subplot(2,1,2) plt.plot(data.reset_index()["wpr"]); plt.plot(data.reset_index()[data.reset_index()["good"]]["wpr"]); # + data = load(DATA_PATH+product_list[0]+"/"+all_dates[0]) plt.figure(1, figsize=(16, 10)) plt.subplot(2,1,1) plt.plot(data["wpr"].values); plt.subplot(2,1,2) plt.plot(data.reset_index()["wpr"]); # orange line for back-testing plt.plot(data.reset_index()[data.reset_index()["good"]]["wpr"]); # - # - first signal buy/sell imbalance signal -> create a directory for signal -> create directory for products # + period = 4096 for product in product_list: os.makedirs(HEAD_PATH + '/tmp pkl/' + product, exist_ok=True) for 
product in product_list: os.makedirs(HEAD_PATH + '/tmp pkl/' + product + '/trade.imb.' + str(period), exist_ok=True) def zero_divide(x, y): ''' returns 0 if the numerator or denominator is 0 ''' with warnings.catch_warnings(): warnings.simplefilter('ignore') res = np.divide(x, y) if hasattr(y, "__len__"): res[y==0] = 0 elif y == 0: if hasattr(x, "__len__"): res = np.zeros(len(x)) else: res = 0 return res def vanish_thre(x, thre): x[np.abs(x)>thre] = 0 return x def ewma(x, halflife, init=0, adjust=False): init_s = pd.Series(data=init) s = init_s.append(x) if adjust: xx = range(len(x)) lamb=1 - 0.5**(1 / halflife) aa=1-np.power(1-lamb, xx)*(1-lamb) bb=s.ewm(halflife=halflife, adjust=False).mean().iloc[1:] return bb/aa else: return s.ewm(halflife=halflife, adjust=False).mean().iloc[1:] class factor_total_trade_imb_period(factor_template): factor_name = 'total.trade.imb.period' params = OrderedDict([ ("period", np.power(2, range(10,13))) ]) def formula(self, data, period): return vanish_thre(zero_divide(ewma(data["buy.trade"]+data["buy2.trade"]-data["sell.trade"]-data["sell2.trade"], period, adjust=True), ewma(data["qty"], period, adjust=True)),1).values # + SAVE_PATH = HEAD_PATH + '/factor' def create_signal_path(signal_list, product, HEAD_PATH): keys = list(signal_list.params.keys()) for cartesian in itertools.product(*signal_list.params.values()): signal_name = signal_list.factor_name for i in range(len(cartesian)): signal_name = signal_name.replace(keys[i], str(cartesian[i])) os.makedirs(HEAD_PATH+"/tmp pkl/"+product+"/"+signal_name, exist_ok=True) print(HEAD_PATH+"/tmp pkl/"+product+"/"+signal_name) factor_1 = factor_total_trade_imb_period() for product in product_list: create_signal_path(factor_1, product, SAVE_PATH); # - # %%time for product in product_list: file_list = list(map(lambda x: DATA_PATH+product+"/"+x, os.listdir(DATA_PATH + product))) parLapply(CORE_NUM, file_list, build_composite_signal,signal_list=factor_1, product=product, HEAD_PATH=SAVE_PATH) ; 
SAVE_PATH

# ---------------------------------------------------------------------------
# Stationarity check of the 'total.trade.imb.4096' signal.
# Every 10th trading day is sampled to keep ADF/KPSS tractable, and within
# the pooled sample the signal is thinned to every `period`-th observation
# (presumably period == 4096, defined in an earlier cell -- TODO confirm).
# ---------------------------------------------------------------------------
to_choose = np.where(np.mod(range(n_days), 10) == 0)
signal_name = 'total.trade.imb.4096'
for product in product_list:
    all_signal = np.array([])
    for file in np.array(all_dates)[to_choose]:
        data = load(HEAD_PATH + '/energy pkl tick/' + product + '/' + file)
        S = load(SAVE_PATH + '/tmp pkl/' + product + '/' + signal_name + '/' + file)
        S = S[data['good']]  # keep only ticks flagged usable
        all_signal = np.concatenate((all_signal, S), axis=0)
    # thin to every period-th point to reduce autocorrelation in the tests
    sample_signal = all_signal[np.arange(1, len(all_signal) + 1) % period == 0]
    print('ADF Stationary Test')
    print('-------------------')
    adf_res = ts.adfuller(sample_signal,
                          maxlag=int(pow(len(sample_signal) - 1, (1 / 3))),
                          regression='ct', autolag=None)
    print(product)
    if adf_res[1] < 0.05:
        print("STAT: ", adf_res[0], "STATIONARY")
    else:
        print("STAT: ", adf_res[0], "NON-STATIONARY")
    print('+++++++++++++++++++')
    kpss_res = ts.kpss(sample_signal, regression='c',
                       lags=int(3 * math.sqrt(len(sample_signal)) / 13))
    print(product)
    # BUGFIX: the KPSS branch originally printed adf_res[0]; report the KPSS
    # statistic itself.
    if kpss_res[1] > 0.05:
        print("STAT: ", kpss_res[0], "STATIONARY")
    else:
        print("STAT: ", kpss_res[0], "NON-STATIONARY")

# +
class factor_trade_imb_period(factor_template):
    # EWMA of per-tick trade imbalance over several look-back periods.
    factor_name = 'trade.imb.period'

    params = OrderedDict([
        ("period", np.array([1024, 2048, 4096]))
    ])

    def formula(self, data, period):
        """Exponentially weighted (buy - sell) / (buy + sell) trade imbalance."""
        return ewma(zero_divide(data['buy.trade'] - data['sell.trade'],
                                data['buy.trade'] + data['sell.trade']),
                    period, adjust=True).values


factor_2 = factor_trade_imb_period()
for product in product_list:
    create_signal_path(factor_2, product, SAVE_PATH);
# -

DATA_PATH

# %%time
for product in product_list:
    file_list = list(map(lambda x: DATA_PATH + product + '/' + x,
                         os.listdir(DATA_PATH + product)))
    parLapply(CORE_NUM, file_list, build_composite_signal,
              signal_list=factor_2, product=product, HEAD_PATH=SAVE_PATH);

# Same stationarity check for the per-contract 'trade.imb.4096' signal.
to_choose = np.where(np.mod(range(n_days), 10) == 0)
signal_name = 'trade.imb.4096'
for product in product_list:
    all_signal = np.array([])
    for file in np.array(all_dates)[to_choose]:
        data = load(HEAD_PATH + '/energy pkl tick/' + product + '/' + file)
        S = load(SAVE_PATH + '/tmp pkl/' + product + '/' + signal_name + '/' + file)
        S = S[data['good']]
        all_signal = np.concatenate((all_signal, S), axis=0)
    sample_signal = all_signal[np.arange(1, len(all_signal) + 1) % period == 0]
    print('ADF Stationary Test')
    print('-------------------')
    adf_res = ts.adfuller(sample_signal,
                          maxlag=int(pow(len(sample_signal) - 1, (1 / 3))),
                          regression='ct', autolag=None)
    print(product)
    if adf_res[1] < 0.05:
        print("STAT: ", adf_res[0], "STATIONARY")
    else:
        print("STAT: ", adf_res[0], "NON-STATIONARY")
    print('+++++++++++++++++++')
    kpss_res = ts.kpss(sample_signal, regression='c',
                       lags=int(3 * math.sqrt(len(sample_signal)) / 13))
    print(product)
    # BUGFIX: report kpss_res[0] (was adf_res[0]).
    if kpss_res[1] > 0.05:
        print("STAT: ", kpss_res[0], "STATIONARY")
    else:
        print("STAT: ", kpss_res[0], "NON-STATIONARY")

# +
def get_atr(file, product, period_list, HEAD_PATH):
    """Compute and persist the relative range ATR ((max - min) / wpr) per period."""
    data = load(file)
    for period in period_list:
        S = (data['max.' + str(period)] - data['min.' + str(period)]) / data['wpr']
        # file[-12:] is the date-named basename of the source pickle
        save(S, HEAD_PATH + '/tmp pkl/' + product + '/atr.' + str(period) + '/' + file[-12:])

for product in product_list:
    for period in [1024, 2048, 4096]:
        os.makedirs(SAVE_PATH + '/tmp pkl/' + product + '/atr.' + str(period),
                    exist_ok=True)
# -

# %%time
for product in product_list:
    file_list = list(map(lambda x: DATA_PATH + product + '/' + x,
                         os.listdir(DATA_PATH + product)))
    parLapply(CORE_NUM, file_list, get_atr, product=product,
              period_list=np.array([1024, 2048, 4096]), HEAD_PATH=SAVE_PATH);

# +
# Visual comparison of the two signals across products (every 10th day).
to_choose = (np.arange(n_days) + 1) % 10 == 0
signal_name = 'total.trade.imb.4096'
all_product_signal = dict([])
for product in product_list:
    all_signal = np.array([])
    for file in np.array(all_dates)[to_choose]:
        data = load(HEAD_PATH + '/energy pkl tick/' + product + '/' + file)
        S = load(SAVE_PATH + '/tmp pkl/' + product + '/' + signal_name + '/' + file)
        S = S[data['good']]
        all_signal = np.concatenate((all_signal, S), axis=0)
    all_product_signal[product] = all_signal

plt.figure(1, figsize=(16, 10))
plt.subplot(2, 1, 1)
# plot the 'total.trade.imb.4096' signal
for product in product_list:
    plt.plot(all_product_signal[product], label=product)
plt.title(signal_name)
plt.legend()

to_choose = (np.arange(n_days) + 1) % 10 == 0
signal_name = 'trade.imb.4096'
all_product_signal = dict([])
for product in product_list:
    all_signal = np.array([])
    for file in np.array(all_dates)[to_choose]:
        data = load(HEAD_PATH + '/energy pkl tick/' + product + '/' + file)
        S = load(SAVE_PATH + '/tmp pkl/' + product + '/' + signal_name + '/' + file)
        S = S[data['good']]
        all_signal = np.concatenate((all_signal, S), axis=0)
    all_product_signal[product] = all_signal

plt.subplot(2, 1, 2)
# plot the 'trade.imb.4096' signal
for product in product_list:
    plt.plot(all_product_signal[product], label=product)
plt.title(signal_name)
plt.legend()

# +
def sharpe(x):
    """Annualised Sharpe ratio of a daily series (250 trading days/year)."""
    return zero_divide(np.mean(x) * np.sqrt(250), np.std(x, ddof=1))

def drawdown(x):
    """Drawdown at the end of the sample: running peak minus final cum-PnL."""
    y = np.cumsum(x)
    return np.max(y) - np.max(y[-1:])

def max_drawdown(x):
    """Maximum peak-to-trough drawdown of the cumulative PnL."""
    y = np.cumsum(x)
    return np.max(np.maximum.accumulate(y) - y)

def get_hft_summary(result, thre_mat, n):
    """Aggregate per-day backtest results into one summary row per threshold.

    result   : iterable of per-day dicts with 'num'/'pnl'/'ret' Series
               (one entry per threshold, as produced by get_signal_pnl)
    thre_mat : DataFrame of (open, close) thresholds; indexes the output
    n        : number of days (kept for interface compatibility; unused here)
    """
    all_result = pd.DataFrame(data={"daily.result": result})
    daily_num = all_result['daily.result'].apply(lambda x: x['num'])
    daily_pnl = all_result['daily.result'].apply(lambda x: x['pnl'])
    daily_ret = all_result['daily.result'].apply(lambda x: x['ret'])
    total_num = daily_num.sum()
    if len(total_num) != len(thre_mat):
        # BUGFIX: `selfException` was an undefined name; raise a real exception.
        raise Exception('Mismatch!')
    total_pnl = daily_pnl.sum()
    total_ret = daily_ret.sum()
    avg_pnl = zero_divide(total_pnl, total_num)
    avg_ret = zero_divide(total_ret, total_num)
    total_sharpe = sharpe(daily_pnl)
    total_drawdown = drawdown(daily_pnl)
    total_max_drawdown = max_drawdown(daily_pnl)
    sharpe_ret = sharpe(daily_ret)
    drawdown_ret = drawdown(daily_ret)
    max_drawdown_ret = max_drawdown(daily_ret)
    final_result = pd.DataFrame(data=OrderedDict([
        ("open", thre_mat["open"]),
        ("close", thre_mat["close"]),
        ("num", total_num),
        ("avg.pnl", avg_pnl),
        ("total.pnl", total_pnl),
        ("sharpe", total_sharpe),
        ("drawdown", total_drawdown),
        ("max.drawdown", total_max_drawdown),
        ("avg.ret", avg_ret),
        ("total.ret", total_ret),
        ("sharpe.ret", sharpe_ret),
        ("drawdown.ret", drawdown_ret),
        ("max.drawdown.ret", max_drawdown_ret),
        ("mar", total_pnl / total_max_drawdown),
        ("mar.ret", total_ret / max_drawdown_ret)]),
        index=thre_mat.index)
    return OrderedDict([("final.result", final_result),
                        ("daily.num", daily_num),
                        ("daily.pnl", daily_pnl),
                        ("daily.ret", daily_ret)])

def get_signal_pnl(file, product, signal_name, thre_mat, reverse=1, tranct=1.1e-4,
                   max_spread=0.61, tranct_ratio=True,
                   HEAD_PATH='/Users/sean/Desktop/Plan B/Quant/week1',
                   SAVE_PATH="/factor", atr_filter=0):
    """Backtest one day's signal over a grid of open/close thresholds.

    Returns a DataFrame (indexed like thre_mat) with trade count, PnL and
    notional return per threshold.  `reverse=-1` flips the signal's sign,
    since we don't know a priori whether it is positively or negatively
    correlated with future returns.
    """
    # load data
    data = load(HEAD_PATH + '/energy pkl tick/' + product + '/' + file)
    S = load(HEAD_PATH + SAVE_PATH + '/tmp pkl/' + product + '/' + signal_name + '/' + file)
    pred = S * reverse
    pred = pred[data['good']]
    atr = load(HEAD_PATH + SAVE_PATH + '/tmp pkl/' + product + '/' + 'atr.4096' + '/' + file)
    atr = atr[data['good']].reset_index(drop=True)
    data = data[data['good']].reset_index(drop=True)
    # load signal
    ## we don't know the signal is positive correlated or negative correlated
    result = pd.DataFrame(data=OrderedDict([
        ("open", thre_mat['open'].values),
        ("close", thre_mat['close'].values),
        ("num", 0), ("avg.pnl", 0), ("pnl", 0), ("avg.ret", 0), ("ret", 0)]),
        index=thre_mat.index)
    count = 0
    cur_spread = data["ask"] - data["bid"]
    for thre in thre_mat.iterrows():
        count = count + 1
        buy = pred > thre[1]["open"]
        sell = pred < -thre[1]["open"]
        signal = pd.Series(data=0, index=data.index)
        position = signal.copy()
        signal[buy] = 1
        signal[sell] = -1
        signal[atr < atr_filter] = 0  # suppress entries in dead (low-ATR) regimes
        scratch = -thre[1]["close"]
        # Long side: open on signal==1 when next quotes are valid and the
        # spread is acceptable; close when pred falls back through -scratch.
        position_pos = pd.Series(data=np.nan, index=data.index)
        position_pos.iloc[0] = 0
        position_pos[(signal == 1) & (data["next.ask"] > 0) & (data["next.bid"] > 0) & (cur_spread < max_spread)] = 1
        position_pos[(pred < -scratch) & (data["next.bid"] > 0) & (cur_spread < max_spread)] = 0
        position_pos.ffill(inplace=True)
        pre_pos = position_pos.shift(1)
        notional_position_pos = pd.Series(data=0, index=data.index)
        notional_position_pos[position_pos == 1] = 1
        notional_position_pos[(position_pos == 1) & (pre_pos == 1)] = np.nan
        notional_position_pos[(notional_position_pos == 1)] = 1 / data["next.ask"][(notional_position_pos == 1)]
        notional_position_pos.ffill(inplace=True)
        # Short side, mirror of the long side.
        position_neg = pd.Series(data=np.nan, index=data.index)
        position_neg.iloc[0] = 0
        position_neg[(signal == -1) & (data["next.ask"] > 0) & (data["next.bid"] > 0) & (cur_spread < max_spread)] = -1
        position_neg[(pred > scratch) & (data["next.ask"] > 0) & (cur_spread < max_spread)] = 0
        position_neg.ffill(inplace=True)
        pre_neg = position_neg.shift(1)
        notional_position_neg = pd.Series(data=0, index=data.index)
        notional_position_neg[position_neg == -1] = -1
        notional_position_neg[(position_neg == -1) & (pre_neg == -1)] = np.nan
        notional_position_neg[(notional_position_neg == -1)] = -1 / data["next.bid"][(notional_position_neg == -1)]
        notional_position_neg.ffill(inplace=True)
        position = position_pos + position_neg
        notional_position = notional_position_pos + notional_position_neg
        # force a flat book at the open and near the close of the day
        position.iloc[0] = 0
        position.iloc[-2:] = 0
        notional_position.iloc[0] = 0
        notional_position.iloc[-2:] = 0
        change_pos = position - position.shift(1)
        notional_change_pos = notional_position - notional_position.shift(1)
        change_pos.iloc[0] = 0
        notional_change_pos.iloc[0] = 0
        change_base = pd.Series(data=0, index=data.index)
        change_buy = change_pos > 0
        change_sell = change_pos < 0
        # transaction cost: proportional (ratio) or per-unit, charged on fills
        if (tranct_ratio):
            change_base[change_buy] = data["next.ask"][change_buy] * (1 + tranct)
            change_base[change_sell] = data["next.bid"][change_sell] * (1 - tranct)
        else:
            change_base[change_buy] = data["next.ask"][change_buy] + tranct
            change_base[change_sell] = data["next.bid"][change_sell] - tranct
        final_pnl = -sum(change_base * change_pos)
        ret = -sum(change_base * notional_change_pos)
        num = sum((position != 0) & (change_pos != 0))
        # NOTE(review): both branches below return inside the threshold loop,
        # so only the FIRST row of thre_mat is ever evaluated and the rest of
        # `result` stays zero.  This matches the original's literal statement
        # order, but it makes the loop (and `count`) pointless -- confirm
        # against the upstream notebook whether these returns should instead
        # follow the loop.
        if num == 0:
            result.loc[thre[0], ("num", "avg.pnl", "pnl", "avg.ret", "ret")] = (0, 0, 0, 0, 0)
            return result
        else:
            avg_pnl = np.divide(final_pnl, num)
            avg_ret = np.divide(ret, num)
            result.loc[thre[0], ("num", "avg.pnl", "pnl", "avg.ret", "ret")] = (num, avg_pnl, final_pnl, avg_ret, ret)
            return result

# +
# %%time
# Full-sample threshold scan for 'total.trade.imb.4096'.
signal_name = 'total.trade.imb.4096'
all_trade_stat = dict([])
open_list = np.arange(0.1, 0.4, 0.02)
thre_list = []
# close levels are fractions of the open level, with opposite sign
for cartesian in itertools.product(open_list, np.array([0.2, 0.4, 0.6, 0.8, 1.0])):
    thre_list.append((cartesian[0], -cartesian[0] * cartesian[1]))
thre_list = np.array(thre_list)
thre_mat = pd.DataFrame(data=OrderedDict([
    ("open", thre_list[:, 0]),
    ("close", thre_list[:, 1])
]))
for product in product_list:
    print(product)
    spread = product_info[product]['spread']
    tranct = product_info[product]['tranct']
    tranct_ratio = product_info[product]['tranct.ratio']
    with dask.config.set(scheduler='processes', num_workers=CORE_NUM):
        f_par = functools.partial(get_signal_pnl, product=product, signal_name=signal_name,
                                  thre_mat=thre_mat, reverse=1, tranct=tranct,
                                  max_spread=spread * 1.1, tranct_ratio=tranct_ratio,
                                  atr_filter=0.01)
        result = compute([delayed(f_par)(file) for file in all_dates])[0]
    trade_stat = get_hft_summary(result, thre_mat, n_days)
    all_trade_stat[product] = trade_stat
save(all_trade_stat, HEAD_PATH + "/" + "energy_trade_stat_total_trade_imb_4096.pkl")
# -

# %%time
# Full-sample threshold scan for 'trade.imb.4096'.
signal_name = "trade.imb.4096"
all_trade_stat = dict([])
from collections import OrderedDict
import itertools
open_list = np.arange(0.06, 0.2, 0.02)
thre_list = []
for cartesian in itertools.product(open_list, np.array([0.2, 0.4, 0.6, 0.8, 1.0])):
    thre_list.append((cartesian[0], -cartesian[0] * cartesian[1]))
thre_list = np.array(thre_list)
thre_mat = pd.DataFrame(data=OrderedDict([("open", thre_list[:, 0]),
                                          ("close", thre_list[:, 1])]))
for product in product_list:
    print(product)
    spread = product_info[product]["spread"]
    tranct = product_info[product]["tranct"]
    tranct_ratio = product_info[product]["tranct.ratio"]
    with dask.config.set(scheduler='processes', num_workers=CORE_NUM):
        f_par = functools.partial(get_signal_pnl, product=product, signal_name=signal_name,
                                  thre_mat=thre_mat, reverse=1, tranct=tranct,
                                  max_spread=spread * 1.1, tranct_ratio=tranct_ratio,
                                  atr_filter=0.01)
        result = compute([delayed(f_par)(file) for file in all_dates])[0]
    trade_stat = get_hft_summary(result, thre_mat, n_days)
    all_trade_stat[product] = trade_stat
save(all_trade_stat, HEAD_PATH + "/" + "energy_trade_stat_trade_imb_4096.pkl")

# +
# Per-product cumulative PnL of the "good" strategies (avg.pnl > 1 spread).
date_str = [n[0:8] for n in all_dates]
format_dates = np.array([pd.to_datetime(d) for d in date_str])
all_trade_stat = load(HEAD_PATH + '/' + 'energy_trade_stat_trade_imb_4096.pkl')
all_pnl = np.zeros([n_days, len(product_list)])
i = 0
for product in product_list:
    spread = product_info[product]['spread']
    trade_stat = all_trade_stat[product]
    good_strat = trade_stat['final.result']['avg.pnl'] > 1 * spread
    if sum(good_strat) == 0:
        continue
    good_pnl = trade_stat['daily.ret'].loc[:, good_strat].sum(axis=1) / sum(good_strat)
    all_pnl[:, i] = good_pnl
    i += 1
    plt.figure(1, figsize=(16, 10))
    plt.subplot(3, 2, i)
    plt.title("")
    plt.xlabel("")
    plt.ylabel("pnl")
    plt.title(product)
    plt.plot(format_dates, good_pnl.cumsum());
# -

all_portfolio = np.array(np.mean(all_pnl[:, :i], axis=1))
plt.figure(1, figsize=(16, 10));
plt.title("");
plt.xlabel("date");
plt.ylabel("pnl");
plt.title("portfolio");
plt.plot(format_dates, all_portfolio.cumsum());

# Same portfolio picture for the 'total' variant of the signal.
all_trade_stat = load(HEAD_PATH + "/" + "energy_trade_stat_total_trade_imb_4096.pkl")
all_pnl = np.zeros([n_days, len(product_list)])
i = 0
for product in product_list:
    spread = product_info[product]["spread"]
    trade_stat = all_trade_stat[product]
    good_strat = trade_stat["final.result"]["avg.pnl"] > 1 * spread
    if sum(good_strat) == 0:
        continue
    good_pnl = trade_stat["daily.ret"].loc[:, good_strat].sum(axis=1) / sum(good_strat)
    all_pnl[:, i] = good_pnl
    i = i + 1
    plt.figure(1, figsize=(16, 10))
    plt.subplot(3, 2, i)
    plt.title("");
    plt.xlabel("");
    plt.ylabel("pnl");
    plt.title(product);
    plt.plot(format_dates, good_pnl.cumsum());

all_portfolio = np.array(np.mean(all_pnl[:, :i], axis=1))
plt.figure(1, figsize=(16, 10));
plt.title("");
plt.xlabel("date");
plt.ylabel("pnl");
plt.title("portfolio");
plt.plot(format_dates, all_portfolio.cumsum());

# #### see in-sample and out-of-sample performance

# +
# see in-sample and out-of-sample performance
train_sample = np.array(all_dates) < "201807"
test_sample = np.array(all_dates) > "201807"
print(sum(train_sample))
print(sum(test_sample))

# +
# %%time
# In-sample threshold scan for 'trade.imb.4096' (finer open grid).
signal_name = "trade.imb.4096"
train_trade_stat = dict([])
open_list = np.arange(0.06, 0.2, 0.002)
thre_list = []
for cartesian in itertools.product(open_list, np.array([0.2, 0.4, 0.6, 0.8, 1.0])):
    thre_list.append((cartesian[0], -cartesian[0] * cartesian[1]))
thre_list = np.array(thre_list)
thre_mat = pd.DataFrame(data=OrderedDict([("open", thre_list[:, 0]),
                                          ("close", thre_list[:, 1])]))
for product in product_list:
    spread = product_info[product]["spread"]
    print(product, spread)
    tranct = product_info[product]["tranct"]
    print(product, tranct)
    tranct_ratio = product_info[product]["tranct.ratio"]
    print(product, tranct_ratio)
    with dask.config.set(scheduler='processes', num_workers=CORE_NUM):
        f_par = functools.partial(get_signal_pnl, product=product, signal_name=signal_name,
                                  thre_mat=thre_mat, reverse=1, tranct=tranct,
                                  max_spread=spread * 1.1, tranct_ratio=tranct_ratio,
                                  atr_filter=0.01)
        result = compute([delayed(f_par)(file) for file in np.array(all_dates)[train_sample]])[0]
    trade_stat = get_hft_summary(result, thre_mat, sum(train_sample))
    train_trade_stat[product] = trade_stat
save(train_trade_stat, HEAD_PATH + "/energy_train_trade_stat.pkl")
# -

i = 0
train_trade_stat = load(HEAD_PATH + '/energy_train_trade_stat.pkl')
for product in product_list:
    spread = product_info[product]['spread']
    trade_stat = train_trade_stat[product]
    good_strat = (trade_stat["final.result"]["avg.pnl"] > 1 * spread) & (trade_stat["final.result"]["num"] > 0)
    if sum(good_strat) == 0:
        continue
    good_pnl = trade_stat["daily.ret"].loc[:, good_strat].sum(axis=1) / sum(good_strat)
    i = i + 1
    plt.figure(1, figsize=(16, 10))
    plt.subplot(3, 2, i)
    plt.title("");
    plt.xlabel("");
    plt.ylabel("pnl");
    plt.title(product);
    plt.plot(format_dates[train_sample], good_pnl.cumsum());

# +
# %%time
# Out-of-sample scan for 'trade.imb.4096' (same threshold grid).
signal_name = "trade.imb.4096"
test_trade_stat = dict([])
open_list = np.arange(0.06, 0.2, 0.002)
thre_list = []
for cartesian in itertools.product(open_list, np.array([0.2, 0.4, 0.6, 0.8, 1.0])):
    thre_list.append((cartesian[0], -cartesian[0] * cartesian[1]))
thre_list = np.array(thre_list)
thre_mat = pd.DataFrame(data=OrderedDict([("open", thre_list[:, 0]),
                                          ("close", thre_list[:, 1])]))
for product in product_list:
    spread = product_info[product]["spread"]
    tranct = product_info[product]["tranct"]
    tranct_ratio = product_info[product]["tranct.ratio"]
    with dask.config.set(scheduler='processes', num_workers=CORE_NUM):
        f_par = functools.partial(get_signal_pnl, product=product, signal_name=signal_name,
                                  thre_mat=thre_mat, reverse=1, tranct=tranct,
                                  max_spread=spread * 1.1, tranct_ratio=tranct_ratio,
                                  atr_filter=0.01)
        result = compute([delayed(f_par)(file) for file in np.array(all_dates)[test_sample]])[0]
    trade_stat = get_hft_summary(result, thre_mat, sum(test_sample))
    test_trade_stat[product] = trade_stat
save(test_trade_stat, HEAD_PATH + "/energy_test_trade_stat.pkl")

# +
# Strategy selection on train, evaluation on test.
i = 0
train_trade_stat = load(HEAD_PATH + "/energy_train_trade_stat.pkl")
test_trade_stat = load(HEAD_PATH + "/energy_test_trade_stat.pkl")
test_all_pnl = np.zeros([sum(test_sample), len(product_list)])
train_all_pnl = np.zeros([sum(train_sample), len(product_list)])
for product in product_list:
    spread = product_info[product]['spread']
    trade_stat = train_trade_stat[product]
    good_strat = (trade_stat['final.result']['avg.pnl'] > 1 * spread) & (trade_stat['final.result']['num'] > 0)
    if sum(good_strat) == 0:
        continue
    train_pnl = trade_stat['daily.ret'].loc[:, good_strat].sum(axis=1) / sum(good_strat)
    trade_stat = test_trade_stat[product]
    test_pnl = trade_stat['daily.ret'].loc[:, good_strat].sum(axis=1) / sum(good_strat)
    print(product, "train sharpe: ", sharpe(train_pnl))
    print(product, "test sharpe: ", sharpe(test_pnl))
    print(sum(good_strat))
    test_all_pnl[:, i] = test_pnl
    train_all_pnl[:, i] = train_pnl
    i += 1
    plt.figure(1, figsize=(16, 10));
    plt.subplot(3, 2, i);
    plt.title("");
    plt.xlabel("");
    plt.ylabel("pnl");
    plt.title(product);
    plt.plot(format_dates[test_sample], test_pnl.cumsum());
# -

train_portfolio = np.array(np.mean(train_all_pnl[:, :i], axis=1))
test_portfolio = np.array(np.mean(test_all_pnl[:, :i], axis=1))
all_portfolio = np.append(train_portfolio, test_portfolio)
all_portfolio
plt.figure(1, figsize=(16, 10));
plt.title("");
plt.xlabel("date");
plt.ylabel("pnl");
plt.title("portfolio");
plt.plot(format_dates, all_portfolio.cumsum());
plt.plot(format_dates[test_sample], all_portfolio.cumsum()[test_sample]);
print("Portfolio Train Sharpe: ", sharpe(train_portfolio))
print("Portfolio Test Sharpe: ", sharpe(test_portfolio))

# +
# %%time
# In-sample scan for 'total.trade.imb.4096'.
signal_name = "total.trade.imb.4096"
train_trade_stat = dict([])
open_list = np.arange(0.1, 0.4, 0.02)
thre_list = []
for cartesian in itertools.product(open_list, np.array([0.2, 0.4, 0.6, 0.8, 1.0])):
    thre_list.append((cartesian[0], -cartesian[0] * cartesian[1]))
thre_list = np.array(thre_list)
thre_mat = pd.DataFrame(data=OrderedDict([("open", thre_list[:, 0]),
                                          ("close", thre_list[:, 1])]))
for product in product_list:
    spread = product_info[product]["spread"]
    tranct = product_info[product]["tranct"]
    tranct_ratio = product_info[product]["tranct.ratio"]
    with dask.config.set(scheduler='processes', num_workers=CORE_NUM):
        f_par = functools.partial(get_signal_pnl, product=product, signal_name=signal_name,
                                  thre_mat=thre_mat, reverse=1, tranct=tranct,
                                  max_spread=spread * 1.1, tranct_ratio=tranct_ratio,
                                  atr_filter=0.01)
        result = compute([delayed(f_par)(file) for file in np.array(all_dates)[train_sample]])[0]
    trade_stat = get_hft_summary(result, thre_mat, sum(train_sample))
    train_trade_stat[product] = trade_stat
save(train_trade_stat, HEAD_PATH + "/energy_total_train_trade_stat.pkl")

# +
# %%time
# Out-of-sample scan for 'total.trade.imb.4096'.
signal_name = "total.trade.imb.4096"
test_trade_stat = dict([])
open_list = np.arange(0.1, 0.4, 0.02)
thre_list = []
for cartesian in itertools.product(open_list, np.array([0.2, 0.4, 0.6, 0.8, 1.0])):
    thre_list.append((cartesian[0], -cartesian[0] * cartesian[1]))
thre_list = np.array(thre_list)
thre_mat = pd.DataFrame(data=OrderedDict([("open", thre_list[:, 0]),
                                          ("close", thre_list[:, 1])]))
for product in product_list:
    spread = product_info[product]["spread"]
    tranct = product_info[product]["tranct"]
    tranct_ratio = product_info[product]["tranct.ratio"]
    with dask.config.set(scheduler='processes', num_workers=CORE_NUM):
        f_par = functools.partial(get_signal_pnl, product=product, signal_name=signal_name,
                                  thre_mat=thre_mat, reverse=1, tranct=tranct,
                                  max_spread=spread * 1.1, tranct_ratio=tranct_ratio,
                                  atr_filter=0.01)
        result = compute([delayed(f_par)(file) for file in np.array(all_dates)[test_sample]])[0]
    # BUGFIX: this is the TEST run; the original passed sum(train_sample).
    trade_stat = get_hft_summary(result, thre_mat, sum(test_sample))
    test_trade_stat[product] = trade_stat
save(test_trade_stat, HEAD_PATH + "/energy_total_test_trade_stat.pkl")

# +
# Train-selected strategies evaluated on test, 'total' signal variant.
i = 0
train_trade_stat = load(HEAD_PATH + "/energy_total_train_trade_stat.pkl")
test_trade_stat = load(HEAD_PATH + "/energy_total_test_trade_stat.pkl")
test_all_pnl = np.zeros([sum(test_sample), len(product_list)])
train_all_pnl = np.zeros([sum(train_sample), len(product_list)])
for product in product_list:
    spread = product_info[product]['spread']
    trade_stat = train_trade_stat[product]
    good_strat = (trade_stat['final.result']['avg.pnl'] > 1 * spread) & (trade_stat['final.result']['num'] > 0)
    if sum(good_strat) == 0:
        continue
    train_pnl = trade_stat['daily.ret'].loc[:, good_strat].sum(axis=1) / sum(good_strat)
    trade_stat = test_trade_stat[product]
    test_pnl = trade_stat['daily.ret'].loc[:, good_strat].sum(axis=1) / sum(good_strat)
    print(product, "train sharpe: ", sharpe(train_pnl))
    print(product, "test sharpe: ", sharpe(test_pnl))
    print(sum(good_strat))
    test_all_pnl[:, i] = test_pnl
    train_all_pnl[:, i] = train_pnl
    i += 1
    plt.figure(1, figsize=(16, 10));
    plt.subplot(3, 2, i);
    plt.title("");
    plt.xlabel("");
    plt.ylabel("pnl");
    plt.title(product);
    plt.plot(format_dates[test_sample], test_pnl.cumsum());

# +
# See PnL for the portfolio
train_portfolio = np.array(np.mean(train_all_pnl[:, :i], axis=1))
test_portfolio = np.array(np.mean(test_all_pnl[:, :i], axis=1))
all_portfolio = np.append(train_portfolio, test_portfolio)
all_portfolio
plt.figure(1, figsize=(16, 10));
plt.title("");
plt.xlabel("date");
plt.ylabel("pnl");
plt.title("portfolio");
plt.plot(format_dates, all_portfolio.cumsum());
plt.plot(format_dates[test_sample], all_portfolio.cumsum()[test_sample]);
print("Portfolio Train Sharpe: ", sharpe(train_portfolio));
print("Portfolio Test Sharpe: ", sharpe(test_portfolio));
# -
Strategy&Research/2.Single Factor.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.8.12 64-bit (''khaykingleb'': conda)'
#     language: python
#     name: python3
# ---

# + [markdown] id="Np9ZUcpy8ivW"
# # Keyword Spotting

# + [markdown] id="QKvRvy3pEIcr"
# Download the repository and needed packages:

# + id="VK4nvG4h9BPY"
# !git clone https://github.com/khaykingleb/KWS.git

# + id="8m7GY2iV9He3"
# !pip install -r KWS/requirements.txt

# + id="SW4-t3aJ8ivb"
# %matplotlib inline
# %config InlineBackend.figure_format = "svg"

import matplotlib.pyplot as plt
import tqdm
from pathlib import Path
from IPython import display

import pandas as pd

import torch
import torchaudio
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader

import sys
sys.path.append('KWS/')

from configs import *
from main import main
from kws.augmentations import LogMelSpec

# + [markdown] id="aVojQ8Lv8ive"
# ## Downloading data

# + id="R4mcpA6bCg-X" language="bash"
# mkdir ./KWS/data
#
# wget http://download.tensorflow.org/data/speech_commands_v0.01.tar.gz \
# -O ./KWS/data/speech_commands_v0.01.tar.gz
#
# mkdir ./KWS/data/speech_commands && tar -C ./KWS/data/speech_commands \
# -xvzf ./KWS/data/speech_commands_v0.01.tar.gz 1> log
#
# rm ./KWS/data/speech_commands_v0.01.tar.gz

# + id="-d5eUrpHsaSn" language="bash"
# mkdir ./KWS/saved

# + [markdown] id="LW7bfz178ivj"
# ## Streaming

# + [markdown] id="S_v6ML9eMAb7"
# For this part, the ["Streaming keyword spotting on mobile devices"](https://arxiv.org/pdf/2005.06720v2.pdf) paper is used.
#
# "Streaming mode means that the model receives portion of the input sequence and classifies it incrementally. In a KWS application we do not know when the keyword starts or ends, so we need to process every audio packet and return the classification results in real time every 20ms (for example) — it is called streaming inference."

# + [markdown] id="6Nqg6hk7WNub"
# But, at first, let's train our base model to use it as a benchmark for comparison.

# + id="xIWtS-JnaqfJ"
config = Config()

# + id="yeNlInj5auyY" colab={"base_uri": "https://localhost:8080/", "height": 498} outputId="b2eb6a90-517f-48d1-ddbe-b152993b36b1"
# Train the baseline CRNN (2 layers x 64 hidden) and keep its result for later comparison.
config.model_name = "base_2x64"
base_2x64_result = main(config)

# + [markdown] id="xsSRQuOHWWvA"
# Now we can try streaming:

# + id="DHPYZhJpxDMJ" colab={"base_uri": "https://localhost:8080/"} outputId="363a2e3a-1c27-44c3-9689-973b8704b916"
# Load the trained baseline weights into the streaming-capable model variant.
from kws.models import CRNNStreaming

streaming_model = CRNNStreaming(config=config).to(config.device)
streaming_model.load_state_dict(torch.load("KWS/saved/base_2x64_best.pth")["state_dict"])

# + id="GwdFBa_yET-M"
def visualize_audio(wav: torch.Tensor, sr: int = 22050):
    """Plot a waveform and render an inline audio player for it."""
    # collapse multi-channel audio to mono for display
    if wav.dim() == 2:
        wav = wav.mean(dim=0)
    plt.figure(figsize=(14, 6))
    plt.plot(wav, alpha=0.7, c="green")
    plt.xlabel("Time", size=16)
    plt.ylabel("Amplitude", size=16)
    plt.show()
    display.display(display.Audio(wav, rate=sr))

# + id="jfExuQAhyQxj"
# Concatenate a few speech-commands clips (keyword 'sheila' in the middle)
# into one stream for the streaming-inference demo.
CURRENT_DIR = Path().absolute().resolve()

words = [
    "on",
    "sheila",
    "off"
]

audio_overall = torch.Tensor([])
for word in words:
    try:
        audio_path = CURRENT_DIR / f"KWS/data/speech_commands/{word}/" \
                                   "004ae714_nohash_0.wav"
        audio, sr = torchaudio.load(audio_path)
    except RuntimeError:
        # skip words for which this speaker's recording does not exist
        continue
    audio_overall = torch.cat((audio_overall, audio), dim=-1)

# + id="ntRLZXXMyuAF" colab={"base_uri": "https://localhost:8080/", "height": 601} outputId="5147d684-712f-4eac-d8cf-15126de25710"
visualize_audio(audio_overall, sr)

# + id="4_agEDL06EDo"
def plot_audio_inference(streaming_model, audio, config):
    """Run streaming inference over a log-mel spectrogram with a sliding
    window and plot the per-frame keyword probability."""
    streaming_model.eval()
    melspec = LogMelSpec(is_train=False, config=config)
    spectrogram = melspec(audio.to(config.device))
    # NOTE(review): the unsqueeze result is discarded here — Tensor.unsqueeze
    # is not in-place, so 2-D inputs are presumably never batched; confirm
    # whether this branch is ever hit.
    if len(spectrogram.shape) == 2:
        spectrogram.unsqueeze(0)
    hidden = None
    history = []
    streaming_model.set_buffer(spectrogram)
    for T in range(config.max_window_length, spectrogram.shape[2] + 1, config.streaming_step_size):
        window = spectrogram[:, :, T - config.max_window_length:T]
        # NOTE(review): hidden.to(...) also returns a new tensor that is
        # discarded; likely harmless when device is already correct — confirm.
        if hidden is not None:
            hidden.to(config.device)
        probs, hidden = streaming_model.inference(window.to(config.device), hidden)
        history.append(probs[0][1])  # probability of the keyword class
    plt.plot(history)
    plt.title("Probability of a keyword", loc="left")
    plt.xlabel("Frame")
    plt.ylabel("Probability")

# + id="0JyfvU2f8XFQ" colab={"base_uri": "https://localhost:8080/", "height": 396} outputId="ebaabc4c-6d4b-4cee-ed03-ab5896bf9e51"
plot_audio_inference(streaming_model, audio_overall, config)

# + [markdown] id="lCqhSdH8a9vu"
# As we see, everything works fine, as we expected: the probability of a keyword increases on the timeframes where some part of the `sheila` word is presented in the corresponding spectrogram.

# + [markdown] id="Ou5cj_8XO7vQ"
# ## Speeding up and Compression

# + [markdown] id="tGfBjhgvPOt_"
# ### Distillation

# + [markdown] id="6xiewOnJVUiv"
# For this part, this [blog](https://medium.com/neuralmachine/knowledge-distillation-dc241d7c2322) was used. Here, I've tried 50 and 100 epochs to achieve the threshold using the small model ($1x20$, that is a model with a GRU layer and hidden of size 20).

# + id="EIOcy62u9imB"
small_config = SmallConfig()

# + id="_SsrwFq93vAC"
# Distill the trained base_2x64 teacher into a 1x20 student.
config.num_epochs = 100
config.model_name = "distilled_1x20"
small_config.path_to_load = "KWS/saved/base_2x64_best.pth"

# + colab={"base_uri": "https://localhost:8080/", "height": 498} id="KdeHxFlzXZiu" outputId="ec8c9cc7-7fbd-4102-f03e-db411606814b"
distilled_1x20_result = main(config, small_config)

# + [markdown] id="6lLQrxXmFJll"
# Also, I have tried to use different temperature and alpha in the weighted loss $\mathcal{L} = \alpha\mathcal{L}_{distillation} + (1 - \alpha)\mathcal{L}_{student}$ for smaller models but it would require a lot of epochs—around 200 or 300—to reach the threshold quality.

# + id="0x59rRIZFone"
small_config_new = SmallConfig()

# + id="l7mNje4VF1vA"
# Even smaller student (named 1x8, hidden_size set to 16 below).
config.model_name = "distilled_1x8"
config.num_epochs = 150
small_config_new.temperature = 100
small_config_new.hidden_size = 16
small_config_new.path_to_load = "KWS/saved/base_2x64_best.pth"

# + id="B7O6LAZEGF-5"
distilled_1x8_result = main(config, small_config_new)

# + [markdown] id="RvVDlruxPbhi"
# ### Quantization

# + [markdown] id="JRgbuBh6q7pb"
# It is used in post-training. So, we will compare—in the `results` section—all pretrained models on the validation set.

# + id="E_H5Flg9rSrL"
from kws.trainer import validation
from kws.metrics import get_auc_FA_FR
from kws.datasets import SpeechCommandDataset
from kws.collate_fn import Collator
from kws.utils import *

# + id="AmJ3YOF-rQ0q"
# Rebuild the same validation split used during training (same seed and ratio).
seed_everything(seed=config.seed)
dataset = SpeechCommandDataset(path_to_dir=config.path_to_data, keywords=config.keyword)
indexes = torch.randperm(len(dataset))
val_indexes = indexes[int(len(dataset) * config.train_ratio):]
df_val = dataset.csv.iloc[val_indexes].reset_index(drop=True)
val_set = SpeechCommandDataset(csv=df_val)
val_loader = DataLoader(val_set, batch_size=config.batch_size, shuffle=False,
                        collate_fn=Collator(), num_workers=config.num_workers,
                        pin_memory=True)

# + id="ILo5aE1Krf9v"
def evaluate_on_validation_set(model, val_loader, config,
                               log_melspec=LogMelSpec(is_train=False, config=config)):
    """Measure quality (AUC FA-FR), wall time, MACs, parameter count and
    model size for one model on the validation loader.

    Note: the log_melspec default is bound to the global `config` at
    definition time.
    """
    model.eval()
    all_probs, all_labels = [], []
    with Timer(verbose=config.verbose) as timer:
        for _, (batch, labels) in enumerate(val_loader):
            batch = log_melspec(batch)
            output = model(batch)
            probs = F.softmax(output, dim=-1)
            all_probs.append(probs[:, 1])  # keyword-class probability
            all_labels.append(labels)
    # NOTE(review): placement of get_time() relative to the `with` block is
    # reconstructed from collapsed source — confirm against kws.utils.Timer.
    time = timer.get_time()
    auc_fa_fr = get_auc_FA_FR(torch.cat(all_probs, dim=0), all_labels)
    macs, num_params = profile(model, torch.zeros(1, 1, 40, 50), verbose=False)
    size = get_size_in_megabytes(model)
    result_val = {
        "model": config.model_name,
        "macs": macs,
        "num_params": num_params,
        "size": size,
        "time": time,
        "quality": auc_fa_fr
    }
    return result_val

# + [markdown] id="Dm-CFW_QvJLo"
# 1. `base_2x64_32fp`

# + id="echK93bItLcV" colab={"base_uri": "https://localhost:8080/"} outputId="c374565e-dc4e-4c83-95cb-d2f603aadc17"
base_2x64_32fp_model = CRNNStreaming(config=config).to(config.device)
base_2x64_32fp_model.load_state_dict(torch.load("KWS/saved/base_2x64_best.pth")["state_dict"])
config.model_name = "base_2x64_32fp"
base_2x64_fp32_result_val = evaluate_on_validation_set(base_2x64_32fp_model, val_loader, config)

# + [markdown] id="h-h3AFilvMHm"
# 2. `base_2x64_int8`

# + id="p9YSgL95vHC9" colab={"base_uri": "https://localhost:8080/"} outputId="b9008126-cd48-4c85-f62f-5a975f7a2608"
# Dynamic int8 quantization of the GRU and Linear layers (CPU inference).
base_2x64_int8_model = CRNNStreaming(config=config)
base_2x64_int8_model.load_state_dict(torch.load("KWS/saved/base_2x64_best.pth")["state_dict"])
base_2x64_int8_model = torch.quantization.quantize_dynamic(
    base_2x64_int8_model, {nn.GRU, nn.Linear}, dtype=torch.qint8
)
config.model_name = "base_2x64_int8"
base_2x64_int8_result_val = evaluate_on_validation_set(base_2x64_int8_model, val_loader, config)

# + [markdown] id="7o0GgDB_wHQF"
# 3. `distilled_1x20_fp32`

# + id="vFP3Z2zhwKtV" colab={"base_uri": "https://localhost:8080/"} outputId="46cd2d1e-39c9-4c91-b374-d65c699988bf"
distilled_1x20_fp32_model = CRNNStreaming(config=small_config)
distilled_1x20_fp32_model.load_state_dict(torch.load("KWS/saved/distilled_1x20_best.pth")["state_dict"])
config.model_name = "distilled_1x20_fp32"
distilled_1x20_fp32_result_val = evaluate_on_validation_set(distilled_1x20_fp32_model, val_loader, config)

# + [markdown] id="nqc2nVFoxksk"
# 4. `distilled_1x20_int8`

# + id="5SHOW3MExkL9" colab={"base_uri": "https://localhost:8080/"} outputId="7327a716-5244-4f24-8356-7e91f154cbc1"
distilled_1x20_int8_model = CRNNStreaming(config=small_config)
distilled_1x20_int8_model.load_state_dict(torch.load("KWS/saved/distilled_1x20_best.pth")["state_dict"])
distilled_1x20_int8_model = torch.quantization.quantize_dynamic(
    distilled_1x20_int8_model, {nn.GRU, nn.Linear}, dtype=torch.qint8
)
config.model_name = "distilled_1x20_int8"
distilled_1x20_int8_result_val = evaluate_on_validation_set(distilled_1x20_int8_model, val_loader, config)

# + [markdown] id="fv5AnQHwYe1W"
# # Results

# + id="5H5q_QTc4HBo"
results = [
    base_2x64_fp32_result_val,
    base_2x64_int8_result_val,
    distilled_1x20_fp32_result_val,
    distilled_1x20_int8_result_val
]

# + id="3kW9R0yI5urr"
# Rates are relative to the fp32 baseline (model "base_2x64_32fp").
results = pd.DataFrame(results)
results["compression_rate"] = \
    results[results["model"] == "base_2x64_32fp"]["size"].values / results["size"]
results["speedup_rate"] = \
    results[results["model"] == "base_2x64_32fp"]["macs"].values / results["macs"]

# + colab={"base_uri": "https://localhost:8080/", "height": 173} id="1Xg_ywub-Jr1" outputId="02821713-32ae-4c9b-ba4e-c0099986634a"
results

# + id="MK92yf7U9M_C"
def plot_comparision(column):
    """Scatter/line plot of AUC FA-FR against `column` for the base and
    distilled model families, with each point labelled by model name."""
    fig, ax = plt.subplots(figsize=(10, 6))
    plt.title(f"AUC FA-FR vs. {column.capitalize()}")
    plt.ylabel("AUC FA-FR")
    plt.xlabel(column.capitalize())
    results_base = results[results["model"].str.startswith("base")]
    plt.plot(results_base[column], results_base["quality"])
    plt.scatter(results_base[column], results_base["quality"])
    results_distill = results[results["model"].str.startswith("distilled")]
    plt.plot(results_distill[column], results_distill["quality"])
    plt.scatter(results_distill[column], results_distill["quality"])
    for _, result in results.iterrows():
        ax.annotate(result["model"],
                    xy=(result[column], result["quality"]),
                    xytext=(result[column], result["quality"] + 1.5e-7))

# + colab={"base_uri": "https://localhost:8080/", "height": 541} id="f00iRv0WBSFy" outputId="4d5acfa3-6f90-4597-fcb5-7b56f28da78b"
plot_comparision("size")
plt.xlim(0, 0.33);

# + colab={"base_uri": "https://localhost:8080/", "height": 541} id="hh1wuNXgCEYL" outputId="ee1015c2-ecac-49a3-b5a0-4eb41f7b6d82"
plot_comparision("macs")
plt.xlim(0, 410000);

# + [markdown] id="54oSQWqN7a5E"
# As we can see the `distilled_1x20_int8` model, which is a small student model with quantization, is the best one to accelerate our base model `base_2x64_32fp`. Obviously, we can see a little drop in the quality of such a model.

# + [markdown] id="A-ib36IcCVr6"
# Expected grade is:

# + colab={"base_uri": "https://localhost:8080/"} id="0GJtU6RaCqRM" outputId="ed2e877c-8ee4-4096-cede-760a95c75c9a"
best_model = results[results["model"] == "distilled_1x20_int8"]
# NOTE(review): both terms use speedup_rate; the second was presumably meant
# to be compression_rate — confirm against the grading rubric.
round(2 + min(4, 4 * (best_model["speedup_rate"].values / 10)[0]) + \
      min(4, 4 * (best_model["speedup_rate"].values / 10)[0]), 2)

# + [markdown] id="ql257ExwD3_e"
# What worked and what didn't work?

# + [markdown] id="OvvsjFTJIbOs"
# I guess, everything worked as expected:
#
# * Streaming showed a high probability of a keyword when the component of the word `sheila` is presented on the corresponding spectrogram.
#
# * Distillation decreased the number of parameters of our model. Though, there is a limit to it: I couldn't train `distilled_1x8` model for the desired quality threshold.
#
# * Quantization made our model a little bit faster.

# + [markdown] id="MyuS37a7EBqV"
# What were the major challenges?

# + [markdown] id="0S3jlIcCH3c_"
# 1) Constantly changing base model and the notebook.
#
# 2) Few materials with examples on the topic.
#

# + id="f4YH0UqmdjPb"
notebook.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Load Packages

# +
import sklearn.datasets
import sklearn.linear_model
import sklearn.tree
import sklearn.ensemble
import sklearn.model_selection
import sklearn.metrics

import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# -

# # Load Additional Packages (if you want to use other modules in Scikit Learn)

# +
# Load additional scikit learn packages! if you need
from scipy.stats import randint
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
# -

# # Load Data Points (Do not modify the following block)

# +
olivetti_faces = sklearn.datasets.fetch_olivetti_faces(random_state=0,)
print(olivetti_faces['DESCR'])

# Show a few sample faces together with their subject labels.
example_indices = [0, 10, 62, 70]
for idx in example_indices:
    plt.title(olivetti_faces['target'][idx])
    plt.imshow(olivetti_faces['images'][idx])
    plt.gray()
    plt.show()

X = olivetti_faces['data']
y = olivetti_faces['target']
X_train, X_test, y_train, y_test = sklearn.model_selection.train_test_split(X, y, test_size=0.3, random_state=0)
# -

# # Classification with Scikit Learn Library (Programming Assignment)
# ### Variable Explanation (Do not change variable names)
# - 'olivetti_faces' is a variable containing a set of face images
# - 'X_train' is feature vectors of training dataset
# - 'y_train' is target labels of training dataset
# - 'X_test' is feature vectors of test dataset
# - 'y_test' is target labels of test dataset
# - 'y_pred' was initialized as zero vectors and fill 'y_pred' with predicted labels
#
# ### Find the best model and hyperparameter for face recognition

#TODO
#1. Create a classification object in scikit learn package (such as perceptron, logistic regression, or other classification algorithms)
face = sklearn.linear_model.LogisticRegression(C=2000, max_iter=100000000)

#2. Fit the object to training dataset
face.fit(X_train, y_train)

#3. Predict the label of test data point (X_test)
#   - Do not change the variable name "y_pred"
y_pred = face.predict(X_test)

# ### Print accuracy (do not modify the following block)

print('Accuracy: %.2f' % sklearn.metrics.accuracy_score(y_test, y_pred))

# +
# Randomized search over `tol` and `random_state` for a second classifier.
tol = [x / 1000000 for x in range(1, 1000000)]
random_state = list(range(1, 50))

random_grid = {'tol': tol,
               'random_state': random_state,}

face1 = sklearn.linear_model.LogisticRegression(max_iter=100000000, C=1000)
rnd_search = RandomizedSearchCV(face1,
                                param_distributions=random_grid,
                                n_iter=50,
                                cv=2,
                                scoring='accuracy',
                                verbose=2,
                                n_jobs=4,
                                random_state=42)
rnd_search.fit(X_train, y_train)
# -

rnd_search.best_params_
finalterm_project - randomizedsearch.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # HW2 # # # Before submitting your **HTpy-HW2.ipynb** file, perform the following # * `Kernel>Restart & Run All` # * Make sure that there are no errors # # The following includes problems that need coding and others that only need to perform simple operations (python as a calculator). from Libraries import thermodynamics as thermo import numpy as np import matplotlib.pyplot as plt import scipy.constants as csts # ## 1 Insulation # # This problem is an expansion of PB 3 in `HTpy-HW2.ipynb` # # Assuming that the outer surface temperature of the pipe is $150^\circ C$, find an insulation that works (i.e. an insulation that will not melt, for which you can find the price and thermal conductivity online). Derive the necessary equations that allow for determination of the different costs as a function of the thickness. Generate a plot for at least 3 different thicknesses. # # Hint: Derive the heat loss for a variable thickness of insulation, then apply this formula for a range of thicknesses. Compare savings of gas vs cost of insulation. # ### Solution # Assumptions: # * Heat transfer is steady and 1D # * Conservation of energy applied to insulation # * Radiation and Convection heat transfers are negligible # * Room temperature on other side of insulation is 25 degrees Celsius # Given: # * Insulation is Glass Mineral Wool # * k = 0.035 W/m*K # *Price for 2 in x 48 in x 24 in = $\$18.10$ (Grainger) # *Price of natural gas: \$0.02 # *Diameter of pipe: $R_{1} = 0.05 m$ # *Length of pipe: $L = 25 m$ # Conservation of Energy: The control volume is the insulation # $$ # E_{in} - E_{out} = 0 # $$ # # $$ # q_{in} - q_{out} = 0 # $$ # Resulting in the fact that $q_{in}$ is equal to $q_{out}$, which in this case, $q_{out} = q_{cond}$. 
# # Because the control volume is a cylinder, the heat transfer will only occur in the radial direction, but that varies based on the radius, which isn't constant.
#
# $$
# q_{cond} = \frac{2\pi kL (T_{s}- T_{1})}{ln \frac{R_{2}}{R_{1}}}
# $$
#
# The outer radius $R_{2}$ is the thickness of the insulation (t) plus the radius of the pipe, $R_{1}$, so the equation then becomes
# $$
# q_{cond} = \frac{2\pi kL (T_{s}- T_{1})}{ln \frac{R_{1}+t}{R_{1}}}
# $$

# The price of mineral wool was $\$18.10$ for a 2'' x 48'' x 24'' size roll. This results in a total volume of:
# $$
# 2*48*24 = 2304 in^3
# $$
# Therefore the price per unit volume is
# $$
# \frac{\$18.10}{2304} = \$0.008 /in^3
# $$
# In meters that is $\$479.40 /m^3$.
#
# The cost as a function of thickness, calculating the volume using the equation for a hollow cylinder, would then be
#
# $$
# cost = 479.4 * \pi *L*((R_{1}+t)^2 - R_{1}^2)
# $$

# +
R_1 = 0.05             # Inner radius (pipe radius) in meters
L = 25                 # Length of pipe in meters
t = [0.06, 0.08, 0.1]  # Candidate insulation thicknesses in meters
cost = [0, 0, 0]       # Insulation cost for each thickness, $
gas_cost = [0, 0, 0]   # Yearly gas cost for each thickness, $
q = [0, 0, 0]          # Heat loss for each thickness, W
k = 0.035              # Thermal conductivity of glass mineral wool, W/m K
T_s = 150              # Pipe surface temperature, Celsius
T_1 = 25               # Ambient temperature, Celsius
price = 0.02           # Natural-gas price, $/MJ

for i in range(len(t)):
    # Cost of insulation as a function of thickness: price per cubic meter
    # times the volume of the hollow cylinder of insulation.
    cost[i] = 479.4 * np.pi * L * ((R_1 + t[i])**2 - R_1**2)
    # Heat loss from pipe as a function of insulation thickness.
    # BUG FIX: the log ratio is outer radius over inner radius,
    # ln((R_1 + t)/R_1) -- previously ln(t/R_1), which is not the radial
    # conduction resistance of a cylindrical shell (R_2 = R_1 + t as stated above).
    q[i] = (2 * np.pi * k * L * (T_s - T_1)) / (np.log((R_1 + t[i]) / R_1))
    # Cost of gas for a year of this heat loss (price is $/MJ, q is in W,
    # 365*24*60*60 seconds per year). Uses the declared `price` variable
    # instead of a repeated magic constant.
    gas_cost[i] = (price / 10**6) * q[i] * (365 * 60 * 60 * 24)

print('The cost of insulation for insulation thickness 0.06 m, 0.08 m, and 0.1 m thickness respectively')
print(cost)
print('The cost of gas for a year with insulation thickness 0.06 m, 0.08 m, and 0.1 m thickness respectively')
print(gas_cost)
# -

plt.figure(figsize=(6,4), dpi=100)
plt.plot(t, gas_cost, lw=2, label='Gas Cost')
plt.plot(t, cost, lw=2, label='Insulation Cost')
plt.xlim([t[0], t[2]])
plt.ylim([cost[0], gas_cost[0]])
plt.xlabel('$t$ (m)')
plt.ylabel('Cost (\$)')
plt.legend()
plt.show()  # BUG FIX: plt.show was referenced but never called

# It can be seen the cost of gas decreases with increasing thickness, which means less heat is being lost. However, the cost of the insulation increases with thickness; the break-even thickness should be read off the corrected plot where the two curves cross.

# ## 2 Window
#
# Set up equations and apply realistic numerical values to them to discuss heat losses of a single pane window, a single pane window with storm window and a double paned window with air trapped at a vacuum of $10^{-3} \mathrm{torr}$ in a gap of $5\mathrm{mm}$. Do not consider the effects of radiation for any of the window.
# # <img src="ThermalCondAir.png" alt="my awesome sketch" width=100% > # ### Solution # Assumptions: # * Heat transfer is steady and 1D # * Conservation of energy applied to window pane # * No energy being generated in the window # * Conduction is the only form of heat transfer # * Steady-state # # * Standard window # * thickness = 2.38125 mm (Craig) # * k = 0.96 $\frac{W}{m K}$ (Engineering Toolbox) # * R = 0.00248 $\frac{m^2K}{W}$ # * Storm window # * R = 0.352 $\frac{m^2K}{W}$ for a storm window plus a window pane (ColoradoEnergy) # # * Outside Tmperature # * $T_{infty}$ = -18 $^\circ{C}$ # * Inside Temperature # * $T_{s}$ = 20 $^\circ{C}$ # # * For air, k = 0.003 $\frac{W}{m K}$ # * $R = \frac{.005}{0.003} = 1.667 \frac{m^2K}{W}$ # ### Single Pane # Conservation of Energy: The control volume is the single pane window # $$ # E_{in} - E_{out} = 0 # $$ # # $$ # q''_{in} - q''_{out} = 0 # $$ # # $$ # q''_{in} = q''_{out} = q''_{cond} # $$ # # Since the window is a single pane, Fourier's Law is # $$ # q''_{cond} = -k \frac{T_{infty}-T_{0}}{t} # $$ # # Utilizing the thermal resistance model where for conduction, $R = \frac{L}{k}$, the equation then becomes # # $$ # q''_{cond} = -\frac{1}{R''_{cond}}(T_{infty} - T_{s}) # $$ # # + from Libraries import HT_thermal_resistance as res Rth = [] Rth.append(res.Resistance("$R'_{cond,wndw}$",'W/m')) import schemdraw as schem import schemdraw.elements as e # Create a new circuit d = schem.Drawing() #create a dot for inside temperature d.add( e.DOT, label='$T_{\infty}$') #create the first resistance R0 = d.add( e.RES, d='right', label=Rth[0].name ) d.add( e.DOT, label='$T_{s}$') L1 = d.add(e.LINE, toplabel = "$q''$", endpts = [[-0.25, 0], [-2.25, 0]]) d.labelI(L1, arrowofst = 0) d.draw() # + R_window = 0.00248 #m^2 K / W T_infty = -18 # Celcius T_s = 20 #Celcius q_flux = (-1/R_window)*(T_infty - T_s) #W/m^2 print('The heat flux through the single window pane is %.2f W/m^2' %q_flux) # - # ### Single Pane with Storm Window # 
Conservation of Energy: The control volume is the single pane window with the storm window # $$ # E_{in} - E_{out} = 0 # $$ # # $$ # q''_{cond} - q''_{loss} = 0 # $$ # # $$ # q''_{loss} = q''_{cond} # $$ # # Using the thermal resistance model, the heat flux of conduction then becomes # # $$ # q''_{loss} = \frac{1}{R_{total}}(T_{infty} - T_s) # $$ # # Where $$R_{total} = R_{window} + R_{storm window}$$ # # since the standard and storm window are in series, the total thermal resistances add together for the total thermal resistance. # # # + from Libraries import HT_thermal_resistance as res Rth = [] Rth.append(res.Resistance("$R''_{cond,strm}$",'W/m')) Rth.append(res.Resistance("$R''_{cond,windw}$",'W/m')) import schemdraw as schem import schemdraw.elements as e # Create a new circuit d = schem.Drawing() #create a dot for inside temperature d.add( e.DOT, label='$T_{\infty}$') #create the first resistance R0 = d.add( e.RES, d='right', label=Rth[0].name ) d.add( e.DOT, label='$T_1$') R1 = d.add( e.RES, d='right', label=Rth[1].name ) d.add( e.DOT, label='$T_{s}$') L1 = d.add(e.LINE, toplabel = "$q''$", endpts = [[-0.25, 0], [-2.25, 0]]) d.labelI(L1, arrowofst = 0) d.draw() # - R_stormAndpane = 0.352 #m^2 K / W R_total = R_stormAndpane T_infty = -18 #Celcius T_s = 20 #Celcius q_flux2 = (-1/R_total)*(T_infty - T_s) print('The heat flux through the window pane and storm window is %.2f W/m^2' %q_flux2) # ### Double Pane with Air Trapped # Conservation of Energy: The control volume is the double window pane with the trapped air. 
# $$
# E_{in} - E_{out} = 0
# $$
#
# $$
# q''_{cond} - q''_{loss} = 0
# $$
#
# $$
# q''_{loss} = q''_{cond}
# $$

# With the thermal resistance model, the three conductive resistances will add up, so the equation for heat flux is
#
# $$
# q''_{cond} = \frac{1}{R''_{total}}(T_{infty} - T_s)
# $$
#
# Where
# $$R''_{total} = R''_{window} + R''_{air} + R''_{window}$$

# +
from Libraries import HT_thermal_resistance as res

# Thermal circuit: window pane -> trapped air gap -> window pane, in series.
Rth = []
Rth.append(res.Resistance("$R''_{cond,windw}$",'W/m'))
Rth.append(res.Resistance("$R''_{cond,air}$",'W/m'))
Rth.append(res.Resistance("$R''_{cond,windw}$",'W/m'))

import schemdraw as schem
import schemdraw.elements as e

# Create a new circuit drawing of the three-resistance series network.
d = schem.Drawing()
# create a dot for inside temperature
d.add( e.DOT, label='$T_{\infty}$')
# create the first resistance
R0 = d.add( e.RES, d='right', label=Rth[0].name )
d.add( e.DOT, label='$T_2$')
R0 = d.add( e.RES, d='right', label=Rth[1].name )
d.add( e.DOT, label='$T_{1}$')
R2 = d.add( e.RES, d='right', label=Rth[2].name )
d.add( e.DOT, label='$T_{s}$')
L1 = d.add(e.LINE, toplabel = "$q''$", endpts = [[-0.25, 0], [-2.25, 0]])
d.labelI(L1, arrowofst = 0)
d.draw()
# -

R_window = 0.00248   # m^2 K / W, single glass pane
R_air = .005/.003    # m^2 K / W, 5 mm gap with air at k = 0.003 W/m K
R_total = R_window + R_air + R_window   # series sum of the three layers
T_infty = -18        # Celsius
T_s = 20             # Celsius
q_flux3 = (-1/R_total)*(T_infty - T_s)
# BUG FIX: the message previously said "window pane and storm window"
# (copy-pasted from the previous case); this result is for the double pane
# window with trapped air.
print('The heat flux through the double pane window with trapped air is %.2f W/m^2' %q_flux3)

# ### Works Cited
#
# ColoradoEnergy.org. (n.d.). R-Value Table. Professionals Corner. Retrieved February 24, 2021 from https://www.coloradoenergy.org/procorner/stuff/r-values.htm
#
# <NAME>. (2017). What is the thickness of glass used in most residential replacement windows? Retrieved on February 24, 2021 from https://www.energyswingwindows.com/about-us/news-and-events/27217-what-is-the-thickness-of-glass-used-in-most-residential-replacement-windows.html
#
# Engineering ToolBox. (2003). Thermal Conductivity of some selected Materials and Gases.
Retrieved February 24, 2021 from https://www.engineeringtoolbox.com/thermal-conductivity-d_429.html # # Grainger. (n.d.). 2 in x 48 in x 24 in Mineral Wool high TEMPERATURE INSULATION, DENSITY 8#, GREEN. Retrieved February 23, 2021, from https://www.grainger.com/product/ROXUL-2-in-x-48-in-x-24-in-Mineral-19NE78 # #
HW2/gradebook_202101-14765_HW20220Assignment_2021-03-11-18-59-42/HW 2 Assignment_Team 1_attempt_2021-02-25-22-15-44_Group1_HW2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # <a id='top'> </a> # Author: [<NAME>](http://www.jamesbourbeau.com) # %load_ext watermark # %watermark -u -d -v -p numpy,matplotlib,scipy,pandas,sklearn,mlxtend # # Cosmic-ray composition effective area analysis # ### Table of contents # 1. [Load simulation DataFrame and apply quality cuts](#Load-simulation-DataFrame-and-apply-quality-cuts) # 2. [Define functions to be fit to effective area](#Define-functions-to-be-fit-to-effective-area) # 3. [Calculate effective areas](#Calculate-effective-areas) # 4. [Plot result](#Plot-result) # + # %matplotlib inline from __future__ import division, print_function from collections import defaultdict import os import numpy as np from scipy import optimize from scipy.stats import chisquare import pandas as pd import matplotlib.pyplot as plt import seaborn.apionly as sns import comptools as comp color_dict = comp.analysis.get_color_dict() # - # ### Load simulation DataFrame and apply quality cuts # [ [back to top](#top) ] # config = 'IC79' config = 'IC86.2012' df_sim = comp.load_sim(config=config, test_size=0) df_sim # + # df_sim, cut_dict_sim = comp.load_dataframe(datatype='sim', config=config, return_cut_dict=True) # selection_mask = np.array([True] * len(df_sim)) # # standard_cut_keys = ['IceTopQualityCuts', 'lap_InIce_containment', # # # 'num_hits_1_60', 'max_qfrac_1_60', # # 'InIceQualityCuts', 'num_hits_1_60'] # standard_cut_keys = ['passed_IceTopQualityCuts', 'FractionContainment_Laputop_InIce', # 'passed_InIceQualityCuts', 'num_hits_1_60'] # # for cut in ['MilliNCascAbove2', 'MilliQtotRatio', 'MilliRloglBelow2', 'StochRecoSucceeded']: # # standard_cut_keys += ['InIceQualityCuts_{}'.format(cut)] # for key in standard_cut_keys: # selection_mask *= cut_dict_sim[key] # print(key, np.sum(selection_mask)) # 
df_sim = df_sim[selection_mask] # - # #### Define energy binning for this analysis # + log_energy_bins = np.arange(5.0, 9.51, 0.05) # log_energy_bins = np.arange(5.0, 9.51, 0.1) energy_bins = 10**log_energy_bins energy_midpoints = (energy_bins[1:] + energy_bins[:-1]) / 2 energy_min_fit, energy_max_fit = 5.8, 7.0 midpoints_fitmask = (energy_midpoints >= 10**energy_min_fit) & (energy_midpoints <= 10**energy_max_fit) # - log_energy_bins np.log10(energy_midpoints[midpoints_fitmask]) # ### Define functions to be fit to effective area # + def constant(energy, c): return c def linefit(energy, m, b): return m*np.log10(energy) + b def sigmoid_flat(energy, p0, p1, p2): return p0 / (1 + np.exp(-p1*np.log10(energy) + p2)) def sigmoid_slant(energy, p0, p1, p2, p3): return (p0 + p3*np.log10(energy)) / (1 + np.exp(-p1*np.log10(energy) + p2)) # - def red_chisquared(obs, fit, sigma, n_params): zero_mask = sigma != 0 return np.nansum(((obs[zero_mask] - fit[zero_mask])/sigma[zero_mask]) ** 2) / (len(obs[zero_mask]) - n_params) # return np.sum(((obs - fit)/sigma) ** 2) / (len(obs) - 1 - n_params) np.sum(midpoints_fitmask)-3 # ### Calculate effective areas eff_area, eff_area_error, _ = comp.calculate_effective_area_vs_energy(df_sim, energy_bins) eff_area_light, eff_area_error_light, _ = comp.calculate_effective_area_vs_energy(df_sim[df_sim.MC_comp_class == 'light'], energy_bins) eff_area_heavy, eff_area_error_heavy, _ = comp.calculate_effective_area_vs_energy(df_sim[df_sim.MC_comp_class == 'heavy'], energy_bins) eff_area, eff_area_error, _ = comp.analysis.get_effective_area(df_sim, energy_bins, energy='MC') eff_area_light, eff_area_error_light, _ = comp.analysis.get_effective_area( df_sim[df_sim.MC_comp_class == 'light'], energy_bins, energy='MC') eff_area_heavy, eff_area_error_heavy, _ = comp.analysis.get_effective_area( df_sim[df_sim.MC_comp_class == 'heavy'], energy_bins, energy='MC') eff_area_light # #### Fit functions to effective area data p0 = [1.5e5, 8.0, 50.0] popt_light, 
pcov_light = optimize.curve_fit(sigmoid_flat, energy_midpoints[midpoints_fitmask], eff_area_light[midpoints_fitmask], p0=p0, sigma=eff_area_error_light[midpoints_fitmask]) popt_heavy, pcov_heavy = optimize.curve_fit(sigmoid_flat, energy_midpoints[midpoints_fitmask], eff_area_heavy[midpoints_fitmask], p0=p0, sigma=eff_area_error_heavy[midpoints_fitmask]) print(popt_light) print(popt_heavy) perr_light = np.sqrt(np.diag(pcov_light)) print(perr_light) perr_heavy = np.sqrt(np.diag(pcov_heavy)) print(perr_heavy) avg = (popt_light[0] + popt_heavy[0]) / 2 print('avg eff area = {}'.format(avg)) eff_area_light light_chi2 = red_chisquared(eff_area_light, sigmoid_flat(energy_midpoints, *popt_light), eff_area_error_light, len(popt_light)) print(light_chi2) heavy_chi2 = red_chisquared(eff_area_heavy, sigmoid_flat(energy_midpoints, *popt_heavy), eff_area_error_heavy, len(popt_heavy)) print(heavy_chi2) # ### Plot result # + fig, ax = plt.subplots() # plot effective area data points with poisson errors ax.errorbar(np.log10(energy_midpoints), eff_area_light, yerr=eff_area_error_light, ls='None', marker='.') ax.errorbar(np.log10(energy_midpoints), eff_area_heavy, yerr=eff_area_error_heavy, ls='None', marker='.') # plot corresponding sigmoid fits to effective area x = 10**np.arange(5.0, 9.5, 0.01) ax.plot(np.log10(x), sigmoid_flat(x, *popt_light), color=color_dict['light'], label='light', marker='None', ls='-') ax.plot(np.log10(x), sigmoid_flat(x, *popt_heavy), color=color_dict['heavy'], label='heavy', marker='None') avg_eff_area = (sigmoid_flat(x, *popt_light) + sigmoid_flat(x, *popt_heavy)) / 2 ax.plot(np.log10(x), avg_eff_area, color=color_dict['total'], label='avg', marker='None') ax.fill_between(np.log10(x), avg_eff_area-0.01*avg_eff_area, avg_eff_area+0.01*avg_eff_area, color=color_dict['total'], alpha=0.5) ax.axvline(6.4, marker='None', ls='-.', color='k') ax.set_ylabel('Effective area [m$^2$]') ax.set_xlabel('$\mathrm{\log_{10}(E_{true}/GeV)}$') # 
ax.set_title('$\mathrm{A_{eff} = 143177 \pm 1431.77 \ m^2}$') ax.grid() # ax.set_ylim([0, 180000]) ax.set_xlim([5.4, 8.1]) ax.set_title(config) #set label style ax.ticklabel_format(style='sci',axis='y') ax.yaxis.major.formatter.set_powerlimits((0,0)) leg = plt.legend(title='True composition') for legobj in leg.legendHandles: legobj.set_linewidth(2.0) # eff_area_outfile = os.path.join(comp.paths.figures_dir, 'effective-area-{}.png'.format(config)) # comp.check_output_dir(eff_area_outfile) # plt.savefig(eff_area_outfile) plt.show() # - # Effective area as quality cuts are sequentially applied # + df_sim, cut_dict_sim = comp.load_dataframe(datatype='sim', config='IC79', return_cut_dict=True) standard_cut_keys = ['num_hits_1_60', 'IceTopQualityCuts', 'lap_InIce_containment', # 'num_hits_1_60', 'max_qfrac_1_60', 'InIceQualityCuts'] # for cut in ['MilliNCascAbove2', 'MilliQtotRatio', 'MilliRloglBelow2', 'StochRecoSucceeded']: # standard_cut_keys += ['InIceQualityCuts_{}'.format(cut)] eff_area_dict = {} eff_area_err_dict = {} selection_mask = np.array([True] * len(df_sim)) for key in standard_cut_keys: selection_mask *= cut_dict_sim[key] print(key, np.sum(selection_mask)) eff_area, eff_area_error, _ = comp.analysis.get_effective_area(df_sim[selection_mask], energy_bins, energy='MC') # eff_area, eff_area_error = comp.analysis.effective_area.effective_area(df_sim[selection_mask], # np.arange(5.0, 9.51, 0.1)) eff_area_dict[key] = eff_area eff_area_err_dict[key] = eff_area_error # + fig, ax = plt.subplots() cut_labels = {'num_hits_1_60': 'NStations/NChannels', 'IceTopQualityCuts': 'IceTopQualityCuts', 'lap_InIce_containment': 'InIce containment', 'InIceQualityCuts': 'InIceQualityCuts'} for key in standard_cut_keys: # plot effective area data points with poisson errors ax.errorbar(np.log10(energy_midpoints), eff_area_dict[key], yerr=eff_area_err_dict[key], ls='None', marker='.', label=cut_labels[key], alpha=0.75) ax.set_ylabel('Effective area [m$^2$]') 
ax.set_xlabel('$\log_{10}(E_{\mathrm{MC}}/\mathrm{GeV})$') ax.grid() # ax.set_ylim([0, 180000]) ax.set_xlim([5.4, 9.6]) #set label style ax.ticklabel_format(style='sci',axis='y') ax.yaxis.major.formatter.set_powerlimits((0,0)) leg = plt.legend() plt.savefig('/home/jbourbeau/public_html/figures/effective-area-cuts.png') plt.show() # -
notebooks/effective-area.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # One Shot Learning with Siamese Networks # # This is the jupyter notebook that accompanies # ## Imports # All the imports are defined here # %matplotlib inline import torchvision import torchvision.datasets as dset import torchvision.transforms as transforms from torch.utils.data import DataLoader,Dataset import matplotlib.pyplot as plt import torchvision.utils import numpy as np import random from PIL import Image import torch from torch.autograd import Variable import PIL.ImageOps import torch.nn as nn from torch import optim import torch.nn.functional as F # ## Helper functions # Set of helper functions # + def imshow(img,text=None,should_save=False): npimg = img.numpy() plt.axis("off") if text: plt.text(75, 8, text, style='italic',fontweight='bold', bbox={'facecolor':'white', 'alpha':0.8, 'pad':10}) plt.imshow(np.transpose(npimg, (1, 2, 0))) plt.show() def show_plot(iteration,loss): plt.plot(iteration,loss) plt.show() # - # ## Configuration Class # A simple class to manage configuration class Config(): training_dir = "./data/faces/training/" testing_dir = "./data/faces/testing/" train_batch_size = 64 train_number_epochs = 100 # ## Custom Dataset Class # This dataset generates a pair of images. 
# 0 for genuine pair and 1 for imposter pair

class SiameseNetworkDataset(Dataset):
    """Yields image pairs for contrastive training.

    Each item is ``(img0, img1, label)`` where ``label`` is 0 when both
    images come from the same class (genuine pair) and 1 otherwise
    (imposter pair). Roughly half of the generated pairs are genuine.
    """

    def __init__(self, imageFolderDataset, transform=None, should_invert=True):
        self.imageFolderDataset = imageFolderDataset
        self.transform = transform
        self.should_invert = should_invert

    def __getitem__(self, index):
        anchor = random.choice(self.imageFolderDataset.imgs)
        # Flip a coin so approx 50% of pairs share a class.
        want_same_class = random.randint(0, 1)
        if want_same_class:
            while True:
                # keep looping till the same class image is found
                partner = random.choice(self.imageFolderDataset.imgs)
                if anchor[1] == partner[1]:
                    break
        else:
            while True:
                # keep looping till a different class image is found
                partner = random.choice(self.imageFolderDataset.imgs)
                if anchor[1] != partner[1]:
                    break

        img0 = Image.open(anchor[0]).convert("L")
        img1 = Image.open(partner[0]).convert("L")

        if self.should_invert:
            img0 = PIL.ImageOps.invert(img0)
            img1 = PIL.ImageOps.invert(img1)

        if self.transform is not None:
            img0 = self.transform(img0)
            img1 = self.transform(img1)

        label = torch.from_numpy(
            np.array([int(partner[1] != anchor[1])], dtype=np.float32))
        return img0, img1, label

    def __len__(self):
        return len(self.imageFolderDataset.imgs)


# ## Using Image Folder Dataset

folder_dataset = dset.ImageFolder(root=Config.training_dir)

siamese_dataset = SiameseNetworkDataset(imageFolderDataset=folder_dataset,
                                        transform=transforms.Compose([transforms.Resize((100,100)),
                                                                      transforms.ToTensor()
                                                                      ]),
                                        should_invert=False)

# ## Visualising some of the data
# The top row and the bottom row of any column is one pair. The 0s and 1s correspond to the column of the image.
# 1 indicates dissimilar, and 0 indicates similar.
# +
# NOTE(review): this visualisation cell previously appeared twice in a row;
# the redundant first copy (whose `example_batch` was immediately overwritten)
# has been removed.
vis_dataloader = DataLoader(siamese_dataset,
                            shuffle=True,
                            num_workers=8,
                            batch_size=8)
dataiter = iter(vis_dataloader)

example_batch = next(dataiter)
concatenated = torch.cat((example_batch[0], example_batch[1]), 0)
imshow(torchvision.utils.make_grid(concatenated))
print(example_batch[2].numpy())
# -

# ## Neural Net Definition
# We will use a standard convolutional neural network

class SiameseNetwork(nn.Module):
    """Twin CNN: both inputs are embedded by the same weights into R^5."""

    def __init__(self):
        super(SiameseNetwork, self).__init__()
        # Three conv stages, each: reflection pad -> 3x3 conv -> ReLU -> batch norm.
        self.cnn1 = nn.Sequential(
            nn.ReflectionPad2d(1),
            nn.Conv2d(1, 4, kernel_size=3),
            nn.ReLU(inplace=True),
            nn.BatchNorm2d(4),

            nn.ReflectionPad2d(1),
            nn.Conv2d(4, 8, kernel_size=3),
            nn.ReLU(inplace=True),
            nn.BatchNorm2d(8),

            nn.ReflectionPad2d(1),
            nn.Conv2d(8, 8, kernel_size=3),
            nn.ReLU(inplace=True),
            nn.BatchNorm2d(8),
        )

        # NOTE(review): the flattened size 8*100*100 assumes 100x100 inputs
        # (matching the upstream Resize((100, 100)) transform) -- confirm if
        # the input resolution ever changes.
        self.fc1 = nn.Sequential(
            nn.Linear(8*100*100, 500),
            nn.ReLU(inplace=True),

            nn.Linear(500, 500),
            nn.ReLU(inplace=True),

            nn.Linear(500, 5))

    def forward_once(self, x):
        """Embed one image batch: conv stack, flatten, fully connected head."""
        output = self.cnn1(x)
        output = output.view(output.size()[0], -1)
        output = self.fc1(output)
        return output

    def forward(self, input1, input2):
        """Return the embeddings of both inputs (shared weights)."""
        output1 = self.forward_once(input1)
        output2 = self.forward_once(input2)
        return output1, output2


# ## Contrastive Loss

class ContrastiveLoss(torch.nn.Module):
    """
    Contrastive loss function.
    Based on: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
    """

    def __init__(self, margin=2.0):
        super(ContrastiveLoss, self).__init__()
        self.margin = margin

    def forward(self, output1, output2, label):
        # label == 0: pull genuine pairs together (squared distance);
        # label == 1: push imposter pairs at least `margin` apart.
        euclidean_distance = F.pairwise_distance(output1, output2, keepdim = True)
        loss_contrastive = torch.mean((1-label) * torch.pow(euclidean_distance, 2) +
                                      (label) * torch.pow(torch.clamp(self.margin - euclidean_distance, min=0.0), 2))
        return loss_contrastive

# ## Training Time!
train_dataloader = DataLoader(siamese_dataset, shuffle=True, num_workers=8, batch_size=Config.train_batch_size) net = SiameseNetwork().cuda() criterion = ContrastiveLoss() optimizer = optim.Adam(net.parameters(),lr = 0.0005 ) counter = [] loss_history = [] iteration_number= 0 for epoch in range(0,Config.train_number_epochs): for i, data in enumerate(train_dataloader,0): img0, img1 , label = data img0, img1 , label = img0.cuda(), img1.cuda() , label.cuda() optimizer.zero_grad() output1,output2 = net(img0,img1) loss_contrastive = criterion(output1,output2,label) loss_contrastive.backward() optimizer.step() if i %10 == 0 : print("Epoch number {}\n Current loss {}\n".format(epoch,loss_contrastive.item())) iteration_number +=10 counter.append(iteration_number) loss_history.append(loss_contrastive.item()) show_plot(counter,loss_history) # ## Some simple testing # The last 3 subjects were held out from the training, and will be used to test. The Distance between each image pair denotes the degree of similarity the model found between the two images. Less means it found more similar, while higher values indicate it found them to be dissimilar. # + run_control={"marked": true} folder_dataset_test = dset.ImageFolder(root=Config.testing_dir) siamese_dataset = SiameseNetworkDataset(imageFolderDataset=folder_dataset_test, transform=transforms.Compose([transforms.Resize((100,100)), transforms.ToTensor() ]) ,should_invert=False) test_dataloader = DataLoader(siamese_dataset,num_workers=6,batch_size=1,shuffle=True) dataiter = iter(test_dataloader) x0,_,_ = next(dataiter) for i in range(10): _,x1,label2 = next(dataiter) concatenated = torch.cat((x0,x1),0) output1,output2 = net(Variable(x0).cuda(),Variable(x1).cuda()) euclidean_distance = F.pairwise_distance(output1, output2) imshow(torchvision.utils.make_grid(concatenated),'Dissimilarity: {:.2f}'.format(euclidean_distance.item())) # -
Siamese-networks-danny-copy.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split, KFold, StratifiedKFold, cross_val_score, cross_validate, GridSearchCV
from sklearn.metrics import accuracy_score

import numpy as np
import pandas as pd
# -

# Single 80/20 hold-out evaluation of a decision tree on the iris data.
iris = load_iris()
iris_data = iris.data
iris_label = iris.target

X_train, X_test, y_train, y_test = train_test_split(iris_data, iris_label, test_size=0.2, random_state=7)

model = DecisionTreeClassifier(random_state=77)
model.fit(X_train, y_train)
pred = model.predict(X_test)
print('Prediction accuracy: {0:.2f}%'.format(accuracy_score(y_test, pred) * 100.0))

# use KFold to improve prediction performance.

kfold = KFold(n_splits=5)
accuracy_list = []

# +
# Refit and score the model on each of the five folds, collecting accuracies.
for n_iter, (train_index, test_index) in enumerate(kfold.split(iris_data), start=1):
    X_train, X_test = iris_data[train_index], iris_data[test_index]
    y_train, y_test = iris_label[train_index], iris_label[test_index]

    model.fit(X_train, y_train)
    pred = model.predict(X_test)

    accuracy = np.round(accuracy_score(y_test, pred), 4)
    train_size = X_train.shape[0]
    test_size = X_test.shape[0]

    print('=' * 50)
    print('{0}-th Cross validation accuracy: {1}\nTraining data size: {2}\nValidation data size: {3}'
          .format(n_iter, accuracy, train_size, test_size))
    print('=' * 50)
    print()

    accuracy_list.append(accuracy)
# -

print('Average accuracy: {0:.2f}%'.format(np.mean(accuracy_list) * 100.0))

# use Stratified KFold to improve prediction performance.
iris_df = pd.DataFrame(data=iris_data, columns=iris.feature_names)
iris_df['label'] = iris.target

# check dataframe: class balance of the target column
iris_df['label'].value_counts()

# +
# Stratified K-fold keeps the label distribution of each fold equal to the
# distribution of the full data set.
skf = StratifiedKFold(n_splits=3)
accuracy_list = []

n_iter = 0
for fold_train_idx, fold_test_idx in skf.split(iris_data, iris_label):
    n_iter += 1
    X_train, y_train = iris_data[fold_train_idx], iris_label[fold_train_idx]
    X_test, y_test = iris_data[fold_test_idx], iris_label[fold_test_idx]

    model.fit(X_train, y_train)
    pred = model.predict(X_test)

    accuracy = np.round(accuracy_score(y_test, pred), 4)
    train_size = X_train.shape[0]
    test_size = X_test.shape[0]

    print('=' * 100)
    print('{0}-th\n- Cross validation accuracy: {1}\n- Training data size: {2}\n- Validation data size: {3}'
          .format(n_iter, accuracy, train_size, test_size))
    print('\nVerification set index: {0}'.format(fold_test_idx))
    print('=' * 100)
    print()

    accuracy_list.append(accuracy)
# -

print('Accuracy by cross validation: ', np.round(accuracy_list, 4))
print('Average verification accuracy: {0:.0f}%'.format(np.mean(accuracy_list) * 100.0))

# ## Easily use cross-validation
#
# cross_val_score(
#     estimator,
#     X,
#     y=None,
#     groups=None,
#     scoring=None,
#     cv=None,
#     n_jobs=None,
#     verbose=0,
#     fit_params=None,
#     pre_dispatch='2*n_jobs',
#     error_score=nan,
# )

# cross_val_score performs the fit/predict/score loop above in a single call.
scores = cross_val_score(model, iris_data, iris_label, scoring='accuracy', cv=3)
print('Accuracy by cross validation: ', np.round(scores, 4))
print('Average verification accuracy: {0:.0f}%'.format(np.mean(scores) * 100.0))

# GridSearchCV(
#     estimator,
#     param_grid,
#     scoring=None,
#     n_jobs=None,
#     iid='deprecated',
#     refit=True,
#     cv=None,
#     verbose=0,
#     pre_dispatch='2*n_jobs',
#     error_score=nan,
#     return_train_score=False,
# )

# +
iris = load_iris()
iris_data = iris.data
iris_label = iris.target
dtc_model = DecisionTreeClassifier()

X_train, X_test, y_train, y_test = train_test_split(iris_data, iris_label,
                                                    test_size=0.2, random_state=121)

# Hyper-parameter grid: 3 depths x 2 split sizes = 6 candidate combinations.
parameters = {'max_depth': [1, 2, 3], 'min_samples_split': [2, 3]}

# refit = True is default, If True, re-learn with the best parameter setting
grid_dtree = GridSearchCV(dtc_model, param_grid=parameters, cv=3, refit=True)
# -

grid_dtree.fit(X_train, y_train)

scores_df = pd.DataFrame(grid_dtree.cv_results_)
scores_df[['params', 'mean_test_score', 'rank_test_score',
           'split0_test_score', 'split1_test_score', 'split2_test_score']]

print('GridSearchCV Optimal parameters: ', grid_dtree.best_params_)
print('GridSearchCV Highest accuracy: {0:.2f}%'.format(grid_dtree.best_score_ * 100.0))

pred = grid_dtree.predict(X_test)
print('Test data set accuracy: {0:.2f}%'.format(accuracy_score(y_test, pred) * 100.0))

# +
# GridSearchCV's refit returns the already trained estimator
estimator = grid_dtree.best_estimator_
pred = estimator.predict(X_test)
print('Test data set accuracy: {0:.2f}%'.format(accuracy_score(y_test, pred) * 100.0))
self/Untitled1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 1.0.0 # language: julia # name: julia-1.0 # --- # This notebook creates a basic Convolutioin Neural Net using Flux to predict estimated force needed by the exoskeleton. \\ # # Input data is file "f1_processed_data.csv". Saves model as "CNN-model.bson", train loss, and test loss \\ # # The model uses as it's training data strides from the time of the end of the last plantar flexion to about 150 ms before the start of the next plantar flexion. At this point, the model predicts a "relative force". The relative force is defined as the acceleration_z^2 + acceleration_x^2 (where x points forward, z points up, and y points side to side). Mass is assumed to be constant and is not included. using Pkg using Flux using Flux: onehot, chunk, batchseq, throttle, crossentropy using StatsBase: wsample using Base.Iterators: partition using CSV using MultivariateStats using LinearAlgebra using StatsBase using Random using BSON: @save using Dates using Plots using Statistics BIN_STEP = .0001; N_FEATURES = 6; ALPHABET_SIZE = 200; add_dim(x::Array,dim) = reshape(x, (size(x)...,1)) prctile(x::Array,p) = sort(x)[Int(round((p/100)*length(x)))] # + # Process data, defining start and end PF times as relative points within the stride cycle. 
# Heel strikes have been found and marked in a previous file.
# NOTE(review): assumes column 1 of the CSV is time and column 8 is the
# heel-strike marker -- confirm against "f1_processed_data.csv".
text = CSV.read("f1_processed_data.csv", header=0);
text = convert(Array, text);
timepts = text[:,1];
# Row indices flagged as heel strikes.
heel_strike = findall(text[:,8].==1);
# Samples between consecutive heel strikes (one stride each).
cycle_time = heel_strike[2:end] .- heel_strike[1:end-1];
# Plantar-flexion window: ~30%..60% of each stride cycle
# (the -15 shifts the window start slightly earlier).
startPF = Int.(round.(heel_strike[1:end-1] .+ .3 .*cycle_time .- 15));
PFend = Int.(round.(heel_strike[1:end-1] .+ .6 .*cycle_time));
labels = text[:,end];
# Sensor channels (columns 2..7); z-score normalise each channel.
data_unNorm = text[:,[2,3,4,5,6,7]];
means = [mean(data_unNorm[:,col]) for col in range(1,size(data_unNorm,2))];
std_data = [std(data_unNorm[:,col]) for col in range(1,size(data_unNorm,2))];
means = reshape(means,(1,6));
data0 = data_unNorm .-means;
data0 = data0 ./ reshape(std_data, (1,6));
# -

# Index ranges of the plantar-flexion window per stride, and of the training
# window (end of one PF to the start of the next).
PF_idxs = [collect(s:e) for (s, e) in zip(startPF, PFend)];
PF_idxs = PF_idxs[2:end]
training_idxs = [collect(s:e) for (s, e) in zip(PFend[1:end-1], startPF[2:end])];

max_forces_tst = [];
max_forces_tr = [];
tr_data_stride = [];
tst_data_stride = [];
seqlens = [];

# Shuffle strides, then split into train/test sets.
sfl = shuffle(1:length(PF_idxs)-1)
for s in sfl
    # "Relative force" proxy: sqrt(a_x^2 + a_z^2) over the PF window
    # (channels 1 and 3 of the normalised data).
    force = sqrt.(data0[PF_idxs[s],1].^2 .+ data0[PF_idxs[s],3].^2)
    data_add = data0[training_idxs[s],:]
    append!(seqlens, size(data_add)[1])
    # NOTE(review): the 80/20 split tests on the *original* index s, not the
    # shuffled position, so the test set is always the last 20% of strides in
    # time -- confirm this is intentional.
    if s > round(.8*(length(PF_idxs)-1))
        append!(tst_data_stride, [data_add])
        append!(max_forces_tst, maximum(force))
    else
        append!(tr_data_stride, [data_add])
        append!(max_forces_tr, maximum(force))
    end
end

length(sfl)
length(tst_data_stride)
length(tr_data_stride)
size(tr_data_stride[1])

# Pad (with `pad`) or truncate every stride matrix to exactly `seqlen` rows so
# strides can be stacked into a fixed-size tensor.
function custom_pad(data, seqlen, pad)
    dnew = [];
    for dat in data
        if size(dat)[1]<seqlen
            dat = vcat(dat, fill(pad,(seqlen-size(dat)[1], N_FEATURES)));
        end
        datn = dat[1:seqlen,:]
        append!(dnew, [datn])
    end
    return dnew
end

# Common sequence length = 75th percentile of observed stride lengths.
seqlen = prctile(seqlens, 75);
tst_data_stride = custom_pad(tst_data_stride, seqlen, 0);
tr_data_stride = custom_pad(tr_data_stride, seqlen, 0);

# Mini-batches of 10 strides, stacked along dimension 4.
partitions = [i:i+9 for i in 1:10:length(tr_data_stride)-9];
train = [(cat(float.(tr_data_stride[i])..., dims = 4), max_forces_tr[i]) for i in partitions];

size(tr_data_stride[1])
# +
using Flux, Flux.Data.MNIST, Statistics
using Flux: onehotbatch, onecold, crossentropy, throttle
using Base.Iterators: repeated, partition
using Flux: @epochs
# using CuArrays

# # Classify MNIST digits with a convolutional network

# imgs = MNIST.images()
# labels = onehotbatch(MNIST.labels(), 0:9)

# # Partition into batches of size 1,000

# Mini-batches of 10 strides; each batch tensor is (seqlen x features x 1 x 10).
partitions = [i:i+9 for i in 1:10:length(tr_data_stride)-9];
train = [(cat(float.(tr_data_stride[i])..., dims = 4), max_forces_tr[i]) for i in partitions];
# println(typeof(max_forces_tr))
# train = convert.(Array{Float32,2}, tr_data_stride);
train = gpu.(train) |> gpu
# labels = gpu.(Float32.(max_forces_tr))
# train = [(cat(float.(train[i])..., dims = 4), labels[i])
#          for i in length(train)]
# println(length(train))

# Two conv/maxpool stages followed by a single-output dense layer (regression).
m = Chain(
    Conv((2,2), 1=>16, relu),
    x -> maxpool(x, (2,2)),
    Conv((2,2), 16=>8, relu),
    x -> maxpool(x, (2,2)),
    x -> reshape(x, :, size(x, 4)),
    Dense(216, 1)) |> gpu

# Root-mean-style regression loss between predictions and targets.
# BUG FIX: the original returned sqrt(sum(y_hat .- y).^2)/length(y), i.e.
# |sum of *signed* residuals| / length(y) -- positive and negative errors
# cancelled and individual residuals were never squared.  Square each
# residual first, then sum.
function mse_loss(x,y)
    y_hat = m(x);
    y_hat = reshape(y_hat, length(y_hat));
    return sqrt(sum((y_hat .- y).^2))/length(y)
end

# Held-out and training probes used only for loss logging.
tY = gpu.(Float32.(max_forces_tst[1]))
tX = reshape(float.(tst_data_stride[1]), (size(tst_data_stride[1],1),size(tst_data_stride[1],2),1,1)) |> gpu
tYr = gpu.(Float32.(max_forces_tr[1]))
tXr = reshape(float.(tr_data_stride[1]), (size(tr_data_stride[1],1),size(tr_data_stride[1],2),1,1)) |> gpu
# tst_acc = cat(float.(tst_data_stride)..., dims = 4);

opt = ADAM(params(m))
loss_vec= [];
test_loss_vec = [];

# Log train/test loss at most once per second during training.
evalcb = throttle(1) do
    @show "Train Loss" mse_loss(tXr, tYr)
    @show "Test Loss" mse_loss(tX, tY)
    append!(loss_vec, mse_loss(tXr, tYr).data)
    append!(test_loss_vec, mse_loss(tX, tY).data)
end

# Flux.train!(mseloss, train, opt, cb = evalcb)
@epochs 450 Flux.train!(mse_loss, train, opt, cb = evalcb)
@save "CNN-model_latest.bson" m opt
# -

# save workspace
@save "$(now())-ConvWorkspace.jld2"

# loss_vec = [l.data for l in loss_vec]
using PyPlot
PyPlot.plot(loss_vec, label = "Train Loss")
PyPlot.plot(test_loss_vec, label= "Test Loss")
xlabel("Epochs") ylabel("Loss") title("MSE Loss on Conv Net") legend(loc="upper right",fancybox="true") write("convNet_testloss_latest.csv", DataFrame(reshape(test_loss_vec,1,length(test_loss_vec)))) write("convNet_loss_latest.csv", DataFrame(reshape(loss_vec,1,length(loss_vec)))) println(backend()) using Plots Plots.plotlyjs() x = 0:2*pi:100 |> collect dt, nsteps = 0.03, 3 for n = 1:nsteps IJulia.clear_output(true) Plots.plot(x[n], sin(x - n*dt)) |> display end # + sample_vec = shuffle(1:length(tst_data_stride)) i = sample_vec[1]; input_data = reshape(float.(tst_data_stride[i]), (size(tst_data_stride[i],1),size(tst_data_stride[i],2),1,1)); guess = m(input_data).data; truth = max_forces_tst[i]; guess = timevec = collect(0:10:size(input_data,1)*10); PyPlot.plot([timevec[1]], [tst_data_stride[i][1,1]]) @gif for j=2:length(timevec)-1 plot!([timevec[j]], [tst_data_stride[i][j,1]], label = "") if j == length(timevec)-1 bar!([timevec[j]], [truth], label="True Force") bar!([timevec[j]], [guess], label="Guess") end end every 10 # + sample_vec = shuffle(1:length(tst_data_stride)) i = sample_vec[1]; input_data = reshape(float.(tst_data_stride[i]), (size(tst_data_stride[i],1),size(tst_data_stride[i],2),1,1)); guess = m(input_data).data[1]; truth = max_forces_tst[i]; timevec = collect(0:10:size(input_data,1)*10); # using GR gr(show = :ijulia) display(GR.plot(1:2, tst_data_stride[i][1:2,1], xlim=[1,size(input_data,1)], ylim=[-5,5])) for k in 1:size(input_data,1)-3 display(GR.plot(1:2+k, tst_data_stride[i][1:2+k,1], xlim=[1,size(input_data,1)], ylim=[-5,5])) sleep(0.2) IJulia.clear_output(true) end # + sample_vec = shuffle(1:length(tst_data_stride)) i = sample_vec[1]; input_data = reshape(float.(tst_data_stride[i]), (size(tst_data_stride[i],1),size(tst_data_stride[i],2),1,1)); guess = m(input_data).data[1]; truth = max_forces_tst[i]; timevec = collect(0:10:size(input_data,1)*10); # using GR gr(show = :ijulia) display(GR.plot(1:2, tst_data_stride[i][1:2,1], 
xlim=[1,size(input_data,1)], ylim=[-5,5])) for k in 1:size(input_data,1)-3 display(GR.plot(1:2+k, tst_data_stride[i][1:2+k,1], xlim=[1,size(input_data,1)], ylim=[-5,5])) sleep(0.2) IJulia.clear_output(true) end # Plots.bar!([size(input_data,1)-1], [truth], label="Truth") # Plots.bar!([size(input_data,1)], [guess], label="Guess") # Plots.bar([size(input_data,1)-1,size(input_data,1)], [truth, guess]) # - guess = m(input_data).data[1]; # + # Plots.bar([size(input_data,1)-1,size(input_data,1)], [truth, guess], label="Truth") Plots.bar!([size(input_data,1)-1], [truth], label="Truth") Plots.bar!([size(input_data,1)], [guess], label="Guess") # + using Plots pyplot() #change to the pyplot backend and define some defaults # x = y = range(-5, stop = 5, length = 40) # zs = zeros(0,40) # n = 100 # create a progress bar for tracking the animation generation sample_vec = shuffle(1:length(tst_data_stride)) i = sample_vec[1]; input_data = reshape(float.(tst_data_stride[i]), (size(tst_data_stride[i],1),size(tst_data_stride[i],2),1,1)); truth = max_forces_tst[i]; timevec = collect(0:10:size(input_data,1)*10); anim = Animation() p1 = Plots.plot(zeros(1,length(timevec)), zeros(1,length(timevec))) for i in 1:3 # create a plot with 3 subplots and a custom layout # induce a slight oscillating camera angle sweep, in degrees (azimuth, altitude) plot!(p1, [timevec[i]], [input_data[i,1]], show = true) # add a tracking line # fixed_x = zeros(40) # z = map(f, fixed_x, y) # plot!(p[1], fixed_x, y, z, line = (:black, 5, 0.2)) # vline!(p[2], [0], line = (:black, 5)) # add to and show the tracked values over time # global zs = vcat(zs, z') # plot!(p[3], zs, alpha = 0.2, palette = cgrad(:blues).colors) # frame(anim) # increment the progress bar # next!(prog) end # + # function plot_points(time_in, stride_in, plt) # @gif for i in 1:10 # println(plt) # push!(plt, time_in[i], stride_in[i,1]) # gui(); sleep(0.5) # end # end using Plots gr() # plt1 = plot(1, title = "Plot", marker = 2) sample_vec = 
shuffle(1:length(tst_data_stride)) i = sample_vec[1]; # for i in sample_vec[1] input_data = reshape(float.(tst_data_stride[i]), (size(tst_data_stride[i],1),size(tst_data_stride[i],2),1,1)); # guess = m(input_data); truth = max_forces_tst[i]; timevec = collect(0:10:size(input_data,1)*10); p = plot(1,xlim=(minimum(timevec),maximum(timevec)), ylim=(minimum(input_data[:,1]),maximum(input_data[:,1])), title = "Plot") @gif for k in 1:10 append!(p, k, tst_data_stride[i][k,1]) # plot!(timevec[k], tst_data_stride[i][k]) # PlotlyJS.gui(); PlotlyJS.sleep(0.5); # PyPlot.bar(timevec[end], m(input_data).data, label = "Guess Force") # PyPlot.bar(timevec[end], truth, label = "True Force") end every 10 # bar!(size(input_data,1)*10, guess) # bar!(size(input_data,1)*10, truth) # end # - function customRecon(new_data, eigenvecs, k_keep) if length(size(new_data))==1 new_data = reshape(new_data, (length(new_data),1)); end recon = eigenvecs[:,k_keep]*(transpose(new_data)) return transpose(recon) end k = 5 recon_data = customRecon(buffer[k], vectors, k_keep); recon_data_true = customRecon(tst_data_stride[s_order[k]], vectors, k_keep); k_keep using Plots plot(recon_data[:,1]) plot!(recon_data[:,2]) plot!(recon_data[:,3]) plot!(recon_data[:,4]) plot!(recon_data[:,5]) plot!(recon_data[:,6]) plot(recon_data_true[:,1]) plot!(recon_data_true[:,2]) plot!(recon_data_true[:,3]) plot!(recon_data_true[:,4]) plot!(recon_data_true[:,5]) plot!(recon_data_true[:,6]) plot(data0[1:1000,1]) plot!(data0[1:1000,2]) plot!(data0[1:1000,3]) plot!(data0[1:1000,4]) plot!(data0[1:1000,5]) plot!(data0[1:1000,6])
.ipynb_checkpoints/basicConvNet_julia-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import plotly.graph_objs as go from ipywidgets import widgets import matplotlib.pyplot as plt import scipy.stats as spst # + t=np.linspace(0,6,1000) Lmbda_n=(-t**3 + 15*t**2/2 + 20*t)/50 lmbda_m=(-3*t*(t-5)+20)/50 plt.plot(t,lmbda_m) # - # + next_arrival=widgets.Button( description="next arrival") clear=widgets.Button( description="clear") t=np.linspace(0,6,1000) Lmbda_n=(-t**3 + 15*t**2/2 + 20*t)/50 arrival_t=np.array([]) homo_t=np.array([]) lmbda_m=np.max(-3*t*(t-5)+20)/50 tr=0 trace1=go.Scatter(x=t,y=Lmbda_n,name="\u039B(t)" ,mode="lines",line=dict( color="green", dash='dash',width=1),hoverinfo='skip') trace2=go.Scatter(x=[0,6],y=[0,0],name="arrival times" ,mode="lines",line=dict( color="gray", dash='solid',width=10),hoverinfo='skip') trace3=go.Scatter(x=[0,0],y=[0,5],name="expected number of arrivals " ,mode="lines",line=dict( color="gray", dash='solid',width=10),hoverinfo='skip') trace4=go.Scatter(x=[],y=[],name="arrivals" ,hoverinfo="skip", text="",mode="markers",marker=dict( color="blue", size=10)) trace5=go.Scatter(x=[],y=[],name="E(#arrivals)/homo arrival time" ,hoverinfo="skip", text="",mode="markers",marker=dict( color="yellow", size=10)) trace6=go.Scatter(x=[],y=[],name="" ,hoverinfo="text", text="",mode="markers",marker=dict( color="black", size=8)) g = go.FigureWidget(data=[trace1,trace2,trace3,trace4,trace5,trace6], layout=go.Layout( hovermode="closest", xaxis={'showspikes': True}, yaxis={'showspikes': True}, title=dict( text="inversion method", ), margin={'l': 0, 'r': 0, 't': 0, 'b': 0},width=800, height=300 ) ) g.update_layout( title_x=0.5, title_y=0.9, xaxis=dict(range=[-1,7] ), yaxis=dict(range=[-0.1,5]), legend=dict( x=1.1, y=0.7, traceorder="normal", font=dict( family="sans-serif", size=12, color="black" )) ) 
def response1(change):
    """Draw the next arrival of the inhomogeneous Poisson process by thinning.

    Proposals come from a homogeneous process with rate ``lmbda_m`` (the
    maximum of the intensity on [0, 6]); each proposal is accepted with
    probability lambda(t)/lmbda_m.  The accepted time is appended to the
    module-level arrays and the plotly traces are updated in place.
    """
    global tr,arrival_t, homo_t,next_proposal
    keep=False
    while keep==False:
        # Exponential inter-arrival time of the dominating homogeneous process.
        tr=tr-1/lmbda_m*np.log(np.random.rand())
        if tr>6:
            # Past the end of the observation window: disable further clicks.
            next_arrival.disabled=True
            return
        # Thinning step: accept with probability lambda(tr)/lmbda_m.
        ar=(-3*tr*(tr-5)+20)/lmbda_m/50
        keep=np.random.rand()<ar
    arrival_t=np.append(arrival_t,tr)
    # Integrated intensity Lambda(tr) = expected number of arrivals by tr.
    homo_t=np.append(homo_t,(-tr**3 + 15*tr**2/2 + 20*tr)/50)
    with g.batch_update():
        # g.data[3]: arrivals on the time axis; g.data[4]: Lambda values on
        # the vertical axis; g.data[5]: the (t, Lambda(t)) points themselves.
        g.data[3].y=np.repeat(0,len(arrival_t))
        g.data[3].x=arrival_t
        g.data[4].x=np.repeat(0,len(arrival_t))
        g.data[4].y=homo_t
        g.data[5].x=arrival_t
        g.data[5].y=homo_t


def response2(change):
    """Reset the simulation: clear stored arrivals and restore the figure."""
    global tr,arrival_t, homo_t,next_proposal,trace1,trace2,trace3,trace4,trace5,g
    tr=0
    arrival_t=np.array([])
    homo_t=np.array([])
    next_arrival.disabled=False
    with g.batch_update():
        # Keep only the intensity curve, then re-add the (now empty) traces.
        g.data = [g.data[0]]
        g.add_trace(trace2)
        g.add_trace(trace3)
        g.add_trace(trace4)
        g.add_trace(trace5)
        g.add_trace(trace6)
        g.update_layout(
            title_x=0.5,
            title_y=0.9,
            xaxis=dict(range=[-1,7] ),
            yaxis=dict(range=[-0.1,5]),
            legend=dict(
                x=1.1,
                y=0.7,
                traceorder="normal",
                font=dict(
                    family="sans-serif",
                    size=12,
                    color="black"
                ))
        )


# Wire the buttons to their callbacks and assemble the widget layout.
next_arrival.on_click(response1)
clear.on_click(response2)

container1 = widgets.HBox([next_arrival,clear])
widget1=widgets.HBox([g ])
Widget=widgets.VBox([container1,widget1] )
Widget
# -
inversion_method.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + colab={"base_uri": "https://localhost:8080/"} id="TOn4ozbs9vS1" executionInfo={"status": "ok", "timestamp": 1619470575154, "user_tz": 420, "elapsed": 383, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "09607993393454608900"}} outputId="5e896329-482d-4b15-ca59-4043b5d63d41" # "magic" commands, prefaced with "%", changes settings in the notebook # this ensures plots are embedded in notebook web page # %matplotlib inline # pdb = Python debugger, so this command turns the debugger OFF # %pdb off # + id="g6MsaUU899dU" executionInfo={"status": "ok", "timestamp": 1619470577408, "user_tz": 420, "elapsed": 1890, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "09607993393454608900"}} # numpy = numerical Python, implements arrays (/ matrices) import numpy as np # limit number of decimal places printed for floating-point numbers np.set_printoptions(precision=3) # scipy = scientific Python, implements operations on arrays / matrices import scipy as sp # linalg = linear algebra, implements eigenvalues, matrix inverse, etc from scipy import linalg as la # optimize = optimization, root finding, etc from scipy import optimize as op # produce matlab-style plots import matplotlib as mpl # increase font size on plots mpl.rc('font',**{'size':18}) # use LaTeX to render symbols mpl.rc('text',usetex=False) # animation from matplotlib import animation as ani # Matlab-style plotting import matplotlib.pyplot as plt # symbolic computation, i.e. 
# computer algebra (like Mathematica, Wolfram Alpha)
import sympy as sym


# + id="p3jbh57s9-d7"
def numerical_simulation(f,t,x,t0=0.,dt=1e-4,ut=None,ux=None,utx=None,return_u=False):
  """
  simulate x' = f(x,u) with forward-Euler integration

  input:
    f : R x X x U --> X - vector field
      X - state space (must be vector space)
      U - control input set
    t - scalar - final simulation time
    x - initial condition; element of X

    (optional:)
    t0 - scalar - initial simulation time
    dt - scalar - stepsize parameter
    return_u - bool - whether to return u_

    (only one of:)
    ut : R --> U
    ux : X --> U
    utx : R x X --> U

  output:
    t_ - N array - time trajectory
    x_ - N x X array - state trajectory
    (if return_u:)
    u_ - N x U array - input trajectory
  """
  t_,x_,u_ = [t0],[x],[]
  # at most one of the input callbacks may be supplied
  inputs = sum([1 if u is not None else 0 for u in [ut,ux,utx]])
  assert inputs <= 1, "more than one of ut,ux,utx defined"
  if inputs == 0:
    assert not return_u, "no input supplied"
  else:
    # unify the three input signatures as u(t, x)
    if ut is not None:
      u = lambda t,x : ut(t)
    elif ux is not None:
      u = lambda t,x : ux(x)
    elif utx is not None:
      u = lambda t,x : utx(t,x)
  # forward-Euler loop: x_{k+1} = x_k + f(t_k, x_k[, u_k]) * dt
  while t_[-1]+dt < t:
    if inputs == 0:
      _t,_x = t_[-1],x_[-1]
      dx = f(t_[-1],x_[-1]) * dt
    else:
      _t,_x,_u = t_[-1],x_[-1],u(t_[-1],x_[-1])
      dx = f(_t,_x,_u) * dt
      u_.append( _u )
    x_.append( _x + dx )
    t_.append( _t + dt )
  if return_u:
    return np.asarray(t_),np.asarray(x_),np.asarray(u_)
  else:
    return np.asarray(t_),np.asarray(x_)


# + [markdown] id="I4FbsIQaWJmo"
# ## phase portrait

# + id="9rNK1yFO-Bjr"
# pendulum parameters
g = 9.81
m = 75000.0 # kg
ell = 40 # m
alpha = 0.1# friction
ut = lambda t : 0
dt = .01

def f(t,x,u):
  """Pendulum vector field: state x = (q, dq) maps to x' = (dq, ddq)."""
  q, dq = x
  # u = 0
  ddq = g/ell*np.sin(q)-alpha*dq#+1/(m*ell)*u*np.cos(q)
  # BUG FIX: the original returned np.asarray([q,dq]) -- the state itself,
  # not its derivative -- so ddq was computed and then discarded and the
  # simulated "dynamics" were x' = x.  The vector field must return (dq, ddq).
  return np.asarray([dq,ddq])

plt.figure(figsize=(5,5))

# simulation time
t = 15

# phase portrait / "quiver" plot
ax = plt.subplot(1,1,1)
X, Y = np.meshgrid(np.linspace(-1,+1, 11), np.linspace(-1, +1, 11))
dX,dY = np.asarray([f(0.,(x,y),0.).flatten() for x,y in zip(X.flatten(),Y.flatten())]).T
dX,dY = dX.reshape(X.shape),dY.reshape(Y.shape)
#ax.quiver(X,Y,dX,dY)
ax.streamplot(X,Y,dX,dY,density=2.,color=(0,0,0,.5))
ax.set_xlabel(r'$x_1$')
ax.set_ylabel(r'$x_2$')

for _ in range(10):
  # random initial condition in [-1,+1] x [-1,+1] square
  x0 = 2*(np.random.rand(2) - .5)
  t_,x_ = numerical_simulation(f,t,x0,dt=dt,ut=ut)
  ax.plot(x_[:,0],x_[:,1])

t_,x_ = numerical_simulation(f,t,[.5,0],dt=dt,ut=ut)
ax.plot(x_[:,0],x_[:,1],'r',lw=4)
t_,x_ = numerical_simulation(f,t,[0,.5],dt=dt,ut=ut)
ax.plot(x_[:,0],x_[:,1],'b',lw=4)

ax.set_xlim((-1,+1))
ax.set_ylim((-1,+1))
plt.tight_layout()

# + id="eUGq3NUa_aPJ"
# Linearization of f about the equilibrium (q, dq) = (0, 0).
# NOTE(review): f above damps with -alpha*dq, whose Jacobian entry is -alpha,
# not -alpha/ell**2 -- confirm which friction model is intended.
A = np.array([[0,1],[g/ell, -alpha/ell**2]])
print('A')
print(A)
eigenvalue, _ = np.linalg.eig(A)
print('eigenvalue of A')
print(eigenvalue)

# + id="OW7p4TeFW-U8"
# Closed-loop transfer function of plant P = 1/(s-1) with controller C.
sym.var("k_P s")
C = k_P+1/s
P = 1/(s-1)
Gs = P*C/(1+P*C)
Gs = Gs.simplify().cancel()
Gs

# + id="BuKhr_KeY9f9"
tutorial4.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # --- # # Pandas # # *pandas* is a Python library for data analysis. It offers a number of data exploration, cleaning and transformation operations that are critical in working with data in Python. # # *pandas* build upon *numpy* and *scipy* providing easy-to-use data structures and data manipulation functions with integrated indexing. # # The main data structures *pandas* provides are *Series* and *DataFrames*. After a brief introduction to these two data structures and data ingestion, the key features of *pandas* this notebook covers are: # * Generating descriptive statistics on data # * Data cleaning using built in pandas functions # * Frequent data operations for subsetting, filtering, insertion, deletion and aggregation of data # * Merging multiple datasets using dataframes # * Working with timestamps and time-series data # # **Additional Recommended Resources:** # * *pandas* Documentation: http://pandas.pydata.org/pandas-docs/stable/ # * *Python for Data Analysis* by <NAME> # * *Python Data Science Handbook* by <NAME> # # Let's get started with our first *pandas* notebook! # <p style="font-family: Arial; font-size:1.75em;color:#2462C0; font-style:bold"><br> # # Import Libraries # </p> import pandas as pd # <p style="font-family: Arial; font-size:1.75em;color:#2462C0; font-style:bold"> # Introduction to pandas Data Structures</p> # <br> # *pandas* has two main data structures it uses, namely, *Series* and *DataFrames*. # # <p style="font-family: Arial; font-size:1.75em;color:#2462C0; font-style:bold"> # pandas Series</p> # # *pandas Series* one-dimensional labeled array. 
# ser = pd.Series([100, 'foo', 300, 'bar', 500], ['tom', 'bob', 'nancy', 'dan', 'eric']) ser ser.index ser.loc[['nancy','bob']] ser[[4, 3, 1]] ser.iloc[2] 'bob' in ser ser ser * 2 ser[['nancy', 'eric']] ** 2 # <p style="font-family: Arial; font-size:1.75em;color:#2462C0; font-style:bold"> # pandas DataFrame</p> # # *pandas DataFrame* is a 2-dimensional labeled data structure. # <p style="font-family: Arial; font-size:1.25em;color:#2462C0; font-style:bold"> # Create DataFrame from dictionary of Python Series</p> d = {'one' : pd.Series([100., 200., 300.], index=['apple', 'ball', 'clock']), 'two' : pd.Series([111., 222., 333., 4444.], index=['apple', 'ball', 'cerill', 'dancy'])} df = pd.DataFrame(d) print(df) df.index df.columns pd.DataFrame(d, index=['dancy', 'ball', 'apple']) pd.DataFrame(d, index=['dancy', 'ball', 'apple'], columns=['two', 'five']) # <p style="font-family: Arial; font-size:1.25em;color:#2462C0; font-style:bold"> # Create DataFrame from list of Python dictionaries</p> data = [{'alex': 1, 'joe': 2}, {'ema': 5, 'dora': 10, 'alice': 20}] pd.DataFrame(data) pd.DataFrame(data, index=['orange', 'red']) pd.DataFrame(data, columns=['joe', 'dora','alice']) # <p style="font-family: Arial; font-size:1.25em;color:#2462C0; font-style:bold"> # Basic DataFrame operations</p> df df['one'] df['three'] = df['one'] * df['two'] df df['flag'] = df['one'] > 250 df three = df.pop('three') three df del df['two'] df df.insert(2, 'copy_of_one', df['one']) df df['one_upper_half'] = df['one'][:2] df # <p style="font-family: Arial; font-size:1.75em;color:#2462C0; font-style:bold"> # Case Study: Movie Data Analysis</p> # <br>This notebook uses a dataset from the MovieLens website. We will describe the dataset further as we explore with it using *pandas*. # # ## Download the Dataset # # Please note that **you will need to download the dataset**. 
Although the video for this notebook says that the data is in your folder, the folder turned out to be too large to fit on the edX platform due to size constraints. # # Here are the links to the data source and location: # * **Data Source:** MovieLens web site (filename: ml-20m.zip) # * **Location:** https://grouplens.org/datasets/movielens/ # # Once the download completes, please make sure the data files are in a directory called *movielens* in your *Week-3-pandas* folder. # # Let us look at the files in this dataset using the UNIX command ls. # # + # Note: Adjust the name of the folder to match your local directory # !find .. | grep -i movielens # - # !cat ./movielens/movies.csv | wc -l # !head -5 ./movielens/ratings.csv # <p style="font-family: Arial; font-size:1.75em;color:#2462C0; font-style:bold"> # Use Pandas to Read the Dataset<br> # </p> # <br> # In this notebook, we will be using three CSV files: # * **ratings.csv :** *userId*,*movieId*,*rating*, *timestamp* # * **tags.csv :** *userId*,*movieId*, *tag*, *timestamp* # * **movies.csv :** *movieId*, *title*, *genres* <br> # # Using the *read_csv* function in pandas, we will ingest these three files. movies = pd.read_csv('./movielens/movies.csv', sep=',') print(type(movies)) movies.head(15) # + # Timestamps represent seconds since midnight Coordinated Universal Time (UTC) of January 1, 1970 tags = pd.read_csv('./movielens/tags.csv', sep=',') tags.head() # - ratings = pd.read_csv('./movielens/ratings.csv', sep=',', parse_dates=['timestamp']) ratings.head() # + # For current analysis, we will remove timestamp (we will come back to it!) 
del ratings['timestamp'] del tags['timestamp'] # - # <h1 style="font-size:2em;color:#2467C0">Data Structures </h1> # <h1 style="font-size:1.5em;color:#2467C0">Series</h1> # + #Extract 0th row: notice that it is infact a Series row_0 = tags.iloc[0] type(row_0) # - print(row_0) row_0.index row_0['userId'] 'rating' in row_0 row_0.name row_0 = row_0.rename('first_row') row_0.name # <h1 style="font-size:1.5em;color:#2467C0">DataFrames </h1> tags.head() tags.index tags.columns # + # Extract row 0, 11, 2000 from DataFrame tags.iloc[ [0,11,2000] ] # - # <h1 style="font-size:2em;color:#2467C0">Descriptive Statistics</h1> # # Let's look how the ratings are distributed! ratings['rating'].describe() ratings.describe() ratings['rating'].mean() ratings.mean() ratings['rating'].min() ratings['rating'].max() ratings['rating'].std() ratings['rating'].mode() ratings.corr() filter_1 = ratings['rating'] > 5 print(filter_1) filter_1.any() filter_2 = ratings['rating'] > 0 filter_2.all() # <h1 style="font-size:2em;color:#2467C0">Data Cleaning: Handling Missing Data</h1> movies.shape # + #is any row NULL ? movies.isnull().any() # - # Thats nice ! No NULL values ! ratings.shape # + #is any row NULL ? ratings.isnull().any() # - # Thats nice ! No NULL values ! tags.shape # + #is any row NULL ? tags.isnull().any() # - # We have some tags which are NULL. tags = tags.dropna() # + #Check again: is any row NULL ? tags.isnull().any() # - tags.shape # Thats nice ! No NULL values ! Notice the number of lines have reduced. 
# <h1 style="font-size:2em;color:#2467C0">Data Visualization</h1> # + # %matplotlib inline ratings.hist(column='rating', figsize=(15,10)) # - ratings.boxplot(column='rating', figsize=(15,20)) # <h1 style="font-size:2em;color:#2467C0">Slicing Out Columns</h1> # tags['tag'].head() movies[['title','genres']].head() ratings[-10:] tag_counts = tags['tag'].value_counts() tag_counts[-10:] tag_counts[:10].plot(kind='bar', figsize=(15,10)) # <h1 style="font-size:2em;color:#2467C0">Filters for Selecting Rows</h1> df.filter(regex='^T', axis=1).head(50) df.filter(like='C', axis=1).head(50) # <h1 style="font-size:2em;color:#2467C0">Filters for Selecting Rows</h1> # + is_highly_rated = ratings['rating'] >= 4.0 ratings[is_highly_rated][30:50] # + is_animation = movies['genres'].str.contains('Animation') movies[is_animation][5:15] # - movies[is_animation].head(15) # <h1 style="font-size:2em;color:#2467C0">Group By and Aggregate </h1> ratings_count = ratings[['movieId','rating']].groupby('rating').count() ratings_count average_rating = ratings[['movieId','rating']].groupby('movieId').mean() average_rating.head() movie_count = ratings[['movieId','rating']].groupby('movieId').count() movie_count.head() movie_count = ratings[['movieId','rating']].groupby('movieId').count() movie_count.tail() # <h1 style="font-size:2em;color:#2467C0">Merge Dataframes</h1> tags.head() movies.head() t = movies.merge(tags, on='movieId', how='inner') t.head() # More examples: http://pandas.pydata.org/pandas-docs/stable/merging.html # <p style="font-family: Arial; font-size:1.75em;color:#2462C0; font-style:bold"><br> # # # Combine aggreagation, merging, and filters to get useful analytics # </p> avg_ratings = ratings.groupby('movieId', as_index=False).mean() del avg_ratings['userId'] avg_ratings.head() box_office = movies.merge(avg_ratings, on='movieId', how='inner') box_office.tail() # + is_highly_rated = box_office['rating'] >= 4.0 box_office[is_highly_rated][-5:] # + is_comedy = 
box_office['genres'].str.contains('Comedy') box_office[is_comedy][:5] # - box_office[is_comedy & is_highly_rated][-5:] # <h1 style="font-size:2em;color:#2467C0">Vectorized String Operations</h1> # movies.head() # <p style="font-family: Arial; font-size:1.35em;color:#2462C0; font-style:bold"><br> # # Split 'genres' into multiple columns # # <br> </p> movie_genres = movies['genres'].str.split('|', expand=True) movie_genres[:10] # <p style="font-family: Arial; font-size:1.35em;color:#2462C0; font-style:bold"><br> # # Add a new column for comedy genre flag # # <br> </p> movie_genres['isComedy'] = movies['genres'].str.contains('Comedy') movie_genres[:10] # <p style="font-family: Arial; font-size:1.35em;color:#2462C0; font-style:bold"><br> # # Extract year from title e.g. (1995) # # <br> </p> movies['year'] = movies['title'].str.extract('.*\((.*)\).*', expand=True) movies.tail() # <p style="font-family: Arial; font-size:1.35em;color:#2462C0; font-style:bold"><br> # # More here: http://pandas.pydata.org/pandas-docs/stable/text.html#text-string-methods # <br> </p> # <h1 style="font-size:2em;color:#2467C0">Parsing Timestamps</h1> # Timestamps are common in sensor data or other time series datasets. # Let us revisit the *tags.csv* dataset and read the timestamps! 
# tags = pd.read_csv('./movielens/tags.csv', sep=',') tags.dtypes # <p style="font-family: Arial; font-size:1.35em;color:#2462C0; font-style:bold"> # # Unix time / POSIX time / epoch time records # time in seconds <br> since midnight Coordinated Universal Time (UTC) of January 1, 1970 # </p> tags.head(5) tags['parsed_time'] = pd.to_datetime(tags['timestamp'], unit='s') # <p style="font-family: Arial; font-size:1.35em;color:#2462C0; font-style:bold"> # # Data Type datetime64[ns] maps to either <M8[ns] or >M8[ns] depending on the hardware # # </p> # + tags['parsed_time'].dtype # - tags.head(2) # <p style="font-family: Arial; font-size:1.35em;color:#2462C0; font-style:bold"> # # Selecting rows based on timestamps # </p> # + greater_than_t = tags['parsed_time'] > '2015-02-01' selected_rows = tags[greater_than_t] tags.shape, selected_rows.shape # - # <p style="font-family: Arial; font-size:1.35em;color:#2462C0; font-style:bold"> # # Sorting the table using the timestamps # </p> tags.sort_values(by='parsed_time', ascending=True)[:10] # <h1 style="font-size:2em;color:#2467C0">Average Movie Ratings over Time </h1> # ## Are Movie ratings related to the year of launch? average_rating = ratings[['movieId','rating']].groupby('movieId', as_index=False).mean() average_rating.tail() joined = movies.merge(average_rating, on='movieId', how='inner') joined.head() joined.corr() yearly_average = joined[['year','rating']].groupby('year', as_index=False).mean() yearly_average[:10] yearly_average[-20:].plot(x='year', y='rating', figsize=(15,10), grid=True) # <p style="font-family: Arial; font-size:1.35em;color:#2462C0; font-style:bold"> # # Do some years look better for the boxoffice movies than others? <br><br> # # Does any data point seem like an outlier in some sense? # # </p>
_numpy_pandas/Introduction to Pandas.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import matplotlib.pyplot as plt import numpy as np torque_distribution_front_rear = np.arange(0, 10) rear_torque = 2 / (1 + torque_distribution_front_rear) front_torque = torque_distribution_front_rear * rear_torque plt.plot(torque_distribution_front_rear, front_torque, label='front') plt.plot(torque_distribution_front_rear, rear_torque, label='rear') plt.legend() plt.savefig('out.png') # %ls
notebooks/front_rear_torque_plotter.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.9.10 64-bit
#     language: python
#     name: python3
# ---

import xgboost
import shap
import pandas as pd
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_recall_curve, roc_curve
import numpy as np
import matplotlib.pyplot as plt
import plots as plots
import pickle
import lime
import lime.lime_tabular

df = pd.read_csv('raw-dataset.csv')
df.head()

# +
# First model variant (opt_XGB_YN0): these six feature columns are removed
# from every split of the design matrix before inspection.
data = pickle.load(open('xgb_models/data5.pkl', 'rb'))
model = pickle.load(open('xgb_models/opt_XGB_YN0.pkl', 'rb'))

dropped_features = ['PGA (g)', 'Elevation', '4m_Qtncs_m', '4m_Qtncs_s', '4m_Ic_med', '4m_Ic_std']

X_train_target = data['X_train_target'].drop(dropped_features, axis=1)
X_test_target = data['X_test_target'].drop(dropped_features, axis=1)
X = data['X'].drop(dropped_features, axis=1)
X_train = data['X_train'].drop(dropped_features, axis=1)
X_test = data['X_test'].drop(dropped_features, axis=1)
X_val = data['X_val'].drop(dropped_features, axis=1)
X_all = data['X_all'].drop(dropped_features, axis=1)

y_train = data['y_train']
y_test = data['y_test']
y_val = data['y_val']
y = data['y']
# -

X_train_target.head()

# Count of rows with L (km) > 2 that also carry Target == 1.
len(X_train_target[(X_train_target['L (km)'] > 2) & (X_train_target['Target'] == 1)])

# Rows with groundwater depth above 3 m.
X_train_target.loc[X_train_target['GWD (m)'] > 3]

# Short-distance subset (L < 0.3 km): positive rows on steeper ground.
short_L = X_train_target.loc[X_train_target['L (km)'] < 0.3]
short_L[(short_L['Slope (%)'] > 2) & (short_L['Target'] == 1)]

# Rows whose groundwater depth sits in a narrow band around 2 m.
gwd_near_2 = X_train_target[(X_train_target['GWD (m)'] > 1.95) & (X_train_target['GWD (m)'] < 2.05)]
gwd_near_2.head()
gwd_near_2.loc[gwd_near_2['L (km)'] < 0.7]

# # Model B

# +
# Second model variant (opt_XGB_YN3): keeps PGA and Elevation, drops only
# the four 4m_* summary features.
data = pickle.load(open('xgb_models/data5.pkl', 'rb'))
model = pickle.load(open('xgb_models/opt_XGB_YN3.pkl', 'rb'))

dropped_features = ['4m_Qtncs_m', '4m_Qtncs_s', '4m_Ic_med', '4m_Ic_std']

X_train_target = data['X_train_target'].drop(dropped_features, axis=1)
X_test_target = data['X_test_target'].drop(dropped_features, axis=1)
X = data['X'].drop(dropped_features, axis=1)
X_train = data['X_train'].drop(dropped_features, axis=1)
X_test = data['X_test'].drop(dropped_features, axis=1)
X_val = data['X_val'].drop(dropped_features, axis=1)
X_all = data['X_all'].drop(dropped_features, axis=1)

y_train = data['y_train']
y_test = data['y_test']
y_val = data['y_val']
y = data['y']
# -

# High-shaking, low-elevation rows.
X_train_target[(X_train_target['PGA (g)'] > 0.5) & (X_train_target['Elevation'] < 3)]

# Low-shaking, higher-elevation rows that are nonetheless positive.
X_train_target[(X_train_target['PGA (g)'] < 0.4) & (X_train_target['Elevation'] > 4) & (X_train_target['Target'] == 1)]
data-analysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # --- # # _You are currently looking at **version 1.5** of this notebook. To download notebooks and datafiles, as well as get help on Jupyter notebooks in the Coursera platform, visit the [Jupyter Notebook FAQ](https://www.coursera.org/learn/python-data-analysis/resources/0dhYG) course resource._ # # --- # # Assignment 3 - More Pandas # This assignment requires more individual learning then the last one did - you are encouraged to check out the [pandas documentation](http://pandas.pydata.org/pandas-docs/stable/) to find functions or methods you might not have used yet, or ask questions on [Stack Overflow](http://stackoverflow.com/) and tag them as pandas and python related. And of course, the discussion forums are open for interaction with your peers and the course staff. # ### Question 1 (20%) # Load the energy data from the file `Energy Indicators.xls`, which is a list of indicators of [energy supply and renewable electricity production](Energy%20Indicators.xls) from the [United Nations](http://unstats.un.org/unsd/environment/excel_file_tables/2013/Energy%20Indicators.xls) for the year 2013, and should be put into a DataFrame with the variable name of **energy**. # # Keep in mind that this is an Excel file, and not a comma separated values file. Also, make sure to exclude the footer and header information from the datafile. The first two columns are unneccessary, so you should get rid of them, and you should change the column labels so that the columns are: # # `['Country', 'Energy Supply', 'Energy Supply per Capita', '% Renewable']` # # Convert `Energy Supply` to gigajoules (there are 1,000,000 gigajoules in a petajoule). For all countries which have missing data (e.g. data with "...") make sure this is reflected as `np.NaN` values. 
# # Rename the following list of countries (for use in later questions): # # ```"Republic of Korea": "South Korea", # "United States of America": "United States", # "United Kingdom of Great Britain and Northern Ireland": "United Kingdom", # "China, Hong Kong Special Administrative Region": "Hong Kong"``` # # There are also several countries with numbers and/or parenthesis in their name. Be sure to remove these, # # e.g. # # `'Bolivia (Plurinational State of)'` should be `'Bolivia'`, # # `'Switzerland17'` should be `'Switzerland'`. # # <br> # # Next, load the GDP data from the file `world_bank.csv`, which is a csv containing countries' GDP from 1960 to 2015 from [World Bank](http://data.worldbank.org/indicator/NY.GDP.MKTP.CD). Call this DataFrame **GDP**. # # Make sure to skip the header, and rename the following list of countries: # # ```"Korea, Rep.": "South Korea", # "Iran, Islamic Rep.": "Iran", # "Hong Kong SAR, China": "Hong Kong"``` # # <br> # # Finally, load the [Sciamgo Journal and Country Rank data for Energy Engineering and Power Technology](http://www.scimagojr.com/countryrank.php?category=2102) from the file `scimagojr-3.xlsx`, which ranks countries based on their journal contributions in the aforementioned area. Call this DataFrame **ScimEn**. # # Join the three datasets: GDP, Energy, and ScimEn into a new dataset (using the intersection of country names). Use only the last 10 years (2006-2015) of GDP data and only the top 15 countries by Scimagojr 'Rank' (Rank 1 through 15). # # The index of this DataFrame should be the name of the country, and the columns should be ['Rank', 'Documents', 'Citable documents', 'Citations', 'Self-citations', # 'Citations per document', 'H index', 'Energy Supply', # 'Energy Supply per Capita', '% Renewable', '2006', '2007', '2008', # '2009', '2010', '2011', '2012', '2013', '2014', '2015']. 
#
# *This function should return a DataFrame with 20 columns and 15 entries.*

# +
import pandas as pd
import numpy as np

# Load the raw UN energy sheet.  The leading rows are header text and the
# rows from 243 on are footnotes, so both are stripped below.
# (A dead statement `x.drop(x.index[0:16])` whose result was discarded has
# been removed.)
x = pd.read_excel("energy_indicator.xls")
laifu = x.drop(x.index[243:281])
laifu.drop(laifu.index[:16], inplace=True)
# The first two columns carry no data.
laifu.drop(['Unnamed: 1', "Unnamed: 0"], axis=1, inplace=True)
laifu.rename(columns={"Environmental Indicators: Energy": "Country",
                      "Unnamed: 3": "Energy Supply",
                      "Unnamed: 4": "Energy Supply per Capita",
                      "Unnamed: 5": "% Renewable"}, inplace=True)
# Missing readings are recorded as "..." in the sheet; the assignment asks
# that they become np.NaN -- NOT the string "Nan" that the original
# `laifu.fillna("Nan")` produced.  Leaving "..." strings in the column would
# also make the gigajoule conversion below replicate each string a million
# times instead of scaling a number.
laifu.replace('...', np.nan, inplace=True)
# Convert petajoules to gigajoules (1 PJ = 1,000,000 GJ); coerce any
# remaining non-numeric residue to NaN first.
laifu['Energy Supply'] = pd.to_numeric(laifu['Energy Supply'], errors='coerce') * 1000000
# Canonical country renames required by the assignment.
y = laifu["Country"].str.replace("Republic of Korea", "South Korea").str.replace("United States of America", "United States").str.replace("United Kingdom of Great Britain and Northern Ireland", "United Kingdom").str.replace("China, Hong Kong Special Administrative Region", "Hong Kong")
laifu["Country"] = y
# Strip footnote digits and parenthesised qualifiers from names, e.g.
# 'Switzerland17' -> 'Switzerland', 'Bolivia (Plurinational State of)' -> 'Bolivia'.
laifu["Country"] = laifu["Country"].str.replace(r'\d+', '').str.strip().str.replace(r"\(.*\)", "")
Energy = laifu.reset_index().drop(columns=["index"])
Energy.set_index("Country", inplace=True)
Energy
# -

# Next, load the GDP data from the file world_bank.csv, which is a csv containing countries' GDP from 1960 to 2015 from World Bank. Call this DataFrame GDP.
#
# Make sure to skip the header, and rename the following list of countries:
#
# "Korea, Rep.": "South Korea",
# "Iran, Islamic Rep.": "Iran",
# "Hong Kong SAR, China": "Hong Kong"
#
#

# +
# Read the World Bank GDP export.  The real column names live in row 3 of
# the raw sheet, so promote that row to the header and drop the junk rows.
x = pd.read_csv("GDP.csv")
x.rename(columns=x.iloc[3], inplace=True)
x.drop([0, 1, 2, 3], inplace=True)
# Two dead statements removed here:
#   x.drop(columns=[2016.0, 2017.0])                       # result discarded
#   x.reset_index().drop(columns=["index"], inplace=True)  # mutated a temporary
# Neither had any effect on `x`.

# Move the country name into a leading "Country" column.
x["Country"] = x["Country Name"]
cols = x.columns.tolist()
cols = cols[-1:] + cols[:-1]
x = x[cols]
x.drop(columns=["Country Name"], inplace=True)

# Canonical country renames required by the assignment.
y = x["Country"].str.replace("Korea, Rep.", "South Korea").str.replace("Iran, Islamic Rep.", "Iran").str.replace("Hong Kong SAR, China", "Hong Kong")
x["Country"] = y
GDP = x
GDP.columns
# Keep only 2006 onwards.  The year labels were taken from a data row, so
# they are floats (1960.0, 1961.0, ...); the comprehension produces exactly
# the same float labels as the original 46-element literal list.
GDP.drop(columns=[float(year) for year in range(1960, 2006)], inplace=True)
GDP = GDP.reset_index().drop(columns=["index"])
GDP.set_index("Country", inplace=True)
GDP
# -

# Finally, load the Scimago Journal and Country Rank data for Energy Engineering and Power Technology from the file scimagojr-3.xlsx, which ranks countries based on their journal contributions in the aforementioned area. Call this DataFrame ScimEn.
#
# Join the three datasets: GDP, Energy, and ScimEn into a new dataset (using the intersection of country names). Use only the last 10 years (2006-2015) of GDP data and only the top 15 countries by Scimagojr 'Rank' (Rank 1 through 15).
#
# The index of this DataFrame should be the name of the country, and the columns should be ['Rank', 'Documents', 'Citable documents', 'Citations', 'Self-citations', 'Citations per document', 'H index', 'Energy Supply', 'Energy Supply per Capita', '% Renewable', '2006', '2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014', '2015'].
#
# This function should return a DataFrame with 20 columns and 15 entries

# Load the Scimagojr ranking and keep only the top-15 rows.
ScimEn = pd.read_excel("scimagojr.xlsx")
ScimEn.head(15)
ScimEn.drop(ScimEn.index[15:], inplace=True)
ScimEn.set_index("Country", inplace=True)
ScimEn

# +
# Left-join the three country-indexed frames and tidy the year columns.
x = ScimEn.merge(Energy, how="left", left_index=True, right_index=True).merge(GDP, how="left", left_index=True, right_index=True)
x.drop(columns=['Country Code', 'Indicator Name', 'Indicator Code'], inplace=True)
# Year labels arrive as floats (2006.0, ...); relabel them as strings.  The
# comprehension is equivalent to the original literal mapping 2006.0->"2006"
# through 2017.0->"2017".
x.rename(columns={float(year): str(year) for year in range(2006, 2018)}, inplace=True)
x.drop(columns=["2016", "2017"], inplace=True)
DataBase = x
DataBase
# +
# Alternative, self-contained solution for Question 1.
import pandas as pd
import numpy as np

energy = pd.read_excel('Energy Indicators.xls')
energy = energy[16:243]                                  # drop header & footnote rows
energy = energy.drop(energy.columns[[0, 1]], axis=1)     # first two columns carry no data
energy.rename(columns={'Environmental Indicators: Energy': 'Country', 'Unnamed: 3': 'Energy Supply', 'Unnamed: 4': 'Energy Supply per Capita', 'Unnamed: 5': '% Renewable'}, inplace=True)
energy.replace('...', np.nan, inplace=True)              # "..." marks missing data
energy['Energy Supply'] *= 1000000                       # petajoules -> gigajoules


def remove_digit(data):
    """Strip digits and any trailing '(...)' qualifier from a country name.

    e.g. 'Switzerland17' -> 'Switzerland',
         'Bolivia (Plurinational State of)' -> 'Bolivia'.
    """
    newData = ''.join([i for i in data if not i.isdigit()])
    i = newData.find('(')
    if i > -1:
        newData = newData[:i]
    return newData.strip()


energy['Country'] = energy['Country'].apply(remove_digit)
di = {"Republic of Korea": "South Korea",
      "United States of America": "United States",
      "United Kingdom of Great Britain and Northern Ireland": "United Kingdom",
      "China, Hong Kong Special Administrative Region": "Hong Kong"}
energy.replace({"Country": di}, inplace=True)
#energy

# In[275]:

GDP = pd.read_csv('world_bank.csv', skiprows=4)
GDP.rename(columns={'Country Name': 'Country'}, inplace=True)
di = {"Korea, Rep.": "South Korea", "Iran, Islamic Rep.": "Iran", "Hong Kong SAR, China": "Hong Kong"}
GDP.replace({"Country": di}, inplace=True)
ScimEn = pd.read_excel('scimagojr-3.xlsx')
df = pd.merge(pd.merge(energy, GDP, on='Country'), ScimEn, on='Country')
# We only need 2006-2015 data
df.set_index('Country', inplace=True)
df = df[['Rank', 'Documents', 'Citable documents', 'Citations', 'Self-citations', 'Citations per document', 'H index', 'Energy Supply', 'Energy Supply per Capita', '% Renewable', '2006', '2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014', '2015']]
df = df.loc[df['Rank'].isin(range(1, 16))]
# FIX: DataFrame.sort() was removed in pandas 0.20 -- sort_values() is the
# supported API (the original `df.sort('Rank', inplace=True)` raises
# AttributeError on any modern pandas).
df.sort_values('Rank', inplace=True)
df
# -

# ### Question 2 (6.6%)
# The previous question joined three datasets then reduced this to just the top 15 entries. When you joined the datasets, but before you reduced this to the top 15 items, how many entries did you lose?
# # *This function should return a single number.* # %%HTML <svg width="800" height="300"> <circle cx="150" cy="180" r="80" fill-opacity="0.2" stroke="black" stroke-width="2" fill="blue" /> <circle cx="200" cy="100" r="80" fill-opacity="0.2" stroke="black" stroke-width="2" fill="red" /> <circle cx="100" cy="100" r="80" fill-opacity="0.2" stroke="black" stroke-width="2" fill="green" /> <line x1="150" y1="125" x2="300" y2="150" stroke="black" stroke-width="2" fill="black" stroke-dasharray="5,3"/> <text x="300" y="165" font-family="Verdana" font-size="35">Everything but this!</text> </svg> def answer_two(): # Union A, B, C - Intersection A, B, C union = pd.merge(pd.merge(energy, GDP, on='Country', how='outer'), ScimEn, on='Country', how='outer') intersect = pd.merge(pd.merge(energy, GDP, on='Country'), ScimEn, on='Country') return len(union)-len(intersect) answer_two # <br> # # ## Answer the following questions in the context of only the top 15 countries by Scimagojr Rank (aka the DataFrame returned by `answer_one()`) # ### Question 3 (6.6%) # What is the average GDP over the last 10 years for each country? (exclude missing values from this calculation.) # # *This function should return a Series named `avgGDP` with 15 countries and their average GDP sorted in descending order.* DataBase.head() DataBase.columns x = DataBase[['2006', '2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014', '2015']] x.mean(axis = 1, skipna= True) def answer_three(): Top15 = answer_one() years = ['2006', '2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014', '2015'] return (Top15[years].mean(axis=1)).sort_values(ascending=False).rename('avgGDP') # ### Question 4 (6.6%) # By how much had the GDP changed over the 10 year span for the country with the 6th largest average GDP? 
# # *This function should return a single number.* # + x = DataBase[['2006', '2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014', '2015']] y = x.loc["Canada"] y.max() - y.min() DataBase.drop(columns = ["GDP_span"], inplace= True) def answer_four(): Top15 = answer_one() return "ANSWER" # - # ### Question 5 (6.6%) # What is the mean `Energy Supply per Capita`? # # *This function should return a single number.* DataBase["Energy Supply per Capita"].mean(skipna= True) # def answer_five(): # Top15 = answer_one() # return "ANSWER" # ### Question 6 (6.6%) # What country has the maximum % Renewable and what is the percentage? # # *This function should return a tuple with the name of the country and the percentage.* # + xxx = DataBase.copy() xxx.reset_index(inplace = True) x = xxx[["% Renewable", "Country"]].max(skipna = True).tolist() tuple(x) # def answer_six(): # Top15 = answer_one() # return "ANSWER" # - # ### Question 7 (6.6%) # Create a new column that is the ratio of Self-Citations to Total Citations. # What is the maximum value for this new column, and what country has the highest ratio? # # *This function should return a tuple with the name of the country and the ratio.* # + DataBase["citation_ratio"]= DataBase["Self-citations"]/DataBase["Citations"] x = DataBase.sort_values("citation_ratio", ascending = False) x.reset_index(inplace= True) tuple(x[["Country", "citation_ratio"]].max().tolist()) # def answer_seven(): # Top15 = answer_one() # return "ANSWER" # - # ### Question 8 (6.6%) # # Create a column that estimates the population using Energy Supply and Energy Supply per capita. # What is the third most populous country according to this estimate? 
#
# *This function should return a single string value.*

# Estimate population as total energy supply divided by per-capita supply.
x = DataBase.copy().fillna(0)
x["estimate_Pop"] = x["Energy Supply"] / x["Energy Supply per Capita"]
# FIX: the question asks for the THIRD most populous country, so take
# index[2] of the descending sort (the original took index[0], which is the
# most populous).
x.sort_values("estimate_Pop", na_position='last', ascending=False).index[2]

# def answer_eight():
#     Top15 = answer_one()
#     return "ANSWER"

# ### Question 9 (6.6%)
# Create a column that estimates the number of citable documents per person.
# What is the correlation between the number of citable documents per capita and the energy supply per capita? Use the `.corr()` method, (Pearson's correlation).
#
# *This function should return a single number.*
#
# *(Optional: Use the built-in function `plot9()` to visualize the relationship between Energy Supply per Capita vs. Citable docs per Capita)*

# Citable documents per (estimated) person, and its Pearson correlation with
# energy supply per capita.
x["Citable_doc_capita"] = x["Citable documents"] / x["estimate_Pop"]
x["Energy Supply per Capita"].corr(x["Citable_doc_capita"])

# def answer_nine():
#     Top15 = answer_one()
#     return "ANSWER"


def plot9():
    """Scatter of Citable docs per Capita vs. Energy Supply per Capita."""
    import matplotlib as plt  # NOTE(review): imported but unused; plotting goes through DataFrame.plot
    # %matplotlib inline
    Top15 = answer_one()
    Top15['PopEst'] = Top15['Energy Supply'] / Top15['Energy Supply per Capita']
    Top15['Citable docs per Capita'] = Top15['Citable documents'] / Top15['PopEst']
    Top15.plot(x='Citable docs per Capita', y='Energy Supply per Capita', kind='scatter', xlim=[0, 0.0006])


# +
#plot9() # Be sure to comment out plot9() before submitting the assignment!
# -

# ### Question 10 (6.6%)
# Create a new column with a 1 if the country's % Renewable value is at or above the median for all countries in the top 15, and a 0 if the country's % Renewable value is below the median.
#
# *This function should return a series named `HighRenew` whose index is the country name sorted in ascending order of rank.*

# +
c = DataBase.copy()
median = c["% Renewable"].median()


def renewable_sort(percentage):
    """Flag a % Renewable value: 1 if at/above the top-15 median, else 0."""
    if percentage >= median:
        return 1
    elif percentage < median:
        return 0


c["HighRenew"] = c["% Renewable"].apply(renewable_sort)
# Present the flags ordered by Scimagojr rank.
f = c.sort_values("Rank")[["HighRenew"]]
f["HighRenew"]

# def answer_ten():
#     Top15 = answer_one()
#     return "ANSWER"
# -

# ### Question 11 (6.6%)
# Use the following dictionary to group the Countries by Continent, then create a dataframe that displays the sample size (the number of countries in each continent bin), and the sum, mean, and std deviation for the estimated population of each country.
#
# ```python
# ContinentDict  = {'China':'Asia',
#                   'United States':'North America',
#                   'Japan':'Asia',
#                   'United Kingdom':'Europe',
#                   'Russian Federation':'Europe',
#                   'Canada':'North America',
#                   'Germany':'Europe',
#                   'India':'Asia',
#                   'France':'Europe',
#                   'South Korea':'Asia',
#                   'Italy':'Europe',
#                   'Spain':'Europe',
#                   'Iran':'Asia',
#                   'Australia':'Australia',
#                   'Brazil':'South America'}
# ```
#
# *This function should return a DataFrame with index named Continent `['Asia', 'Australia', 'Europe', 'North America', 'South America']` and columns `['size', 'sum', 'mean', 'std']`*

# +
ContinentDict = {'China':'Asia',
                 'United States':'North America',
                 'Japan':'Asia',
                 'United Kingdom':'Europe',
                 'Russian Federation':'Europe',
                 'Canada':'North America',
                 'Germany':'Europe',
                 'India':'Asia',
                 'France':'Europe',
                 'South Korea':'Asia',
                 'Italy':'Europe',
                 'Spain':'Europe',
                 'Iran':'Asia',
                 'Australia':'Australia',
                 'Brazil':'South America'}
DataBase
# x = pd.DataFrame(ContinentDict , index=[0]).transpose()
# y= x.reset_index().rename(columns = {"index": "Country", 0 : "Continent"}).set_index("Country")
# DataBase = DataBase.merge(y, how = "left", left_index = True, right_index = True)
# DataBase["estimate_Pop"] = DataBase["Energy Supply"] / DataBase["Energy Supply per Capita"]
# a = DataBase[["Continent","estimate_Pop"]].
# a.groupby("Continent")
# summery = a.agg({"estimate_Pop": "sum"})
# mean= a.agg({"estimate_Pop": "mean"})
# std= a.agg({"estimate_Pop": "std"})
# size = a.agg({"estimate_Pop": "count"})
# def answer_eleven():
#     Top15 = answer_one()
#     return "ANSWER"
# -

# ### Question 12 (6.6%)
# Cut % Renewable into 5 bins. Group Top15 by the Continent, as well as these new % Renewable bins. How many countries are in each of these groups?
#
# *This function should return a __Series__ with a MultiIndex of `Continent`, then the bins for `% Renewable`. Do not include groups with no countries.*


def answer_twelve():
    # Stub left by the author -- not yet implemented.
    Top15 = answer_one()
    return "ANSWER"


# ### Question 13 (6.6%)
# Convert the Population Estimate series to a string with thousands separator (using commas). Do not round the results.
#
# e.g. 317615384.61538464 -> 317,615,384.61538464
#
# *This function should return a Series `PopEst` whose index is the country name and whose values are the population estimate string.*

# +
DataBase["Population_esti"] = DataBase["Energy Supply"] / DataBase["Energy Supply per Capita"]
x = DataBase.fillna(0)
y = x["Population_esti"].to_frame()
# Thousands separator without rounding,
# e.g. 317615384.61538464 -> '317,615,384.61538464'.
# FIX: replaces the original broken line
#   "{0:0,0}".format(str(num) for num in y["Population_esti"]) ??
# which passed a generator to format() and left a stray `??` syntax error.
PopEst = y["Population_esti"].apply(lambda v: "{:,}".format(v))
PopEst
#format(y,',d')
# def answer_thirteen():
#     Top15 = answer_one()
#     return "ANSWER"
# -

# ### Optional
#
# Use the built in function `plot_optional()` to see an example visualization.
def plot_optional(): import matplotlib as plt # %matplotlib inline Top15 = answer_one() ax = Top15.plot(x='Rank', y='% Renewable', kind='scatter', c=['#e41a1c','#377eb8','#e41a1c','#4daf4a','#4daf4a','#377eb8','#4daf4a','#e41a1c', '#4daf4a','#e41a1c','#4daf4a','#4daf4a','#e41a1c','#dede00','#ff7f00'], xticks=range(1,16), s=6*Top15['2014']/10**10, alpha=.75, figsize=[16,6]); for i, txt in enumerate(Top15.index): ax.annotate(txt, [Top15['Rank'][i], Top15['% Renewable'][i]], ha='center') print("This is an example of a visualization that can be created to help understand the data. \ This is a bubble chart showing % Renewable vs. Rank. The size of the bubble corresponds to the countries' \ 2014 GDP, and the color corresponds to the continent.") # + #plot_optional() # Be sure to comment out plot_optional() before submitting the assignment!
Assignment+3.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- import numpy as np import pandas as pd import os import io import time import gc import random import torch from torch import nn from torch.utils import data from torch.nn import functional as F import matplotlib # + import torch current_cuda_device = -1 if torch.cuda.is_available(): current_cuda_device = torch.cuda.current_device() print(f'Is GPU used? (0=yes, -1=no): {current_cuda_device}') # - CRAWL_EMBEDDING_PATH = '/home/klazaridou/projects/jigsaw-unintended-bias-in-toxicity-classification/crawl-300d-2M.vec' GLOVE_EMBEDDING_PATH = '/home/klazaridou/projects/jigsaw-unintended-bias-in-toxicity-classification/glove.840B.300d.txt' def preprocess(data): ''' Credit goes to https://www.kaggle.com/gpreda/jigsaw-fast-compact-solution ''' punct = "/-'?!.,#$%\'()*+-/:;<=>@[\\]^_`{|}~`" + '""“”’' + '∞θ÷α•à−β∅³π‘₹´°£€\×™√²—–&' def clean_special_chars(text, punct): for p in punct: text = text.replace(p, ' ') return text data = data.astype(str).apply(lambda x: clean_special_chars(x, punct)) return data # load data train = pd.read_csv('/home/klazaridou/projects/jigsaw-unintended-bias-in-toxicity-classification/train.csv') test = pd.read_csv('/home/klazaridou/projects/jigsaw-unintended-bias-in-toxicity-classification/test.csv') test_private = pd.read_csv('/home/klazaridou/projects/jigsaw-unintended-bias-in-toxicity-classification/test_private_expanded.csv') test_public = pd.read_csv('/home/klazaridou/projects/jigsaw-unintended-bias-in-toxicity-classification/test_public_expanded.csv') # 
id,target,comment_text,severe_toxicity,obscene,identity_attack,insult,threat,asian,atheist,bisexual,black,buddhist,christian,female,heterosexual,hindu,homosexual_gay_or_lesbian,intellectual_or_learning_disability,jewish,latino,male,muslim,other_disability,other_gender,other_race_or_ethnicity,other_religion,other_sexual_orientation,physical_disability,psychiatric_or_mental_illness,transgender,white,created_date,publication_id,parent_id,article_id,rating,funny,wow,sad,likes,disagree,sexual_explicit,identity_annotator_count,toxicity_annotator_count print(f'Train and test shapes: {train.shape}, {test.shape}') print(f'Test private and test public shapes: {test_private.shape}, {test_public.shape}') # all features and binarized toxicity # preprocess text x_train = preprocess(train['comment_text']) x_test = preprocess(test['comment_text']) # get targets y_train = np.where(train['target'] >= 0.5, 1, 0) y_aux_train = train[['target', 'severe_toxicity', 'obscene', 'identity_attack', 'insult', 'threat']] print(f'y_train: {y_train}') print(f'y_aux_train: {y_aux_train}') print(f'Negative examples: {np.histogram(y_train)[0][0]}') print(f'Positive examples: {np.histogram(y_train)[0][9]}') # get fewer data for efficiency # print(f'Training data example row: {train.iloc[[2]]}') train_small = train.sample(n=100000, weights='target') targets = train_small['target'] print(f'Training small hist: {targets.hist(bins=2)}') print(f'Small training data shape: {targets.shape}') # preprocess text small dataset x_train = preprocess(train_small['comment_text']) # get targets for small dataset y_train = np.where(train_small['target'] >= 0.5, 1, 0) y_aux_train = train_small[['target', 'severe_toxicity', 'obscene', 'identity_attack', 'insult', 'threat']] print(f'y_train: {y_train}') print(f'y_aux_train: {y_aux_train}') print(f'Negative examples: {np.histogram(y_train)[0][0]}') print(f'Positive examples: {np.histogram(y_train)[0][9]}') # tokenize and vectorize text from keras.preprocessing import 
text, sequence # works with tensorflow>=2.7 tokenizer = text.Tokenizer() tokenizer.fit_on_texts(list(x_train) + list(x_test)) # fit both vocabularies x_train = tokenizer.texts_to_sequences(x_train) # translate into integers x_test = tokenizer.texts_to_sequences(x_test) x_train = sequence.pad_sequences(x_train, maxlen=MAX_LEN) # pad for balanced text length x_test = sequence.pad_sequences(x_test, maxlen=MAX_LEN) vocabulary = None vocabulary = vocabulary or len(tokenizer.word_index) + 1 print(f'words in vocabulary: {vocabulary}') # + # build embedding matrix from tqdm.notebook import tqdm_notebook as tqdm def get_coefs(word, *arr): return word, np.asarray(arr, dtype='float32') def load_embeddings(path): with open(path) as f: return dict(get_coefs(*line.strip().split(' ')) for line in tqdm(f)) def build_matrix(word_index, path): embedding_index = load_embeddings(path) embedding_matrix = np.zeros((len(word_index) + 1, 300)) unknown_words = [] for word, i in word_index.items(): try: embedding_matrix[i] = embedding_index[word] except KeyError: unknown_words.append(word) return embedding_matrix, unknown_words # - crawl_matrix, unknown_words_crawl = build_matrix(tokenizer.word_index, CRAWL_EMBEDDING_PATH) print('unknown words (crawl): ', len(unknown_words_crawl)) print(f'crawl_matrix: {crawl_matrix.shape}') counter = 0 for word, i in tokenizer.word_index.items(): print(f'i: {word}: embedding len: {len(crawl_matrix[i])} ') counter += 1 if counter == 1: break; glove_matrix, unknown_words_glove = build_matrix(tokenizer.word_index, GLOVE_EMBEDDING_PATH) print('unknown words (glove): ', len(unknown_words_glove)) print(f'glove_matrix: {glove_matrix.shape}') counter = 0 for word, i in tokenizer.word_index.items(): print(f'i: {word}: embedding len: {len(glove_matrix[i])} ') counter += 1 if counter == 1: break; embedding_matrix = np.concatenate([crawl_matrix, glove_matrix], axis=-1) # TODO: pad not common words because length is 300 print(f'concatanated matrix: 
{embedding_matrix.shape}') del crawl_matrix del glove_matrix gc.collect() counter = 0 for word, i in tokenizer.word_index.items(): print(f'i: {word}: embedding len: {len(embedding_matrix[i])} ') counter += 1 if counter == 1: break; NUM_MODELS = 1 LSTM_UNITS = 2 DENSE_HIDDEN_UNITS = 4 * LSTM_UNITS MAX_LEN = 50 # + # define model class SpatialDropout(nn.Dropout2d): def forward(self, x): x = x.unsqueeze(2) # (N, T, 1, K) x = x.permute(0, 3, 2, 1) # (N, K, 1, T) x = super(SpatialDropout, self).forward(x) # (N, K, 1, T), some features are masked x = x.permute(0, 3, 2, 1) # (N, T, 1, K) x = x.squeeze(2) # (N, T, K) return x class NeuralNet(nn.Module): def __init__(self, embedding_matrix, num_aux_targets): super(NeuralNet, self).__init__() embed_size = embedding_matrix.shape[1] self.embedding = nn.Embedding(vocabulary, embed_size) self.embedding.weight = nn.Parameter(torch.tensor(embedding_matrix, dtype=torch.float32)) self.embedding.weight.requires_grad = False self.embedding_dropout = SpatialDropout(0.3) self.lstm1 = nn.LSTM(embed_size, LSTM_UNITS, bidirectional=True, batch_first=True) self.lstm2 = nn.LSTM(LSTM_UNITS * 2, LSTM_UNITS, bidirectional=True, batch_first=True) self.linear1 = nn.Linear(DENSE_HIDDEN_UNITS, DENSE_HIDDEN_UNITS) self.linear2 = nn.Linear(DENSE_HIDDEN_UNITS, DENSE_HIDDEN_UNITS) self.linear_out = nn.Linear(DENSE_HIDDEN_UNITS, 1) self.linear_aux_out = nn.Linear(DENSE_HIDDEN_UNITS, num_aux_targets) def forward(self, x): h_embedding = self.embedding(x) h_embedding = self.embedding_dropout(h_embedding) h_lstm1, _ = self.lstm1(h_embedding) h_lstm2, _ = self.lstm2(h_lstm1) # global average pooling avg_pool = torch.mean(h_lstm2, 1) # global max pooling max_pool, _ = torch.max(h_lstm2, 1) h_conc = torch.cat((max_pool, avg_pool), 1) h_conc_linear1 = F.relu(self.linear1(h_conc)) h_conc_linear2 = F.relu(self.linear2(h_conc)) hidden = h_conc + h_conc_linear1 + h_conc_linear2 result = self.linear_out(hidden) aux_result = self.linear_aux_out(hidden) out = 
torch.cat([result, aux_result], 1) return out # - # transform to tensors and then datasets x_train_torch = torch.tensor(x_train, dtype=torch.long) x_test_torch = torch.tensor(x_test, dtype=torch.long) y_train_torch = torch.tensor(np.hstack([y_train[:, np.newaxis], y_aux_train]), dtype=torch.float32) print(f'y_train_torch: {y_train_torch}') train_dataset = data.TensorDataset(x_train_torch, y_train_torch) test_dataset = data.TensorDataset(x_test_torch) # + # training def sigmoid(x): return 1 / (1 + np.exp(-x)) def train_model(model, train, test, loss_fn, output_dim, lr=0.001, batch_size=32, n_epochs=2, enable_checkpoint_ensemble=True): # lr=0.001, batch_size=512, n_epochs=4 param_lrs = [{'params': param, 'lr': lr} for param in model.parameters()] optimizer = torch.optim.Adam(param_lrs, lr=lr) scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lambda epoch: 0.6 ** epoch) train_loader = torch.utils.data.DataLoader(train, batch_size=batch_size, shuffle=True) test_loader = torch.utils.data.DataLoader(test, batch_size=batch_size, shuffle=False) all_test_preds = [] checkpoint_weights = [2 ** epoch for epoch in range(n_epochs)] for epoch in range(n_epochs): start_time = time.time() scheduler.step() model.train() avg_loss = 0. 
for data in tqdm(train_loader, disable=False): x_batch = data[:-1] y_batch = data[-1] y_pred = model(*x_batch) loss = loss_fn(y_pred, y_batch) optimizer.zero_grad() loss.backward() optimizer.step() avg_loss += loss.item() / len(train_loader) model.eval() test_preds = np.zeros((len(test), output_dim)) for i, x_batch in enumerate(test_loader): y_pred = sigmoid(model(*x_batch).detach().cpu().numpy()) test_preds[i * batch_size:(i+1) * batch_size, :] = y_pred all_test_preds.append(test_preds) elapsed_time = time.time() - start_time print('Epoch {}/{} \t loss={:.4f} \t time={:.2f}s'.format( epoch + 1, n_epochs, avg_loss, elapsed_time)) if enable_checkpoint_ensemble: test_preds = np.average(all_test_preds, weights=checkpoint_weights, axis=0) else: test_preds = all_test_preds[-1] return test_preds # + all_test_preds = [] def seed_everything(seed=1234): random.seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.deterministic = True for model_idx in range(NUM_MODELS): print('Model ', model_idx) seed_everything(1234 + model_idx) model = NeuralNet(embedding_matrix, y_aux_train.shape[-1]) # model.cuda() test_preds = train_model(model, train_dataset, test_dataset, output_dim=y_train_torch.shape[-1], loss_fn=nn.BCEWithLogitsLoss(reduction='mean')) all_test_preds.append(test_preds) print() # -
bert.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns url = "https://raw.githubusercontent.com/Anasuya-Sahoo/DMDW-Lab/main/student-mat.csv" data = pd.read_csv(url) data.head() # ## 1. Evaluate the covariance and correlation of by using any two columns df = data[['traveltime','studytime']] df.head() covariance = df.cov() covariance sns.heatmap(covariance,cmap ='YlGnBu') plt.show() correlation = df.corr() correlation sns.heatmap(correlation,cmap='BuGn_r') plt.show() # ## 2. Implement chi square test dfnom = data[['Mjob','Fjob']] dfnom.head() dfnom = dfnom.replace('at_home','home') dfnom.astype('category') dfnom.info() from sklearn.preprocessing import LabelEncoder lb = LabelEncoder() dfnom['Mjob'] = lb.fit_transform(dfnom['Mjob']) dfnom['Fjob'] = lb.fit_transform(dfnom['Fjob']) dfnom = np.array(dfnom) dfnom=dfnom[dfnom!=0] from scipy.stats import chi2_contingency stat, p, dof, expected = chi2_contingency(dfnom) stat p dof expected expected.shape # ## 3. Implement different types of normalization data.head() age=np.array(data['age']) print('MAX AGE',max(age)) print('MIN AGE',min(age)) age=age.reshape(395,1) from scipy import stats zscore = np.array(stats.zscore(age)) zscore = zscore[0:394] zscore = zscore.reshape(2,197) zscore result = [] result.append(age/pow(10,2)) result = np.array(result) result
Assignment-6/assignment-6.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Build a CSV of the most frequent nouns and predicates appearing in the
# scraped text for one exercise, using Mecab (nouns) and Okt (adjectives/verbs).

# +
import pandas as pd
import numpy as np

df = pd.read_csv("./word2vec_wrangling.csv")
exercise_to_loop = df["exercise_name"].to_list()
# -

exercise_name = "바차타"
file_name = "#" + exercise_name + "_sum.txt"
file_1 = "/Users/noopy/FitCuration/" + file_name
text = open(file_1, 'r', -1, "UTF-8", errors="ignore").read()
text[:1000]

# +
# -*- coding: utf-8 -*-
import re


def preprocessing_hangul(text):
    """Keep only Hangul characters and spaces; everything else is stripped."""
    hangul = re.compile('[^ ㄱ-ㅣ가-힣]+')
    return hangul.sub('', text)


clean_text = preprocessing_hangul(text)
clean_text[:1000]

# +
# Extract nouns with Mecab.
from konlpy.tag import Mecab

mecab = Mecab()
noun_list_mecab = mecab.nouns(clean_text)

# Domain stopwords to drop before counting (kept byte-for-byte).
stopwords_mecab = ['수', '퀄리티', '도시', '분', '전문', '스타', '년', '원',
                   '월', '화', '수', '목', '금', '시', '앤', '일', '그램', '문']
clean_noun_list_mecab = [n for n in noun_list_mecab if n not in stopwords_mecab]

# Top 100 most common nouns.
from collections import Counter

nouns_mecab = Counter(clean_noun_list_mecab)
tags_mecab = nouns_mecab.most_common(100)
tags_mecab

# +
# Extract predicates (adjectives/verbs) with Okt (updated Twitter tagger).
# Reference: https://m.blog.naver.com/imsam77/221260229647
from konlpy.tag import Okt
from collections import Counter

okt = Okt()

# Part-of-speech tag every token of the cleaned text.
morphed_list_okt = okt.pos(clean_text)

# Generic/filler predicates to exclude (kept byte-for-byte).
stopwords_Twitter = ["입니다", "있는", "있습니다", "같은", "안녕하세요", "고마워요", "있어요", "있게",
                     "있도록", "부탁드립니다", "하는", "합니다", "할", "하세요", "하기", "해", "됩니다", "하여",
                     '잘', '된', '되고', '되어', '되었습니다', "없는", "드립니다",
                     "되기", "하시는", "하고", "않을", "같다", "싶다", "이런", "저런", "그런", '바랍니다',
                     "했습니다", "했다", "해드립니다", "하신", "하실", "않고", "해요", "가능합니다", "하고싶으신",
                     "않으며", "주세요", "오세요"]

# Keep only adjective/verb tokens that are not stopwords.
adj_list_okt = [word for word, tag in morphed_list_okt
                if tag in ['Adjective', 'Verb'] and word not in stopwords_Twitter]

# Count and report the 100 most common predicates.
adj_counts_okt = Counter(adj_list_okt)
common_adj_okt = adj_counts_okt.most_common(100)
common_adj_okt
# -

# +
# list_noun_adj_combined = tags_mecab + common_adj_okt
# list_noun_adj_combined
# -

import pandas as pd
from pandas.api.types import CategoricalDtype  # for sorting values when graphing
import numpy as np

print(pd.__version__)
print(np.__version__)

# NOTE(review): np.array over (word, count) tuples yields a string array, so
# np.sort(axis=1) orders each row lexicographically — kept as originally written.
noun_df = pd.DataFrame(np.sort(np.array(tags_mecab), axis=1),
                       columns=[exercise_name + ' 등장횟수', '관련명사'])
adj_df = pd.DataFrame(np.sort(np.array(common_adj_okt), axis=1),
                      columns=[exercise_name + ' 등장횟수', '관련서술어'])

combined_df = pd.concat([noun_df, adj_df], axis=1)
combined_df

combined_df.to_csv(exercise_name + '.csv', index=False, header=True)
1.make-csv-of-frequency-nouns-and-adjs.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <h3>Simulación matemática 2018 </h3> # <div style="background-color:#0099cc;"> # <font color = white> # <ul> # <li><NAME> </li> # <li>Email: `<EMAIL>, <EMAIL>`</li> # </ul> # </font> # </div> # <!--NAVIGATION--> # < [Mapa Logístico](Clase9_MapaLogistico.ipynb) | [Guía](Clase0_GuiaSimulacionM.ipynb) | [Oscilador amortiguado](Clase10_OsciladorAmortiguado.ipynb) > # ___ # # ¿Cómo se mueve un péndulo? # <img style="float: right; margin: 0px 0px 15px 15px;" src="https://qph.ec.quoracdn.net/main-qimg-f7a6d0342e57b06d46506e136fb7d437-c" width="225px" height="50px" /> # > Se dice que un sistema cualquiera, mecánico, eléctrico, neumático, etc., es un oscilador armónico si, cuando se deja en libertad fuera de su posición de equilibrio, vuelve hacia ella describiendo oscilaciones sinusoidales, o sinusoidales amortiguadas en torno a dicha posición estable. # - https://es.wikipedia.org/wiki/Oscilador_armónico # Referencias: # - http://matplotlib.org # - https://seaborn.pydata.org # - http://www.numpy.org # - http://ipywidgets.readthedocs.io/en/latest/index.html # **En realidad esto es el estudio de oscilaciones. ** from IPython.display import YouTubeVideo YouTubeVideo('k5yTVHr6V14') # Los sistemas mas sencillos a estudiar en oscilaciones son el sistema ` masa-resorte` y el `péndulo simple`. 
# <div> # <img style="float: left; margin: 0px 0px 15px 15px;" src="https://upload.wikimedia.org/wikipedia/commons/7/76/Pendulum.jpg" width="150px" height="50px" /> # <img style="float: right; margin: 15px 15px 15px 15px;" src="https://upload.wikimedia.org/wikipedia/ko/9/9f/Mass_spring.png" width="200px" height="100px" /> # </div> # # \begin{align} # \frac{d^2 x}{dt^2} + \omega_{0}^2 x &= 0, \quad \omega_{0} = \sqrt{\frac{k}{m}}\notag\\ # \frac{d^2 \theta}{dt^2} + \omega_{0}^{2}\, \theta &= 0, \quad\mbox{donde}\quad \omega_{0}^2 = \frac{g}{l} # \end{align} # ___ # ## Sistema `masa-resorte` # La solución a este sistema `masa-resorte` se explica en términos de la segunda ley de Newton. Para este caso, si la masa permanece constante y solo consideramos la dirección en $x$. Entonces, # \begin{equation} # F = m \frac{d^2x}{dt^2}. # \end{equation} # # ¿Cuál es la fuerza? ** <NAME>! ** # \begin{equation} # F = -k x, \quad k > 0. # \end{equation} # # Vemos que la fuerza se opone al desplazamiento y su intensidad es proporcional al mismo. Y $k$ es la constante elástica o recuperadora del resorte. # # Entonces, un modelo del sistema `masa-resorte` está descrito por la siguiente **ecuación diferencial**: # # \begin{equation} # \frac{d^2x}{dt^2} + \frac{k}{m}x = 0, # \end{equation} # # cuya solución se escribe como # \begin{equation} # x(t) = A \cos(\omega_{o} t) + B \sin(\omega_{o} t) # \end{equation} # Y su primera derivada (velocidad) sería # \begin{equation} # \frac{dx(t)}{dt} = \omega_{0}[- A \sin(\omega_{0} t) + B\cos(\omega_{0}t)] # \end{equation} # # # ### **¿Cómo se ven las gráficas de $x$ vs $t$ y $\frac{dx}{dt}$ vs $t$?** # _Esta instrucción es para que las gráficas aparezcan dentro de este entorno._ # %matplotlib inline # _Esta es la librería con todas las instrucciones para realizar gráficos. 
_ import matplotlib.pyplot as plt import matplotlib as mpl label_size = 14 mpl.rcParams['xtick.labelsize'] = label_size mpl.rcParams['ytick.labelsize'] = label_size # _Y esta es la librería con todas las funciones matemáticas necesarias._ import numpy as np # + # Definición de funciones a graficar A, B, w0 = .5, .1, .5 # Parámetros t = np.linspace(0, 50, 100) # Creamos vector de tiempo de 0 a 50 con 100 puntos x = A*np.cos(w0*t)+B*np.sin(w0*t) # Función de posición dx = w0*(-A*np.sin(w0*t)+B*np.cos(w0*t)) # Función de velocidad # Gráfico plt.figure(figsize = (7, 4)) # Ventana de gráfica con tamaño plt.plot(t, x, '-', lw = 1, ms = 1, label = '$x(t)$') # Explicación plt.plot(t, dx, 'ro-', lw = 1, ms = 4, label = r'$\dot{x}(t)$') plt.legend(loc='best') plt.xlabel('$t$', fontsize = 20) # Etiqueta eje x plt.show() # + # Colores, etiquetas y otros formatos plt.figure(figsize = (7, 4)) plt.scatter(t, x, lw = 0, c = 'red', label = '$x(t)$') # Gráfica con puntos plt.plot(t, x, 'r-', lw = 1) # Grafica normal plt.scatter(t, dx, lw = 0, c = 'b', label = r'$\frac{dx}{dt}$') # Con la r, los backslash se tratan como un literal, no como un escape plt.plot(t, dx, 'b-', lw = 1) plt.xlabel('$t$', fontsize = 20) plt.legend(loc = 'best') # Leyenda con las etiquetas de las gráficas plt.show() # - # Y si consideramos un conjunto de frecuencias de oscilación, entonces # + frecuencias = np.array([.1, .2 , .5, .6]) # Vector de diferentes frecuencias plt.figure(figsize = (7, 4)) # Ventana de gráfica con tamaño # Graficamos para cada frecuencia for w0 in frecuencias: x = A*np.cos(w0*t)+B*np.sin(w0*t) plt.plot(t, x, 'D-') plt.xlabel('$t$', fontsize = 16) # Etiqueta eje x plt.ylabel('$x(t)$', fontsize = 16) # Etiqueta eje y plt.title('Oscilaciones', fontsize = 16) # Título de la gráfica plt.show() # - # Estos colores, son el default de `matplotlib`, sin embargo existe otra librería dedicada, entre otras cosas, a la presentación de gráficos. 
import seaborn as sns sns.set(style='ticks', palette='Set2') frecuencias = np.array([.1, .2 , .5, .6]) plt.figure(figsize = (7, 4)) for w0 in frecuencias: x = A*np.cos(w0*t)+B*np.sin(w0*t) plt.plot(t, x, 'o-', label = '$\omega_0 = %s$'%w0) # Etiqueta cada gráfica con frecuencia correspondiente (conversion float a string) plt.xlabel('$t$', fontsize = 16) plt.ylabel('$x(t)$', fontsize = 16) plt.title('Oscilaciones', fontsize = 16) plt.legend(loc='center left', bbox_to_anchor=(1.05, 0.5), prop={'size': 10}) plt.show() # Si queremos tener manipular un poco mas las cosas, hacemos uso de lo siguiente: from ipywidgets import * # + def masa_resorte(t = 0): A, B, w0 = .5, .1, .5 # Parámetros x = A*np.cos(w0*t)+B*np.sin(w0*t) # Función de posición fig = plt.figure() ax = fig.add_subplot(1, 1, 1) ax.plot(x, [0], 'ko', ms = 10) ax.set_xlim(xmin = -0.6, xmax = .6) ax.axvline(x=0, color = 'r') ax.axhline(y=0, color = 'grey', lw = 1) fig.canvas.draw() interact(masa_resorte, t = (0, 50,.01)); # - # La opción de arriba generalmente será lenta, así que lo recomendable es usar `interact_manual`. # + def masa_resorte(t = 0): A, B, w0 = .5, .1, .5 # Parámetros x = A*np.cos(w0*t)+B*np.sin(w0*t) # Función de posición fig = plt.figure() ax = fig.add_subplot(1, 1, 1) ax.plot(x, [0], 'ko', ms = 10) ax.set_xlim(xmin = -0.6, xmax = .6) ax.axvline(x=0, color = 'r') ax.axhline(y=0, color = 'grey', lw = 1) fig.canvas.draw() interact_manual(masa_resorte, t = (0, 50,.01)); # - # ___ # ## Péndulo simple # Ahora, si fijamos nuestra atención al movimiento de un péndulo simple _(oscilaciones pequeñas)_, la ecuación diferencial a resolver tiene la misma forma: # # \begin{equation} # \frac{d^2 \theta}{dt^2} + \omega_{0}^{2}\, \theta = 0, \quad\mbox{donde}\quad \omega_{0}^2 = \frac{g}{l}. # \end{equation} # # La diferencia más evidente es como hemos definido a $\omega_{0}$. 
Esto quiere decir que, # # \begin{equation} # \theta(t) = A\cos(\omega_{0} t) + B\sin(\omega_{0}t) # \end{equation} # # Si graficamos la ecuación de arriba vamos a encontrar un comportamiento muy similar al ya discutido anteriormente. Es por ello que ahora veremos el movimiento en el plano $xy$. Es decir, # # \begin{align} # x &= l \sin(\theta), \quad # y = l \cos(\theta) # \end{align} # Podemos definir una función que nos entregue theta dados los parámetros y el tiempo def theta_t(a, b, g, l, t): omega_0 = np.sqrt(g/l) return a * np.cos(omega_0 * t) + b * np.sin(omega_0 * t) # + # Hacemos un gráfico interactivo del péndulo def pendulo_simple(t = 0): fig = plt.figure(figsize = (5,5)) ax = fig.add_subplot(1, 1, 1) x = 2 * np.sin(theta_t(.4, .6, 9.8, 2, t)) y = - 2 * np.cos(theta_t(.4, .6, 9.8, 2, t)) ax.plot(x, y, 'ko', ms = 10) ax.plot([0], [0], 'rD') ax.plot([0, x ], [0, y], 'k-', lw = 1) ax.set_xlim(xmin = -2.2, xmax = 2.2) ax.set_ylim(ymin = -2.2, ymax = .2) fig.canvas.draw() interact_manual(pendulo_simple, t = (0, 10,.01)); # - # ### Condiciones iniciales # Realmente lo que se tiene que resolver es, # # \begin{equation} # \theta(t) = \theta(0) \cos(\omega_{0} t) + \frac{\dot{\theta}(0)}{\omega_{0}} \sin(\omega_{0} t) # \end{equation} # > **Actividad.** Modificar el programa anterior para incorporar las condiciones iniciales. 
# Solución: def theta_t(theta_0,dtheta_0,g,l,t): omega_0 = np.sqrt(g/l) return theta_0 * np.cos(omega_0 * t) + dtheta_0/omega_0 * np.sin(omega_0 * t) def pendulo_simple(t = 0): fig = plt.figure(figsize = (5,5)) ax = fig.add_subplot(1, 1, 1) x = 2 * np.sin(theta_t(0.1, 0, 9.8, 2, t)) y = - 2 * np.cos(theta_t(0.1, 0, 9.8, 2, t)) ax.plot(x, y, 'ko', ms = 10) ax.plot([0], [0], 'rD') ax.plot([0, x ], [0, y], 'k-', lw = 1) ax.set_xlim(xmin = -2.2, xmax = 2.2) ax.set_ylim(ymin = -2.2, ymax = .2) fig.canvas.draw() interact_manual(pendulo_simple, t = (0, 10,.01)); # ### Espacio fase $(x, \frac{dx}{dt})$ # La posición y velocidad para el sistema `masa-resorte` se escriben como: # \begin{align} # x(t) &= x(0) \cos(\omega_{o} t) + \frac{\dot{x}(0)}{\omega_{0}} \sin(\omega_{o} t)\\ # \dot{x}(t) &= -\omega_{0}x(0) \sin(\omega_{0} t) + \dot{x}(0)\cos(\omega_{0}t)] # \end{align} k = 3 #constante elástica [N]/[m] m = 1 # [kg] omega_0 = np.sqrt(k/m) x_0 = .5 dx_0 = .1 t = np.linspace(0, 15, 300) x_t = x_0 *np.cos(omega_0 *t) + (dx_0/omega_0) * np.sin(omega_0 *t) dx_t = -omega_0 * x_0 * np.sin(omega_0 * t) + dx_0 * np.cos(omega_0 * t) plt.figure(figsize = (7, 4)) plt.plot(t, x_t, label = '$x(t)$', lw = 1) #plt.plot(t, dx_t, label = '$\dot{x}(t)$', lw = 1) plt.plot(t, dx_t/omega_0, label = '$\dot{x}(t)$', lw = 1) # Mostrar que al escalar, la amplitud queda igual plt.legend(loc='center left', bbox_to_anchor=(1.01, 0.5), prop={'size': 14}) plt.xlabel('$t$', fontsize = 18) plt.show() plt.figure(figsize = (5, 5)) plt.plot(x_t, dx_t/omega_0, 'ro', ms = 2) plt.xlabel('$x(t)$', fontsize = 18) plt.ylabel('$\dot{x}(t)/\omega_0$', fontsize = 18) plt.show() plt.figure(figsize = (5, 5)) plt.scatter(x_t, dx_t/omega_0, cmap = 'viridis', c = dx_t, s = 8, lw = 0) plt.xlabel('$x(t)$', fontsize = 18) plt.ylabel('$\dot{x}(t)/\omega_0$', fontsize = 18) plt.show() # #### Multiples condiciones iniciales k = 3 #constante elástica [N]/[m] m = 1 # [kg] omega_0 = np.sqrt(k/m) t = np.linspace(0, 50, 50) x_0s = 
np.array([.7, .5, .25, .1]) dx_0s = np.array([.2, .1, .05, .01]) cmaps = np.array(['viridis', 'inferno', 'magma', 'plasma']) plt.figure(figsize = (6, 6)) for indx, x_0 in enumerate(x_0s): x_t = x_0 *np.cos(omega_0 *t) + (dx_0s[indx]/omega_0) * np.sin(omega_0 *t) dx_t = -omega_0 * x_0 * np.sin(omega_0 * t) + dx_0s[indx] * np.cos(omega_0 * t) plt.scatter(x_t, dx_t/omega_0, cmap = cmaps[indx], c = dx_t, s = 10, lw = 0) plt.xlabel('$x(t)$', fontsize = 18) plt.ylabel('$\dot{x}(t)/\omega_0$', fontsize = 18) #plt.legend(loc='center left', bbox_to_anchor=(1.05, 0.5)) # Trayectorias del oscilador armónico simple en el espacio fase $(x,\, \dot{x}\,/\omega_0)$ para diferentes valores de la energía. # > **Actividad... ** # considerar un pendulo simple en diferentes planetas, y graficar su espacio fase. # <!--NAVIGATION--> # < [Mapa Logístico](Clase9_MapaLogistico.ipynb) | [Guía](Clase0_GuiaSimulacionM.ipynb) | [Oscilador amortiguado](Clase10_OsciladorAmortiguado.ipynb) > # ___ # <script> # $(document).ready(function(){ # $('div.prompt').hide(); # $('div.back-to-top').hide(); # $('nav#menubar').hide(); # $('.breadcrumb').hide(); # $('.hidden-print').hide(); # }); # </script> # # <footer id="attribution" style="float:right; color:#808080; background:#fff;"> # Created with Jupyter by <NAME>. # <Strong> Copyright: </Strong> Public Domain como en [CC](https://creativecommons.org/licenses/by/2.0/) (Exepto donde se indique lo contrario) # # # </footer>
Modulo2/Clase10_OsciladorArmonico.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Action and Event Extractor # ## Extract essential information from Raw text to create a Narrative in Situation Calculus # # We need to create a factbase from the information contained in the Raw text data. # ## Load text file def read_file(file_name): with open(file_name, 'r') as file: return file.read() # ## Process full text # # Need to process multiple large books. # ## Parse and store sentences. # + import spacy nlp = spacy.load('en_core_web_lg') # Process `text` with Spacy NLP Parser text = read_file('/Users/anirudhprabhu/PycharmProjects/novelWriter/Skeleton/docs/books_txt/Fantasy/465179.txt') # + processed_text = nlp(text) #print(processed_text) spacy.displacy.serve(processed_text,style = "dep") # - for token in processed_text: print(token.text_with_ws) # + # How many sentences are in the book (Pride & Prejudice)? sentences = [s for s in processed_text.sents] print(len(sentences)) # Print sentences from index 10 to index 15, to make sure that we have parsed the correct book print(sentences[50:55]) # - # ## Steps taken for Extraction of Actions : # # ### (Based on Han's Relation Extraction) # # * Run Dependency Parsing # * Extract Root # * If Root is a finite verb, then the Verb is the action word. # * If Root is a noun # + sent_index = [] i = 1 for token in processed_text.sents: print(token) i += 1 #if token.dep_ == 'ROOT' and token.head.pos_ == 'VERB' : # sent_index.append(i) #print(sent_index) #for q in sent_index: # print(processed_text.sents[s]) sentences = [s for s in processed_text.sents] print(len(sentences)) for index in sent_index: print(sentences[sent_index[index]]) # + # Extract all the personal names from Pride & Prejudice and count their occurrences. 
# Expected output is a list in the following form: [('elizabeth', 622), ('darcy', 312), ('jane', 286), ('bennet', 266) ...]. from collections import Counter, defaultdict test = "The birds and the beasts were there" processed_test = nlp(test) def extract_finite_verb(doc): characters_verb = Counter() i = 1 for token in processed_text: #print(token.pos_) i += 1 if token.dep_ == 'ROOT' and token.head.pos_ == 'VERB' : if token.dep_ == "prt" and token.head.pos_ == "VERB" : verb = token.head.orth_ particle = token.orth_ characters_verb[verb + '-' + particle] += 1 characters_verb[token.text] += 1 #if (token.pos_ == 'CONJ') : #print(i) return characters_verb def extract_finite_noun(doc): characters_noun = Counter() for token in processed_text: #print(token.pos_) if token.dep_ == 'ROOT' and token.pos_ == 'NOUN' : characters_noun[token.text] += 1 #print([child for child in token.children], token.text) if (token.pos_ == 'CONJ') : print('There is a conjunction') #print(token.text) return characters_noun def extract_finite_adj(doc): characters_adj = Counter() for token in processed_text: #print(token.pos_) if token.dep_ == 'ROOT' and token.pos_ == 'ADJ' : characters_adj[token.lemma_] += 1 return characters_adj #print(processed_text.sents[sent_index[6]]) print(extract_finite_verb(processed_text)) #print("\n") #print(extract_finite_noun(processed_text)) #print("\n") #print(extract_finite_adj(processed_text)) #print("\n") # + def phrasal_verb_recognizer() : for token in processed_text : if token.dep_ == "prt" and token.head.pos_ == "VERB": verb = token.head.orth_ particle = token.orth_ print(verb + '-' + particle) #print(phrasal_verb_recognizer(processed_text)) phrasal_verb_recognizer() # - # ## Plot characters personal names as a time series # + # Matplotlib Jupyter HACK # %matplotlib inline import matplotlib import numpy as np import matplotlib.pyplot as plt # + # Plot characters' mentions as a time series relative to the position of the actor's occurrence in a book. 
def get_character_offsets(doc): """ For every character in a `doc` collect all the occurences offsets and store them into a list. The function returns a dictionary that has actor lemma as a key and list of occurences as a value for every character. :param doc: Spacy NLP parsed document :return: dict object in form {'elizabeth': [123, 543, 4534], 'darcy': [205, 2111]} """ character_offsets = defaultdict(list) for ent in doc.ents: if ent.label_ == 'PERSON': character_offsets[ent.lemma_].append(ent.start) return dict(character_offsets) character_occurences = get_character_offsets(processed_text) # + from matplotlib.pyplot import hist from cycler import cycler NUM_BINS = 10 def normalize(occurencies, normalization_constant): return [o / float(len(processed_text)) for o in occurencies] def plot_character_timeseries(character_offsets, character_labels, normalization_constant=None): """ Plot characters' personal names specified in `character_labels` list as time series. :param character_offsets: dict object in form {'elizabeth': [123, 543, 4534], 'darcy': [205, 2111]} :param character_labels: list of strings that should match some of the keys in `character_offsets` :param normalization_constant: int """ x = [character_offsets[character_label] for character_label in character_labels] with plt.style.context('fivethirtyeight'): plt.figure() n, bins, patches = plt.hist(x, NUM_BINS, label=character_labels) plt.clf() ax = plt.subplot(111) for i, a in enumerate(n): ax.plot([float(x) / (NUM_BINS - 1) for x in range(len(a))], a, label=character_labels[i]) matplotlib.rcParams['axes.prop_cycle'] = cycler(color=['r','k','c','b','y','m','g','#54a1FF']) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) #plot_character_timeseries(character_occurences, ['darcy', 'bingley'], normalization_constant=len(processed_text)) plot_character_timeseries(character_occurences, ['penzias', 'moon']) # - # ## Spacy parse tree in action # + # Find words (adjectives) that describe Mr. Darcy. 
def get_character_adjectives(doc, character_lemma): """ Find all the adjectives related to `character_lemma` in `doc` :param doc: Spacy NLP parsed document :param character_lemma: string object :return: list of adjectives related to `character_lemma` """ adjectives = [] for ent in processed_text.ents: if ent.lemma_ == character_lemma: for token in ent.subtree: if token.pos_ == 'ADJ': # Replace with if token.dep_ == 'amod': #adjectives.append(token.lemma_) print('hi') for ent in processed_text.ents: if ent.lemma_ == character_lemma: if ent.root.dep_ == 'nsubj': for child in ent.root.head.children: if child.dep_ == 'acomp': adjectives.append(child.lemma_) return adjectives print(get_character_adjectives(processed_text, 'penzias')) # + # Find characters that are 'talking', 'saying', 'doing' the most. Find the relationship between # entities and corresponding root verbs. character_verb_counter = Counter() VERB_LEMMA = 'count' for ent in processed_text.ents: if ent.label_ == 'PERSON' and ent.root.head.lemma_ == VERB_LEMMA: character_verb_counter[ent.text] += 1 print(character_verb_counter.most_common(10)) # Find all the characters that got married in the book. # # Here is an example sentence from which this information could be extracted: # # "her mother was talking to that one person (<NAME>) freely, # openly, and of nothing else but her expectation that Jane would soon # be married to Mr. Bingley." # # - # ## Extract Keywords # + # Extract Keywords using noun chunks from the news article (file 'article.txt'). # Spacy will pick some noun chunks that are not informative at all (e.g. we, what, who). # Try to find a way to remove non informative keywords. article = read_file('Data/article.txt') doc = nlp(article) keywords = Counter() for chunk in doc.noun_chunks: if nlp.vocab[chunk.lemma_].prob < - 8: # probablity value -8 is arbitrarily selected threshold keywords[chunk.lemma_] += 1 keywords.most_common(20) # - help('modules') processed
Scripts/AnE Extractor-Copy1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# A notebook for Tweet Sentiment Analysis: fetch recent tweets for a query,
# clean them, classify each with a pre-trained TF-IDF + classifier pair,
# and plot the positive/negative counts.

import tweepy
import re
import pickle
import matplotlib.pyplot as plt
import numpy as np
from tweepy import OAuthHandler  # authenticates our client machine with the Twitter server

# Twitter API credentials (entered as strings; unique per app).
consumer_key = 'fyYq7oF0KxkUdeI2zwGCKV8GQ'
consumer_secret = '<KEY>'
access_token = '<KEY>'
access_secret = '<KEY>'

# Authenticate the client.
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_secret)

# Search term(s).
args = ['Mask']
api = tweepy.API(auth, timeout=10)  # stop looking after 10 s if no tweets are found

# Collect up to 100 recent English tweets (retweets excluded).
list_tweets = []
query = args[0]
if len(args) == 1:
    for status in tweepy.Cursor(api.search, q=query + "-filter:retweets",
                                lang='en', result_type='recent',
                                tweet_mode='extended').items(100):
        list_tweets.append(status.full_text)  # status is a JSON object in Python
list_tweets

# Load the fitted TF-IDF vectorizer and the sentiment classifier.
with open('tfidfmodel.pickle', 'rb') as f:
    vectorizer = pickle.load(f)
with open('classifier.pickle', 'rb') as f:
    clf = pickle.load(f)

# Sanity check that the pipeline works.
clf.predict(vectorizer.transform(['You are terrible Mr!']))


def _clean_tweet(tweet):
    """Normalize one raw tweet: strip t.co links, lowercase, expand common
    contractions, and collapse non-word characters/digits/stray letters to spaces.
    """
    # Strip links.  FIX: the character class was '[a-zA-Z09]', which missed
    # digits 1-8; the leading pattern also now accepts https like the others.
    tweet = re.sub(r"^https?://t.co/[a-zA-Z0-9]*\s", " ", tweet)
    tweet = re.sub(r"\s+https://t.co/[a-zA-Z0-9]*\s", " ", tweet)
    tweet = re.sub(r"\s+https://t.co/[a-zA-Z0-9]*$", " ", tweet)
    tweet = tweet.lower()
    # Expand contractions so they match the training vocabulary.
    tweet = re.sub(r"that's", "that is", tweet)
    tweet = re.sub(r"there's", "there is", tweet)
    tweet = re.sub(r"what's", "what is", tweet)
    tweet = re.sub(r"where's", "where is", tweet)
    tweet = re.sub(r"it's", "it is", tweet)
    tweet = re.sub(r"who's", "who is", tweet)
    tweet = re.sub(r"i'm", "i am", tweet)
    tweet = re.sub(r"she's", "she is", tweet)
    tweet = re.sub(r"he's", "he is", tweet)
    tweet = re.sub(r"they're", "they are", tweet)
    tweet = re.sub(r"who're", "who are", tweet)
    tweet = re.sub(r"ain't", "am not", tweet)
    tweet = re.sub(r"wouldn't", "would not", tweet)
    tweet = re.sub(r"can't", "can not", tweet)
    tweet = re.sub(r"couldn't", "could not", tweet)  # FIX: pattern was "could't", never matched
    tweet = re.sub(r"won't", "will not", tweet)      # FIX: replacement was misspelled "wil not"
    # Drop punctuation, digits, and stray single letters, then squeeze spaces.
    tweet = re.sub(r"\W", " ", tweet)
    tweet = re.sub(r"\d", " ", tweet)
    tweet = re.sub(r"\s+[a-z]\s+", " ", tweet)
    tweet = re.sub(r"\s+[a-z]$", " ", tweet)
    tweet = re.sub(r"^[a-z]\s+", " ", tweet)  # FIX: was r"^[a-z]\+s", which matched a literal '+s'
    tweet = re.sub(r"\s+", " ", tweet)
    return tweet


# Classify every collected tweet and tally the sentiment counts.
total_pos = 0
total_neg = 0
for tweet in list_tweets:
    tweet = _clean_tweet(tweet)
    # Predict the sentiment of the cleaned tweet (1 = positive).
    sent = clf.predict(vectorizer.transform([tweet]).toarray())
    if sent[0] == 1:
        total_pos += 1
    else:
        total_neg += 1
    print("cleaned tweets:", tweet, sent)

# Plotting the results.
print(total_neg)
print(total_pos)

objects = ['Positive', 'Negative']
y_pos = np.arange(len(objects))  # bar positions on the y axis

# Plot the results and visualize the data.
plt.bar(y_pos, [total_pos, total_neg], alpha=0.5)
plt.xticks(y_pos, objects)
plt.ylabel("Number")
plt.title("Number of Positive and Negative Tweets")
Tweet_Sent_Analysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- sample_text = "Does this thing really works? lets see." from nltk.tokenize import sent_tokenize, word_tokenize sent_tokenize(sample_text) words = word_tokenize(sample_text) from nltk.corpus import stopwords stop = stopwords.words('english') import string punctuations = list(string.punctuation) stop = stop + punctuations clean_words = [w for w in words if not w in stop] clean_words # # Stemming from nltk.stem import PorterStemmer stem_words = ["play", "playing", "player", "played"] ps = PorterStemmer() stemmed_words = [ps.stem(w) for w in stem_words] stemmed_words # # POS parts of speech from nltk import pos_tag from nltk.corpus import state_union text = state_union.raw("2006-GWBush.txt") text '''import nltk nltk.download('averaged_perceptron_tagger') pos = pos_tag(word_tokenize(text)) pos pos_tag(word_tokenize("I have been painting since morning.")) # # Lemmatization from nltk.stem import WordNetLemmatizer lemmatizer = WordNetLemmatizer() '''import nltk nltk.download('wordnet') lemmatizer.lemmatize("better", pos = "a") lemmatizer.lemmatize("excellent", pos = "n") lemmatizer.lemmatize("painting", pos = "n") lemmatizer.lemmatize("painting", pos = "v") pos("better")
NLTK.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- import pandas as pd import numpy as np from matplotlib import pyplot as plt import sklearn as sklearn gundata = pd.read_excel(r"C:\Users\OWNER\Downloads\Gun Violence.xlsx") gundata.head() gundata.tail() data_column_category=gundata.select_dtypes(exclude=[np.number]).columns data_column_category gundata[data_column_category].head() gundata[data_column_category].tail to_drop = ['city_or_county','address','sources','incident_url_fields_missing','participant_name'] gundata.drop(to_drop, inplace=True, axis=1) gundata.head() gundata = gundata.set_index('date') gundata.head() # + fig,ax = plt.subplots(figsize =(8,6)) plt.plot(gundata.index.values, gundata['n_guns_involved']) plt.title('Trend of Gun Violence in USA') plt.xlabel("Date") plt.ylabel('Number of Guns Involved') fig.autofmt_xdate(rotation=45) plt.show() # - gundata.describe()
Trend of Gun Violence in USA.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# # PySpark Tutorial 4 — Exploratory Data Analysis
#
# EDA on the Titanic dataset with PySpark: column clean-up, missing-value
# handling, value replacement, pivot tables, correlation, and summaries.
# (Originally a Colab notebook; cell execution metadata has been stripped.)

# Install PySpark (notebook shell command).
# !pip install pyspark==3.2.0

# ## Initializing Spark
from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()
spark

# Remember: call spark.stop() when done (see the end of this script).

from datetime import datetime, date
from pyspark.sql import Row
import pandas as pd
import seaborn as sns

from pyspark.sql.functions import *

# Fetch the Titanic dataset via seaborn, then round-trip it through CSV so
# Spark can read it with schema inference.
df = sns.load_dataset("titanic")
df

df.to_csv("titanic.csv", index=True)

# ## Read dataset
# source: https://github.com/mwaskom/seaborn-data/blob/master/titanic.csv
titanic = spark.read.csv('titanic.csv', header=True, inferSchema=True,)
titanic.show()

titanic.printSchema()

# ## Column operations
# The pandas index column arrives as "_c0"; give it a proper name.
titanic = titanic.withColumnRenamed("_c0", "id")

titanic.count()

titanic.show(10)

# Several columns duplicate one another — inspect them side by side.
titanic.select("survived", "alive", "embarked", "embark_town", "pclass", "class").show(10)

# Drop the redundant copies.
titanic = titanic.drop("alive", "embark_town", "class")
titanic.printSchema()

titanic

# ## Missing value
# Nulls per column = total rows minus rows surviving a per-column na.drop.
for name in titanic.columns:
    print(name, titanic.count() - (titanic.na.drop(subset=name).count()))

# Only two rows lack `embarked`; dropping them is harmless.
titanic = titanic.na.drop(subset=["embarked"])
for name in titanic.columns:
    print(name, titanic.count() - (titanic.na.drop(subset=name).count()))

# `deck` is mostly null — drop the whole column.
titanic = titanic.drop("deck")
titanic.printSchema()

# One option: impute missing ages with the median via Spark ML's Imputer.
from pyspark.ml.feature import Imputer

imputer = Imputer(
    inputCols=['age'],
    outputCols=["{}_imputed".format(c) for c in ['age']]
).setStrategy("median")

imputer.fit(titanic).transform(titanic).show()

# The option used here: fill missing ages with the truncated mean.
from pyspark.sql.functions import mean

age_mean = titanic.select(mean(titanic['age'])).collect()
age_mean = int(age_mean[0][0])
age_mean

titanic = titanic.na.fill(age_mean, subset=['age'])
titanic.show(10)

for name in titanic.columns:
    print(name, titanic.count() - (titanic.na.drop(subset=name).count()))

# ## Replacing value
# Encode sex as string codes '1' (male) / '0' (female) through a UDF lookup;
# unknown values pass through unchanged.
from pyspark.sql.functions import col, udf

sex_codes = {'male': '1', 'female': '0'}
encode_sex = udf(lambda row: sex_codes.get(row, row))
titanic = titanic.withColumn("sex", encode_sex(col("sex")))
titanic.show(10)

# ## pivot table and plotting
titanic.groupBy('survived').pivot("pclass").count().show()

titanic.groupBy('survived').pivot("pclass").count().toPandas().plot(kind="bar")

titanic.groupBy('survived').pivot("who").count().show()

titanic.groupBy('survived').pivot("who").count().toPandas().plot(kind="bar")

titanic.groupBy('survived', "who").pivot("pclass").count().show()

titanic.groupBy('survived', "sex").pivot("pclass").count().toPandas()

titanic.select("age").toPandas().plot(kind="hist")

# ## correlation
titanic.show(10)

from pyspark.mllib.stat import Statistics

# Numeric columns to correlate, converted to an RDD of value tuples.
corr_cols = titanic.select("survived", "pclass", "sex", "age", "fare")
corr_rdd = corr_cols.rdd.map(lambda row: row[0:])
corr_mat = Statistics.corr(corr_rdd, method="pearson")

import seaborn as sns

sns.heatmap(corr_mat, annot=True,
            xticklabels=["survived", "pclass", "sex", "age", "fare"],
            yticklabels=["survived", "pclass", "sex", "age", "fare"])

# ## describe & summary
titanic.describe(["survived", "pclass", "sex", "age", "fare"]).show()

titanic.describe().show()

# summary() resembles describe() but adds quartiles.
titanic.summary().show()

# Specific statistics can be requested by name.
titanic.summary('min', '25%', '50%', '75%', 'max').show()

# ## using function
def round_fare(x):
    # `round` here is pyspark.sql.functions.round (star import above), so
    # passing the column name "fare" is valid and rounds the column.
    return round(x)

titanic = titanic.withColumn("fare", round_fare("fare"))
titanic.show(10)

spark.stop()

# # Resources
#
# 1. https://spark.apache.org/docs/latest/rdd-programming-guide.html
# 2. https://spark.apache.org/docs/latest/api/python/getting_started/quickstart_df.html#
# 3. https://github.com/vkocaman/PySpark_Essentials_March_2019
# 4. https://github.com/sundarramamurthy/pyspark
# 5. https://towardsdatascience.com/beginners-guide-to-pyspark-bbe3b553b79f
# 6. https://www.guru99.com/pyspark-tutorial.html
# 7. https://towardsdatascience.com/exploratory-data-analysis-eda-with-pyspark-on-databricks-e8d6529626b1
tutorials/PySpark/4.PySpark_Exploratory_Data_Analysis.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# Clean the student performance (Portuguese) dataset: load the CSV, check
# every column for missing values, normalise the text columns (trim
# whitespace, lower-case) and sanity-check each value distribution.
#
# REFACTOR: the original repeated a near-identical cell for each of the 33
# columns; this version drives the same checks and the same strip+lower
# normalisation from two column lists, producing an identical final
# `student_per` frame.

import pandas as pd

student_per_p = 'student-por.csv'

COLUMNS = ['school', 'sex', 'age', 'address', 'famsize', 'pstatus', 'medu',
           'fedu', 'mjob', 'fjob', 'reason', 'guardian', 'traveltime',
           'studytime', 'failures', 'schoolsup', 'famsup', 'paid',
           'activities', 'nursery', 'higher', 'internet', 'romantic',
           'famrel', 'freetime', 'goout', 'dalc', 'walc', 'health',
           'absences', 'grade1', 'grade2', 'finalgrade']

student_per = pd.read_csv(student_per_p, sep=';', decimal='.', header=0,
                          names=COLUMNS, index_col=False)
student_per

# Text/categorical columns that need strip + lower-casing to remove typos and
# stray whitespace; the remaining columns are numeric and only need the
# missing-value and sanity checks.
CATEGORICAL = ['school', 'sex', 'address', 'famsize', 'pstatus', 'mjob',
               'fjob', 'reason', 'guardian', 'schoolsup', 'famsup', 'paid',
               'activities', 'nursery', 'higher', 'internet', 'romantic']

for column in COLUMNS:
    # Missing-value check (printed so the script is usable outside a notebook).
    print(column, 'missing:', student_per[column].isnull().sum())
    if column in CATEGORICAL:
        # Normalise: remove extra whitespace, fold case.
        student_per[column] = student_per[column].str.strip().str.lower()
    # Sanity check: inspect the distribution of values for anomalies.
    print(student_per[column].value_counts())

# +
# We have chosen 10 attributes (columns) to focus on:
# address, pstatus, guardian, traveltime, romantic, famrel, walc, health,
# absences, finalgrade
Data Cleaning.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# N-body simulation (REBOUND/REBOUNDx) of small bodies around Jupiter while
# the Sun loses mass and brightens along a prescribed AGB track.  The stellar
# mass and luminosity histories are piecewise-analytic fits evaluated at the
# simulation epoch T0 + t; a custom radiation force (`yark`) is applied to
# each asteroid.

# +
import rebound
import reboundx
import numpy as np
import scipy
from scipy import signal
from IPython.display import display, clear_output
import matplotlib.pyplot as plt
from matplotlib import colors
# %matplotlib inline

# +
# Scale used when drawing moon semi-major axes — presumably Jupiter's Hill
# radius in AU; TODO confirm.
rhill = 0.3543

def add_moons(sim, num):
    """Add `num` massless satellites of Jupiter with log-uniform semi-major axes.

    NOTE(review): eccentricities, inclinations and angles are drawn below but
    NOT used — every moon is added with e=0, inc=0, Omega=0, omega=0.  `radeg`
    is also undefined in this file, so calling this raises NameError unless it
    is defined elsewhere; it is never called here because moons_add == 0.
    """
    randis = np.random.uniform(0, 1000, size=num)
    # log-uniform between 0.04*rhill and 0.4*rhill
    a_rand = 4 * np.power(10, -2 + randis/1000) * rhill
    e_rand = np.random.randint(2, 10, size=num)
    e_rand = 1/e_rand
    print(a_rand)
    w_rand = np.random.randint(0, 360, size=num)*radeg
    half = int(num/2)
    # prograde (< 30 deg) and retrograde (> 150 deg) halves
    i_rand1 = np.random.randint(0, 30, size=half+1)*radeg
    i_rand2 = np.random.randint(150, 180, size=half)*radeg
    i_rand = np.concatenate((i_rand1, i_rand2))
    node_rand = np.random.randint(0, 360, size=num)*radeg
    for i in range(num):
        sem = a_rand[i]
        ecc = e_rand[i]      # drawn but unused (see docstring note)
        icl = i_rand[i]      # drawn but unused
        Ome = w_rand[i]      # drawn but unused
        nod = node_rand[i]   # drawn but unused
        has = 'Hill {0}'.format(i)
        sim.add(m=0, primary=sim.particles['jupiter'], a=sem, e=0, inc=0,
                Omega=0, omega=0, hash=has)
    return

# +
def masses(x):
    """Approximate stellar mass (M_sun) at each age in array `x` (years).

    Piecewise fit to the star's AGB mass-loss history: constant 1 M_sun until
    ~1.132e10 yr, two loss phases, then a 0.54 M_sun white-dwarf remnant.
    """
    y = np.zeros_like(x)
    for i, time in enumerate(x):
        if (time <= 1.132e10):
            y[i] = 1
        elif (1.132e10 < time <= 1.1336e10):
            y[i] = 0.05 * (708.5 - time/(1.6e7))**(1/3) + .95
        elif (1.1336e10 < time <= 1.1463e10):
            y[i] = -8**((time - 1.1463e10)/574511)/2.4 + .95
        elif (1.1463e10 < time):
            y[i] = 0.54
    return y

def lums_array(x):
    """Approximate log-luminosity at each age in array `x` (years).

    Piecewise fit to the AGB luminosity history; same track as `inst_lum`
    but vectorised over an array.
    """
    y = np.zeros_like(x)
    for i, time in enumerate(x):
        if (time <= 1.113e10):
            y[i] = 1.05
        elif (1.113e10 < time <= 1.1225e10):
            y[i] = 1.45 + ((1.45 - 1.1)/(1.1225e10 - 1.1135e10))*(time - 1.1225e10)
        elif (1.1225e10 < time <= 1.125e10):
            y[i] = 1.45
        # BUG FIX: the original bound read `1.125 < time` (missing the e10
        # exponent); earlier branches masked it, but the intended bound is
        # 1.125e10.
        elif (1.125e10 < time <= 1.1336e10):
            y[i] = 1.35 + .1*1.002**((time - 1.125e10)/58000)
        elif (1.1336e10 < time <= 1.142e10):
            y[i] = 1.673
        elif (1.142e10 < time <= 1.14397e10):
            y[i] = 3.198e-9*time - 34.85
        elif (1.14397e10 < time <= 1.14479e10):
            y[i] = 1.736 + 0.032*1.5**((time - 1.14455e10)/360000)
        elif (1.14479e10 < time <= 1.1462e10):
            y[i] = 2.15 + 0.00021*1.5**((time - 1.1444e10)/870000)
        elif (1.1462e10 < time <= 1.14632e10):
            y[i] = 3.5 + (.43/0.0001e10)*(time - 1.1463e10)
        elif (1.14632e10 < time <= 1.14636e10):
            y[i] = 2.3*((time - 1.1463e10)/45000)**(-0.3)
        elif (1.14636e10 < time <= 1.14654715e10):
            y[i] = .2 + ((.2 - 1.05)/(1.14654715e10 - 1.14636e10))*(time - 1.14654715e10)
        elif (1.14654715e10 < time):
            y[i] = .2
    return y

def inst_lum(x):
    """Scalar version of `lums_array`: log-luminosity at a single age `x` (yr)."""
    time = x
    if (time <= 1.113e10):
        y = 1.05
    elif (1.113e10 < time <= 1.1225e10):
        y = 1.45 + ((1.45 - 1.1)/(1.1225e10 - 1.1135e10))*(time - 1.1225e10)
    elif (1.1225e10 < time <= 1.125e10):
        y = 1.45
    # BUG FIX: same missing-exponent typo as in lums_array (was `1.125 < time`).
    elif (1.125e10 < time <= 1.1336e10):
        y = 1.35 + .1*1.002**((time - 1.125e10)/58000)
    elif (1.1336e10 < time <= 1.142e10):
        y = 1.673
    elif (1.142e10 < time <= 1.14397e10):
        y = 3.198e-9*time - 34.85
    elif (1.14397e10 < time <= 1.14479e10):
        y = 1.736 + 0.032*1.5**((time - 1.14455e10)/360000)
    elif (1.14479e10 < time <= 1.1462e10):
        y = 2.15 + 0.00021*1.5**((time - 1.1444e10)/870000)
    elif (1.1462e10 < time <= 1.14632e10):
        y = 3.5 + (.43/0.0001e10)*(time - 1.1463e10)
    elif (1.14632e10 < time <= 1.14636e10):
        y = 2.3*((time - 1.1463e10)/45000)**(-0.3)
    elif (1.14636e10 < time <= 1.14654715e10):
        y = .2 + ((.2 - 1.05)/(1.14654715e10 - 1.14636e10))*(time - 1.14654715e10)
    elif (1.14654715e10 < time):
        y = .2
    return y

def yark(simp, rebx_force, particles, N):
    """REBOUNDx custom force: radiation (Yarkovsky-like) push on each asteroid.

    Reads the globals T0, num, moon_radii, moon_masses defined below.
    Acceleration scales with stellar luminosity and asteroid area/mass.
    """
    sim = simp.contents
    part = sim.particles
    current_time = sim.t + T0
    # NOTE(review): exponentiates the piecewise log-luminosity with base e —
    # confirm the fit was built for natural log rather than log10.
    L_sol = np.exp(inst_lum(current_time))
    sunx = part['Sun'].x ; suny = part['Sun'].y ; sunz = part['Sun'].z
    sunvx = part['Sun'].vx; sunvy = part['Sun'].vy; sunvz = part['Sun'].vz
    for troj in range(num):
        i = troj + 1  # particle 0 is Jupiter; asteroids start at index 1
        # position/velocity relative to the Sun-offset frame used here
        x = part[i].x + sunx ; y = part[i].y + suny ; z = part[i].z + sunz
        vx = part[i].vx + sunvx; vy = part[i].vy + sunvy; vz = part[i].vz + sunvz
        R = moon_radii[i-1]
        m_ast = moon_masses[i-1]
        c = 63197.8  # speed of light in AU/yr
        r = np.sqrt(x**2 + y**2 + z**2)
        A = (R**2 * L_sol)/(4*m_ast*r**2)
        D = (c - vx*x - vy*y - vz*z)/r
        part[i].ax += A/c**2 * (D*x - vx)
        # NOTE(review): the y-component carries extra D*0.25*x and -vx terms
        # not mirrored in x/z — verify against the intended force law.
        part[i].ay += A/c**2 * (D*0.25*x + D*y - vx - vy)
        part[i].az += A/c**2 * (D*z - vz)
    return

# +
######################
# Times over which the stellar model is sampled.  User can define:
#  - N_times: number of measurements of the simulation
#  - T0: starting age of the star
#  - t_tot: total integration time (M_star and L_star start from T0)
#
# This cell also plots those masses and luminosities.
######################
N_times = 10000
T0 = 1.14610e10
t_tot = 2500000
ts = np.linspace(0, t_tot, N_times)

mtimes = masses(ts + T0)
lumins = lums_array(ts + T0)

plt.plot(ts + T0, mtimes)
plt.plot(ts + T0, lumins)
plt.legend([r"$M_{star}$ / $M_\odot$", r"log($L_{star}$) / log($L_\odot$)"])
plt.xlabel("Time/yr")
plt.show()
# -

# # Moon sim

# +
sim = rebound.Simulation()

irrgs_add = 0       # number of irregular satellites to add (0 = none)
moons_add = 0       # number of inner moons to add (0 = none)
num_hillies = 10    # number of "Hill" asteroids to add
dt_w_inners = 0.008 # timestep when inner moons are present
dt_no_inner = 0.03  # timestep without inner moons
M0 = mtimes[0]      # initial stellar mass at epoch T0

# Jupiter at the origin; satellites are added Jupiter-centric.
sim.add(m=9.543e-4, x=0, y=0, z=0, vx=0, vy=0, vz=0, hash='jupiter')
if (moons_add != 0):
    add_moons(sim, moons_add)
if (irrgs_add != 0):
    # NOTE(review): add_irregs is not defined in this file — presumably in a
    # missing cell; not reached with irrgs_add == 0.
    add_irregs(sim, irrgs_add)
if (num_hillies != 0):
    # NOTE(review): add_hillies is likewise not defined in this file; this
    # call WILL raise NameError unless it is supplied elsewhere.
    add_hillies(sim, num_hillies)

# Sun added Jupiter-centric, so the system is Jupiter plus an orbiting Sun.
sim.add(m=M0, primary=sim.particles['jupiter'], a=5.2, e=.04839,
        inc=-.022689, Omega=-1.8, omega=-.2574, hash='Sun')

# set simulation parameters
sim.dt = dt_w_inners
sim.move_to_com()
ps = sim.particles

fig, ax = rebound.OrbitPlot(sim)
ax.set_xlim(1.9, 2.8)
ax.set_ylim(3.9, 4.8)
fig.show()

# +
num = num_hillies
rad_ast = 10  # asteroid radius in km
moon_radii = np.full(num, rad_ast/1.496e+8)  # each asteroid's radius in AU
# typical mass at this radius with density 3 g cm^-3 (radius converted to cm)
mass_typic = 3*(4/3)*np.pi*(rad_ast*100000)**3
# scatter individual masses around the typical value [g] ...
moon_masses = np.random.normal(mass_typic, .3*mass_typic, num)
moon_masses /= 1.9891e33  # ... then convert g -> M_sun
print("Typical moon mass:", mass_typic, "g")
print("Average moon mass:", np.mean(moon_masses), "M_sol")

# +
rebx = reboundx.Extras(sim)
starmass = reboundx.Interpolator(rebx, ts, mtimes, 'spline')
starlum = reboundx.Interpolator(rebx, ts, lumins, 'spline')

# NOTE(review): a second Extras is attached to the same simulation here (the
# original code did this too) — verify this double attachment is intentional.
rebx = reboundx.Extras(sim)
yrkv = rebx.create_force("yarkovsky")
yrkv.force_type = "vel"
yrkv.update_accelerations = yark
rebx.add_force(yrkv)

gh = rebx.load_force("gravitational_harmonics")
rebx.add_force(gh)
mof = rebx.load_force("modify_orbits_forces")
rebx.add_force(mof)

# Jupiter oblateness parameters for the gravitational-harmonics force.
J2 = 14736e-6
J2prime = 0.045020
R_jup = 0.000477895  # Jupiter equatorial radius [AU]
ps['jupiter'].params["J2"] = J2prime
ps['jupiter'].params["R_eq"] = R_jup
# -

Nout = 100000
times = np.linspace(0, t_tot, Nout)

# +
# initialize arrays for tracking progression of bodies over integration
x_sol = np.zeros(Nout); y_sol = np.zeros(Nout)
x_sol[0] = ps['Sun'].x
y_sol[0] = ps['Sun'].y
x_jup = np.zeros(Nout); y_jup = np.zeros(Nout)
x_jup[0] = ps['jupiter'].x
y_jup[0] = ps['jupiter'].y

mass = np.zeros(Nout)
a_jup = np.zeros(Nout)
e_jup = np.zeros(Nout)
i_jup = np.zeros(Nout)
pmjup = np.zeros(Nout)
lmjup = np.zeros(Nout)
mass[0] = M0
a_jup[0] = ps['Sun'].a
e_jup[0] = ps['Sun'].e
i_jup[0] = ps['Sun'].inc
pmjup[0] = ps['Sun'].pomega
lmjup[0] = ps['Sun'].l

a_vals = np.zeros((num, Nout))
e_vals = np.zeros((num, Nout))
i_vals = np.zeros((num, Nout))
pmvals = np.zeros((num, Nout))
lmvals = np.zeros((num, Nout))
x_vals = np.zeros((num, Nout))
y_vals = np.zeros((num, Nout))

if (moons_add == 0):
    for moon in range(num):
        a_vals[moon, 0] = ps[moon + 1].a
        e_vals[moon, 0] = ps[moon + 1].e
        i_vals[moon, 0] = ps[moon + 1].inc
        pmvals[moon, 0] = ps[moon + 1].pomega
        lmvals[moon, 0] = ps[moon + 1].l
        x_vals[moon, 0] = ps[moon + 1].x
        y_vals[moon, 0] = ps[moon + 1].y
else:
    # BUG FIX: the original offset the index by len(moons_add), but moons_add
    # is an int (len() would raise TypeError); the asteroid particles simply
    # start after the moons_add inner moons.
    for moon in range(num):
        a_vals[moon, 0] = ps[moon + moons_add + 1].a
        e_vals[moon, 0] = ps[moon + moons_add + 1].e
        i_vals[moon, 0] = ps[moon + moons_add + 1].inc
        pmvals[moon, 0] = ps[moon + moons_add + 1].pomega
        lmvals[moon, 0] = ps[moon + moons_add + 1].l
        x_vals[moon, 0] = ps[moon + moons_add + 1].x
        y_vals[moon, 0] = ps[moon + moons_add + 1].y
print(a_vals)
# -

# Main integration loop: step to each output time, update the Sun's mass from
# the interpolated AGB track, and record orbital elements of every body.
for i, time in enumerate(times):
    sim.integrate(time)
    ps['Sun'].m = starmass.interpolate(rebx, t=sim.t)
    sim.move_to_com()  # mass changed, so recentre on the centre of mass
    mass[i] = ps['Sun'].m
    x_sol[i] = ps['Sun'].x
    y_sol[i] = ps['Sun'].y
    x_jup[i] = ps['jupiter'].x
    y_jup[i] = ps['jupiter'].y
    a_jup[i] = ps['Sun'].a
    e_jup[i] = ps['Sun'].e
    i_jup[i] = ps['Sun'].inc
    pmjup[i] = ps['Sun'].pomega
    lmjup[i] = ps['Sun'].l
    for moon in range(num):
        a_vals[moon, i] = ps[moon + 1].a
        e_vals[moon, i] = ps[moon + 1].e
        i_vals[moon, i] = ps[moon + 1].inc
        pmvals[moon, i] = ps[moon + 1].pomega
        lmvals[moon, i] = ps[moon + 1].l
        x_vals[moon, i] = ps[moon + 1].x
        y_vals[moon, i] = ps[moon + 1].y

# +
#fig, ax = rebound.OrbitPlot(sim)
#ax.set_xlim(2.025,2.625)
#ax.set_ylim(4.075,4.675)
#fig.show()
print(a_vals[:, :5])

# +
fig, ax = plt.subplots(6, 1, figsize=(20, 37), sharex=True)
plt.subplots_adjust(hspace=0)
end = Nout
ax[0].plot(ts[:end]/1e6+.1, lumins[:end])
ax[0].set_ylabel(r"log($L_\star$ / $L_\odot$)", fontsize=16)
ax[1].plot(times[:end]/1e6+.1, mass[:end])
ax[1].set_ylabel(r"$M_\star$ / $M_\odot$", fontsize=16)
ax[2].plot(times[:end]/1e6+.1, a_vals.T[:end, :])
ax[2].plot(times[:end]/1e6+.1, a_jup[:end], "k")
ax[2].set_ylabel(r"$a_{Jup}$ / AU", fontsize=16)
ax[3].plot(times[:end]/1e6+.1, a_vals.T[:end, :])
ax[3].plot(times[:end]/1e6+.1, a_jup[:end], "k")
ax[3].set_ylim(0, .2)
ax[3].set_ylabel(r"$a_{troj}$ / AU", fontsize=16)
ax[4].plot(times[:end]/1e6+.1, e_vals.T[:end, :])
ax[4].set_ylabel(r"$e_{troj}$", fontsize=16)
ax[5].plot(times[:end]/1e6+.1, i_vals.T[:end, :])
ax[5].set_ylabel(r"$i$ / degrees", fontsize=16)
ax[5].set_xlabel("Myr after start of AGB", fontsize=16)
fig.show()
#fig.savefig("irregs-all-elements.png", dpi=300)
# -

fig, ax = plt.subplots()
ax.scatter(x_vals[:, end-1], y_vals[:, end-1])
ax.scatter(x_jup[end-1],y_jup[end-1])
ax.set_aspect("equal")
fig.show()

# +
# Histograms of the test particles' orbital elements at five epochs:
# the start of the run and the four quarter-points of the integration.
hists, hax = plt.subplots(4,5,figsize=(25,18), sharey=True)
(t1, t2, t3, t4) = (int(Nout/4 - 1), int(Nout/2 - 1), int(3*Nout/4 - 1), int(Nout - 1))

# Row 0: semimajor axes
hax[0,0].hist(a_vals[:,0], 10)
hax[0,1].hist(a_vals[:,t1], 10)
hax[0,2].hist(a_vals[:,t2], 10)
hax[0,3].hist(a_vals[:,t3], 10)
hax[0,4].hist(a_vals[:,t4], 10)
hax[0,2].set_xlabel("Semimajor Axes")

# Row 1: eccentricities
hax[1,0].hist(e_vals[:,0], 10)
hax[1,1].hist(e_vals[:,t1], 10)
hax[1,2].hist(e_vals[:,t2], 10)
hax[1,3].hist(e_vals[:,t3], 10)
hax[1,4].hist(e_vals[:,t4], 10)
hax[1,2].set_xlabel("Eccentricities")

# Row 2: inclinations
hax[2,0].hist(i_vals[:,0], 10)
hax[2,1].hist(i_vals[:,t1], 10)
hax[2,2].hist(i_vals[:,t2], 10)
hax[2,3].hist(i_vals[:,t3], 10)
hax[2,4].hist(i_vals[:,t4], 10)
hax[2,2].set_xlabel("Inclinations")

# Row 3 -- BUG FIX: the original referenced `Omvals`, which is never defined
# anywhere in this notebook (NameError at runtime). The arrays recorded in the
# integration loop are a_vals, e_vals, i_vals, pmvals (pomega) and lmvals; the
# closest tracked angle is `pmvals`, used here.
# NOTE(review): if Omega (longitude of ascending node) was actually intended,
# an `Omvals` array must first be recorded during the integration loop.
hax[3,0].hist(pmvals[:,0], 10)
hax[3,1].hist(pmvals[:,t1], 10)
hax[3,2].hist(pmvals[:,t2], 10)
hax[3,3].hist(pmvals[:,t3], 10)
hax[3,4].hist(pmvals[:,t4], 10)
hax[3,2].set_xlabel("Omegas")

hists.tight_layout()

# +
# Median-filter (window 499 samples) the Jupiter-to-particle offsets to
# suppress the orbital-period oscillation and leave the secular drift.
n = num_hillies
y = 85000  # number of output steps to analyse

diffx = np.zeros((n,y))
diffy = np.zeros((n,y))

for i in range(n):
    diffx[i,:] = signal.medfilt(x_jup[:y] - x_vals[i,:y],499)
    diffy[i,:] = signal.medfilt(y_jup[:y] - y_vals[i,:y],499)
    # Progress marker; note this exact-equality test only fires for even n.
    if ((n/(i+1)) == 2):
        print("Wooooooooooah we're halfway there")
# -

# Per-particle distance from Jupiter (i.e. from its Lagrange point region),
# and the mean over all particles, normalised by Jupiter's semimajor axis.
r_raw4 = np.sqrt(np.power(diffx,2) + np.power(diffy,2))
r_4 = np.mean(r_raw4,0)

fig, ax = plt.subplots(figsize=(10,10))
ax.plot(times[500:y-5000]/1e6,(r_4[500:y-5000])/a_jup[500:y-5000],'r',lw=3)
ax.plot(times[500:y-5000]/1e6,np.power(times[500:y-5000]/1e6,2),'k',ls=":",lw=3)
ax.plot(times[500:y-5000]/1e6,(r_raw4.T[500:y-5000,7]/a_jup[500:y-5000]).T, c='r', alpha=.7)
ax.plot(times[500:y-5000]/1e6,(r_raw4.T[500:y-5000,9]/a_jup[500:y-5000]).T, c='b', alpha=.7)
ax.set_xlabel("Myr after start of AGB", fontsize=16)
ax.set_ylabel(r"Distance from ast. to its Lagrange point / $a_{jup}$", fontsize=16)
ax.set_ylim(-0.01,0.08)
ax.set_title(r"Distances averaged over $\sim$40 orbits", fontsize=16)
fig.show()
fig.savefig("hill-jup-dists-w-inds-w-walk.png", dpi=300)

# First few output steps of particle 0's position relative to Jupiter.
fig, ax = plt.subplots(figsize=(10,10))
ax.plot((x_vals[0,:5]-x_jup[:5])/a_jup[:5],(y_vals[0,:5]-y_jup[:5])/a_jup[:5])
ax.scatter(0,0,marker="x",c="k")
ax.set_aspect("equal")

# +
# Duplicate of the previous cell (kept as in the original checkpoint).
fig, ax = plt.subplots(figsize=(10,10))
ax.plot((x_vals[0,:5]-x_jup[:5])/a_jup[:5],(y_vals[0,:5]-y_jup[:5])/a_jup[:5])
ax.scatter(0,0,marker="x",c="k")
ax.set_aspect("equal")
9-Archive/Old_Sims/.ipynb_checkpoints/Moons (Mod. A)-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="E2iZ4xXqrcs6" # ## Word Embeddings # # - Word embeddings transform a binary/count based or tf*idf vectors into a much smaller dimension vector of real numbers. The one-hot encoded vector or binary vector is also known as a sparse vector, whilst the real valued vector is known as a dense vector. # # - An word embedding maps discrete, categorical values to a continous space. Major advances in NLP applications have come from these continuous representations of words. # # - The key concept in these word embeddings is that words that appear in similar contexts appear nearby in the vector space, i.e. the Euclidean distance between these two word vectors is small. # # - By context here, we mean the surrounding words. For example in the sentences **"it is the time of stupidity"** and **"it is the age of foolishness**" the words **'time'** and **'age'** and **'stupidity'** and **'foolishness'** appear in the same context and thus should be close together in vector space. # # - You did learn about word2vec which calculates word vectors from a corpus. In this lab session we use GloVe vectors, GloVe being another algorithm to calculate word vectors. If you want to find out more about GloVe, check the website [here](https://nlp.stanford.edu/projects/glove/). For more information about word embeddings, go [here](https://monkeylearn.com/blog/word-embeddings-transform-text-numbers/). # + [markdown] id="WT2xjbPyChNB" # ## Loading the GloVe vectors # # First, we'll load the GloVe vectors. The name field specifies what the vectors have been trained on, here the 6B means a corpus of 6 billion words. The dim argument specifies the dimensionality of the word vectors. 
**GloVe vectors are available in 50, 100, 200 and 300 dimensions.** There is also a 42B and 840B glove vectors, **however they are only available at 300 dimensions**. # # - For more information about GloVe vectors loading using `torchtext` visit the [link](https://torchtext.readthedocs.io/en/latest/vocab.html#glove). # # - [GLoVe](https://github.com/stanfordnlp/GloVe) comes with different domain differences:- # # - **Common Crawl** (42B tokens, 1.9M vocab, uncased, 300d vectors, 1.75 GB download) # - **Common Crawl** (840B tokens, 2.2M vocab, cased, 300d vectors, 2.03 GB download) # - **Wikipedia 2014 + Gigaword 5**(6B tokens, 400K vocab, uncased, 300d vectors, 822 MB download) # - **Twitter** (2B tweets, 27B tokens, 1.2M vocab, uncased, 200d vectors, 1.42 GB download) # + colab={"base_uri": "https://localhost:8080/"} id="87-07Kq1OGCl" executionInfo={"status": "ok", "timestamp": 1616055627874, "user_tz": -360, "elapsed": 1328, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhiM4cOpMNCedgjDWZ9oHIKufBTjojcruiTtinn=s64", "userId": "11387451028968277735"}} outputId="7d9ecdda-d9c9-4d4b-da48-c82f9fca9a7a" import torchtext.vocab glove = torchtext.vocab.GloVe(name = '6B', dim = 100) print(f'There are {len(glove.itos)} words in the vocabulary') # + [markdown] id="JaBn5cZHEJoY" # As shown above, **there are 400,000 unique words** in the GloVe vocabulary. These are the most common words found in the corpus the vectors were trained on. 
**In these set of GloVe vectors, every single word is lower-case only.** # # `glove.vectors is the actual tensor containing the values of the embeddings.` # + colab={"base_uri": "https://localhost:8080/"} id="NA7bMWI_amGe" executionInfo={"status": "ok", "timestamp": 1616055817009, "user_tz": -360, "elapsed": 1208, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhiM4cOpMNCedgjDWZ9oHIKufBTjojcruiTtinn=s64", "userId": "11387451028968277735"}} outputId="f27ae755-296f-47b5-ec52-a250a248097b" glove.vectors.shape # + [markdown] id="YFCUa5-zH75o" # We can see what word is associated with each row by checking the **itos (int to string)** list. We can also use the **stoi (string to int)** dictionary, in which we input a word and receive the associated integer/index. If you try get the index of a word that is not in the vocabulary, you receive an error. # + colab={"base_uri": "https://localhost:8080/"} id="FGvd2XvIdDZq" executionInfo={"status": "ok", "timestamp": 1616055924184, "user_tz": -360, "elapsed": 3553, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhiM4cOpMNCedgjDWZ9oHIKufBTjojcruiTtinn=s64", "userId": "11387451028968277735"}} outputId="06290aad-2d74-464f-8640-b53f48443975" glove.itos[:10] # + colab={"base_uri": "https://localhost:8080/"} id="QBmvy8bXdSfM" executionInfo={"status": "ok", "timestamp": 1616055941615, "user_tz": -360, "elapsed": 990, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhiM4cOpMNCedgjDWZ9oHIKufBTjojcruiTtinn=s64", "userId": "11387451028968277735"}} outputId="2f25b528-9664-45a5-c2a3-e1c7455f924d" glove.stoi['the'] # + colab={"base_uri": "https://localhost:8080/"} id="3K5Xd5mPOami" executionInfo={"status": "ok", "timestamp": 1616055963841, "user_tz": -360, "elapsed": 937, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhiM4cOpMNCedgjDWZ9oHIKufBTjojcruiTtinn=s64", "userId": 
"11387451028968277735"}} outputId="2b223bce-1246-4c21-c6a0-8738b091f70a" print(glove.vectors[glove.stoi['the']]) print(glove.vectors[glove.stoi['the']].shape) # + id="g1V3JwLbdZ_E" def get_vector(embeddings, word): assert word in embeddings.stoi, f'*{word}* is not in the vocab!' return embeddings.vectors[embeddings.stoi[word]] # + colab={"base_uri": "https://localhost:8080/"} id="tnp-D7sefc1o" executionInfo={"status": "ok", "timestamp": 1616056217274, "user_tz": -360, "elapsed": 860, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhiM4cOpMNCedgjDWZ9oHIKufBTjojcruiTtinn=s64", "userId": "11387451028968277735"}} outputId="c1f243ab-3242-4ab2-b227-f26080e208af" print(get_vector(glove, 'dhaka')) print(get_vector(glove, 'dhaka').shape) # + [markdown] id="NnPjkO86SqW1" # ## Similar Contexts # # Now to start looking at the context of different words. # # If we want to find the words similar to a certain input word, we first find the vector of this input word, then we scan through our vocabulary calculating the distance between the vector of each word and our input word vector. We then sort these from closest to furthest away. # # The function below returns the closest 10 words to an input word vector: # + id="WOQ168R6fgPQ" import torch def closest_words(embeddings, vector, n = 10): distances = [(word, torch.dist(vector, get_vector(embeddings, word)).item()) for word in embeddings.itos] return sorted(distances, key = lambda w: w[1])[:n] # + [markdown] id="2fPjFFBVh3A8" # Let's try it out with 'dhaka'. The closest word is the word 'dhaka' itself (not very interesting), however all of the words are related in some way. # # Interestingly, we also get 'lahore' and 'karachi', implies that Bangladesh, and Pakistan are frequently talked about together in similar contexts. # # Moreover, other vectors are geographically situated near each other. 
# + colab={"base_uri": "https://localhost:8080/"} id="wu6LZp6Yhxml" executionInfo={"status": "ok", "timestamp": 1616056388013, "user_tz": -360, "elapsed": 4845, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhiM4cOpMNCedgjDWZ9oHIKufBTjojcruiTtinn=s64", "userId": "11387451028968277735"}} outputId="f2c1eba7-a5db-4e76-8e4b-14267045df93" word_vector = get_vector(glove, 'dhaka') closest_words(glove, word_vector) # + [markdown] id="LKXpxMeIi7_G" # Looking at another country, India, we also get nearby countries: Thailand, Malaysia and Sri Lanka (as two separate words). Australia is relatively close to India (geographically), but Thailand and Malaysia are closer. So why is Australia closer to India in vector space? This is most probably due to India and Australia appearing in the context of cricket matches together. # + colab={"base_uri": "https://localhost:8080/"} id="QcED47-mic9n" executionInfo={"status": "ok", "timestamp": 1616056562229, "user_tz": -360, "elapsed": 4478, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhiM4cOpMNCedgjDWZ9oHIKufBTjojcruiTtinn=s64", "userId": "11387451028968277735"}} outputId="7db9f5f2-15e0-4647-9bb9-fe1180fb9b27" word_vector = get_vector(glove, 'india') closest_words(glove, word_vector) # + colab={"base_uri": "https://localhost:8080/"} id="Dy1br1mQGHq5" executionInfo={"status": "ok", "timestamp": 1616057014838, "user_tz": -360, "elapsed": 4627, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhiM4cOpMNCedgjDWZ9oHIKufBTjojcruiTtinn=s64", "userId": "11387451028968277735"}} outputId="31e04b8e-9b98-44d0-82ea-80b32b4f2ac1" word_vector = get_vector(glove, 'google') closest_words(glove, word_vector) # + [markdown] id="_aBwPpmUxRUk" # ## Analogies # # Another property of word embeddings is that they can be operated on just as any standard vector and give interesting results. 
# + id="ItPm3L48xWVj"
def analogy(embeddings, word1, word2, word3, n=4):
    """Solve "word1 is to word2 as word3 is to ?" in embedding space.

    The analogy direction is vec(word2) - vec(word1); adding that offset
    to vec(word3) gives a point whose nearest neighbours -- excluding the
    three query words themselves -- are returned as (word, distance)
    pairs, closest first.
    """
    base_vec = get_vector(embeddings, word1)
    target_vec = get_vector(embeddings, word2)
    query_vec = get_vector(embeddings, word3)

    # Translate word3 by the word1 -> word2 offset.
    offset_point = target_vec - base_vec + query_vec

    # Ask for a few extra neighbours so that dropping the three query
    # words still leaves n candidates.
    neighbours = closest_words(embeddings, offset_point, n + 3)
    query_words = {word1, word2, word3}
    answers = [pair for pair in neighbours if pair[0] not in query_words][:n]

    print(f'{word1} is to {word2} as {word3} is to...')

    return answers

# + [markdown] id="Mx8I61GGx-4y"
# <div align="center">
# <img src="https://drive.google.com/uc?id=12Kku3uSvqqaTya7trjkfy5EKU7pC9u2U" width="500">
# </div>
#

# + colab={"base_uri": "https://localhost:8080/"} id="vwAE_EzhxjRF" executionInfo={"status": "ok", "timestamp": 1616057278015, "user_tz": -360, "elapsed": 5629, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhiM4cOpMNCedgjDWZ9oHIKufBTjojcruiTtinn=s64", "userId": "11387451028968277735"}} outputId="eea7a61c-a829-479a-b23d-9929185afd83"
print(analogy(glove, 'man', 'king', 'woman'))

# + [markdown] id="39ZCvfp6yUhw"
# If we think about it, the vector calculated from 'king' minus 'man' gives us a "royalty vector". This is the vector associated with traveling from a man to his royal counterpart, a king. If we add this "royalty vector" to 'woman', this should travel to her royal equivalent, which is a queen!
# + colab={"base_uri": "https://localhost:8080/"} id="N4LMvlaGylbo" executionInfo={"status": "ok", "timestamp": 1616057470689, "user_tz": -360, "elapsed": 4637, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhiM4cOpMNCedgjDWZ9oHIKufBTjojcruiTtinn=s64", "userId": "11387451028968277735"}} outputId="a4f77cd4-e36f-4b8f-ce83-60ed763147a8" print(analogy(glove, 'man', 'actor', 'woman')) # + colab={"base_uri": "https://localhost:8080/"} id="8F_bPvxNyvPi" executionInfo={"status": "ok", "timestamp": 1616057486213, "user_tz": -360, "elapsed": 4643, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhiM4cOpMNCedgjDWZ9oHIKufBTjojcruiTtinn=s64", "userId": "11387451028968277735"}} outputId="1c3f2357-3937-4d89-e011-e4157da6365a" print(analogy(glove, 'india', 'delhi', 'bangladesh')) # + colab={"base_uri": "https://localhost:8080/"} id="RgQ5gUXEGWC7" executionInfo={"status": "ok", "timestamp": 1616057498516, "user_tz": -360, "elapsed": 4529, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhiM4cOpMNCedgjDWZ9oHIKufBTjojcruiTtinn=s64", "userId": "11387451028968277735"}} outputId="28bb43aa-a6f5-44bb-9b52-ac89d9a480da" print(analogy(glove, 'good', 'heaven', 'bad')) # + colab={"base_uri": "https://localhost:8080/"} id="bvTQLmNcGgr1" executionInfo={"status": "ok", "timestamp": 1616057512742, "user_tz": -360, "elapsed": 4722, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhiM4cOpMNCedgjDWZ9oHIKufBTjojcruiTtinn=s64", "userId": "11387451028968277735"}} outputId="1537f36b-0d1f-4159-e0a0-b13b7f7ef8db" print(analogy(glove, 'jordan', 'basketball', 'ronaldo')) # + colab={"base_uri": "https://localhost:8080/"} id="mkQ1Ns5WHIpC" executionInfo={"status": "ok", "timestamp": 1616057523033, "user_tz": -360, "elapsed": 4945, "user": {"displayName": "<NAME>", "photoUrl": 
"https://lh3.googleusercontent.com/a-/AOh14GhiM4cOpMNCedgjDWZ9oHIKufBTjojcruiTtinn=s64", "userId": "11387451028968277735"}} outputId="a5915cd2-db55-4c50-85b1-a14230b8f372" print(analogy(glove, 'paper', 'newspaper', 'screen')) # + [markdown] id="RcTR-Q-wz8Co" # ## Similarity operations on embeddings # + id="HL0AdBkKz9Ol" from scipy import spatial def cosineSim(word1, word2): vector1, vector2 = get_vector(glove, word1), get_vector(glove, word2) return 1 - spatial.distance.cosine(vector1, vector2) # + colab={"base_uri": "https://localhost:8080/"} id="Y-GiNcnb0t0S" executionInfo={"status": "ok", "timestamp": 1616057627047, "user_tz": -360, "elapsed": 976, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhiM4cOpMNCedgjDWZ9oHIKufBTjojcruiTtinn=s64", "userId": "11387451028968277735"}} outputId="7bc7159c-c8c7-4da7-b137-bd8eaa82f0fd" word_pairs = [ ('dog', 'cat'), ('tree', 'cat'), ('tree', 'leaf'), ('king', 'queen'), ] for word1, word2 in word_pairs: print(f'Similarity between "{word1}" and "{word2}":\t{cosineSim(word1, word2):.2f}') # + [markdown] id="10cb1WhyrXl_" # ### Need to learn embedding for your own corpus? # # #### Simplest Ans: Use [Gensim Library](https://radimrehurek.com/gensim/auto_examples/index.html#documentation) # # - [Word2Vec](https://radimrehurek.com/gensim/models/word2vec.html) # - [fastText](https://radimrehurek.com/gensim/auto_examples/tutorials/run_fasttext.html) # - [Doc2Vec](https://radimrehurek.com/gensim/auto_examples/tutorials/run_doc2vec_lee.html) # - [GloVe](https://nlp.stanford.edu/projects/glove/) # - [How is GloVe different from word2vec?](https://www.quora.com/How-is-GloVe-different-from-word2vec) # + [markdown] id="yF0-IdrzkLbQ" # # ### Job Related Topics - Part I [Optional] # # - Create a professional email address # - First name + last name = <EMAIL> # - First name . last name = <EMAIL> # - First name - last name = <EMAIL> # - First name . middle name . 
last name = <EMAIL> # - First name - middle name - last name = <EMAIL> # - First initial + last name = <EMAIL> # - First initial + middle name + last name = <EMAIL> # - First initial + middle initial + last name = <EMAIL> # - The shorter your email the better # - Complete your Linkedin profile # - Prepare a CV in Latex # - Seperate your contact number [personal vs professional] # - Create GitHub profile [Username may only contain alphanumeric characters or single hyphens, and cannot begin or end with a hyphen.] # - You can also use [desktop version of GitHub](https://desktop.github.com/). It's very easy to use without any commands! # - Build your website using [GitHub pages](https://pages.github.com/) # - [Great Templates! ](https://wowchemy.com/templates/) to use.
Class Notebooks/Lab 08 - Word Embeddings.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Ex.07 - More Printing # ------ # # ### General Instructions # - Just type the code and make it run. # # ### Code Learned # ```python # var1 = x # var2 = y # var3 = z # var4 = w # var5 = t # print(var1 + var2 + var3, end = '') # print(var4 + var5) # print("Print something here {}.".format('in the end')) # ``` # # ### Math Operations & Other Symbols # # | Symbol | Symbol name |Pronunciation | What it does? | # |--------|:-----------:|-------------:|:-------------:| # | \"=\" |Equal to | Equals |Equality | # | \"+\" |Plus |Plus |Addition | # | \"*\" |Asterisk |Times |Multiplication | # | \"#\" |Hash|-|-| # # ### Function (LaTex) # # ------ # *<NAME>. (2017).* **Learn Python 3 The Hard Way** - A very simple introduction to the terrifyingly beautiful world of computers and code. Boston: Addison-Wesley. # + print("Mary had a little lamb.") print("Its fleece was white as {}.".format('snow')) print("And everywhere that Mary went.") print("." * 10) #what'd that do? end1 = "C" end2 = "h" end3 = "e" end4 = "e" end5 = "s" end6 = "e" end7 = "B" end8 = "u" end9 = "r" end10 = "g" end11 = "e" end12 = "r" # watch that comma at the end. Try removing it to see what happens print(end1 + end2 + end3 + end4 + end5 + end6, end = '') print(end7 + end8 + end9 + end10 + end11 + end12) # - # > **Study Drills** # > - Just apply what you've learned. 
# + print("Its fleece was {}, {} {} white as {}.".format('sweet', 'soft', 'and', 'snow')) print(end1 + end2 + end3 + end8, end = '') print(end7 + (end3 * 42)) var1 = end1 + (end8 * 42) print("Its fleece was {}, {} {} white as {}.".format('sweet', 'soft', 'and', var1)) # - var1 = 'x' var2 = 'y' var3 = 'z' var4 = 'w' var5 = 't' print(var1 + var2 + var3, end = '') print(var4 + var5) print("Print something here {}.".format('in the end'))
2017_SHAW/ex07_Jupyter.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import sys import os sys.path.append(os.path.expanduser('~/workspace/tacotron/')) import argparse from hparams import hparams import matplotlib.pyplot as plt import matplotlib.patches as mpatches from matplotlib import cm from matplotlib.colors import ListedColormap import numpy as np from PIL import Image from tacotron.alignment_synthesizer import AlignmentSynthesizer from tacotron.pml_synthesizer import Configuration, PMLSynthesizer import tensorflow as tf # - FIGSIZE = (10, 6) # + training_data_dir = '/media/josh/Store/tacotron-data/gran-lj-training' metadata_filename = os.path.join(training_data_dir, 'test.txt') cfg = Configuration(16000, 86) synth = PMLSynthesizer(cfg) # synth.load(checkpoint_path, hparams, gta=gta, model_name=args.variant) with open(metadata_filename, encoding='utf-8') as f: metadata = [line.strip().split('|') for line in f] hours = sum((int(x[2]) for x in metadata)) * hparams.frame_shift_ms / (3600 * 1000) print('Loaded metadata for %d examples (%.2f hours)' % (len(metadata), hours)) pml_features = [m[3] for m in metadata] texts = [m[5] for m in metadata] wav_files = [m[6] for m in metadata] # - TARGET_INDEX = 26 texts[TARGET_INDEX] def get_losses(checkpoint, key='model/stats/loss'): losses = [] # This example supposes that the events file contains summaries with a # summary value tag 'loss'. These could have been added by calling # `add_summary()`, passing the output of a scalar summary op created with # with: `tf.scalar_summary(['loss'], loss_tensor)`. 
summ_iter = tf.train.summary_iterator(checkpoint) # infinite loop while True: try: # get the next item element = next(summ_iter) # do something with element for v in element.summary.value: if v.tag == key: losses.append(v.simple_value) except tf.errors.DataLossError: pass except StopIteration: # if StopIteration is raised, break from loop break return losses tacotron_lj_pmlx_loss = [] tacotron_lj_pmlx_loss += get_losses('/media/josh/Store/remote-logs/pmlx-lj-500k/events.out.tfevents.1551261505.air208', key='model/stats/loss_pml') tacotron_lj_pmlx_loss += get_losses('/media/josh/Store/remote-logs/pmlx-lj-500k/events.out.tfevents.1551471596.air208', key='model/stats/loss_pml') tacotron_lj_pmlx_loss += get_losses('/media/josh/Store/remote-logs/pmlx-lj-500k/events.out.tfevents.1551904223.air208', key='model/stats/loss_pml') # tacotron_lj_pmlx_loss += get_losses('/media/josh/Store/remote-logs/pmlx-lj-500k/events.out.tfevents.1551977987.air208') # tacotron_lj_pmlx_loss += get_losses('/media/josh/Store/remote-logs/pmlx-lj-500k/events.out.tfevents.1557985364.air208') # tacotron_lj_pmlx_loss += get_losses('/media/josh/Store/remote-logs/pmlx-lj-500k/events.out.tfevents.1557999848.air208') tacotron_lj_pmlx_loss = np.array(tacotron_lj_pmlx_loss) tacotron_lj_pmlx_loss.shape locsens_pmlx_loss = [] locsens_pmlx_loss += get_losses('/media/josh/Store/remote-logs/pmlx-locsens-lj-150k/events.out.tfevents.1557999848.air208', key='model/stats/loss_pml') locsens_pmlx_loss += get_losses('/media/josh/Store/remote-logs/pmlx-locsens-lj-150k/events.out.tfevents.1558254576.air208', key='model/stats/loss_pml') locsens_pmlx_loss = np.array(locsens_pmlx_loss) locsens_pmlx_loss.shape pmlx_iter_counts = np.arange(0, tacotron_lj_pmlx_loss.size) * 100 locsens_iter_counts = np.arange(0, locsens_pmlx_loss.size) * 100 plt.figure(figsize=FIGSIZE) plt.plot(locsens_iter_counts, locsens_pmlx_loss) plt.plot(pmlx_iter_counts, tacotron_lj_pmlx_loss) plt.ylabel('Loss') plt.xlabel('Iteration Count') 
plt.title('Tacotron Conditioned on Vocoder Trajectories Loss Curve') plt.legend(['Tacotron PMLx with Location-Sensitive Attention Loss Curve', 'Tacotron PMLx Loss with Content-Based Attention Curve']) plt.ylim(0.04, 0.10) plt.show() np.mean(locsens_pmlx_loss[200:]), np.mean(tacotron_lj_pmlx_loss[200:]) def show_alignment(alignment, label, ax, cmap=cm.cool, cutoff=220, scale_factor=1, norm=False): # normalise the alignment to scale from 0 to 1 if norm: alignment = alignment / np.max(alignment) # set the background of the axis to black ax.set_facecolor('xkcd:black') cool = cmap(np.arange(cmap.N)) # Set alpha cool[:, -1] = np.linspace(0, 1, cmap.N) # Create new colormap cool = ListedColormap(cool) # generate the image im_obj = Image.fromarray(np.uint8(cool(alignment) * 255)) height, width = alignment.shape im_obj = im_obj.resize((round(width * scale_factor), height)) im_obj = np.array(im_obj)[:, :cutoff] im = ax.imshow( im_obj / 255, cmap=cmap, aspect='auto', origin='lower', interpolation='none') patch = mpatches.Patch(color=cool(0.8), label=label) xstep = 50 outputs_per_step = 5 frame_shift = 0.005 plt.xticks(np.arange(0, 250, step=xstep), np.arange(0, 250 * frame_shift * outputs_per_step, step=frame_shift * outputs_per_step * xstep)) plt.xlabel('Decoder Time (s)') plt.ylabel('Encoder Step (character index)') plt.tight_layout() return im, patch # + # fixed_sentence = 'by offering life pensions at full pay for federal judges on all courts who are willing to retire at seventy.' # fixed_sentence = 'Scientists at the CERN laboratory say they have discovered a new particle.' 
fixed_sentence = texts[TARGET_INDEX] # reset the graph before we do anything tf.reset_default_graph() synth = AlignmentSynthesizer() synth.load('/media/josh/Store/remote-logs/pmlx-lj-500k/model.ckpt-131000', hparams, model_name='tacotron_pml_x', locked_alignments=None) pmlx_alignment = synth.synthesize(fixed_sentence) # of shape (encoder_steps, decoder_steps) # reset the graph after the first synthesise call tf.reset_default_graph() synth.load('/media/josh/Store/remote-logs/pmlx-locsens-lj-150k/model.ckpt-109000', hparams, model_name='tacotron_pml_x_locsens', locked_alignments=None) locsens_alignment = synth.synthesize(fixed_sentence) # + fig, ax = plt.subplots(figsize=FIGSIZE) im, taco_pml_patch = show_alignment(locsens_alignment, 'Prediction Network with CBHG Correction and Location-Sensitive Attention Alignment', ax, cmap=cm.binary) _, taco_patch = show_alignment(pmlx_alignment, 'Prediction Network with CBHG Correction and Content-Based Attention Alignment', ax, cmap=cm.hot) ax.legend(handles=[taco_pml_patch, taco_patch], loc='upper left') # fig.colorbar(im, ax=ax) plt.savefig('./LocSensAttention/LocSensAlignment.png', dpi=600) plt.show() # - # reset the graph after the second synthesise call tf.reset_default_graph() synth.load('/media/josh/Store/remote-logs/post-locsens-lj-500k/model.ckpt-248000', hparams, model_name='tacotron_pml_locsens', locked_alignments=None) postnet_alignment = synth.synthesize(fixed_sentence) # + fig, ax = plt.subplots(figsize=FIGSIZE) _, taco_pml_postnet_patch = show_alignment(postnet_alignment, 'Prediction Network with Residual Prediction and Location-Sensitive Attention Alignment', ax, cmap=cm.binary, norm=True) # taco_patch = show_alignment(second_alignment, 'Tacotron Alignment', ax, cmap=cm.Wistia, scale_factor=12.5 / 5) # taco_pmlx_patch = show_alignment(third_alignment, 'Tacotron PML Extended Alignment', ax, cmap=cm.Wistia) ax.legend(handles=[taco_pml_postnet_patch], loc='upper left') 
plt.savefig('./LocSensAttention/PostnetLocsensAlignment.png', dpi=600) plt.show() # + from tacotron.pml_synthesizer import Configuration, PMLSynthesizer from lib import sigproc as sp from util import audio # reset the graph before we do anything tf.reset_default_graph() cfg = Configuration(16000, 86) pml_synth = PMLSynthesizer(cfg) pml_synth.load('/media/josh/Store/remote-logs/pmlx-lj-500k/model.ckpt-131000', hparams, model_name='tacotron_pml_x') wav_outputs = pml_synth.synthesize([fixed_sentence], to_wav=True) wav = wav_outputs[0] wav = wav[:audio.find_endpoint(wav)] sp.wavwrite('./LocSensAttention/pmlx_output.wav', wav, 16000, norm_max_ifneeded=True) # reset the graph before we do anything tf.reset_default_graph() pml_synth.load('/media/josh/Store/remote-logs/pmlx-locsens-lj-150k/model.ckpt-109000', hparams, model_name='tacotron_pml_x_locsens') wav_outputs = pml_synth.synthesize([fixed_sentence], to_wav=True) wav = wav_outputs[0] wav = wav[:audio.find_endpoint(wav)] sp.wavwrite('./LocSensAttention/locsens_output.wav', wav, 16000, norm_max_ifneeded=True) # - wav = wav_outputs[0] wav = wav[:audio.find_endpoint(wav)] sp.wavwrite('./LocSensAttention/locsens_output.wav', wav, 16000, norm_max_ifneeded=True)
notebooks/.ipynb_checkpoints/LocSensAttention-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Challenge 01: Diagonal Unitary Circuit Composer # # Write a program that takes as an input a diagonal unitary matrix, and returns a corresponding quantum circuit. For example, given the unitary matrix $\Lambda \in \mathbb{R}^{4\times4}$ shown below, the composer would synthesize the following circuit: # # <img src="../images/composer_cir.png" width = 750/> # # For this problem, you are not restricted to the use of a particular set of gates. Since different equivalent circuits can have the same unitary operator associated with them, your circuit for $\Lambda$ wouldn't have to necessarily look the same as the one above. For example, depending on your choice of gates, the same unitary $\Lambda$ above could result in circuits with more (or less) number of gates, like the ones shown below: # # <img src="../images/composer_cir2.png" width = 680/> # # We have broken down this challenge in three levels of difficulty. You don't have to work on these separately nor complete all three of them; If you have an idea how to tackle Level 3, you can just go for it, but if not, solving first two first should help you get started. # # **Level 1:** The composer should return a circuit for any unitary matrix $\Lambda \in \mathbb{R}^{4\times4}$. (i.e., $4 \times 4$ matrices with only real entries) # # **Level 2:** The composer should return a circuit for any unitary matrix $\Lambda \in \mathbb{R}^{N\times N}$. (i.e., matrices of arbitrary size $N = 2^n$ with only real entries) # # **Level 3:** The composer should return a circuit for any unitary matrix $\Lambda \in \mathbb{C}^{N\times N}$. (i.e., matrices of arbitrary size $N = 2^n$ with complex entries) # # **Bonus:** The composer should return circuits consisting of only one-qubit and two-qubit gates. 
The bonus applies to solutions in levels 2 and 3. # # The idea behind this challenge is for you to develop an understanding of how circuits can be built out of unitaries, so avoid using functions that do this for you automatically, like the `transpile` function in Qiskit, or the `decompose` function in Cirq. # # **Hint:** If you have trouble getting started, it might be helpful to know that this problem is closely related to the design of Oracles for Grover's Algorithm [1]. # # [1] Figgatt, Caroline, et al. "Complete 3-Qubit Grover search on a programmable quantum computer." Nature communications 8.1 (2017): 1-9. [arXiv:1703.10535](https://arxiv.org/pdf/1703.10535.pdf) # # Level 1 # My solution is likely going to be the most naive of all submitted. I've chosen to stay away from looking at any references. Never having tried this before I wanted to work this out empirically. The result will most definitely be suboptimal. import numpy as np import itertools import ipywidgets as widgets # All unitary matrices, $U$, must have $|\det(U)| = 1$ Λ = np.diag([-1, 1, 1, 1]) print(Λ) np.linalg.norm(np.linalg.det(Λ)) from qiskit import QuantumCircuit from qiskit.extensions import UnitaryGate ug = UnitaryGate(Λ) ug.to_matrix() # Just for reference, let me see the form of what Qiskit does qc = QuantumCircuit(2) qc.append(ug, range(qc.num_qubits)) qc.decompose().draw() import qiskit.quantum_info as qi sv = qi.Statevector([1, 0, 0, 0]) sv.evolve(qi.Operator(qc)) # Let me start with a 2x2 matrix to gain some intuition # + show_all = True all_correct = True for diag in list(itertools.product([1, -1], repeat=2)): correct = True out = widgets.Output() with out: Λ = np.diag(diag) qc = QuantumCircuit(1) print("Qiskit composition") qc.unitary(Λ, range(qc.num_qubits)) display(qc.decompose().draw()) ug = UnitaryGate(qi.Operator(qc)) um = ug.to_matrix() print(um) print() dc = QuantumCircuit(1) for i,el in enumerate(diag): if el == -1: if i == 0: dc.x(0) dc.z(0) if i == 0: dc.x(0) 
print("Unitary composition") display(dc.decompose().draw()) dug = UnitaryGate(qi.Operator(dc)) dm = dug.to_matrix() print(dm) correct = np.allclose(um, dm) if correct: print("Compositions equal ✅") else: all_correct = False print("Compositions differ ❌") print('-' * 30) if not correct or show_all: display(out) if all_correct: print("All compositions equal ✅") # - # Okay, let me try a 4x4 now # + show_all = True all_correct = True for diag in list(itertools.product([1, -1], repeat=4)): correct = True out = widgets.Output() with out: Λ = np.diag(diag) qc = QuantumCircuit(2) print("Qiskit composition") qc.unitary(Λ, range(qc.num_qubits)) display(qc.decompose().draw()) ug = UnitaryGate(qi.Operator(qc)) um = ug.to_matrix() print(um) print() dc = QuantumCircuit(2) for i,el in enumerate(diag): if el == -1: if i == 0: dc.x(0) dc.z(0) dc.cz(0, 1) dc.x(0) if i == 1: dc.x(1) dc.cz(0, 1) dc.x(1) elif i == 2: dc.x(0) dc.cz(0, 1) dc.x(0) elif i == 3: dc.cz(0, 1) print("Unitary composition") display(dc.decompose().draw()) dug = UnitaryGate(qi.Operator(dc)) dm = dug.to_matrix() print(dm) correct = np.allclose(um, dm) if correct: print("Compositions equal ✅") else: all_correct = False print("Compositions differ ❌") print('-' * 30) if not correct or show_all: display(out) if all_correct: print("All compositions equal ✅") # - # # Level 2 # Okay, so I got a 4x4 working empirically, but I need to figure out something more general now. As we grow to more qubits the number of permutations grows considerably, so we will need to start random sampling. However, we'll verify all of the permutations for 2 and 3 qubits. # # NOTE: I went through many permutations on this below and am only leaving the final result. I had a general solution for 2 qubits, but as soon as I went to 4 qubits I started to have problems. Then, I realized that this was similar to loading a qram and that it was necessary to think about it as "addressing" a specific element in the diagonal matrix. 
# +
import random


def sample_diagonals(N, samplesize):
    """Randomly sample up to `samplesize` distinct diagonals of length N with entries in {1, -1}.

    Fix over the original: a duplicate draw no longer consumes the sample
    budget, so the function returns `samplesize` distinct diagonals whenever
    that many exist. An attempt cap guards against pathological looping when
    the space of distinct diagonals is nearly exhausted.
    """
    results = []
    seen = set()
    # Callers request far fewer samples than the 2**N available diagonals,
    # so this generous cap is essentially never reached.
    attempts = 4 * samplesize + 16
    while len(results) < samplesize and attempts > 0:
        attempts -= 1
        v = [random.choice((-1, 1)) for _ in range(N)]
        key = tuple(v)
        if key not in seen:
            seen.add(key)
            results.append(v)
    return results


def validate_composition(num_qubits, max_samples=16):
    """Check that the hand-built ±1 diagonal circuits reproduce the requested diagonals.

    For each diagonal entry equal to -1, builds an X-conjugated
    (multi-controlled) Z that flips the sign of exactly that basis state,
    then compares the resulting operator's diagonal against the request.
    """

    def bitfield(n, max_bits):
        # n as a list of `max_bits` bits, most significant first.
        return [int(digit) for digit in f'{{0:0{max_bits}b}}'.format(n)]

    show_all = False
    all_correct = True
    print_last = True

    N = 2**num_qubits   # length of the diagonal
    total_count = 2**N  # number of distinct ±1 diagonals of that length
    # Enumerate exhaustively whenever the whole space fits in the budget.
    # (Original used `<`, which needlessly random-sampled at exact equality.)
    if total_count <= max_samples:
        diagonals = list(itertools.product([1, -1], repeat=N))
    else:
        diagonals = sample_diagonals(N, max_samples)
    print(f"Validating {len(diagonals)} random samples from {total_count} total")

    for d, diag in enumerate(diagonals):
        correct = True
        out = widgets.Output()
        with out:
            dc = QuantumCircuit(num_qubits)
            for i, el in enumerate(diag):
                if el == -1:
                    # X-conjugate the controls so they "address" basis state |i>.
                    for j, a in enumerate(reversed(bitfield(i, num_qubits))):
                        if not a:
                            dc.x(j)
                    if num_qubits == 1:
                        dc.z(0)
                    else:
                        # equivalent to an mcz
                        dc.h(num_qubits - 1)
                        dc.mcx(list(range(num_qubits - 1)), num_qubits - 1)
                        dc.h(num_qubits - 1)
                    for j, a in enumerate(reversed(bitfield(i, num_qubits))):
                        if not a:
                            dc.x(j)
            print("Unitary composition")
            print(diag)
            display(dc.draw())
            dug = UnitaryGate(qi.Operator(dc))
            dm = dug.to_matrix().diagonal()
            correct = np.allclose(diag, dm)
            if correct:
                print("Compositions equal ✅")
            else:
                print(dm)
                all_correct = False
                print("Compositions differ ❌")
            print('-' * 30)
        should_print_last = (print_last and d == len(diagonals) - 1)
        if not correct or show_all or should_print_last:
            display(out)
            if should_print_last:
                print(f"👆 Showing last sample for inspection")
    if all_correct:
        print("All compositions equal ✅")


limit_samples = 256
for qubit_count in list(range(1, 5 + 1)):
    print(f"Testing compositions for {qubit_count} qubits ({2**qubit_count}x{2**qubit_count} matrices)")
    validate_composition(qubit_count, limit_samples)
    limit_samples >>= 1
    print()
    print('#' * 60)
    print()
# -

# Okay, I'm fairly confident about this although I
# haven't exhaustively tested all permutations up through and past 5 qubits.

# # Level 3
# Now that we're dealing with complex entries, let me start small again and build up. For starters, I don't think all permutations of diagonals with complex values are unitary. So, I think I'll want to strip those out of the set. However, I'm curious about diagonals that only have a single imaginary. Are those still unitary?

Λ = np.diag([-1, 1, 1, 1j])
print(Λ)
print(np.linalg.norm(np.linalg.det(Λ)))

# Okay, so I guess I just need to expand permutations to include _i_ and _-i_.

# +
def sample_complex_diagonals(N, samplesize):
    """Randomly sample up to `samplesize` distinct diagonals of length N with entries in {1, -1, 1j, -1j}.

    Fix over the original: it drew `random.randint(0, 4)` (five outcomes,
    bounds inclusive) and mapped both 3 and 4 to -1j, so -1j was twice as
    likely as each other entry. `random.choice` over the four entries is
    uniform. Duplicate draws also no longer consume the sample budget.
    """
    results = []
    seen = set()
    entries = (-1, 1, 1j, -1j)
    attempts = 4 * samplesize + 16  # guard against pathological duplicate streaks
    while len(results) < samplesize and attempts > 0:
        attempts -= 1
        v = [random.choice(entries) for _ in range(N)]
        key = tuple(v)
        if key not in seen:
            seen.add(key)
            results.append(v)
    return results


def validate_composition(num_qubits, max_samples=16):
    """Check that X-conjugated (multi-)controlled phase gates reproduce sampled complex diagonals."""

    def bitfield(n, max_bits):
        # n as a list of `max_bits` bits, most significant first.
        return [int(digit) for digit in f'{{0:0{max_bits}b}}'.format(n)]

    show_all = False
    all_correct = True
    print_last = True

    N = 2**num_qubits
    # Fix: each of the N diagonal entries now has FOUR possible values, so the
    # number of distinct diagonals is 4**N. The original kept 2**N from the
    # real-valued case, which printed the wrong total and made the branch below
    # exhaustively enumerate 4**N diagonals even when that exceeded max_samples
    # (e.g. 256 circuits for 2 qubits with a budget of 128).
    total_count = 4**N
    if total_count <= max_samples:
        diagonals = list(itertools.product([1, -1, 1j, 0-1j], repeat=N))
    else:
        diagonals = sample_complex_diagonals(N, max_samples)
    print(f"Validating {len(diagonals)} random samples from {total_count} total")

    for d, diag in enumerate(diagonals):
        correct = True
        out = widgets.Output()
        with out:
            dc = QuantumCircuit(num_qubits)
            for i, el in enumerate(diag):
                if el != 1:
                    for j, a in enumerate(reversed(bitfield(i, num_qubits))):
                        if not a:
                            dc.x(j)
                    # instead of z gates, let's now use phase gates to allow for arbitrary phase
                    if num_qubits == 1:
                        dc.p(np.angle(el), 0)
                    else:
                        dc.mcp(np.angle(el), list(range(num_qubits - 1)), num_qubits - 1)
                    for j, a in enumerate(reversed(bitfield(i, num_qubits))):
                        if not a:
                            dc.x(j)
            print("Unitary composition")
            print(diag)
            display(dc.draw())
            dug = UnitaryGate(qi.Operator(dc))
            dm = dug.to_matrix().diagonal()
            correct = np.allclose(diag, dm)
            if correct:
                print("Compositions equal ✅")
            else:
                print(dm)
                all_correct = False
                print("Compositions differ ❌")
            print('-' * 30)
        should_print_last = (print_last and d == len(diagonals) - 1)
        if not correct or show_all or should_print_last:
            display(out)
            if should_print_last:
                print(f"👆 Showing last sample for inspection")
    if all_correct:
        print("All compositions equal ✅")


limit_samples = 256
for qubit_count in list(range(1, 5 + 1)):
    print(f"Testing compositions for {qubit_count} qubits ({2**qubit_count}x{2**qubit_count} matrices)")
    validate_composition(qubit_count, limit_samples)
    limit_samples >>= 1
    print()
    print('#' * 60)
    print()
# -

# Well, from the looks of it the above seems to be a solution. The only thing I'm not completely sure on is whether I am missing permutations of complex-valued diagonals that are still unitary. However, even in that case I think the above solution still works, albeit non-optimally.
challenge-2020.11-nov/amirebrahimi/qosf-monthly-challenge-01.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Yelp Review Data Project # > analysis on Yelp dataset # # - Repo : [YelpReviews](https://github.com/yennanliu/YelpReviews) # - Presentation : [Presentation](https://github.com/yennanliu/YelpReviews/blob/master/doc/YelpReview_DS_demo.pdf) # - Visualization : [redash_dashboard](https://app.redash.io/yen_dev/public/dashboards/xpfG9wKgb9qEcMHlGiSpjhFn8dCZuun8XbCN52GN) # - Dataset : [yelp-dataset](https://www.kaggle.com/yelp-dataset/yelp-dataset) # # > youtube: q2TE0oR6KNI # ## Intro # - Build a POC end-to-end BI app that mines insights from the Kaggle Yelp dataset. # - This dataset is a subset of Yelp's businesses, reviews, and user data. It was originally put together for the Yelp Dataset Challenge, which is a chance for students to conduct research or analysis on Yelp's data and share their discoveries. In the dataset you'll find information about businesses across 11 metropolitan areas in four countries. # # ## Process # - Step 1 : data collection # - Step 2 : data processing # - Step 3 : db modeling # - Step 4 : data storage # - Step 5 : ETL # - Step 6 : data analysis / ML # - Step 7 : data visualization # # ## Project focus # - database modeling / schema design (per business understanding, use cases) # - data processing # - analysis (think about how to leverage the data as if you were a Yelp PM) # - framework design logic (why this database, why this schema, why this BI tool..) # # ## Demo # - [redash_dashboard](https://app.redash.io/yen_dev/public/dashboards/xpfG9wKgb9qEcMHlGiSpjhFn8dCZuun8XbCN52GN) # - [Presentation](https://github.com/yennanliu/YelpReviews/blob/master/doc/YelpReview_DS_demo.pdf) #
_notebooks/2020-06-05-yelp-review.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: conda3 # language: python # name: conda3 # --- import sys sys.path.append(os.path.join(os.path.abspath(os.path.join('../..')), 'src')) from importlib import reload import numpy import scipy from scipy import sparse # ## Test Network(s) karate_club_raw = """ [2 1] [3 1] [3 2] [4 1] [4 2] [4 3] [5 1] [6 1] [7 1] [7 5] [7 6] [8 1] [8 2] [8 3] [8 4] [9 1] [9 3] [10 3] [11 1] [11 5] [11 6] [12 1] [13 1] [13 4] [14 1] [14 2] [14 3] [14 4] [17 6] [17 7] [18 1] [18 2] [20 1] [20 2] [22 1] [22 2] [26 24] [26 25] [28 3] [28 24] [28 25] [29 3] [30 24] [30 27] [31 2] [31 9] [32 1] [32 25] [32 26] [32 29] [33 3] [33 9] [33 15] [33 16] [33 19] [33 21] [33 23] [33 24] [33 30] [33 31] [33 32] [34 9] [34 10] [34 14] [34 15] [34 16] [34 19] [34 20] [34 21] [34 23] [34 24] [34 27] [34 28] [34 29] [34 30] [34 31] [34 32] [34 33] """ # + ii = [] jj = [] karate_club_raw = ' '.join(karate_club_raw.split('\n')).strip() entries = karate_club_raw.split('] [') if len(entries)==1: i,j = entries[0][1:-1].split() ii.append(int(i)) jj.append(int(j)) else: # 1st entry i,j = entries[0][1:].split() ii.append(int(i)) jj.append(int(j)) # Middle entries for entry in entries[1:-1]: i,j = entry.split() ii.append(int(i)) jj.append(int(j)) ii.append(int(j)) jj.append(int(i)) # Last entry i,j = entries[-1][:-1].split() ii.append(int(i)) jj.append(int(j)) data = [1 for _ in range(len(ii))] ii = [i - 1 for i in ii] jj = [j - 1 for j in jj] # - sub_A = sparse.coo_matrix((data, (ii,jj)), shape=(34,34)) sub_A = sub_A.tocsc() sub_A.shape sub_A.sum() / sub_A.shape[0] ** 2 n = sub_A.shape[0] # ## Test Some Stuff import graph_tools def spanSpace(n, max_groups): """ Randomly assigns nodes to a random number of groups (less than or equal to 'max_groups') :type n: int :param n: number of nodes in the graph :type max_groups: numeric :param max groups: 
indicates the maximum number of groups the nodes are to be split into """ groups_index = numpy.random.randint(1, high=numpy.random.randint(2, max_groups + 1) + 1, size=n) groups_index = groups_index.reshape((n,)) return(groups_index) # Define random groups g = ((numpy.random.rand(sub_A.shape[0]) > 0.5) * 1).reshape((n,)) # row ndarray # + n = sub_A.shape[0] g = numpy.ones(n).reshape((n,)) while len(numpy.unique(g)) < 5: g = spanSpace(n, 6) # - indx = numpy.array(range(n)).reshape((n,)) indx = [i for i in range(n)] L = 1 allow_make_new = True groups, score = graph_tools.mKL(g, sub_A, indx, L, allow_make_new=False, verbose=True) score # ## Test Spectral Code On Karate import spectral_partition reload(spectral_partition) grps, counts, hist = spectral_partition.spectralGraphPartition23(sub_A, Bin='bNG', finetune=False) counts grps
notebooks/explore/Graph Tools Primer.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Factory Planning II # # ## Objective and Prerequisites # # Both this model and Factory Planning I are examples of production planning problems. In production planning problems, choices must be made about which products to produce, how much of those products to produce, and what resources to use in order to maximize profits or minimize costs, while meeting a range of constraints. These problems are common across a broad range of manufacturing situations. # # ## What You Will Learn # # In this particular example, we’ll model and solve a production planning problem: During each period we can manufacture a range of products. Each of the products requires a different amount of time to manufacture on different machines, and yields a different profit. The aim is to create an optimal multi-period production plan to maximize the profit. In contrast to the Factory Planning I example, in this example the optimization model will choose the month in which each machine is down for maintenance. There is an upper limit on the sales of each product in each month due to market limitations and the storage capacity is also restricted. # # More information on this type of model can be found in example # 4 of the fifth edition of Modeling Building in Mathematical Programming by <NAME> on pages 256 and 302-303. # # This modeling example is at the intermediate level, where we assume that you know Python and are familiar with the Gurobi Python API. In addition, you should have some knowledge about building mathematical optimization models. # # **Note:** You can download the repository containing this and other examples by clicking [here](https://github.com/Gurobi/modeling-examples/archive/master.zip). 
In order to run this Jupyter Notebook properly, you must have a Gurobi license. If you do not have one, you can request an [evaluation license](https://www.gurobi.com/downloads/request-an-evaluation-license/?utm_source=Github&utm_medium=website_JupyterME&utm_campaign=CommercialDataScience) as a *commercial user*, or download a [free license](https://www.gurobi.com/academia/academic-program-and-licenses/?utm_source=Github&utm_medium=website_JupyterME&utm_campaign=AcademicDataScience) as an *academic user*. # # --- # ## Problem Description # # A factory makes seven products (Prod 1 to Prod 7) using a range of machines including: # # - Four grinders # - Two vertical drills # - Three horizontal drills # - One borer # - One planer # # Each product has a defined profit contribution per unit sold (defined as the sales price per unit minus the cost of raw materials). In addition, the manufacturing of each product requires a certain amount of time on each machine (in hours). The profit and manufacturing time value are shown below. A dash indicates that the manufacturing process for the given product does not require that machine. # # | <i></i> | PROD1 | PROD2 | PROD3 | PROD4 | PROD5 | PROD6 | PROD7 | # | --- | --- | --- | --- | --- | --- | --- | --- | # | Profit | 10 | 6 | 8 | 4 | 11 | 9 | 3 | # | Grinding | 0.5 | 0.7 | - | - | 0.3 | 0.2 | 0.5 | # | Vertical Drilling | 0.1 | 0.2 | - | 0.3 | - | 0.6 | - | # | Horizontal Drilling | 0.2 | - | 0.8 | - | - | - | 0.6 | # | Boring | 0.05 | 0.03 | - | 0.07 | 0.1 | - | 0.08 | # | Planning | - | - | 0.01 | - | 0.05 | - | 0.05 | # # Instead of pre-defining a maintenance schedule for the machines, as was done in Factory Planning I, in this version of the model we will also optimize the maintenance schedule. # # The maintenance requirements are as follows: # # - Each machine must be down for maintenance in one month of the six. 
# - The exception to the above are the grinding machines as only two of them need to be down during the six months. # # There are limitations on how many of each product can be sold in a given month. These limits are shown below: # # | Month | PROD1 | PROD2 | PROD3 | PROD4 | PROD5 | PROD6 | PROD7 | # | --- | --- | --- | --- | --- | --- | --- | --- | # | January | 500 | 1000 | 300 | 300 | 800 | 200 | 100 | # | February | 600 | 500 | 200 | 0 | 400 | 300 | 150 | # | March | 300 | 600 | 0 | 0 | 500 | 400 | 100 | # | April | 200 | 300 | 400 | 500 | 200 | 0 | 100 | # | May | 0 | 100 | 500 | 100 | 1000 | 300 | 0 | # | June | 500 | 500 | 100 | 300 | 1100 | 500 | 60 | # # Up to 100 units of each product may be stored in inventory at a cost of $0.50 per unit per month. At the start of January, there is no product inventory. However, by the end of June, there should be 50 units of each product in inventory. # # The factory produces products six days a week using two eight-hour shifts per day. It may be assumed that each month consists of 24 working days. Also, for the purposes of this model, there are no production sequencing issues that need to be taken into account. # # What should the production and maintenance plans look like? Also, is it possible to recommend any price increases and determine the value of acquiring any new machines? # # This problem is based on a larger model built for the Cornish engineering company of <NAME>. # # --- # ## Model Formulation # # ### Sets and Indices # # $t \in \text{Months}=\{\text{Jan},\text{Feb},\text{Mar},\text{Apr},\text{May},\text{Jun}\}$: Set of months. # # $p \in \text{Products}=\{1,2,\dots,7\}$: Set of products. # # $m \in \text{Machines}=\{\text{Grinder},\text{VertDrill},\text{horiDrill},\text{Borer},\text{Planer}\}$: Set of machines. # # ### Parameters # # $\text{hours_per_month} \in \mathbb{R}^+$: Time (in hours/month) available at any machine on a monthly basis. 
It results from multiplying the number of working days (24 days) by the number of shifts per day (2) by the duration of a shift (8 hours). # # $\text{max_inventory} \in \mathbb{N}$: Maximum number of units of a single product type that can be stored in inventory at any given month. # # $\text{holding_cost} \in \mathbb{R}^+$: Monthly cost (in USD/unit/month) of keeping in inventory a unit of any product type. # # $\text{store_target} \in \mathbb{N}$: Number of units of each product type to keep in inventory at the end of the planning horizon. # # $\text{profit}_p \in \mathbb{R}^+$: Profit (in USD/unit) of product $p$. # # $\text{installed}_m \in \mathbb{N}$: Number of machines of type $m$ installed in the factory. # # $\text{down_req}_{m} \in \mathbb{N}$: Number of machines of type $m$ that should be scheduled for maintenance at some point in the planning horizon. # # $\text{time_req}_{m,p} \in \mathbb{R}^+$: Time (in hours/unit) needed on machine $m$ to manufacture one unit of product $p$. # # $\text{max_sales}_{t,p} \in \mathbb{N}$: Maximum number of units of product $p$ that can be sold at month $t$. # # ### Decision Variables # # $\text{make}_{t,p} \in \mathbb{R}^+$: Number of units of product $p$ to manufacture at month $t$. # # $\text{store}_{t,p} \in [0, \text{max_inventory}] \subset \mathbb{R}^+$: Number of units of product $p$ to store at month $t$. # # $\text{sell}_{t,p} \in [0, \text{max_sales}_{t,p}] \subset \mathbb{R}^+$: Number of units of product $p$ to sell at month $t$. # # $\text{repair}_{t,m} \in \{0,1,\dots, \text{down_req}_m\} \subset \mathbb{N}$: Number of machines of type $m$ scheduled for maintenance at month $t$. # # **Assumption:** We can produce fractional units. # # ### Objective Function # # - **Profit:** Maximize the total profit (in USD) of the planning horizon. 
# # \begin{equation} # \text{Maximize} \quad Z = \sum_{t \in \text{Months}}\sum_{p \in \text{Products}} # (\text{profit}_p*\text{sell}_{t,p} - \text{holding_cost}*\text{store}_{t,p}) # \tag{0} # \end{equation} # # ### Constraints # # - **Initial Balance:** For each product $p$, the number of units produced should be equal to the number of units sold plus the number stored (in units of product). # # \begin{equation} # \text{make}_{\text{Jan},p} = \text{sell}_{\text{Jan},p} + \text{store}_{\text{Jan},p} \quad \forall p \in \text{Products} # \tag{1} # \end{equation} # # - **Balance:** For each product $p$, the number of units produced in month $t$ and previously stored should be equal to the number of units sold and stored in that month (in units of product). # # \begin{equation} # \text{store}_{t-1,p} + \text{make}_{t,p} = \text{sell}_{t,p} + \text{store}_{t,p} \quad \forall (t,p) \in \text{Months} \setminus \{\text{Jan}\} \times \text{Products} # \tag{2} # \end{equation} # # - **Inventory Target:** The number of units of product $p$ kept in inventory at the end of the planning horizon should hit the target (in units of product). # # \begin{equation} # \text{store}_{\text{Jun},p} = \text{store_target} \quad \forall p \in \text{Products} # \tag{3} # \end{equation} # # - **Machine Capacity:** Total time used to manufacture any product at machine type $m$ cannot exceed its monthly capacity (in hours). # # \begin{equation} # \sum_{p \in \text{Products}}\text{time_req}_{m,p}*\text{make}_{t,p} \leq \text{hours_per_month}*(\text{installed}_m - \text{repair}_{t,m}) \quad \forall (t,m) \in \text{Months} \times \text{Machines} # \tag{4} # \end{equation} # # - **Maintenance**: The number of machines of type $m$ scheduled for maintenance should meet the requirement.
# # \begin{equation} # \sum_{t \in \text{Months}}\text{repair}_{t,m} = \text{down_req}_m \quad \forall m \in \text{Machines} # \tag{5} # \end{equation} # # --- # ## Python Implementation # # We import the Gurobi Python Module and other Python libraries. # + import numpy as np import pandas as pd import gurobipy as gp from gurobipy import GRB # tested with Python 3.7.0 & Gurobi 9.0 # - # ## Input Data # We define all the input data of the model. # + # Parameters products = ["Prod1", "Prod2", "Prod3", "Prod4", "Prod5", "Prod6", "Prod7"] machines = ["grinder", "vertDrill", "horiDrill", "borer", "planer"] months = ["Jan", "Feb", "Mar", "Apr", "May", "Jun"] profit = {"Prod1":10, "Prod2":6, "Prod3":8, "Prod4":4, "Prod5":11, "Prod6":9, "Prod7":3} time_req = { "grinder": { "Prod1": 0.5, "Prod2": 0.7, "Prod5": 0.3, "Prod6": 0.2, "Prod7": 0.5 }, "vertDrill": { "Prod1": 0.1, "Prod2": 0.2, "Prod4": 0.3, "Prod6": 0.6 }, "horiDrill": { "Prod1": 0.2, "Prod3": 0.8, "Prod7": 0.6 }, "borer": { "Prod1": 0.05,"Prod2": 0.03,"Prod4": 0.07, "Prod5": 0.1, "Prod7": 0.08 }, "planer": { "Prod3": 0.01,"Prod5": 0.05,"Prod7": 0.05 } } # number of each machine available installed = {"grinder":4, "vertDrill":2, "horiDrill":3, "borer":1, "planer":1} # number of machines that need to be under maintenance down_req = {"grinder":2, "vertDrill":2, "horiDrill":3, "borer":1, "planer":1} # market limitation of sells max_sales = { ("Jan", "Prod1") : 500, ("Jan", "Prod2") : 1000, ("Jan", "Prod3") : 300, ("Jan", "Prod4") : 300, ("Jan", "Prod5") : 800, ("Jan", "Prod6") : 200, ("Jan", "Prod7") : 100, ("Feb", "Prod1") : 600, ("Feb", "Prod2") : 500, ("Feb", "Prod3") : 200, ("Feb", "Prod4") : 0, ("Feb", "Prod5") : 400, ("Feb", "Prod6") : 300, ("Feb", "Prod7") : 150, ("Mar", "Prod1") : 300, ("Mar", "Prod2") : 600, ("Mar", "Prod3") : 0, ("Mar", "Prod4") : 0, ("Mar", "Prod5") : 500, ("Mar", "Prod6") : 400, ("Mar", "Prod7") : 100, ("Apr", "Prod1") : 200, ("Apr", "Prod2") : 300, ("Apr", "Prod3") : 400, ("Apr", 
"Prod4") : 500, ("Apr", "Prod5") : 200, ("Apr", "Prod6") : 0, ("Apr", "Prod7") : 100, ("May", "Prod1") : 0, ("May", "Prod2") : 100, ("May", "Prod3") : 500, ("May", "Prod4") : 100, ("May", "Prod5") : 1000, ("May", "Prod6") : 300, ("May", "Prod7") : 0, ("Jun", "Prod1") : 500, ("Jun", "Prod2") : 500, ("Jun", "Prod3") : 100, ("Jun", "Prod4") : 300, ("Jun", "Prod5") : 1100, ("Jun", "Prod6") : 500, ("Jun", "Prod7") : 60, } holding_cost = 0.5 max_inventory = 100 store_target = 50 hours_per_month = 2*8*24 # - # ## Model Deployment # We create a model and the variables. We set the UpdateMode parameter to 1 (which simplifies the code – see the documentation for more details). For each product (seven kinds of products) and each time period (month), we will create variables for the amount of which products will get manufactured, held, and sold. In each month, there is an upper limit on the amount of each product that can be sold. This is due to market limitations. For each type of machine and each month we create a variable d, which tells us how many machines are down in this month of this type. # + factory = gp.Model('Factory Planning II') make = factory.addVars(months, products, name="Make") # quantity manufactured store = factory.addVars(months, products, ub=max_inventory, name="Store") # quantity stored sell = factory.addVars(months, products, ub=max_sales, name="Sell") # quantity sold repair = factory.addVars(months, machines, vtype=GRB.INTEGER, ub=down_req, name="Repair") # number of machines down # - # Next, we insert the constraints. # The balance constraints ensure that the amount of product that is in the storage in the prior month and the amount that gets manufactured equals the amount that is sold and held for each product in the current month. This ensures that all products in the model are manufactured in some month. The initial storage is empty. # + #1. 
Initial Balance Balance0 = factory.addConstrs((make[months[0], product] == sell[months[0], product] + store[months[0], product] for product in products), name="Initial_Balance") #2. Balance Balance = factory.addConstrs((store[months[months.index(month) -1], product] + make[month, product] == sell[month, product] + store[month, product] for product in products for month in months if month != months[0]), name="Balance") # - # The endstore constraints force that at the end of the last month the storage contains the specified amount of each product. #3. Inventory Target TargetInv = factory.addConstrs((store[months[-1], product] == store_target for product in products), name="End_Balance") # The capacity constraints ensure that for each month the time all products require on a certain kind of machine is lower or equal than the available hours for that machine in that month multiplied by the number of available machines in that month. Each product requires some machine hours on different machines. Each machine is down in one or more months due to maintenance, so the number and types of available machines varies per month. There can be multiple machines per machine type. #4. Machine Capacity MachineCap = factory.addConstrs((gp.quicksum(time_req[machine][product] * make[month, product] for product in time_req[machine]) <= hours_per_month * (installed[machine] - repair[month, machine]) for machine in machines for month in months), name = "Capacity") # The maintenance constraints ensure that the specified number and types of machines are down due maintenance in some month. Which month a machine is down is now part of the optimization. # + #5. Maintenance Maintenance = factory.addConstrs((repair.sum('*', machine) == down_req[machine] for machine in machines), "Maintenance") # - # The objective is to maximize the profit of the company, which consists of the profit for each product minus cost for storing the unsold products. This can be stated as: # + #0. 
Objective Function obj = gp.quicksum(profit[product] * sell[month, product] - holding_cost * store[month, product] for month in months for product in products) factory.setObjective(obj, GRB.MAXIMIZE) # - # Next, we start the optimization and Gurobi finds the optimal solution. factory.optimize() # --- # ## Analysis # # The result of the optimization model shows that the maximum profit we can achieve is $\$108,855.00$. This is an increase of $\$15,139.82$ over the course of six months compared to the Factory Planning I example as a result of being able to pick the maintenance schedule as opposed to having a fixed one. Let's see the solution that achieves that optimal result. # # ### Production Plan # This plan determines the amount of each product to make at each period of the planning horizon. For example, in February we make 600 units of product Prod1. # + rows = months.copy() columns = products.copy() make_plan = pd.DataFrame(columns=columns, index=rows, data=0.0) for month, product in make.keys(): if (abs(make[month, product].x) > 1e-6): make_plan.loc[month, product] = np.round(make[month, product].x, 1) make_plan # - # ### Sales Plan # This plan defines the amount of each product to sell at each period of the planning horizon. For example, in February we sell 600 units of product Prod1. # + rows = months.copy() columns = products.copy() sell_plan = pd.DataFrame(columns=columns, index=rows, data=0.0) for month, product in sell.keys(): if (abs(sell[month, product].x) > 1e-6): sell_plan.loc[month, product] = np.round(sell[month, product].x, 1) sell_plan # - # ### Inventory Plan # This plan reflects the amount of product in inventory at the end of each period of the planning horizon. For example, at the end of February we have zero units of Prod1 in inventory. 
# + rows = months.copy() columns = products.copy() store_plan = pd.DataFrame(columns=columns, index=rows, data=0.0) for month, product in store.keys(): if (abs(store[month, product].x) > 1e-6): store_plan.loc[month, product] = np.round(store[month, product].x, 1) store_plan # - # ### Maintenance Plan # This plan shows the maintenance plan for each period of the planning horizon. For example, 2 machines of type grinder will be down for maintenance in April. # + rows = months.copy() columns = machines.copy() repair_plan = pd.DataFrame(columns=columns, index=rows, data=0.0) for month, machine in repair.keys(): if (abs(repair[month, machine].x) > 1e-6): repair_plan.loc[month, machine] = repair[month, machine].x repair_plan # - # **Note:** If you want to write your solution to a file, rather than print it to the terminal, you can use the model.write() command. An example implementation is: # # `factory.write("factory-planning-2-output.sol")` # # --- # ## References # # <NAME>, Model Building in Mathematical Programming, fifth edition. # # Copyright &copy; 2020 Gurobi Optimization, LLC
Gurobi/Gurobi-modeling-examples-master/factory_planning_1_2/factory_planning_2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Quantum Key Distribution # # Quantum key distribution is the process of distributing cryptographic keys between parties using quantum methods. Due to the unique properties of quantum information compared to classical, the security of a key can be guarunteed (as any unwelcomed measurement would change the state of quantum information transmitted). # # In this file, we see the use of SeQUeNCe to simulate quantum key distribution between two adjacent nodes. The first example demonstrates key distribution alone (using the BB84 protocol), while the second example demonstrates additional error correction with the cascade protocol. The network topology, including hardware components, is shown below: # # <img src="./notebook_images/QKD_topo.png" width="500"/> # ## Example 1: Only BB84 # # ### Import # # We must first import the necessary tools from SeQUeNCe to run our simulations. # # - `Timeline` is the main simulation tool, providing an interface for the discrete-event simulation kernel. # - `QKDNode` provides a ready-to-use quantum node for quantum key distribution, including necessary hardware and protocol implementations. # - `QuantumChannel` and `ClassicalChannel` are communication links between quantum nodes, providing models of optical fibers. # - The `pair_bb84_protocols` function is used to explicitly pair 2 node instances for key distribution, and establishes one node as the sender "Alice" and one as the receiver "Bob". 
from ipywidgets import interact
from matplotlib import pyplot as plt
import time

from sequence.kernel.timeline import Timeline
from sequence.topology.node import QKDNode
from sequence.components.optical_channel import QuantumChannel, ClassicalChannel
from sequence.qkd.BB84 import pair_bb84_protocols

# ### Control and Collecting Metrics
#
# Several elements of SeQUeNCe automatically collect simple metrics. This includes the BB84 protocol implementation, which collects key error rates, throughput, and latency. For custom or more advanced metrics, custom code may need to be written and applied. See the documentation for a list of metrics provided by default for each simulation tool.
#
# Here, we create a `KeyManager` class to collect a custom metric (in this case, simply collect all of the generated keys and their generation time) and to provide an interface for the BB84 Protocol. To achieve this, we use the `push` and `pop` functions provided by the protocol stack on QKD nodes. `push` is used to send information down the stack (from the key manager to BB84 in this example) while `pop` is used to send information upwards (from BB84 to the key manager). Different protocols may use these interfaces for different data but only BB84 is shown in this example.

class KeyManager():
    """Sits on top of the QKD protocol stack: requests key generation from the
    paired BB84 instances and records every key handed back, with its time."""

    def __init__(self, timeline, keysize, num_keys):
        self.timeline = timeline    # simulation timeline, used for timestamps
        self.lower_protocols = []   # BB84 instances registered below this manager
        self.keysize = keysize      # requested key length (bits)
        self.num_keys = num_keys    # how many keys to request per send_request()
        self.keys = []              # completed keys, in completion order
        self.times = []             # completion timestamps (scaled by 1e-9)

    def send_request(self):
        # Interface for BB84 to generate keys: push the request down the stack.
        for protocol in self.lower_protocols:
            protocol.push(self.keysize, self.num_keys)

    def pop(self, info):
        # Interface for BB84 to return generated keys back up the stack.
        self.keys.append(info)
        self.times.append(self.timeline.now() * 1e-9)

# ### Building the Simulation
#
# We are now ready to build the simulation itself. This example follows the usual process to ensure that all tools function properly:
#
# 1. Create the timeline for the simulation
# 2.
#    Create the simulated network topology (here this is done explicitly, but this may also be handled by functions of the `Topology` class under `sequence.topology.topology`)
# 3. Instantiate custom protocols and ensure all protocols are set up (paired) properly (if necessary)
# 4. Initialize and run the simulation
# 5. Collect and display the desired metrics

def test(sim_time, keysize):
    """
    sim_time: duration of simulation time (ms)
    keysize: size of generated secure key (bits)
    """
    # Define the simulation timeline with the correct total simulation time.
    tl = Timeline(sim_time * 1e9)
    tl.seed(0)

    # QKD nodes for key distribution; stack_size=1 keeps only BB84 in the stack.
    alice = QKDNode("n1", tl, stack_size=1)
    bob = QKDNode("n2", tl, stack_size=1)
    pair_bb84_protocols(alice.protocol_stack[0], bob.protocol_stack[0])

    # Connect the nodes and set fiber parameters; channels are one-way,
    # so one is created for each direction.
    cc_ab = ClassicalChannel("cc_n1_n2", tl, distance=1e3)
    cc_ba = ClassicalChannel("cc_n2_n1", tl, distance=1e3)
    cc_ab.set_ends(alice, bob)
    cc_ba.set_ends(bob, alice)
    qc_ab = QuantumChannel("qc_n1_n2", tl, attenuation=1e-5, distance=1e3,
                           polarization_fidelity=0.97)
    qc_ba = QuantumChannel("qc_n2_n1", tl, attenuation=1e-5, distance=1e3,
                           polarization_fidelity=0.97)
    qc_ab.set_ends(alice, bob)
    qc_ba.set_ends(bob, alice)

    # Instantiate our written keysize protocol on both ends of the link.
    km_a = KeyManager(tl, keysize, 25)
    km_a.lower_protocols.append(alice.protocol_stack[0])
    alice.protocol_stack[0].upper_protocols.append(km_a)
    km_b = KeyManager(tl, keysize, 25)
    km_b.lower_protocols.append(bob.protocol_stack[0])
    bob.protocol_stack[0].upper_protocols.append(km_b)

    # Start the simulation and record wall-clock execution time.
    tl.init()
    km_a.send_request()
    tick = time.time()
    tl.run()
    print("execution time %.2f sec" % (time.time() - tick))

    # Display our collected metrics: completed keys over simulation time.
    plt.plot(km_a.times, range(1, len(km_a.keys) + 1), marker="o")
    plt.xlabel("Simulation time (ms)")
    plt.ylabel("Number of Completed Keys")
    plt.show()

    print("key error rates:")
    for i, e in enumerate(alice.protocol_stack[0].error_rates):
        print("\tkey {}:\t{}%".format(i + 1, e * 100))

# ### Running the Simulation
#
# All that is left is to run the simulation with user input. (maximum execution time: ~5 sec)
#
# Parameters:
#
#     sim_time: duration of simulation time (ms)
#     keysize: size of generated secure key (bits)

# Create and run the simulation
interactive_plot = interact(test, sim_time=(100, 1000, 100), keysize=[128, 256, 512])
interactive_plot

# Due to the imperfect polarization fidelity specified for the optical fiber, we observe that most (if not all) of the completed keys have errors that render them unusable. For this reason, error correction protocols (such as cascade, which is included in SeQUeNCe and shown in the next example) must also be used.

# ## Example 2: Adding Cascade
#
# This example is similar to the first example, with slight alterations to allow for
#
# - Instantiation of the cascade error correction protocol on the qkd nodes
# - Differences in the cascade `push`/`pop` interface compared to BB84
#
# while the network topology remains unchanged.
# +
from sequence.qkd.cascade import pair_cascade_protocols

class KeyManager():
    """Key manager for the cascade stack: requests keys and records the
    error-corrected keys (and their times) that cascade returns."""

    def __init__(self, timeline, keysize, num_keys):
        self.timeline = timeline    # simulation timeline, used for timestamps
        self.lower_protocols = []   # cascade instances registered below this manager
        self.keysize = keysize      # requested key length (bits)
        self.num_keys = num_keys    # number of keys to request
        self.keys = []              # completed keys, in completion order
        self.times = []             # completion timestamps (scaled by 1e-9)

    def send_request(self):
        for p in self.lower_protocols:
            p.push(self.keysize, self.num_keys)  # interface for cascade to generate keys

    def pop(self, key):  # interface for cascade to return generated keys
        self.keys.append(key)
        self.times.append(self.timeline.now() * 1e-9)

def test(sim_time, keysize):
    """
    sim_time: duration of simulation time (ms)
    keysize: size of generated secure key (bits)
    """
    # begin by defining the simulation timeline with the correct simulation time
    tl = Timeline(sim_time * 1e9)
    tl.seed(0)

    # Here, we create nodes for the network (QKD nodes for key distribution).
    # The default stack includes both BB84 and cascade; pair both layers.
    n1 = QKDNode("n1", tl)
    n2 = QKDNode("n2", tl)
    pair_bb84_protocols(n1.protocol_stack[0], n2.protocol_stack[0])
    pair_cascade_protocols(n1.protocol_stack[1], n2.protocol_stack[1])

    # connect the nodes and set parameters for the fibers (one per direction)
    cc0 = ClassicalChannel("cc_n1_n2", tl, distance=1e3)
    cc1 = ClassicalChannel("cc_n2_n1", tl, distance=1e3)
    cc0.set_ends(n1, n2)
    cc1.set_ends(n2, n1)
    qc0 = QuantumChannel("qc_n1_n2", tl, attenuation=1e-5, distance=1e3,
                         polarization_fidelity=0.97)
    qc1 = QuantumChannel("qc_n2_n1", tl, attenuation=1e-5, distance=1e3,
                         polarization_fidelity=0.97)
    qc0.set_ends(n1, n2)
    qc1.set_ends(n2, n1)

    # instantiate our written keysize protocol on top of cascade (stack index 1)
    km1 = KeyManager(tl, keysize, 10)
    km1.lower_protocols.append(n1.protocol_stack[1])
    n1.protocol_stack[1].upper_protocols.append(km1)
    km2 = KeyManager(tl, keysize, 10)
    km2.lower_protocols.append(n2.protocol_stack[1])
    n2.protocol_stack[1].upper_protocols.append(km2)

    # start simulation and record timing
    tl.init()
    km1.send_request()
    tick = time.time()
    tl.run()
    print("execution time %.2f sec" % (time.time() - tick))

    # display our collected metrics
    plt.plot(km1.times, range(1, len(km1.keys) + 1), marker="o")
    plt.xlabel("Simulation time (ms)")
    plt.ylabel("Number of Completed Keys")
    plt.show()

    # Compare each of Alice's keys against Bob's bit-by-bit. XOR-ing the two
    # integers leaves a 1 in every position where the keys disagree.
    error_rates = []
    for i, key in enumerate(km1.keys):
        counter = 0
        diff = key ^ km2.keys[i]
        for j in range(km1.keysize):
            counter += (diff >> j) & 1
        # BUG FIX: normalize the differing-bit count by the key length so the
        # value printed below (e * 100) is a true percentage, not a raw count.
        error_rates.append(counter / km1.keysize)

    print("key error rates:")
    for i, e in enumerate(error_rates):
        print("\tkey {}:\t{}%".format(i + 1, e * 100))
# -

# ### Running the Simulation
#
# We can now run the cascade simulation with user input. Note that the extra steps required by the cascade protocol may cause the simulation to run much longer than the example with only BB84.
#
# Parameters:
#
#     sim_time: duration of simulation time (ms)
#     keysize: size of generated secure key (bits)
#
# The maximum execution time (`sim_time=1000`, `keysize=512`) is around 60 seconds.

# Create and run the simulation
interactive_plot = interact(test, sim_time=(100, 1000, 100), keysize=[128, 256, 512])
interactive_plot

# ### Results
#
# The implementation of the cascade protocol found within SeQUeNCe relies on the creation of a large sequence of bits, from which excerpts are used to create individual keys. Due to this behavior, keys are generated in large numbers in regularly spaced "batches". Also note that after application of error correction, the error rates for all keys are now at 0%.
example/qkd.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Movie reviews sentiment analysis with Naive Bayes # # Project 2, MS621<br> # *Fall 2019* # # ## Goal # # In this project, you will build a multinomial naive bayes classifier to predict whether a movie review is positive or negative. As part of the project, you will also learn to do *k*-fold cross validation testing. # # You will do your work in a `bayes`-*userid* repository. Please keep all of your files in the root directory of the repository. # ## Getting started # # Download and uncompress [polarity data set v2.0](https://www.cs.cornell.edu/people/pabo/movie-review-data/review_polarity.tar.gz) into the root directory of your repository, but do not add the data to git. My directory looks like: # # ``` # $ ls # bayes.py review_polarity test_bayes.py # ``` # # Download the [test_bayes.py](https://github.com/parrt/msds621/blob/master/projects/bayes/test_bayes.py) script into the root directory of your repository; you can add this if you want but I will overwrite it when testing. It assumes that the `review_polarity` directory is in the same directory (the root of the repository). # # Download the [bayes.py starter kit](https://github.com/parrt/msds621/blob/master/projects/bayes/bayes.py) into the root directory of your repository. Make sure to add this to the repo. # # See Naive Bayes discussion, p258 in [Introduction to Information Retrieval](https://nlp.stanford.edu/IR-book/). # # **Please do not add the data to your repository!** # ## Discussion # ### Naive bayes classifiers for text documents # # A text classifier predicts to which class, $c$, an unknown document $d$ belongs. In our case, the predictions are binary: $c=0$ for negative movie review and $c=1$ for positive movie review. 
We can think about classification mathematically as picking the most likely class: # # $$ # c^*= \underset{c}{argmax} ~P(c|d) # $$ # # We can replace $P(c|d)$, using Bayes' theorem: # # $$ # P(c | d) = \frac{P(c)P(d|c)}{P(d)} # $$ # # to getg the formula # # $$ # c^*= \underset{c}{argmax} ~\frac{P(c)P(d|c)}{P(d)} # $$ # # Since $P(d)$ is a constant for any given document $d$, we can use the following equivalent but simpler formula: # # $$ # c^*= \underset{c}{argmax} ~ P(c)P(d|c) # $$ # # Training then consists of estimating $P(c)$ and $P(c|d)$, which will get to shortly. # ### Representing documents # # Text classification requires a representation for document $d$. When loading a document, we first load the text and then tokenize the words, stripping away punctuation and stop words like *the*. The list of words is a fine representation for a document except that each document has a different length, which makes training most models problematic as they assume tabular data with a fixed number of features. The simplest and most common approach is to create an overall vocabulary, $V$, created as a set of unique words across all documents in all classes. Then, the training features are those words. # One way to represent a document than is with a binary word vector, with a 1 in each column if that word is present in the document. Something like this: import pandas as pd df = pd.DataFrame(data=[[1,1,0,0], [0,0,1,1]], columns=['cat','food','hong','kong']) df # This tends to work well for very short strings/documents, such as article titles or tweets. For longer documents, using a binary presence or absence loses information. Instead, it's better to count the number of times each word is present. 
For example, here are 3 documents and resulting word vectors: d1 = "cats food cats cats" d2 = "hong kong hong kong" d3 = "cats in hong kong" # assume we strip stop words like "in" df = pd.DataFrame(data=[[3,1,0,0], [0,0,2,2], [1,0,1,1]], columns=['cat','food','hong','kong']) df # These word vectors with fixed lengths are how most models expect data, including sklearn's implementation. (It's assuming Gaussian distributions for probability estimates where as we are assuming multinomial, but we can still shove our data in.) Here's how to train a Naive Bayes model with sklearn using the trivial/toy `df` data and get the training set error: # + from sklearn.naive_bayes import GaussianNB import numpy as np X = df.values y = [0, 1, 1] # assume document classes sknb = GaussianNB() sknb.fit(X, y) y_pred = sknb.predict(X) print(f"Correct = {np.sum(y==y_pred)} / {len(y)} = {100*np.sum(y==y_pred) / len(y):.1f}%") # - # Because it is convenient to keep word vectors in a 2D matrix and it is what sklearn likes, we will use the same representation in this project. Given the directory name, your function `load_docs()` will return a list of word lists where each word list is the raw list of tokens, typically with repeated words. Then, function `vocab()` will create the combined vocabulary as a mapping from word to word feature index, starting with index 1. Index 0 is reserved for unknown words. Vocabulary $V$ should be a `defaultdict(int)` so that unknown words get mapped to value/index 0. Finally, function `vectorize()` will convert that to a 2D matrix, one row per document: # # ``` # neg = load_docs(neg_dir) # pos = load_docs(pos_dir) # V = vocab(neg,pos) # vneg = vectorize_docs(neg, V) # vpos = vectorize_docs(pos, V) # ``` # ### Estimating probabilities # To train our model, we need to estimate $P(c)$ and $P(d|c)$ for all classes and documents. Estimating $P(c)$ is easy: it's just the number of documents in class $c$ divided by the total number of documents. 
To estimate $P(d | c)$, Naive Bayes assumes that each word is *conditionally independent*, given the class, meaning: # # $$ # P(d | c) = \prod_{w \in d} P(w | c) # $$ # # so that gives us: # # $$ # c^*= \underset{c}{argmax} ~ P(c) \prod_{w \in d} P(w | c) # $$ # where $w$ is not a unique word in $d$, so the product includes $P(w|c)$ 5 times if $w$ appears 5 times in $d$. # # Because we are going to use word counts, not binary word vectors, in fixed-length vectors, we need to include $P(w|c)$ explicitly multiple times for repeated $w$ in $d$: # # $$ # c^*= \underset{c}{argmax} ~ P(c) \prod_{unique(w) \in d} P(w | c)^{n_w(d)} # $$ # # where $n_w(d)$ is the number of times $w$ occurs in $d$. # # Now we have to figure out how to estimate $P(w | c)$, the probability of seeing word $w$ given that we're looking at a document from class $c$. That's just the number of times $w$ appears in all documents from class $c$ divided by the total number of words (including repeats) in all documents from class $c$: # # $$P(w | c) = \frac{count(w,c)}{count(c)}$$ # ### Making predictions # # Once we have the appropriate parameter estimates, we have a model that can make predictions in an ideal setting: # # $$ # c^*= \underset{c}{argmax} ~ P(c) \prod_{unique(w) \in d} P(w | c)^{n_w(d)} # $$ # #### Floating point underflow # # The first problem involves the limitations of floating-point arithmetic in computers. Because the probabilities are less than one and there could be tens of thousands multiplied together, we risk floating-point underflow. That just means that eventually the product will attenuate to zero and our classifier is useless. 
The solution is simply to take the log of the right hand side because it is monotonic and won't affect the $argmax$: # # $$ # c^*= \underset{c}{argmax} \left \{ log(P(c)) + \sum_{unique(w) \in d} log(P(w | c)^{n_w(d)}) \right \} # $$ # # Or, # # $$ # c^* = \underset{c}{argmax} \left \{ log(P(c)) + \sum_{unique(w) \in d} n_w(d) \times log(P(w | c)) \right \} # $$ # #### Avoiding log(0) # # If word $w$ does not exist in class $c$ (but is in $V$), then the classifier will try to evaluate $log(0)$, which gets an error. To solve the problem, we use *Laplace Smoothing*, which just means adding 1 to each word count in $n_w(d)$ when computing $P(w|c)$ and making sure to compensate by adding $|V|$ to the denominator (adding 1 for each vocabulary word): # # $$P(w | c) = \frac{count(w,c) + 1}{count(c) + |V|}$$ # # where $|V|$ is the size of the vocabulary for all documents in all classes. Adding this to the denominator, keeps the $P(w|c)$ ratio the same. This way, even if $count(w,c)$ is 0, this ratio > 0. (Note: Each doc's word vector is size $|V|$. During training, any $w$ not found in docs of $c$, will have word count 0. Summing these gets us just total number of words in $c$. However, when we add +1, then $c$ looks like it has every word in $V$. Hence, we must divide by $|V|$ not $|V_c|$.) # # In your project, we can deal with both the Laplace smoothing by adding one to the data frame or 2-D matrix: df_ = df+1; df_ # and then computing the class word counts from this incremented matrix. 
The last two rows are from category 1, the positive reviews so we can isolate those and compute word counts per class: vpos = df_.iloc[1:3,:]; vpos # The word counts for documents in the positive category are then found using matrix operation `sum`: pos_word_counts = vpos.sum(axis=0); pos_word_counts # The $P(w|c)$ for all $w$ is then: pos_total_words = np.sum(pos_word_counts) print(f"Total words in pos docs: {pos_total_words}") print(f"P(w|c):") pos_word_counts / pos_total_words # It's important that you get used to using these vector operations for productivity and performance reasons. # ### Dealing with missing words # # Laplace smoothing deals with $w$ that are in the vocabulary $V$ but that are not in a class, hence, giving $count(w,c)=0$ for some $c$. There's one last slightly different problem. If a future unknown document contains a word not in $V$ (i.e., not in the training data), then what should $count(w,c)$ be? Probably not 0 because that would mean we had data indicating it does not appear in class $c$ when we have *no* training data on it. # # To be strictly correct and keep $P(w | c)$ a probability in the presence of unknown words, all we have to do is add 1 to the denominator in addition to the Laplace smoothing changes: # # $$P(w | c) = \frac{count(w,c) + 1}{count(c) + |V| + 1}$$ # # We are lumping all unknown words into a single "wildcard" word that exists in every $V$. That has the effect of increasing the overall vocabulary size for class $c$ to include room for an unknown word (and all unknown words map to that same spot). In this way, an unknown word gets probability: # # $$P(unknown|c) = \frac{0 + 1}{count(c) + |V| + 1}$$ # In the end, this is no big deal as all classes will get the same nudge for the unknown word so classification won't be affected. # # To deal with unknown words in the project, we can reserve word index 0 to mean unknown word. All words in the training vocabulary start at index 1. 
So, if we normally have $|V|$ words in the training vocabulary, we will now have $|V|+1$ and each class will also have $|V|+1$ words since no word has 0 word count. Each word vector will be of length $|V|+1$. # # When we count the words per class with `vneg.sum(axis=0)` and `vpos.sum(axis=0)`, these sums will include the "+1" we added for Laplace smoothing. Since we have augmented the vocabulary for unknown words at index 0, this will also increase `word_count_per_class` values since we added "+1" to word index 0 as well with `df+1`. So the formula for estimating $P(w|c)$ remains: # # ``` # pos_word_counts / pos_total_words # ``` # # To make a prediction for an unknown document, $d$, you will be given a feature vector composed of the word counts from $d$. Sum the multiplication of the word counts for $w \in d$ by the log of $P(w|c)$ for class $c$ to get the weighted sum, then add the log of the class likelihood $P(c)$. # #### Speed issues # # For large data sets, Python loops often are too slow and so we have to rely on vector operations, which are implemented in C. For example, the `predict(X)` method receives a 2D matrix of word vectors and must make a prediction for each one. The temptation is to write the very readable: # # ``` # y_pred = [] # for d in X: # y_pred = prediction for d # return y_pred # ``` # # But, you should use the built-in `numpy` functions such as `np.dot` (same as the `@` operator) and apply functions across vectors. For example, if I have a vector, $v$, and I'd like the log of each value, don't write a loop. Use `np.log(v)`, which will give you a vector with the results. # # My `predict()` method consists primarily of a matrix-vector multiplication per class followed by `argmax`. My implementation is twice as fast as sklearn and appears to be more accurate for 4-fold cross validation. # ## Deliverables # # To submit your project, ensure that your `bayes.py` file is submitted to your repository. 
That file must be in the root of your `bayes`-*userid* repository. It should not have a main program and should consist of a collection of functions. You must implement the following functions: # # * `load_docs(docs_dirname)` # * `vocab(neg, pos)` # * `vectorize_docs(docs, V)` # * `kfold_CV(model, X, y, k=4)` # # and implement class `NaiveBayes621` with these methods # # * `__init__(self)` (The body of this function is just keyword `pass`) # * `fit(self, X, y)` # * `predict(self, X)` # # # **Please do not add the data to your repository!** # ## Submission # # In your github `bayes`-*userid* repository, you should submit your `bayes.py` file in the root directory. It should not have a main program that runs when the file is imported. # # *Please do not add data files to your repository!* # ## Evaluation # # To evaluate your projects I will run `test_bayes.py` from your repo root directory. Here is a sample test run: # # ``` # $ python -m pytest -v test_bayes.py # ============================================== test session starts ============================ # platform darwin -- Python 3.7.1, pytest-4.0.2, py-1.7.0, pluggy-0.8.0 -- ... # cachedir: .pytest_cache # rootdir: /Users/parrt/courses/msds621-private/projects/bayes, inifile: # plugins: remotedata-0.3.1, openfiles-0.3.1, doctestplus-0.2.0, arraydiff-0.3 # collected 6 items # # test_bayes.py::test_load PASSED [ 16%] # test_bayes.py::test_vocab PASSED [ 33%] # test_bayes.py::test_vectorize PASSED [ 50%] # test_bayes.py::test_training_error PASSED [ 66%] # test_bayes.py::test_kfold_621 PASSED [ 83%] # test_bayes.py::test_kfold_sklearn_vs_621 PASSED [100%] # # =========================================== 6 passed in 21.04 seconds ============================ # ``` # # Notice that it takes about 20 seconds. If your project takes more than one minute, I will take off 10 points from 100. Each test is evaluated in a binary fashion: it either works or it does not. Each failed test cost you 15 points.
projects/bayes/bayes.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.8.12 ('aiffel_3.8') # language: python # name: python3 # --- # # 16. 파이썬으로 이미지 파일 다루기 # # **파이썬에서 OpenCV를 이용하여 이미지 파일을 열고 정보를 추출 해 본다. 컴퓨터에서 이미지가 표현되는 방식을 이해하는 데 도움이 될 것이다.** # ## 16-1. 들어가며 # ```bash # $ pip install pillow opencv-python matplotlib # ``` # ```bash # $ mkdir -p ~/aiffel/python_image_proc/data # $ ln -s ~/data/* ~/aiffel/python_image_proc/data # $ ls ~/aiffel/python_image_proc/data # 파일 확인 # ``` # ## 16-2. 디지털 이미지 # ## 16-3. Pillow 사용법 # + import numpy as np from PIL import Image data = np.zeros([32, 32, 3], dtype=np.uint8) image = Image.fromarray(data, 'RGB') image # - data[:, :] = [255, 0, 0] image = Image.fromarray(data, 'RGB') image #- 문제 1 -# # [[YOUR CODE]] data = np.zeros([128, 128, 3], dtype=np.uint8) data[:, :] = [255, 255, 255] image = Image.fromarray(data, 'RGB') image # ```python # # 정답 코드 # # data = np.zeros([128, 128, 3], dtype=np.uint8) # data[:, :] = [255, 255, 255] # image = Image.fromarray(data, 'RGB') # image # ``` # + #- 문제 2 -# from PIL import Image import os # 연습용 파일 경로 image_path = os.getenv('HOME')+'/aiffel/python_image_proc/data/pillow_practice.png' # 이미지 열기 # [[YOUR CODE]] image = Image.open(image_path) # width와 height 출력 # [[YOUR CODE]] width, height = image.size print(width, height) # JPG 파일 형식으로 저장해보기 # [[YOUR CODE]] image_save_path = image_path.split('.')[0] + '.jpg' image = image.convert('RGB') # jpg 파일은 투명도를 표현할 수 없음! 
image.save(image_save_path) # - # ```python # # 정답 코드 # # from PIL import Image # import os # # # 연습용 파일 경로 # image_path = os.getenv('HOME')+'/aiffel/python_image_proc/data/pillow_practice.png' # # # 이미지 열기 # img = Image.open(image_path) # img # # # width와 height 출력 # print(img.width) # print(img.height) # # # JPG 파일 형식으로 저장해보기 # new_image_path = os.getenv('HOME')+'/aiffel/python_image_proc/data/jpg_pillow_practice.jpg' # img = img.convert('RGB') # img.save(new_image_path) # ``` # + #- 문제 3 -# # [[YOUR CODE]] image = Image.open(image_path) image_resize = image.resize((100, 200)) image_save_path = image_path.replace('pillow_practice', 'pillow_practice_resize') image_resize.save(image_save_path) # - # ```python # # 정답 코드 # # resized_image = img.resize((100,200)) # # resized_image_path = os.getenv('HOME')+'/aiffel/python_image_proc/data/pillow_practice_resized.png' # resized_image.save(resized_image_path) # resized_image # ``` # + #- 문제 4 -# # [[YOUR CODE]] image = Image.open(image_path) image_crop = image.crop((300, 100, 600, 400)) image_save_path = image_path.replace('pillow_practice', 'pillow_practice_crop') image_crop.save(image_save_path) # - # ```python # # 정답 코드 # # box = (300, 100, 600, 400) # region = img.crop(box) # # cropped_image_path = os.getenv('HOME')+'/aiffel/python_image_proc/data/pillow_practice_cropped.png' # region.save(cropped_image_path) # region # ``` # ## 16-4. Pillow를 활용한 데이터 전처리 # + import os import pickle from PIL import Image dir_path = os.getenv('HOME')+'/aiffel/python_image_proc/data/cifar-100-python' train_file_path = os.path.join(dir_path, 'train') with open(train_file_path, 'rb') as f: train = pickle.load(f, encoding='bytes') print(type(train)) #print(train) # 혹시 내용이 궁금하다면 주석을 풀고 실행해 보세요. # - train.keys() type(train[b'filenames']) train[b'filenames'][0:5] train[b'data'][0:5] train[b'data'][0].shape image_data = train[b'data'][0].reshape([32, 32, 3], order='F') # order를 주의하세요!! 
image = Image.fromarray(image_data) # Pillow를 사용하여 Numpy 배열을 Image객체로 만들어서 image # 화면에 띄워 봅시다!! image_data = image_data.swapaxes(0, 1) image = Image.fromarray(image_data) image # + import os import pickle from PIL import Image import numpy from tqdm import tqdm dir_path = os.getenv('HOME')+'/aiffel/python_image_proc/data/cifar-100-python' train_file_path = os.path.join(dir_path, 'train') # image를 저장할 cifar-100-python의 하위 디렉토리(images)를 생성합니다. images_dir_path = os.getenv('HOME')+'/aiffel/python_image_proc/cifar-images' if not os.path.exists(images_dir_path): os.mkdir(images_dir_path) # images 디렉토리 생성 # 32X32의 이미지 파일 50000개를 생성합니다. with open(train_file_path, 'rb') as f: train = pickle.load(f, encoding='bytes') for i in tqdm(range(len(train[b'filenames']))): # [[YOUR CODE]] # - # ```python # # 정답 코드 # # import os # import pickle # from PIL import Image # import numpy # from tqdm import tqdm # # dir_path = os.getenv('HOME')+'/aiffel/python_image_proc/data/cifar-100-python' # train_file_path = os.path.join(dir_path, 'train') # # # image를 저장할 cifar-100-python의 하위 디렉토리(images)를 생성합니다. # images_dir_path = os.getenv('HOME')+'/aiffel/python_image_proc/cifar-images' # if not os.path.exists(images_dir_path): # os.mkdir(images_dir_path) # images 디렉토리 생성 # # # 32X32의 이미지 파일 50000개를 생성합니다. # with open(train_file_path, 'rb') as f: # train = pickle.load(f, encoding='bytes') # for i in tqdm(range(len(train[b'filenames']))): # filename = train[b'filenames'][i].decode() # data = train[b'data'][i].reshape([32, 32, 3], order='F') # image = Image.fromarray(data.swapaxes(0, 1)) # image.save(os.path.join(images_dir_path, filename)) # ``` # ## 16-5. 
OpenCV (1) 안녕, OpenCV # + import os import cv2 as cv import numpy as np from matplotlib import pyplot as plt # %matplotlib inline img_path = os.getenv('HOME')+'/aiffel/python_image_proc/data/cv_practice.png' img = cv.imread(img_path) # Convert BGR to HSV hsv = cv.cvtColor(img, cv.COLOR_BGR2HSV) # define range of blue color in HSV lower_blue = np.array([100,100,100]) upper_blue = np.array([130,255,255]) # Threshold the HSV image to get only blue colors mask = cv.inRange(hsv, lower_blue, upper_blue) # Bitwise-AND mask and original image res = cv.bitwise_and(img, img, mask=mask) plt.imshow(cv.cvtColor(img, cv.COLOR_BGR2RGB)) plt.show() plt.imshow(cv.cvtColor(mask, cv.COLOR_BGR2RGB)) plt.show() plt.imshow(cv.cvtColor(res, cv.COLOR_BGR2RGB)) plt.show() # - # ## 16-6. OpenCV (2) 톺아보기 # ```python # import cv2 as cv # import numpy as np # ``` # ```python # img_path = os.getenv('HOME')+'/aiffel/python_image_proc/data/cv_practice.png' # img = cv.imread(img_path) # ``` # ```python # # Convert BGR to HSV # hsv = cv.cvtColor(img, cv.COLOR_BGR2HSV) # ``` # ```python # # define range of blue color in HSV # lower_blue = np.array([100,100,100]) # upper_blue = np.array([130,255,255]) # # # Threshold the HSV image to get only blue colors # mask = cv.inRange(hsv, lower_blue, upper_blue) # ``` # ```python # # Bitwise-AND mask and original image # res = cv.bitwise_and(img, img, mask=mask) # ``` # ```python # plt.imshow(cv.cvtColor(img, cv.COLOR_BGR2RGB)) # plt.show() # plt.imshow(cv.cvtColor(mask, cv.COLOR_BGR2RGB)) # plt.show() # plt.imshow(cv.cvtColor(res, cv.COLOR_BGR2RGB)) # plt.show() # ``` # ## 16-7. 
실습 : 비슷한 이미지 찾아내기 # ```bash # $ pip install opencv-python matplotlib # ``` # + import os import pickle import cv2 import numpy as np from matplotlib import pyplot as plt from tqdm import tqdm from PIL import Image # 전처리 시 생성했던 디렉토리 구조 dir_path = os.getenv('HOME')+'/aiffel/python_image_proc/' train_file_path = os.path.join(dir_path, 'train') images_dir_path = os.path.join(dir_path, 'cifar-images') # - # 파일명을 인자로 받아 해당 이미지 파일과 히스토그램을 출력해 주는 함수 def draw_color_histogram_from_image(file_name): image_path = os.path.join(images_dir_path, file_name) # 이미지 열기 img = Image.open(image_path) cv_image = cv2.imread(image_path) # Image와 Histogram 그려보기 f=plt.figure(figsize=(10,3)) im1 = f.add_subplot(1,2,1) im1.imshow(img) im1.set_title("Image") im2 = f.add_subplot(1,2,2) color = ('b','g','r') for i,col in enumerate(color): # image에서 i번째 채널의 히스토그램을 뽑아서(0:blue, 1:green, 2:red) histr = cv2.calcHist([cv_image],[i],None,[256],[0,256]) im2.plot(histr,color = col) # 그래프를 그릴 때 채널 색상과 맞춰서 그립니다. im2.set_title("Histogram") draw_color_histogram_from_image('adriatic_s_001807.png') def get_histogram(image): histogram = [] # Create histograms per channels, in 4 bins each. for i in range(3): # [[YOUR CODE]] hist = cv2.calcHist([image],[i],None,[4],[0,256]) histogram.append(hist) histogram = np.concatenate(histogram) histogram = cv2.normalize(histogram, histogram) return histogram # ```python # # 정답 코드 # # def get_histogram(image): # histogram = [] # # # Create histograms per channels, in 4 bins each. # for i in range(3): # channel_histogram = cv2.calcHist(images=[image], # channels=[i], # mask=None, # histSize=[4], # 히스토그램 구간을 4개로 한다. 
#                                          ranges=[0, 256])
#         histogram.append(channel_histogram)
#
#     histogram = np.concatenate(histogram)
#     histogram = cv2.normalize(histogram, histogram)
#
#     return histogram
# ```

# Sanity check for get_histogram().
# NOTE(review): `train` is not defined anywhere in this file as shown — it is
# presumably the unpickled CIFAR batch loaded from `train_file_path` during
# preprocessing; confirm before running this cell in isolation.
filename = train[b'filenames'][0].decode()
file_path = os.path.join(images_dir_path, filename)
image = cv2.imread(file_path)
histogram = get_histogram(image)
histogram

# +
import os
import pickle
import cv2
import numpy as np
from matplotlib import pyplot as plt
from tqdm import tqdm

def build_histogram_db():
    """Map every image file name in `images_dir_path` to its color histogram."""
    histogram_db = {}

    # List every image file collected in the directory.
    path = images_dir_path
    file_list = os.listdir(images_dir_path)

    for file in tqdm(file_list):
        img_path = os.path.join(path, file)
        img = cv2.imread(img_path)    # load the CIFAR-100 image
        hist = get_histogram(img)     # summarize it as a 12-bin histogram
        histogram_db[file] = hist     # key: file name, value: histogram

    return histogram_db
# -

# ```python
# # Reference solution
#
# def build_histogram_db():
#     histogram_db = {}
#
#     # List up every image file collected in the directory.
#     path = images_dir_path
#     file_list = os.listdir(images_dir_path)
#
#     for file_name in tqdm(file_list):
#         file_path = os.path.join(images_dir_path, file_name)
#         image = cv2.imread(file_path)
#
#         histogram = get_histogram(image)
#
#         histogram_db[file_name] = histogram
#
#     return histogram_db
# ```

histogram_db = build_histogram_db()
histogram_db['adriatic_s_001807.png']

def get_target_histogram():
    """Prompt for an image file name; return its histogram, or None if unknown."""
    filename = input("이미지 파일명을 입력하세요: ")
    if filename not in histogram_db:
        print('유효하지 않은 이미지 파일명입니다.')
        return None
    return histogram_db[filename]

target_histogram = get_target_histogram()
target_histogram

def search(histogram_db, target_histogram, top_k=5):
    """Return the `top_k` images whose histograms best match the target.

    Similarity is measured with cv2.HISTCMP_CORREL, for which LARGER values
    mean MORE similar (unlike a distance metric such as HISTCMP_CHISQR).
    """
    results = {}

    # Compare the query histogram against every histogram in the database.
    for file, hist in tqdm(histogram_db.items()):
        ret = cv2.compareHist(target_histogram, hist, cv2.HISTCMP_CORREL)
        results[file] = ret  # key: file name, value: correlation score

    # BUG FIX: correlation is a similarity score, so sort in DESCENDING order.
    # The previous ascending sort returned the top_k LEAST similar images.
    sorted_results = sorted(results.items(), key=lambda item: item[1], reverse=True)
    results = dict(sorted_results[:top_k])  # keep only the top_k best matches

    return results

# ```python
# # Reference solution (uses the chi-square DISTANCE, hence ascending order)
#
# def search(histogram_db, target_histogram, top_k=5):
#     results = {}
#
#     # Calculate similarity distance by comparing histograms.
#     for file_name, histogram in tqdm(histogram_db.items()):
#         distance = cv2.compareHist(H1=target_histogram,
#                                    H2=histogram,
#                                    method=cv2.HISTCMP_CHISQR)
#
#         results[file_name] = distance
#
#     results = dict(sorted(results.items(), key=lambda item: item[1])[:top_k])
#
#     return results
# ```

result = search(histogram_db, target_histogram)
result

def show_result(result):
    """Display the matched images side by side in a single figure.

    BUG FIX: the previous version created a figure it never used and delegated
    to draw_color_histogram_from_image(), which opens a separate new figure per
    file; this version lays the matches out in one row, as in the reference
    solution.
    """
    f = plt.figure(figsize=(10, 3))
    for idx, filename in enumerate(result.keys()):
        img_path = os.path.join(images_dir_path, filename)
        im = f.add_subplot(1, len(result), idx + 1)
        img = Image.open(img_path)
        im.imshow(img)

# ```python
# # Reference solution
#
# def show_result(result):
#     f=plt.figure(figsize=(10,3))
#     for idx, filename in enumerate(result.keys()):
#         img_path = os.path.join(images_dir_path, filename)
#         im = f.add_subplot(1,len(result),idx+1)
#         img = Image.open(img_path)
#         im.imshow(img)
# ```

show_result(result)

target_histogram = get_target_histogram()
result = search(histogram_db, target_histogram)
show_result(result)
FUNDAMENTALS/Node_16/[F-16] Only_LMS_Code_Blocks.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Grid-search an RBF-kernel SVM (C, gamma) on the Wholesale Customers dataset.

# +
import pylab as pl
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
import seaborn as sns
from pandas.plotting import scatter_matrix
from sklearn import preprocessing
import sklearn.linear_model as lm
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
# NOTE(review): `sklearn.grid_search` was removed in scikit-learn 0.20, so this
# import fails on modern versions — and it is unused here anyway, since
# GridSearchCV is already imported from sklearn.model_selection above.
from sklearn import svm, grid_search
from sklearn.preprocessing import StandardScaler

# Load the Wholesale Customers data; `Channel` (1 or 2) is the class label.
df_svm = pd.read_csv('wholesale-customers.csv', sep=',')
df_svm.head()

X_svm = df_svm.drop(columns = ["Channel"])
y_svm = df_svm["Channel"]

# Map the labels {1, 2} onto {-1, +1} via (-1)**y (440 rows in the dataset).
# NOTE(review): item-assignment on a Series taken from a DataFrame can trigger
# pandas' SettingWithCopyWarning; `y_svm.map({1: -1, 2: 1})` would be safer.
for i in range(440):
    y_svm[i] = (-1) ** y_svm[i]

# Standardize features (zero mean, unit variance) — important for the RBF kernel.
scaler = StandardScaler()
X_svm = scaler.fit_transform(X_svm)

#Cs = 10. ** np.arange(-2,5)
Cs = [1,2,3,4,5,6,7,8,9]    # candidate SVM regularization strengths
g = np.logspace(-5, 4, 10)  # candidate RBF gamma values

def split_selector(X, y):
    # NOTE(review): this function has no `return` statement, so it always
    # yields None — the split computed here is discarded.
    X_train, X_test, y_train, y_test =train_test_split(X, y , test_size=0.3)

# NOTE(review): because split_selector returns None, `cv` is None and the
# GridSearchCV below silently falls back to its default K-fold CV scheme.
cv = split_selector(X_svm,y_svm)

X_train, X_test, y_train, y_test = train_test_split(X_svm, y_svm , test_size=0.3)

# Exhaustive grid search over (C, gamma), scored by cross-validated accuracy.
p_grid = {'C': Cs ,'gamma' : g}
cross_val = GridSearchCV(SVC(), p_grid, scoring = "accuracy",cv=cv)
cross_val.fit(X_svm, y_svm)
cross_val.best_params_

# NOTE(review): `grid_scores_` was removed in scikit-learn 0.20; use
# `cross_val.cv_results_` on modern versions.
score = cross_val.grid_scores_
scores = [x[1] for x in score]                          # mean CV accuracy per grid point
scores = 1 - np.array(scores).reshape(len(Cs), len(g))  # convert accuracy -> error rate

print('Min Accuracy Error:')
print(np.min(scores))
print(cross_val.best_params_)

# Heat map of the CV error over the (C, gamma) grid.
# NOTE(review): the 'spectral'/'spectral_r' colormaps were removed in
# matplotlib 2.2; use 'nipy_spectral'/'nipy_spectral_r' on modern versions.
pl.figure(figsize=(7, 6))
pl.imshow(scores, cmap=pl.cm.spectral_r)
pl.ylabel('C')
pl.xlabel('Gamma')
pl.xticks(np.arange(len(g)), g, rotation=45)
pl.yticks(np.arange(len(Cs)), Cs)
pl.colorbar()
pl.show()
# -

# Same heat map again, with the non-reversed colormap and adjusted margins.
pl.figure(figsize=(7, 6))
pl.subplots_adjust(left=0.15, right=0.95, bottom=0.15, top=0.95)
pl.imshow(scores, cmap=pl.cm.spectral)
pl.xlabel('Gamma')
pl.ylabel('C')
pl.xticks(np.arange(len(g)), g, rotation=45)
pl.yticks(np.arange(len(Cs)), Cs)
pl.colorbar()
pl.show()

print(score)

# Refit on a fresh 70/30 split with hand-picked hyperparameters and inspect
# the dual coefficients (support-vector weights) of the final model.
X_train, X_test, y_train, y_test =train_test_split(X_svm, y_svm , test_size=0.3)

final = SVC(kernel='rbf',gamma=0.01,C=6)
final.fit(X_train,y_train)
final.dual_coef_
Part 2/GaussianSVM.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # # Learning Objectives # # In this lab, we will learn the following: # # * CUDA-aware MPI concepts and APIs. # * Impact of fine-tuning CUDA-aware MPI on application performance. # * Underlying GPUDirect technologies like P2P and RDMA. # # **Note:** Execution results can vary significantly based on the MPI installation, supporting libraries, workload manager, and underlying CPU and GPU hardware configuration and topology. The codes in this lab have been tested on DGX-1 8 Tesla V100 16 GB nodes connected by Mellanox InfiniBand NICs running OpenMPI v4.1.1 with HPCX 2.8.1 and CUDA v11.3.0.0. # # # Improving Application Performance # # ## Analysis # # Thus far, we have passed host (system) memory pointers to the MPI calls. With a regular MPI implementation only pointers to host memory can be passed to MPI. However, if we combine MPI and CUDA, we need to send (and receive) GPU buffers instead of host buffers. Thus, using regular MPI, we need to stage GPU buffers through host memory explicitly using `cudaMemcpy` as we saw in the previous lab. # # As mentioned in previous lab, initially MPI calls take a lot of time and they gradually improve in latency and throughput. It is therefore helpful to zoom out of a particular Jacobi iteration and look at the bigger picture, that is, the average time taken for a halo exchange. Recall that with the `--stats=true` flag, stats are visible on the terminal as well. In particular, observe the NVTX Push-Pop stats: # # ![mpi_memcpy_nvtx_stats](../../images/mpi_memcpy_nvtx_stats.png) # # The minimum, maximum and average time taken for single halo exchange, including software overhead, is visible. The average time is $84\mu$s, minimum is $50\mu$s, and maximum is $6382\mu$s. 
The average time taken is a useful statistic for us.
#
# We can also view the throughput and latency of HtoD and DtoH copy operations as follows:
#
# ![mpi_host_staging_throughput_latency](../../images/mpi_host_staging_throughput_latency.png)
#
# ### Opportunity for improvement
#
# There is considerable software overhead in using multiple Memcpy operations with the MPI call. Moreover, the HtoD and DtoH throughput/latency are worse compared to DtoD because PCIe is used for CPU-GPU communication and NVLinks are not utilized.
#
# With regular MPI, we can try to write a program where intra-node communication is handled in a single process and then we can enable P2P and other optimizations that we learnt in previous labs. We will also need separate inter-node communication code. This is a complex and time-consuming approach and will not scale well, especially with more communication-intensive programs.
#
# Thus, we need to make use of CUDA-aware MPI, which simplifies the code substantially and enables many optimizations under the hood, transparently to the user.
#
# ## CUDA-aware MPI
#
# With CUDA-aware MPI, GPU buffers can be passed directly to MPI. A CUDA-aware MPI implementation handles a buffer differently depending on whether it resides in host or device memory. With the Unified Virtual Addressing (UVA) feature, the host memory and the memory of all GPUs in a system (a single node) are combined into one large (virtual) address space. The implementation is then able to infer from the memory pointer whether it resides on the host or on the device and handles the operations accordingly.
#
# From an API standpoint, CUDA-aware MPI results in simplified code where CUDA memory pointers can seamlessly be used in MPI calls.
Without CUDA-aware MPI, we need to stage GPU buffers through host memory buffers (`s_buf_h`, `r_buf_h`), using `cudaMemcpy` as shown in the following code excerpt: # # ```c # //MPI rank 0 # cudaMemcpy(s_buf_h, s_buf_d, size, cudaMemcpyDeviceToHost); # MPI_Send(s_buf_h, size, MPI_CHAR, 1, 0, MPI_COMM_WORLD); # # //MPI rank 1 # MPI_Recv(r_buf_h, size, MPI_CHAR, 0, 0, MPI_COMM_WORLD, &status); # cudaMemcpy(r_buf_d, r_buf_h, size, cudaMemcpyHostToDevice); # ``` # # With a CUDA-aware MPI library this is not necessary; the GPU buffers (`s_buf_d`, `r_buf_d`) can be directly passed to MPI as in the following excerpt: # # ```c # //MPI rank 0 # MPI_Send(s_buf_d, size, MPI_CHAR, 1, 0, MPI_COMM_WORLD); # # //MPI rank n-1 # MPI_Recv(r_buf_d, size, MPI_CHAR, 0, 0, MPI_COMM_WORLD, &status); # ``` # # Indeed, the implementation is quite intuitive and easy-to-use. Now, let us use CUDA-aware MPI in our application. # # ## Implementation Exercise: Part 2 # # Open the [jacobi_cuda_aware_mpi.cpp](../../source_code/mpi/jacobi_cuda_aware_mpi.cpp) and [jacobi_kernels.cu](../../source_code/mpi/jacobi_kernels.cu) files. Alternatively, you can navigate to `CFD/English/C/source_code/mpi/` directory in Jupyter's file browser in the left pane. Then, click to open the `jacobi_cuda_aware_mpi.cpp` and `jacobi_kernels.cu` files. The `jacobi_kernels.cu` file is same as in previous lab. # # Also open the [Makefile](../../source_code/mpi/Makefile) and note how the compilation and linking is also same as in previous lab. # # Understand the flow of the `jacobi_cuda_aware_mpi.cpp` program and observe the following: # # 1. `local_rank` is used to set the current GPU device. # 2. Device kernel calls have been replaced with function wrappers for ease of compilation. # 3. Rank 0 is used to calculate efficiency and other metrics, even though all ranks compute `single_gpu` function to verify multi-GPU implementation's correctness. # 4. 
Each halo exchange is accomplished with an `MPI_Sendrecv` call with no explicit `cudaMemcpy` function calls. # # ### To-Do # # Now, implement the following marked as `TODO: Part 2-`: # # * Implement top and bottom halo exchanges using `MPI_Sendrecv` call for each exchange. Use only GPU buffers in the MPI call's function arguments. # * Reduce the rank-local L2 Norm to a global L2 norm using `MPI_Allreduce` function. # # After implementing these, compile the program:: # !cd ../../source_code/mpi && make clean && make jacobi_cuda_aware_mpi # Ensure there are no compilation errors. Now, let us validate the program with 16384$\times$32768 grid size across 2 nodes and with 16 GPUs: # ! cd ../../source_code/mpi && mpirun -np 16 --map-by ppr:4:socket ./jacobi_cuda_aware_mpi -ny 32768 # You may observe a drop in efficiency. On our DGX-1V system, the results are as follows: # # ```bash # Num GPUs: 16. # 16384x32768: 1 GPU: 8.9087 s, 16 GPUs: 1.1786 s, speedup: 7.56, efficiency: 47.24 # ``` # # Recall that initially MPI calls take a lot of time and they gradually improve in latency and throughput. Try running the program again with 5000 Jacobi loop iterations by using the `-niter 5000` option: # # # + tags=[] # ! cd ../../source_code/mpi && mpirun -np 16 --map-by ppr:4:socket ./jacobi_cuda_aware_mpi -ny 32768 -niter 5000 # - # The efficiency should improve. Our results are as follows: # # ```bash # Num GPUs: 16. # 16384x32768: 1 GPU: 44.5246 s, 16 GPUs: 3.7889 s, speedup: 11.75, efficiency: 73.45 # ``` # # Let us profile the program to understand what's happening here. # # # ## Profiling # # Before we profile the binary, note that our program runs both the single-GPU and multi-GPU versions to calculate efficiency and speedup. However, this feature is made available to check the correctness of multi-GPU code. 
Once we know that our implementation is correct, we don't need to run the single-GPU version every time, as it takes a lot of time — which you would have realized by running the 5000-iteration version.
#
# Moreover, we are not interested in profiling the single-GPU version, as profiling it increases both the profiling time and the `.qdrep` file size. So, we will skip running the single-GPU version by passing the `-skip_single_gpu` flag to the binary. Note that we will not get the speedup and efficiency numbers.
#
# That isn't a problem, however: as the NVTX statistics provide the runtime for our multi-GPU Jacobi loop as well as the time taken for halo exchange, we can use them for comparison.
#
# Now, let us profile only the multi-GPU version for the baseline 1K iterations:


# ! cd ../../source_code/mpi && nsys profile --trace=mpi,cuda,nvtx --stats=true --force-overwrite true -o jacobi_cuda_aware_mpi_report \
# mpirun -np 16 --map-by ppr:4:socket ./jacobi_cuda_aware_mpi -ny 32768 -skip_single_gpu

# Also profile the multi-GPU version for 5K iterations:

# ! cd ../../source_code/mpi && nsys profile --trace=mpi,cuda,nvtx --stats=true --force-overwrite true -o jacobi_cuda_aware_mpi_report \
# mpirun -np 16 --map-by ppr:4:socket ./jacobi_cuda_aware_mpi -ny 32768 -skip_single_gpu -niter 5000

# We ran it for 10K and 25K iterations as well, and we share the relevant NVTX stats for all these versions:
#
# ![mpi_cuda_aware_halo_exchange_latency](../../images/mpi_cuda_aware_halo_exchange_latency.png)
#
# We also ran the `jacobi_memcpy_mpi` binary for 25K iterations, and the results are as follows:
#
# ![mpi_memcpy_halo_exchange_latency](../../images/mpi_memcpy_halo_exchange_latency.png)
#
# At 25K iterations, the CUDA-aware MPI version outperforms the Memcpy+MPI version both in average halo exchange latency and in total execution time for the Jacobi loop. The average time taken for the CUDA-aware MPI version is 19.1s, compared to 20.5s for the Memcpy+MPI version.
# # ### Optimization Employed by CUDA-aware MPI # # Let us now understand the optimizations that are employed by CUDA-aware MPI transparently to the user. # # #### GPUDirect P2P # # We have already learnt about this technology in previous module on CUDA-based single-node multi-GPU programming. The Peer-to-Peer Memory Access is enabled by GPUDirect P2P technology. Here's a quick recap of how it works: # # ![gpudirect_p2p](../../images/gpudirect_p2p.png) # # This accelerates intra-node communication. Buffers can be directly copied between the memories of two GPUs in the same system with GPUDirect P2P. Recall that as NVLink is present in our DGX-1V system, it will be used for data transfer instead of PCIe. The profiler description confirms the same: # # ![mpi_cuda_aware_p2p_metrics](../../images/mpi_cuda_aware_p2p_metrics.png) # # #### GPUDirect RDMA # # With GPUDirect Remote Direct Memory Access (RDMA), abbreviated as GDR, buffers can be directly sent from the GPU memory to a network adapter without staging through host memory as shown below: # # ![gpudirect_rdma](../../images/gpudirect_rdma.png) # # To understand the impact of GDR, we will run our program on 2 GPUs with 1 GPU per node. This way, the GPUs must communicate either via GPUDirect RDMA or via host-staging. Moreover, we will decrease the grid size to $16384\times128$ to make the application more communication-bound. Note that the size of copy operation is still the same (16K * size of float (4B) = 64KB). # # Run the binary with GDR enabled (default configuration) for 1 GPU per node for 10K iterations: # # ! cd ../../source_code/mpi && mpirun -np 2 --map-by ppr:1:node ./jacobi_cuda_aware_mpi -ny 128 -skip_single_gpu -niter 10000 # We share the output from our DGX-1V system: # # ```bash # Num GPUs: 2. # 16384x128: 2 GPUs: 1.0814 s # ``` # # We can disable GDR by using the `-x UCX_IB_GPU_DIRECT_RDMA=no` flag with `mpirun` command. Run the program again with GDR disabled: # # ! 
cd ../../source_code/mpi && mpirun -np 2 --map-by ppr:1:node -x UCX_IB_GPU_DIRECT_RDMA=no ./jacobi_cuda_aware_mpi -ny 128 -skip_single_gpu -niter 10000

# Our output is as follows:
#
# ```bash
# Num GPUs: 2.
# 16384x128: 2 GPUs: 1.3647 s
# ```
#
# The computation time increases considerably. In our case, it is an increase of about $25\%$, from 1.08s to 1.36s.
#
# The profiler output of these two runs highlights the significant difference in halo exchange time. Focus on the minimum latency, as it will reflect the most optimized inter-process communication with the given configuration options. The average latency also decreases for the GDR-enabled run.
#
# ![mpi_cuda_aware_gdr_latency](../../images/mpi_cuda_aware_gdr_latency.png)
#
# Note that GDR-based transfers are not visible in the Nsight Systems timeline. You will see an `MPI_Sendrecv` call in NVTX, but no memory copy operations will be visible in either the CPU or the GPU.
#
# **Note:** If your OpenMPI installation does not use the UCX PML and instead relies on the `openib` BTL, you can disable GDR by using the `--mca btl_openib_want_cuda_gdr 0` flag.
#
# There are several other optimizations employed by CUDA-aware MPI that we will not cover in detail. Some of them are:
#
# * GDR Copy: While GPUDirect RDMA is meant for direct access to GPU memory from third-party devices like NICs, it is possible to use the same APIs to create valid CPU mappings of the GPU memory. The advantage of a CPU-driven copy is the very small overhead involved. That might be useful when low latencies are required.
# * GPUDirect for Accelerated Communication with Network and Storage Devices: This feature allows the network fabric driver (like MLX5) and the CUDA driver to share a common pinned buffer in order to avoid an unnecessary `memcpy` within host memory between the intermediate pinned buffers of the CUDA driver and the network fabric buffer.
# * Pipelining: All operations that are required to carry out the message transfer can be pipelined.
# # **Solution:** The solution for this exercise is present in `source_code/mpi/solutions` directory: [jacobi_cuda_aware_mpi.cpp](../../source_code/mpi/solutions/jacobi_cuda_aware_mpi.cpp). # # We now have an in-depth understanding of CUDA-aware MPI and how it simplifies the code while being highly performant. We have also covered GPUDirect technologies like P2P and RDMA and their effects on application performance. # # Now, let us learn about high-performance NVIDIA libraries NCCL and NVSHMEM that allow us to extract more performance while simplifying the code and runtime configuration further. # # Click below to access the lab and learn more about NVIDIA's NCCL library: # # # [Next: NCCL Library](../nccl/nccl.ipynb) # # Here's a link to the home notebook through which all other notebooks are accessible: # # # [HOME](../../../start_here.ipynb) # # --- # ## Links and Resources # # * [Concepts: CUDA-aware MPI and GPUDirect Technologies](https://developer.nvidia.com/blog/introduction-cuda-aware-mpi/) # * [Concepts: GPUDirect Technologies](http://developer.download.nvidia.com/devzone/devcenter/cuda/docs/GPUDirect_Technology_Overview.pdf) # * [Documentation: GPUDirect RDMA](https://docs.nvidia.com/cuda/gpudirect-rdma/index.html) # * [Documentation: CUDA support in OpenMPI](https://www.open-mpi.org/faq/?category=runcuda#mpi-cuda-support) # * [Code: GDRCopy Library](https://github.com/NVIDIA/gdrcopy) # * [Code: Multi-GPU Programming Models](https://github.com/NVIDIA/multi-gpu-programming-models) # * [Code: GPU Bootcamp](https://github.com/gpuhackathons-org/gpubootcamp/) # # Don't forget to check out additional [OpenACC Resources](https://www.openacc.org/resources) and join our [OpenACC Slack Channel](https://www.openacc.org/community#slack) to share your experience and get more help from the community. 
# # ## Licensing # # This material is released by OpenACC-Standard.org, in collaboration with NVIDIA Corporation, under the Creative Commons Attribution 4.0 International (CC BY 4.0).
hpc/multi_gpu_nways/labs/CFD/English/C/jupyter_notebook/mpi/cuda_aware.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Density Estimation # # # Relates to all of the probabilities we have been discussing. Will impact classification, clustering and many other operations. # # Question is: how do we find clusters or estimate density efficiently? # # <u> The _K-means_ algorithm </u> # # The first approach for finding clusters that is always taught is $K$-means (simple and works well) # # $K$-means partitions points into $K$ disjoint subsets ($C_k$) with each subset containing $N_k$ # points # # # It minimizes the objective/cost/likelihood function, # $\sum_{k=1}^K \sum_{i \in C_k} || x_i - \mu_k ||^2$ # # $\mu_k = \frac{1}{N_k} \sum_{i \in C_k} x_i$ is the mean of the # points in set $C_k$ # # # _Procedure:_ # # 1. define the number of clusters $K$ # 2. choose the centroid, $\mu_k$, of each of the $K$ clusters # 3. assign each point to the cluster that it is closest to # 4. update the centroid of each cluster by recomputing $\mu_k$ according to the new assignments. # 5. goto (3) until there are no new assignments. # # Global optima are not guaranteed but the process never increases the sum-of-squares error. # # Typically run multiple times with different starting values for the # centroids of $C_k$. 
# +
# %matplotlib inline
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.patches import Ellipse
from scipy.stats import norm
from sklearn.cluster import KMeans
from sklearn import preprocessing
from astroML.datasets import fetch_sdss_sspp

#------------------------------------------------------------
# Get data
# SDSS Segue Stellar Parameters Pipeline sample; we cluster the stars in the
# ([Fe/H], [alpha/Fe]) abundance plane.
data = fetch_sdss_sspp(cleaned=True)
X = np.vstack([data['FeH'], data['alphFe']]).T

# truncate dataset for speed
X = X[::5]

#------------------------------------------------------------
# Compute a 2D histogram of the input (50x50 bins over the data range)
H, FeH_bins, alphFe_bins = np.histogram2d(data['FeH'], data['alphFe'], 50)

#------------------------------------------------------------
# Compute the KMeans clustering
# NOTE(review): n_clusters = 1 makes the clustering trivial (a single center);
# increase this value to see an actual partition of the plane.
n_clusters = 1

# Standardize both features before clustering so they carry equal weight.
scaler = preprocessing.StandardScaler()
clf = KMeans(n_clusters)
clf.fit(scaler.fit_transform(X))

#------------------------------------------------------------
# Visualize the results
fig = plt.figure(figsize=(6, 6))
ax = fig.add_subplot()

# plot density
# NOTE(review): this immediately replaces the axes created just above;
# one of the two calls is redundant.
ax = plt.axes()
ax.imshow(H.T, origin='lower', interpolation='nearest', aspect='auto',
          extent=[FeH_bins[0], FeH_bins[-1], alphFe_bins[0], alphFe_bins[-1]],
          cmap=plt.cm.binary)

# plot cluster centers (transformed back to the original feature units)
cluster_centers = scaler.inverse_transform(clf.cluster_centers_)
ax.scatter(cluster_centers[:, 0], cluster_centers[:, 1],
           s=40, c='w', edgecolors='k')

# plot cluster boundaries: predict the cluster of every histogram-bin center,
# then contour the resulting label image one cluster at a time.
FeH_centers = 0.5 * (FeH_bins[1:] + FeH_bins[:-1])
alphFe_centers = 0.5 * (alphFe_bins[1:] + alphFe_bins[:-1])

Xgrid = np.meshgrid(FeH_centers, alphFe_centers)
Xgrid = np.array(Xgrid).reshape((2, 50 * 50)).T

# NOTE(review): `H` is reused here, shadowing the histogram computed above.
H = clf.predict(scaler.transform(Xgrid)).reshape((50, 50))

for i in range(n_clusters):
    # Binary mask of cluster i, contoured at the 0/1 transition.
    Hcp = H.copy()
    flag = (Hcp == i)
    Hcp[flag] = 1
    Hcp[~flag] = 0

    ax.contour(FeH_centers, alphFe_centers, Hcp, [-0.5, 0.5],
               linewidths=2, colors='k')

ax.xaxis.set_major_locator(plt.MultipleLocator(0.3))
ax.set_xlim(-1.101, 0.101)
ax.set_ylim(alphFe_bins[0], alphFe_bins[-1])

ax.set_xlabel(r'$\rm [Fe/H]$')
ax.set_ylabel(r'$\rm [\alpha/Fe]$')
plt.show()
# -

# ** How do you choose the number of clusters?**

# ## Nearest neighbor estimation
#
# Simple (simplest?) density estimator heavily used in astrophysics (cluster detection, large scale structure measures)
#
# For each point we find the distance to the $K$th-nearest neighbor, $d_K$. **Note: we are not choosing clusters here**
#
# Implied point density at an arbitrary position $x$ is
#
# $\hat{f}_K(x) = {K \over V_D(d_K)}$
#
# with $V_D$ the volume. The assumption is that the density is locally constant. Can write it as
#
# $\hat{f}_K(x) = {C \over d_K^D}$
#
# and calculate $C$ by setting the sum of the product of $\hat{f}_K(x)$ and pixel volume equal to the total number of data points.
#
# Error on $\hat{f}_K(x)$ is $\sigma_f = K^{1/2}/V_D (d_K)$
#
# Fractional (relative) error is $\sigma_f/\hat{f} = 1/K^{1/2}$.
#
# * fractional accuracy increases with $K$ at expense of the spatial resolution (bias-variance trade-off)
# * effective resolution scales with $K^{1/D}$
#
# Method can be improved by considering distances to _all_ $K$ nearest neighbors
#
# $\hat{f}_K(x) = {C \over \sum_{i=1}^K d_i^D}$
#
# Normalization when computing local density without regard to overall mean density is
#
# $C = \frac{K\, (K + 1)}{2 V_D(r)}$

# +
# Author: <NAME> <<EMAIL>>
# License: BSD
#   The figure produced by this code is published in the textbook
#   "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
#   For more information, see http://astroML.github.com
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import LogNorm
from scipy.spatial import cKDTree
from astroML.datasets import fetch_great_wall
from astroML.density_estimation import KDE, KNeighborsDensity

#------------------------------------------------------------
# Fetch the great wall data
X = fetch_great_wall()

#------------------------------------------------------------
# Create the grid on which to evaluate the results
Nx = 50
Ny = 125
xmin, xmax = (-375, -175)
ymin, ymax = (-300, 200)

#------------------------------------------------------------
# Evaluate for several models: one KDE and two K-nearest-neighbor estimates.
Xgrid = np.vstack(map(np.ravel, np.meshgrid(np.linspace(xmin, xmax, Nx),
                                            np.linspace(ymin, ymax, Ny)))).T

# Gaussian KDE with bandwidth h=10.
kde = KDE(metric='gaussian', h=10)
dens_KDE = kde.fit(X).eval(Xgrid).reshape((Ny, Nx))

knn5 = KNeighborsDensity('bayesian', 5)
dens_k5 = knn5.fit(X).eval(Xgrid).reshape((Ny, Nx))

knn40 = KNeighborsDensity('bayesian', 40)
dens_k40 = knn40.fit(X).eval(Xgrid).reshape((Ny, Nx))

#------------------------------------------------------------
# Plot the results: 2x2 panel of input points plus the three estimates.
fig = plt.figure(figsize=(9, 4.0))
fig.subplots_adjust(left=0.1, right=0.95, bottom=0.14, top=0.9,
                    hspace=0.01, wspace=0.01)

# First plot: scatter the points
ax1 = plt.subplot(221, aspect='equal')
ax1.scatter(X[:, 1], X[:, 0], s=1, lw=0, c='k')
ax1.text(0.98, 0.95, "input", ha='right', va='top',
         transform=ax1.transAxes, fontsize=12,
         bbox=dict(boxstyle='round', ec='k', fc='w'))

# Second plot: KDE
# NOTE(review): the label below says h=5 but the KDE above was built with
# h=10 — one of the two should be corrected.
ax2 = plt.subplot(222, aspect='equal')
ax2.imshow(dens_KDE.T, origin='lower', norm=LogNorm(),
           extent=(ymin, ymax, xmin, xmax), cmap=plt.cm.binary)
ax2.text(0.98, 0.95, "KDE: gaussian $(h=5)$", ha='right', va='top',
         transform=ax2.transAxes, fontsize=12,
         bbox=dict(boxstyle='round', ec='k', fc='w'))

# Third plot: KNN, k=5
ax3 = plt.subplot(223, aspect='equal')
ax3.imshow(dens_k5.T, origin='lower', norm=LogNorm(),
           extent=(ymin, ymax, xmin, xmax), cmap=plt.cm.binary)
ax3.text(0.98, 0.95, "KNN $(k=5)$", ha='right', va='top',
         transform=ax3.transAxes, fontsize=12,
         bbox=dict(boxstyle='round', ec='k', fc='w'))

# Fourth plot: KNN, k=40
ax4 = plt.subplot(224, aspect='equal')
ax4.imshow(dens_k40.T, origin='lower', norm=LogNorm(),
           extent=(ymin, ymax, xmin, xmax), cmap=plt.cm.binary)
ax4.text(0.98, 0.95, "KNN $(k=40)$", ha='right', va='top',
         transform=ax4.transAxes, fontsize=12,
         bbox=dict(boxstyle='round', ec='k', fc='w'))

# Shared axis limits; hide tick labels on interior panel edges.
for ax in [ax1, ax2, ax3, ax4]:
    ax.set_xlim(ymin, ymax - 0.01)
    ax.set_ylim(xmin, xmax)

for ax in [ax1, ax2]:
    ax.xaxis.set_major_formatter(plt.NullFormatter())
for ax in [ax3, ax4]:
    ax.set_xlabel('$y$ (Mpc)')

for ax in [ax2, ax4]:
    ax.yaxis.set_major_formatter(plt.NullFormatter())
for ax in [ax1, ax3]:
    ax.set_ylabel('$x$ (Mpc)')

plt.show()
# -

# ## Estimating the density non-parametrically
#
# Effectively an enhancement to histograms (piecewise constant or top-hat kernel) in multidimensional space
#
# **Kernel Density Estimation**
#
# $N(x) = \frac{1}{Nh^D} \sum_{i=1}^N K\left( \frac{d(x,x_i)}{h} \right),$
#
# K: kernel (defined by the bandwidth h) is any smooth function which is positive at all values
#
# Too narrow a kernel, too spiky the results (high variance)
#
# Too broad a kernel, too smooth or washed out the results (bias)
#
# _Common kernels_
#
# Gaussian: $ K(u) = \frac{1}{(2\pi)^{D/2}} e^{- u^2 / 2}$   D: dimension
#
# Tophat: $ K(u) = \left\{
#    \begin{array}{ll}
#        \frac{1}{V_D(r)} & {\rm if}\ u \le r,\\
#        0 & {\rm if}\ u > r,
#    \end{array}
#    \right.$
#
# Exponential: $ K(u) = \frac{1}{D!\, V_D(r)}e^{-|u|}$
#
# with $V_D(r)$ the volume of a hypersphere radius $r$; $V_D(r) = \frac{2r^D\pi^{D/2}}{D\ \Gamma(D/2)}$
#
# <img src="figures/funcs.png">
#
# Perhaps surprisingly the primary feature is the bandwidth of these distributions not the exact shape. Choosing the bandwidth is usually done through cross-validation

# +
# %matplotlib inline
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import LogNorm
from scipy.spatial import cKDTree
# NOTE(review): cKDTree and gaussian_kde are imported but never used in this cell.
from scipy.stats import gaussian_kde
from astroML.datasets import fetch_great_wall
from astroML.density_estimation import KDE

#------------------------------------------------------------
# Fetch the great wall data
X = fetch_great_wall()

#------------------------------------------------------------
# Create the grid on which to evaluate the results
Nx = 50
Ny = 125
xmin, xmax = (-375, -175)
ymin, ymax = (-300, 200)

#------------------------------------------------------------
# Evaluate for several models: same bandwidth (h=5), three kernel shapes.
Xgrid = np.vstack(map(np.ravel, np.meshgrid(np.linspace(xmin, xmax, Nx),
                                            np.linspace(ymin, ymax, Ny)))).T

kde1 = KDE(metric='gaussian', h=5)
dens1 = kde1.fit(X).eval(Xgrid).reshape((Ny, Nx))

kde2 = KDE(metric='tophat', h=5)
dens2 = kde2.fit(X).eval(Xgrid).reshape((Ny, Nx))

kde3 = KDE(metric='exponential', h=5)
dens3 = kde3.fit(X).eval(Xgrid).reshape((Ny, Nx))

#------------------------------------------------------------
# Plot the results: 2x2 panel of input points plus the three kernel estimates.
fig = plt.figure(figsize=(9, 4.0))
fig.subplots_adjust(left=0.1, right=0.95, bottom=0.14, top=0.9,
                    hspace=0.01, wspace=0.01)

# First plot: scatter the points
ax1 = plt.subplot(221, aspect='equal')
ax1.scatter(X[:, 1], X[:, 0], s=1, lw=0, c='k')
ax1.text(0.98, 0.95, "input", ha='right', va='top',
         transform=ax1.transAxes, fontsize=12,
         bbox=dict(boxstyle='round', ec='k', fc='w'))

# Shared grayscale range for the three density panels.
vmin = 0.
vmax= 1.

# Second plot: gaussian kernel
ax2 = plt.subplot(222, aspect='equal')
ax2.imshow(dens1.T, origin='lower', vmin=vmin, vmax=vmax,
           extent=(ymin, ymax, xmin, xmax), cmap=plt.cm.binary)
ax2.text(0.98, 0.95, "gaussian $(h=5)$", ha='right', va='top',
         transform=ax2.transAxes, fontsize=12,
         bbox=dict(boxstyle='round', ec='k', fc='w'))

# Third plot: top-hat kernel
ax3 = plt.subplot(223, aspect='equal')
ax3.imshow(dens2.T, origin='lower', vmin=vmin, vmax=vmax,
           extent=(ymin, ymax, xmin, xmax), cmap=plt.cm.binary)
ax3.text(0.98, 0.95, "top-hat $(h=5)$", ha='right', va='top',
         transform=ax3.transAxes, fontsize=12,
         bbox=dict(boxstyle='round', ec='k', fc='w'))

# Fourth plot: exponential kernel
ax4 = plt.subplot(224, aspect='equal')
ax4.imshow(dens3.T, origin='lower', vmin=vmin, vmax=vmax,
           extent=(ymin, ymax, xmin, xmax), cmap=plt.cm.binary)
ax4.text(0.98, 0.95, "exponential $(h=5)$", ha='right', va='top',
         transform=ax4.transAxes, fontsize=12,
         bbox=dict(boxstyle='round', ec='k', fc='w'))

# Shared axis limits; hide tick labels on interior panel edges.
for ax in [ax1, ax2, ax3, ax4]:
    ax.set_xlim(ymin, ymax - 0.01)
    ax.set_ylim(xmin, xmax)

for ax in [ax1, ax2]:
    ax.xaxis.set_major_formatter(plt.NullFormatter())
for ax in [ax3, ax4]:
    ax.set_xlabel('$y$ (Mpc)')

for ax in [ax2, ax4]:
    ax.yaxis.set_major_formatter(plt.NullFormatter())
for ax in [ax1, ax3]:
    ax.set_ylabel('$x$ (Mpc)')

plt.show()
# -

# ## Mixture Models: Gaussian
#
# Mixture models use the sum of functions to represent the density distributions - used in defining the density, classifications, cloning of a data set....
#
# Gaussian mixture models (GMMs) are the most common implementation of mixture models
#
# $\rho(\mathbf{x}) = N\, p(\mathbf{x}) = N\, \sum_{j=1}^M \alpha_j \mathcal{N}(\mu_j, \Sigma_j)$
#
# with $p(\mathbf{x}) = \sum_j \alpha_j \mathcal{N}(\mathbf{x}|\mathbf{\mu}_{j},\mathbf{\Sigma}_{j})$
#
# and
#
# $\mathcal{N}(\mathbf{x}|\mathbf{\mu}_j,\mathbf{\Sigma}_j) =
#   \frac{1}{\sqrt{(2\pi)^D\mbox{det}(\mathbf{\Sigma}_j)}}
#   \exp\Big(-\frac{1}{2}(\mathbf{x}-\mathbf{\mu})^T\mathbf{\Sigma}_j^{-1}(\mathbf{x}-\mathbf{\mu})\Big)\, $
#
# where the model consists of $M$ Gaussians with locations $\mu_j$ and covariances $\Sigma_j$. The log-likelihood is straightforward to define by this model.
#
# <u> _Expectation maximization_ </u> is typically employed to solve for the mixture of Gaussians
#
# - _Expectation_
#
#   - Given a set of Gaussians compute the "expected" classes of all points
#
# - _Maximization_
#
#   - Estimate the MLE of $\mu$, amplitude, and $\Sigma$ given the data's class membership
#
# Iterative procedure until variance does not change. Guaranteed to converge - but not to the correct answer

# +
# %matplotlib inline
import numpy as np
from matplotlib import pyplot as plt
from scipy.stats import norm
# NOTE(review): `GMM` is the deprecated pre-0.20 scikit-learn estimator;
# importing both GaussianMixture and GMM only works on older versions.
from sklearn.mixture import GaussianMixture, GMM
from astroML.utils import convert_2D_cov
from astroML.plotting.tools import draw_ellipse

plt.ion()

#------------------------------------------------------------
# Set up the dataset
#  We'll use scikit-learn's Gaussian Mixture Model to sample
#  data from a mixture of Gaussians.  The usual way of using
#  this involves fitting the mixture to data: we'll see that
#  below.  Here we'll set the internal means, covariances,
#  and weights by-hand.
# we'll define clusters as (mu, sigma1, sigma2, alpha, frac) clusters = [((60, 60), 2, 2, np.pi / 2, 0.6), ((65, 65), 5, 3, 0., 0.4),] gmm_input = GMM(len(clusters), covariance_type='full') gmm_input.means_ = np.array([c[0] for c in clusters]) gmm_input.covars_ = np.array([convert_2D_cov(*c[1:4]) for c in clusters]) gmm_input.weights_ = np.array([c[4] for c in clusters]) gmm_input.weights_ /= gmm_input.weights_.sum() gmm_input.converged_ = True #------------------------------------------------------------ # Compute and plot the results fig = plt.figure(figsize=(8, 8)) ax = fig.add_subplot(111) fig.canvas.draw() Nclusters = len(clusters) Npts = 1000 X = gmm_input.sample(Npts) def fitAndPlot(X, n_components=2, n_iter=0): ax.plot(X[:, 0], X[:, 1], '.', c='red', ms=1, zorder=1) ax.set_xlim(X[:,0].min(), X[:,0].max()) ax.set_ylim(X[:,1].min(), X[:,1].max()) ax.set_ylabel('$y$') ax.set_xlabel('$x$') fig.canvas.draw() for n in np.arange(1, n_iter, 3): clf = GaussianMixture(n_components=2, max_iter=n, random_state=0, init_params='random') clf.fit(X) #print "%i points convergence:" % Npts, clf.converged_ # scatter the points # plot the components for i in range(clf.n_components): mean = clf.means_[i] cov = clf.covariances_[i] if cov.ndim == 1: cov = np.diag(cov) draw_ellipse(mean, cov, ax=ax, scales=[1], fc='none', ec='k', zorder=2) fig.canvas.draw() for i in range(clf.n_components): mean = clf.means_[i] cov = clf.covariances_[i] if cov.ndim == 1: cov = np.diag(cov) draw_ellipse(mean, cov, ax=ax, scales=[1], fc='none', ec='blue', zorder=2) fig.canvas.draw() fitAndPlot(X, n_components=2, n_iter=20) # - # ## How do we choose the number of components # # Typically used as a a density estimation and not as a way of determining the number of Gaussians in a distribution (e.g. 
imagine what would happen with a single cluster and a uniform background) # # AIC, BIC, and cross-validation are often used to define the number of parameters (though this is rarely well defined) # # Specifying the number of components (or clusters) is a relatively # poorly posed question. It is rare to find distinct, isolated Gaussian clusters of data (almost all distributions are continuous). # # + import numpy as np from matplotlib import pyplot as plt from scipy.stats import norm from sklearn.mixture import GMM from astroML.datasets import fetch_sdss_sspp from astroML.decorators import pickle_results from astroML.plotting.tools import draw_ellipse # Suppress deprecation warnings import warnings warnings.filterwarnings("ignore") #------------------------------------------------------------ # Get the Segue Stellar Parameters Pipeline data data = fetch_sdss_sspp(cleaned=True) X = np.vstack([data['FeH'], data['alphFe']]).T # truncate dataset for speed X = X[::5] #------------------------------------------------------------ # Compute GMM models & AIC/BIC N = np.arange(1, 14) @pickle_results("GMM_metallicity.pkl") def compute_GMM(N, covariance_type='full', n_iter=1000): models = [None for n in N] for i in range(len(N)): print (N[i]) models[i] = GMM(n_components=N[i], n_iter=n_iter, covariance_type=covariance_type) models[i].fit(X) return models models = compute_GMM(N) AIC = [m.aic(X) for m in models] BIC = [m.bic(X) for m in models] i_best = 12 gmm_best = models[i_best] print ("best fit converged:", gmm_best.converged_) print ("n_components = %i" % N[i_best]) #------------------------------------------------------------ # compute 2D density FeH_bins = 51 alphFe_bins = 51 H, FeH_bins, alphFe_bins = np.histogram2d(data['FeH'], data['alphFe'], (FeH_bins, alphFe_bins)) Xgrid = np.array(list(map(np.ravel, np.meshgrid(0.5 * (FeH_bins[:-1] + FeH_bins[1:]), 0.5 * (alphFe_bins[:-1] + alphFe_bins[1:]))))).T log_dens = gmm_best.score(Xgrid).reshape((51, 51)) 
#------------------------------------------------------------ # Plot the results fig = plt.figure(figsize=(19, 6)) fig.subplots_adjust(wspace=0.4, bottom=0.2, top=0.9, left=0.1, right=0.95) # plot density ax = fig.add_subplot(141) ax.imshow(H.T, origin='lower', interpolation='nearest', aspect='auto', extent=[FeH_bins[0], FeH_bins[-1], alphFe_bins[0], alphFe_bins[-1]], cmap=plt.cm.binary) ax.set_xlabel(r'$\rm [Fe/H]$') ax.set_ylabel(r'$\rm [\alpha/Fe]$') ax.xaxis.set_major_locator(plt.MultipleLocator(0.3)) ax.set_xlim(-1.101, 0.101) ax.text(0.96, 0.96, "Input\nDistribution", fontsize=12, va='top', ha='right', transform=ax.transAxes) # plot AIC/BIC ax = fig.add_subplot(142) ax.plot(N, AIC, '-k', label='AIC') ax.plot(N, BIC, '--k', label='BIC') ax.legend(loc=1, prop=dict(size=12)) ax.set_xlabel('N components') plt.setp(ax.get_yticklabels(), fontsize=12) # plot best configurations for AIC and BIC ax = fig.add_subplot(143) ax.imshow(np.exp(log_dens), origin='lower', interpolation='nearest', aspect='auto', extent=[FeH_bins[0], FeH_bins[-1], alphFe_bins[0], alphFe_bins[-1]], cmap=plt.cm.binary) ax.scatter(gmm_best.means_[:, 0], gmm_best.means_[:, 1], c='w') for mu, C, w in zip(gmm_best.means_, gmm_best.covars_, gmm_best.weights_): draw_ellipse(mu, C, scales=[1.5], ax=ax, fc='none', ec='k') # plot best configurations for AIC and BIC ax.text(0.96, 0.96, "Converged\nconfiguration", fontsize=12, va='top', ha='right', transform=ax.transAxes) ax.set_xlim(-1.101, 0.101) ax.set_ylim(alphFe_bins[0], alphFe_bins[-1]) ax.xaxis.set_major_locator(plt.MultipleLocator(0.3)) ax.set_xlabel(r'$\rm [Fe/H]$') ax.set_ylabel(r'$\rm [\alpha/Fe]$') plt.show() # + import numpy as np from matplotlib import pyplot as plt from sklearn.mixture import GMM from astroML.datasets import fetch_great_wall from astroML.decorators import pickle_results #------------------------------------------------------------ # load great wall data X = fetch_great_wall() 
#------------------------------------------------------------ # Create a function which will save the results to a pickle file # for large number of clusters, computation will take a long time! @pickle_results('great_wall_GMM.pkl') def compute_GMM(n_clusters, n_iter=1000, min_covar=3, covariance_type='full'): clf = GMM(n_clusters, covariance_type=covariance_type, n_iter=n_iter, min_covar=min_covar) clf.fit(X) print ("converged:", clf.converged_) return clf #------------------------------------------------------------ # Compute a grid on which to evaluate the result Nx = 100 Ny = 250 xmin, xmax = (-375, -175) ymin, ymax = (-300, 200) Xgrid = np.vstack(map(np.ravel, np.meshgrid(np.linspace(xmin, xmax, Nx), np.linspace(ymin, ymax, Ny)))).T #------------------------------------------------------------ # Compute the results # # we'll use 100 clusters. In practice, one should cross-validate # with AIC and BIC to settle on the correct number of clusters. clf = compute_GMM(n_clusters=1000) log_dens = clf.score(Xgrid).reshape(Ny, Nx) #------------------------------------------------------------ # Plot the results fig = plt.figure() fig.subplots_adjust(hspace=0, left=0.1, right=0.95, bottom=0.1, top=0.9) ax = fig.add_subplot(211, aspect='equal') ax.scatter(X[:, 1], X[:, 0], s=1, lw=0, c='k') ax.set_xlim(ymin, ymax) ax.set_ylim(xmin, xmax) ax.xaxis.set_major_formatter(plt.NullFormatter()) plt.ylabel('$x$ (Mpc)') ax = fig.add_subplot(212, aspect='equal') ax.imshow(np.exp(log_dens.T), origin='lower', cmap=plt.cm.binary, extent=[ymin, ymax, xmin, xmax]) ax.set_xlabel('$y$ (Mpc)') ax.set_ylabel('$x$ (Mpc)') plt.show() # - # ## Hierarchical clustering # # Finding all clusters at all scales. # # _Procedure_ # # 1. partition the data into $N$ clusters (one for each point in the # data set) # 2. join two of the clusters (resulting in $N-1$ clusters). # 3. repeat until the $N$th partition contains one cluster. 
# # # If two points are in the same cluster at level $m$, and remain together at all subsequent levels, this is known as _hierarchical clustering_ # # _How do we merge components?_ # # Many ways to merge the points (minimum distance between points in two clusters, maximum distance between points, average distance between points in two clusters) # # The example: $d_{\rm min}(C_k,C_{k'}) = \min_{x \in C_k, x' \in C_{k'}} ||x-x'||$ is known as the _minimum spanning tree_. Clusters can then be isolated by sorting the links (or edges) by increasing length and deleting those edges longer than some threshold. (``friends-of-friends'' in clustering) # # # + # Author: <NAME> <<EMAIL>> # License: BSD # The figure produced by this code is published in the textbook # "Statistics, Data Mining, and Machine Learning in Astronomy" (2013) # For more information, see http://astroML.github.com import numpy as np from matplotlib import pyplot as plt from scipy import sparse from sklearn.neighbors import kneighbors_graph from sklearn.mixture import GMM from astroML.clustering import HierarchicalClustering, get_graph_segments from astroML.datasets import fetch_great_wall from astroML.cosmology import Cosmology from scipy.sparse.csgraph import minimum_spanning_tree, connected_components #------------------------------------------------------------ # get data X = fetch_great_wall() xmin, xmax = (-375, -175) ymin, ymax = (-300, 200) #------------------------------------------------------------ # Compute the MST clustering model n_neighbors = 10 edge_cutoff = 0.9 cluster_cutoff = 30 model = HierarchicalClustering(n_neighbors=10, edge_cutoff=0.9, min_cluster_size=10) model.fit(X) n_components = model.n_components_ labels = model.labels_ #------------------------------------------------------------ # Get the x, y coordinates of the beginning and end of each line segment T_x, T_y = get_graph_segments(model.X_train_, model.full_tree_) T_trunc_x, T_trunc_y = get_graph_segments(model.X_train_, 
model.cluster_graph_) #---------------------------------------------------------------------- # Plot the results fig = plt.figure(figsize=(7, 8)) fig.subplots_adjust(hspace=0, left=0.1, right=0.95, bottom=0.1, top=0.9) ax = fig.add_subplot(311, aspect='equal') ax.scatter(X[:, 1], X[:, 0], s=1, lw=0, c='k') ax.set_xlim(ymin, ymax) ax.set_ylim(xmin, xmax) ax.xaxis.set_major_formatter(plt.NullFormatter()) ax.set_ylabel('$x$ (Mpc)') ax = fig.add_subplot(312, aspect='equal') ax.plot(T_y, T_x, c='k', lw=1) ax.set_xlim(ymin, ymax) ax.set_ylim(xmin, xmax) ax.xaxis.set_major_formatter(plt.NullFormatter()) ax.set_xlabel('$y$ (Mpc)') ax.set_ylabel('$x$ (Mpc)') ax = fig.add_subplot(313, aspect='equal') ax.plot(T_trunc_y, T_trunc_x, c='k', lw=1) #ax.scatter(X[clusters, 1], X[clusters, 0], c=labels[clusters], lw=0) ax.set_xlim(ymin, ymax) ax.set_ylim(xmin, xmax) ax.xaxis.set_major_formatter(plt.NullFormatter()) ax.set_xlabel('$y$ (Mpc)') ax.set_ylabel('$x$ (Mpc)') plt.show()
lectures/notes/Lecture7-density-estimation-1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Lecture01 Hello, Python # # 第01讲 你好,Python # ### Problem 问题描述 # 1. Write and run a python code line and let the computer output a sentence "I am learning Math with python programming." 编写运行一行程序代码在屏幕上输出 “我正在用Python编程学数学。" # 2. Introduce Jupyter Lab. 介绍Jupyter Lab交互式环境 # 3. Calculate the following expressions using Python. 用Python语言计算下面算式的结果: # - $1+2=$ # - $3.14\times2\times5=$ # - $99\times97+99\times3=$ # - $100\div 20=$ # - $1+2+3+\cdots+97+98+99=$ # - $2\times2\times2\times2\times2\times2=$ # ### Mathematics 数学背景 # # - $+$, $-$, $\times$, $\div$ # + [markdown] heading_collapsed=true # ### Prerequisites 预备知识 # - # 1. In python, $\times$ is replaced by `*` and $\div$ is replaced by `/`. Python中乘法和除法符号分别被`*`和`/`替代 # 2. Command `print` 命令 # 3. role of a pair of parentheses`()` after a command. 一条命令后成对小括号`()`的作用 # 4. role of (double) quotes `""`, `''`. 单双引号的作用 # 5. what is a string? 字符串是什么? # ### Solution 编程求解 # + heading_collapsed=true print('I am learning Math with Python programming.') # - print("我正在用Python编程学数学。") # ### Summary 知识点小结 # # - Understand what a string is 理解什么是字符串 # - Be able to represent a string in Python 在Python中表示一个字符串 # - Be able to display a string on screen 能够在屏幕上显示一个字符串 # - Use Python as a calculator 把Python当成一个计算器使用 # - Jupyter Lab 交互环境 # ### CS Tips 计算机小知识 # - # + [markdown] heading_collapsed=true # ### Assignments 作业 # # - # 1. Use `print` to print out exactly the following sentences(including the final period). 使用`print`语句打印出完全一样的下面的句子(包括末尾的句号)。 # - I love China and Canada. # - I like Python programming language, and I like Math too. # - Our teacher's name is Qiang. # - "CN" is the abbreviation of China. # - The abbreviation of Canda is "CAD". 
# # The code for printing out first sentence is already given as an example: # 作为示例第一个句子已经给出答案: # ```python # print("I love China and Canada.") # ``` # Please write your codes for the rest sentences in below cells (one sentence in each cell). # 请在下面几个小格子里编写代码输出剩余的句子(每一个单元格输出一个句子)。 # + hidden=true # + hidden=true # + hidden=true # + hidden=true # + hidden=true # + [markdown] hidden=true # 2. In the following cells, write codes to calculate the following Arithmetic expressions (one cell for each): # 在下面的单元格中,编写代码计算下面的算术表达式(每一个式子使用一个单元格): # # - $1+3+5+7+9+11+13+15+17+19$ # - $10-9+8-7+6-5+4-3+2-1$ # - $3.14\times 2\times 5$ # - $2\times(6+9)$ # - $2\times\times2\times2\times2\times2\times2\times2\times2$ # + hidden=true # + hidden=true # + hidden=true # + hidden=true # - # + [markdown] hidden=true # 3. The length and width of a rectangle are 22cm and 16cm, respectively. What the perimeter and the area of this rectangle? write only two lines of codes to provide the answer directly, and an extra line of code to print out the result like the follow sentence: 一个长方形的长和宽分别是22厘米和16厘米,它的周长和面积分别是多少?通过只编写两行代码给出答案,并使用额外一行代码输出如下格式的一句话: # # "The perimeter of this rectangle is xxx cm, and its area is xxx cm*cm." # # Replace the xxx by your results. 用你得到的结果替代上面的 xxx。 # + hidden=true # + hidden=true # + hidden=true
source/2022/NotOnline/001_hello_python.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd

# Build a 2x6 table of zeros for the results summary.
# BUG FIX: `[[0]*6] * 2` makes both rows references to the SAME list object,
# so filling table[0][i] in place would silently change table[1][i] as well.
# A comprehension creates an independent row per iteration. (pandas copies the
# data on construction, but the aliased form is a latent bug the moment the
# table is filled in by index.)
table = [[0] * 6 for _ in range(2)]

pd.DataFrame(
    table,
    columns='input_dim layer_1_neurons layer_2_neurons '
            'layer_3_neurons train_accuracy test_accuracy'.split(),
)
jupyter-notebooks/backup/UCSD 400___ L4 DHDS -- Deep Learning and AI -- exercise.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: sqlalchemy-mutable # language: python # name: sqlalchemy-mutable # --- # # SQLAlchemy-Mutable examples # ## SQAlchemy setup # + from sqlalchemy_mutable import Mutable, MutableType, MutableModelBase, Query, partial from sqlalchemy import Column, Integer, String, create_engine from sqlalchemy.orm import sessionmaker, scoped_session from sqlalchemy.ext.declarative import declarative_base from sqlalchemy_mutable import HTMLAttrsType # create a session (standard) engine = create_engine('sqlite:///:memory:') session_factory = sessionmaker(bind=engine) Session = scoped_session(session_factory) session = Session() Base = declarative_base() # subclass `MutableModelBase` when creating database models # which may be stored in a `Mutable` object class MyModel(MutableModelBase, Base): __tablename__ = 'mymodel' id = Column(Integer, primary_key=True) greeting = Column(String) attrs = Column(HTMLAttrsType) # initialize a database column with `MutableType` mutable = Column(MutableType) # add a `query` class attribute initialized with a scoped_session # not necessary for use with Flask-SQLAlchemy query = Query(Session) def __init__(self): # set mutable column to `Mutable` object self.mutable = Mutable() # create the database (standard) Base.metadata.create_all(engine) # + from sqlalchemy_mutable import MutableManager MutableManager.session = session model0 = MyModel() model1 = MyModel() model0.mutable = model1 model0.mutable # + from sqlalchemy_mutable import MutableList from convert_list import ConvertList class HelloList(ConvertList, MutableList): @classmethod def convert(cls, item): return item if item.startswith('hello, ') else 'hello, '+item model = MyModel() model.mutable = HelloList(['world', 'moon']) model.mutable session.add(model) session.commit() print(model.mutable) model.mutable += model.mutable[:2] 
session.commit() model.mutable # - # ## Flask-SQLAlchemy setup # + from sqlalchemy_mutable import Mutable, MutableType, MutableModelBase from flask import Flask from flask_sqlalchemy import SQLAlchemy # create a session (standard) app = Flask(__name__) app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///:memory:' app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False db = SQLAlchemy(app) # subclass `MutableModelBase` when creating database models class MyModel(MutableModelBase, db.Model): id = db.Column(db.Integer, primary_key=True) greeting = db.Column(db.String) # initialize a database column with `MutableType` mutable = db.Column(MutableType) def __init__(self): # set mutable column to `Mutable` object self.mutable = Mutable() # create the database (standard) db.create_all() session = db.session # - # ## Index page examples # + model = MyModel() session.add(model) session.commit() # nested mutable objects model.mutable.nested_mutable = Mutable() session.commit() model.mutable.nested_mutable.greet = 'hello world' session.commit() print(model.mutable.nested_mutable.greet) # nested mutable list and dict model.mutable = {} session.commit() model.mutable['greet'] = ['hello world'] session.commit() print(model.mutable) # storing database models model.mutable = model session.commit() print(model.mutable) # common literals model.mutable = 'hello world' session.commit() print(model.mutable) # - # ## Basic objects examples model = MyModel() session.add(model) model.mutable.nested_mutable = Mutable() session.commit() # if `MyModel.mutable` weren't a `MutableType` column, # this change would not survive a commit model.mutable.nested_mutable.greeting = 'hello, world!' 
session.commit() model.mutable.nested_mutable.greeting # + class MyClass(): def greet(self, name='world'): return 'hello, {}!'.format(name) @Mutable.register_coerced_type(MyClass) class CoercedMyClass(Mutable, MyClass): pass model = MyModel() # without registering an associated coerced type, # this will throw an error model.mutable = MyClass() model.mutable.greet() # + class MyClass(): def __init__(self, name): self.name = name def greet(self): return 'hello, {}!'.format(self.name) @Mutable.register_tracked_type(MyClass) class MutableMyClass(MyClass, Mutable): def __init__(self, source=None, root=None): ''' Parameters ---------- source : MyClass Original instance of `MyClass`. This will be converted into a `MutableMyClass` object. root : Mutable or None, default=None Root mutable object. This is handled by SQLAlchemy-Mutable. Set to `None` by default. ''' super().__init__(name=source.name) model = MyModel() session.add(model) model.mutable = Mutable() model.mutable.object = MyClass('world') session.commit() model.mutable.object.name = 'moon' session.commit() model.mutable.object.greet() # - # ## Coerced types # + model = MyModel() model.mutable = True print(model.mutable) model.mutable = complex(1,1) print(model.mutable) model.mutable = 1. 
print(model.mutable) model.mutable = 1 print(model.mutable) model.mutable = 'hello world' print(model.mutable) import datetime model.mutable = datetime.datetime.now() print(model.mutable) def foo(*args, **kwargs): print('args', args) print('kwargs', kwargs) return 0 model.mutable = foo print(model.mutable('hello world', goodbye='moon')) # - # ## Storing models (model shell) model0 = MyModel() model1 = MyModel() session.add_all([model0, model1]) session.commit() model0.mutable = model1 # without subclassing MutableModelBase, # this would not retrieve `model1` model0.mutable # + from sqlalchemy_mutable.model_shell import ModelShell model = MyModel() session.add(model) session.commit() shell = ModelShell(model) shell == model # - model = MyModel() session.add(model) session.commit() model.mutable = {} model.mutable['model'] = model model.mutable.unshell() # ## Mutable tuple, list, and dict model0 = MyModel() model1 = MyModel() model0.mutable = [(model1,)] session.add_all([model0, model1]) session.commit() # without a mutable tuple, # this change would not appear after a commit model1.greeting = 'hello world' session.commit() model0.mutable[0][0].greeting model = MyModel() model.mutable = [] session.add(model) session.commit() # without a mutable list, # this change will not survive a commit model.mutable.append('hello world') session.commit() model.mutable model = MyModel() model.mutable = {} session.add(model) session.commit() # without a mutable dictionary, # this change will not survive a commit model.mutable['hello'] = 'world' session.commit() model.mutable # ## Type conversion # + @Mutable.register_tracked_type(list) class MutableList(Mutable, list): def __init__(self, source=[], root=None): # 1. convert potentially mutable attributes/items to Mutable objects converted_list = self._convert_iterable(source) super().__init__(converted_list) # 2. 
classes with mutable items must have a `_tracked_items` attribute # `_tracked_items` is a list of potentially mutable items @property def _tracked_items(self): return list(self) # 3. call `self._changed()` to register change with the root Mutable object def append(self, item): self._changed() super().append(self._convert_item(item)) model = MyModel() model.mutable = [] session.add(model) session.commit() # without using a mutable list, this change would not survive a commit model.mutable.append('hello world') session.commit() model.mutable # -
examples.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import numpy as np import matplotlib import matplotlib.pyplot as plt import matplotlib.animation as animation import healpy as hp # + # %matplotlib inline import matplotlib.pyplot as plt import numpy as np import astropy.units as u import os from collections import namedtuple from celerite import GP from celerite.terms import Matern32Term import pymc3 as pm from pymc3.smc import sample_smc import theano from corner import corner from lightkurve import search_lightcurvefile # - lcf = search_lightcurvefile("Kepler-411", mission='Kepler').download_all() lc = lcf.PDCSAP_FLUX.stitch() # + import pickle def load_trace(name): with open(name, 'rb') as buff: trace = pickle.load(buff) return trace # + from glob import glob paths = glob('kepler411_incconstrained/k411_*.pkl') inds = [int(p.split('_')[2].split('.pkl')[0]) for p in paths] # - inds t1[t1.shape[0]//2], t1.mean() # %matplotlib inline # plt.hist(trace[f'{n_spots}_P_eq']) # plt.axvline(P_equator) print(alpha) plt.hist(np.exp(trace[f'{n_spots}_ln_shear']), bins=25) plt.axvline(np.exp(np.median(trace[f'{n_spots}_ln_shear'])), color='r') # %matplotlib inline plt.hist(np.exp(samples[:, 2])) spot_props # + for i in inds: # %matplotlib inline t, f, e = lc.time[~np.isnan(lc.flux)], lc.flux[~np.isnan(lc.flux)], lc.flux_err[~np.isnan(lc.flux)] max_time = t.min() + 60 + 60 * i min_time = t.min() + 60 * i skip = 10 t1, f1, e1 = t[(t < max_time) & (t > min_time)][::skip], f[(t < max_time) & (t > min_time)][::skip], e[(t < max_time) & (t > min_time)][::skip] if len(t1) > 0 and t1.ptp() > 50: # if True: gp = GP(Matern32Term(log_sigma=1, log_rho=8)) gp.compute(t1 / 100, e1) gp_trend = gp.predict(f1, t1 / 100, return_cov=False) plt.plot(t1, f1 / gp_trend) f1 /= gp_trend plt.show() e1_tt = 
theano.shared(np.asarray(e1, dtype=theano.config.floatX), name='e1') n_spots = 4 with pm.Model(name=f'{n_spots}') as model1: f0 = pm.Uniform("f0", lower=0, upper=1) spot_model = 1 + f0 eq_period = pm.Uniform("P_eq", lower=8, upper=16) ln_shear = pm.Uniform("ln_shear", lower=-10, upper=np.log(0.6)) inclination = pm.Uniform("inc", lower=np.radians(0), upper=np.radians(45)) lon_lims = 2 * np.pi * np.arange(n_spots + 1) / n_spots for spot_ind in range(n_spots): latitude_cutoff = 10 lon = pm.Uniform(f"lon_{spot_ind}", lower=0, upper=2*np.pi) #lower=lon_lims[spot_ind], upper=lon_lims[spot_ind+1]) lat = pm.Uniform(f"lat_{spot_ind}", lower=np.radians(latitude_cutoff), upper=np.radians(180-latitude_cutoff)) rspot = pm.Uniform(f"R_spot_{spot_ind}", lower=0.001, upper=0.8) period_i = eq_period / (1 - pm.math.exp(ln_shear) * pm.math.sin(lat - np.pi/2)**2) phi = 2 * np.pi / period_i * (t1 - t1.mean()) - lon spot_position_x = (pm.math.cos(phi - np.pi/2) * pm.math.sin(inclination) * pm.math.sin(lat) + pm.math.cos(inclination) * pm.math.cos(lat)) spot_position_y = -pm.math.sin(phi - np.pi/2) * pm.math.sin(lat) spot_position_z = (pm.math.cos(lat) * pm.math.sin(inclination) - pm.math.sin(phi) * pm.math.cos(inclination) * pm.math.sin(lat)) rsq = spot_position_x**2 + spot_position_y**2 contrast = 0.4 spot_model -= rspot**2 * (1 - contrast) * pm.math.where(spot_position_z > 0, pm.math.sqrt(1 - rsq), 0) pm.Normal("obs", mu=spot_model, sigma=20 * e1_tt, observed=f1) trace = load_trace(f"kepler411_incconstrained/k411_{i}.pkl") with model1: ppc = pm.sample_posterior_predictive(trace, samples=100) plt.figure(figsize=(20, 3)) plt.errorbar(t1, f1, 20 * e1, fmt='.', color='k', ecolor='silver') plt.plot(t1, ppc[f'{n_spots}_obs'].T, color='DodgerBlue', lw=2, alpha=0.1) plt.gca().set(xlabel='Time [d]', ylabel='Flux') plt.show() samples = pm.trace_to_dataframe(trace).values stellar_inclination = 90 - np.degrees(np.median(samples[:, 3])) spot_props = np.median(samples[:, 4:], 
axis=0).reshape((n_spots, 3)) alpha = np.exp(np.median(samples[:, 2])) print(f'alpha = {alpha}; stellar_inclination = {stellar_inclination}') NSIDE = 2**8 NPIX = hp.nside2npix(NSIDE) P_equator = np.median(samples[:, 1]) #trace[f'{n_spots}_P_eq']) contrast = 0.4 times = t1#[::4] t_ref = t1.mean() # %matplotlib inline from matplotlib.gridspec import GridSpec maps = [] xsize = 400 xgrid = np.linspace(-1, 1, xsize) xx, yy = np.meshgrid(xgrid, xgrid) r = np.hypot(xx, yy) ld = (1 - 0.4 * r**2 - 0.2 * r) / (1 - 0.4/3 - 0.2/6) composite_maps = [] for t_i in times: spot_instances = [] m = np.zeros(NPIX) for lon, lat, rspot in spot_props: # print(f'lon {lon} lat {lat}') spot_period = P_equator / (1 - alpha * np.sin(lat - np.pi/2)**2) dphi = 2*np.pi * (t_i - t_ref) / spot_period - lon - 1.5 * np.pi spot_vec = hp.ang2vec(np.pi - lat, dphi) # spot_vec = hp.ang2vec(lat, dphi) ipix_spots = hp.query_disc(nside=NSIDE, vec=spot_vec, radius=rspot) m[ipix_spots] = 1 - contrast projmap = hp.orthview(m, half_sky=True, title="", hold=True, rot=(0, (90-stellar_inclination), 0),#rot=(0, -(90-stellar_inclination), 0), max=1.0, flip='geo', cbar=False, return_projected_map=True, xsize=xsize) plt.clf() mapimg = ld * (1-projmap) #spot_instances.append(mapimg) composite_maps.append(mapimg)#np.min(spot_instances, axis=0)) image_lc = np.array([cm[np.isfinite(cm)].sum() for cm in composite_maps]) # %matplotlib inline fakelc = image_lc/image_lc.mean() renorm_fakelc = (fakelc-1)/fakelc.ptp() * np.ptp(f1) + 1 plt.figure() plt.plot(t1, f1, 'k.') plt.plot(t1, ppc['4_obs'].T.mean(axis=1), color='DodgerBlue') plt.plot(times, fakelc, 'r') #renorm_fakelc) plt.axvline(t1.mean()) plt.show() # %matplotlib notebook # Frames per second fps = 5 gs = GridSpec(1, 5) # First set up the figure, the axis, and the plot element we want to animate fig = plt.figure( figsize=(7, 2), dpi=250 ) gs = GridSpec(1, 5, figure=fig) ax_image = plt.subplot(gs[0]) im = ax_image.imshow(composite_maps[0], aspect='equal', 
cmap=plt.cm.copper, extent=[-1, 1, -1, 1], vmin=0, vmax=1, origin='lower' ) ax_image.axis('off') ax_lc = plt.subplot(gs[2:]) ax_lc.plot(t1, ppc[f'{n_spots}_obs'].T, color='DodgerBlue', alpha=0.05) ax_lc.plot(t1, f1, '.', color='k') ax_lc.set(xlabel='Time', ylabel='Flux') for sp in ['right', 'top']: ax_lc.spines[sp].set_visible(False) time_marker = ax_lc.axvline(times[0], ls='--', color='gray') def animate_func(ii): if ii % fps == 0: print('.', end='') im.set_array(composite_maps[ii]) time_marker.set_data([times[ii], times[ii]], [0, 1]) return [im] anim = animation.FuncAnimation( fig, animate_func, frames = len(composite_maps), interval = 1000 / fps, # in ms ) anim.save(f'vis/k411_{i}.mp4', fps=fps, extra_args=['-vcodec', 'libx264']) print('done.') print('i', i) # - print(i) # + #load_trace(f'kepler411_incconstrained/k411_{i}.pkl') # + # # %matplotlib inline # from corner import corner # corner(pm.trace_to_dataframe(trace)) # -
vis_dr.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

from sympy import *
init_printing(use_unicode=True, use_latex=True)

# # Unitary rotation operators around the Bloch Sphere
#
# ## Introduction
#
# In this document we are going to automatically derive the unitary operators representing rotations around the Bloch Sphere. This will be done under the assumption that the Hamiltonian operator is time-independent. We will do it for the three Pauli operators $\sigma^x$, $\sigma^y$ and $\sigma^z$.
#
# ## Deriving the unitary operator
#
# The Hamiltonian operator $H$ generates the time evolution of a quantum state $\left| \psi(t) \right>$ in the Schrödinger picture, where operators are constant and quantum states depend on time. In the Heisenberg picture, by contrast, the unitary operator $U(t)$ is a semantically equivalent representation in which the operator is time-dependent and the quantum state $\left| \psi \right>$ is static.
#
# Let us define a function which symbolically derives $U(t)$ from $H$ using
#
# \begin{equation}
# U(t) = e^{-\frac{i t}{2}H}
# \end{equation}
#
# under the assumption that $\frac{d}{dt}H=0$ or, simply, that $H$ does not depend on time.
def timeIndependentHtoU(H, t) : rows, columns = H.shape U = zeros(rows, columns) eigenvects = H.eigenvects() for eigenvalue, multiplicity, eigenvectors in eigenvects : l = eigenvalue m = multiplicity for eigenvector in eigenvectors : normalized_eigenvector = eigenvector.normalized() entry = exp(-I*t*l*m/2)*normalized_eigenvector*conjugate(normalized_eigenvector.T) U += entry return U # ## Defining the Pauli operators # # Operators matrices are defined as # # \begin{align} # \sigma^x = \begin{bmatrix}0 & 1\\1 & 0\end{bmatrix}, # \sigma^y = \begin{bmatrix}0 & -i\\i & 0\end{bmatrix}, # \sigma^z = \begin{bmatrix}1 & 0\\0 & -1\end{bmatrix} # \end{align} # # Let us define them as `SymPy` matrices and derive the rotation operators $R_x(t)$, $R_y(t)$ and $R_z(t)$ correponsing to $\sigma^x$, $\sigma^y$ and $\sigma^z$ (respectively). # + t = Symbol('t') s_x = Matrix([[0, 1], [1, 0]]) s_y = Matrix([[0, -I], [I, 0]]) s_z = Matrix([[1, 0], [0, -1]]) r_x = timeIndependentHtoU(s_x, t) r_y = timeIndependentHtoU(s_y, t) r_z = timeIndependentHtoU(s_z, t) display(simplify(r_x), simplify(r_y), simplify(r_z)) # - # ## Discuss results # # Expected solution is # # \begin{align} # R_x(t) = \begin{bmatrix}cos(\frac{t}{2}) & -i sin(\frac{t}{2})\\-i sin(\frac{t}{2}) & cos(\frac{t}{2})\end{bmatrix}, # R_y(t) = \begin{bmatrix}cos(\frac{t}{2}) & - sin(\frac{t}{2})\\sin(\frac{t}{2}) & cos(\frac{t}{2})\end{bmatrix}, # R_z(t) = \begin{bmatrix}e^{-\frac{it}{2}} & 0\\0 & e^{\frac{it}{2}}\end{bmatrix} # \end{align} # # which matches the result we symbolically derived using `SymPy`.
bloch-sphere-rotations.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Feature Engineering # + from pathlib import Path import pandas as pd import numpy as np import warnings warnings.filterwarnings(action="ignore", message="^internal gelsd") DATASET = Path("datasets/lotto/data_processed.csv") # The Answer to the Ultimate Question of Life, the Universe, and Everything. np.random.seed(42) # - # Read into data frame dataset = pd.read_csv(DATASET, header=0, sep=',', quotechar='"', parse_dates=['DrawDate'], dtype={'PrizeType': str}) dataset.describe() df = dataset.copy() df.info() df.head(100).T # Python ≥3.5 is required import sys assert sys.version_info >= (3, 5) # Scikit-Learn ≥0.20 is required import sklearn assert sklearn.__version__ >= "0.20" # pytorch import torch from torchvision import transforms # fastai from fastai import * from fastai.vision import * # + # np.c_? # - for ind, column in enumerate(df.columns): print("[{}]".format(column), end=' ') if ind % 8 == 0: print() df = dataset.copy() numeric_features = ["DrawNo", "DaysSinceLastExactMatch", "DaysUntilNextExactMatch", "DaysSinceLastAnyMatch", "DaysUntilNextAnyMatch", "DrawYear", "DrawMonth", "DrawWeek", "DrawDay", "DrawDayofweek", "DrawDayofyear", "CumProbaExactMatch", "CumProbaAnyMatch", ] categorical_features = ["PrizeType"] datetime_features = ["DrawDate"] target_feature = ["LuckyNo"] df = df[target_feature + datetime_features + categorical_features + numeric_features] df.head(10) df.describe() # ## Visualization # %matplotlib inline import matplotlib as mpl import matplotlib.pyplot as plt mpl.rc('axes', labelsize=14) mpl.rc('xtick', labelsize=12) mpl.rc('ytick', labelsize=12) # + # # pd.cut? # np.inf # # pd.Categorical?? # + plt.tight_layout() df.hist(bins=50, figsize=(20,15)) # # df.hist?? 
# - corr_matrix = df.corr() corr_matrix['LuckyNo'].sort_values(ascending=False) # ## Numeric # # ``` # 1. Numeric feature preprocessing is different for tree and # non-tree models: # a. Tree-based models doesn’t depend on scaling # b. Non-tree-based models hugely depend on scaling # # # 2. Most often used preprocessings are: # a. MinMaxScaler - to [0,1] # b. StandardScaler - to mean==0, std==1 # c. Rank - sets spaces between sorted values to be equal # d. np.log(1+x) and np.sqrt(1+x) # # # 3. Scaling and Rank for numeric features: # a. Tree-based models doesn't depend on them # b. Non-tree-based models hugely depend on them # # 4. Most often used preprocessings are: # a. MinMaxScaler - to [0,1] # b. StandardScaler - to mean==0, std==1 # c. Rank - sets spaces between sorted values to be equal # d. np.log(1+x) and np.sqrt(1+x) # # 5. Feature generation is powered by: # a. Prior knowledge # b. Exploratory data analysis # # To [0,1] # sklearn.preprocessing.MinMaxScaler # X = (X X.min())/(X.max() X.min()) # # To mean=0, std=1 # sklearn.preprocessing.StandardScaler # X = (X X.mean())/X.std() # # UPPERBOUND, LOWERBOUND = np.percentile(x, [1,99]) # y = np.clip(x, UPPERBOUBD, LOWERBOUND) # pd.Series(y).hist(bins=30) # # ``` # numeric_features # ## Categorical # # ``` # - Label encoding # - One hot encoding # - Freq endoding # - Mean encoding # # 1. Values in ordinal features are sorted in some meaningful # order # 2. Label encoding maps categories to numbers # 3. Frequency encoding maps categories to their frequencies # 4. Label and Frequency encodings are often used for treebased models # 5. One-hot encoding is often used for non-tree-based models # 6. Interactions of categorical features can help linear models # and KNN # ``` # + # # pd.factorize? # # pd.get_dummies? # sklearn.preprocessing.OneHotEncoder # - # ## DateTime # ## Timeseries # ## Coordinate
02_1_feature_engineer.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] toc="true" # # Table of Contents # <p><div class="lev1 toc-item"><a href="#Short-study-of-the-Lempel-Ziv-complexity" data-toc-modified-id="Short-study-of-the-Lempel-Ziv-complexity-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>Short study of the Lempel-Ziv complexity</a></div><div class="lev2 toc-item"><a href="#Short-definition" data-toc-modified-id="Short-definition-11"><span class="toc-item-num">1.1&nbsp;&nbsp;</span>Short definition</a></div><div class="lev2 toc-item"><a href="#Python-implementation" data-toc-modified-id="Python-implementation-12"><span class="toc-item-num">1.2&nbsp;&nbsp;</span>Python implementation</a></div><div class="lev2 toc-item"><a href="#Tests-(1/2)" data-toc-modified-id="Tests-(1/2)-13"><span class="toc-item-num">1.3&nbsp;&nbsp;</span>Tests (1/2)</a></div><div class="lev2 toc-item"><a href="#Cython-implementation" data-toc-modified-id="Cython-implementation-14"><span class="toc-item-num">1.4&nbsp;&nbsp;</span>Cython implementation</a></div><div class="lev2 toc-item"><a href="#Numba-implementation" data-toc-modified-id="Numba-implementation-15"><span class="toc-item-num">1.5&nbsp;&nbsp;</span>Numba implementation</a></div><div class="lev2 toc-item"><a href="#Tests-(2/2)" data-toc-modified-id="Tests-(2/2)-16"><span class="toc-item-num">1.6&nbsp;&nbsp;</span>Tests (2/2)</a></div><div class="lev2 toc-item"><a href="#Benchmarks" data-toc-modified-id="Benchmarks-17"><span class="toc-item-num">1.7&nbsp;&nbsp;</span>Benchmarks</a></div><div class="lev2 toc-item"><a href="#Complexity-?" 
data-toc-modified-id="Complexity-?-18"><span class="toc-item-num">1.8&nbsp;&nbsp;</span>Complexity ?</a></div><div class="lev2 toc-item"><a href="#Conclusion" data-toc-modified-id="Conclusion-19"><span class="toc-item-num">1.9&nbsp;&nbsp;</span>Conclusion</a></div><div class="lev2 toc-item"><a href="#(Experimental)-Julia-implementation" data-toc-modified-id="(Experimental)-Julia-implementation-110"><span class="toc-item-num">1.10&nbsp;&nbsp;</span>(Experimental) <a href="http://julialang.org" target="_blank">Julia</a> implementation</a></div><div class="lev2 toc-item"><a href="#Ending-notes" data-toc-modified-id="Ending-notes-111"><span class="toc-item-num">1.11&nbsp;&nbsp;</span>Ending notes</a></div> # - # # Short study of the Lempel-Ziv complexity # # This short [Jupyter notebook](https://www.Jupyter.org/) aims at defining and explaining the [Lempel-Ziv complexity](https://en.wikipedia.org/wiki/Lempel-Ziv_complexity). # # [I](http://perso.crans.org/besson/) will give examples, and benchmarks of different implementations. # # - **Reference:** <NAME> and <NAME>, *« On the Complexity of Finite Sequences »*, IEEE Trans. on Information Theory, January 1976, p. 75–81, vol. 22, n°1. # ---- # ## Short definition # The Lempel-Ziv complexity is defined as the number of different substrings encountered as the stream is viewed from beginning to the end. # # As an example: # # ```python # >>> s = '1001111011000010' # >>> lempel_ziv_complexity(s) # 1 / 0 / 01 / 11 / 10 / 110 / 00 / 010 # 8 # ``` # # Marking the different substrings, this sequence $s$ has complexity $\mathrm{Lempel}$-$\mathrm{Ziv}(s) = 8$ because $s = 1001111011000010 = 1 / 0 / 01 / 11 / 10 / 110 / 00 / 010$. # # - See the page https://en.wikipedia.org/wiki/Lempel-Ziv_complexity for more details.
# Other examples: # # ```python # >>> lempel_ziv_complexity('1010101010101010') # 1, 0, 10, 101, 01, 010, 1010 # 7 # >>> lempel_ziv_complexity('1001111011000010000010') # 1, 0, 01, 11, 10, 110, 00, 010, 000 # 9 # >>> lempel_ziv_complexity('100111101100001000001010') # 1, 0, 01, 11, 10, 110, 00, 010, 000, 0101 # 10 # ``` # ---- # ## Python implementation def lempel_ziv_complexity(sequence): """Lempel-Ziv complexity for a binary sequence, in simple Python code.""" sub_strings = set() n = len(sequence) ind = 0 inc = 1 # this while loop runs at most n times while True: if ind + inc > len(sequence): break # this can take some time, takes O(inc) sub_str = sequence[ind : ind + inc] # and this also, takes a O(log |size set|) in worst case # max value for inc = n / size set at the end # so worst case is that the set contains sub strings of the same size # and the worst loop takes a O(n / |S| * log(|S|)) # ==> so if n/|S| is constant, it gives O(n log(n)) at the end # but if n/|S| = O(n) then it gives O(n^2) if sub_str in sub_strings: inc += 1 else: sub_strings.add(sub_str) ind += inc inc = 1 return len(sub_strings) # ---- # ## Tests (1/2) s = '1001111011000010' lempel_ziv_complexity(s) # 1 / 0 / 01 / 11 / 10 / 110 / 00 / 010 # %timeit lempel_ziv_complexity(s) lempel_ziv_complexity('1010101010101010') # 1, 0, 10, 101, 01, 010, 1010 lempel_ziv_complexity('1001111011000010000010') # 1, 0, 01, 11, 10, 110, 00, 010, 000 lempel_ziv_complexity('100111101100001000001010') # 1, 0, 01, 11, 10, 110, 00, 010, 000, 0101 # %timeit lempel_ziv_complexity('100111101100001000001010') # + import random def random_string(size, alphabet="ABCDEFGHIJKLMNOPQRSTUVWXYZ"): return "".join(random.choices(alphabet, k=size)) def random_binary_sequence(size): return random_string(size, alphabet="01") # - random_string(100) random_binary_sequence(100) for (r, name) in zip( [random_string, random_binary_sequence], ["random strings in A..Z", "random binary sequences"] ): print("\nFor {}...".format(name)) for 
n in [10, 100, 1000, 10000, 100000]: print(" of sizes {}, Lempel-Ziv complexity runs in:".format(n)) # %timeit lempel_ziv_complexity(r(n)) # We can start to see that the time complexity of this function seems to grow linearly as the size grows. # ---- # ## Cython implementation # As [this blog post](https://jakevdp.github.io/blog/2013/06/15/numba-vs-cython-take-2/) explains it, we can easily try to use [Cython](http://Cython.org/) in a notebook cell. # # > See [the Cython documentation](http://docs.cython.org/en/latest/src/quickstart/build.html#using-the-jupyter-notebook) for more information. # %load_ext cython # + language="cython" # import cython # # ctypedef unsigned int DTYPE_t # # @cython.boundscheck(False) # turn off bounds-checking for entire function, quicker but less safe # def lempel_ziv_complexity_cython(str sequence not None): # """Lempel-Ziv complexity for a string, in simple Cython code (C extension).""" # # cdef set sub_strings = set() # cdef str sub_str = "" # cdef DTYPE_t n = len(sequence) # cdef DTYPE_t ind = 0 # cdef DTYPE_t inc = 1 # while True: # if ind + inc > len(sequence): # break # sub_str = sequence[ind : ind + inc] # if sub_str in sub_strings: # inc += 1 # else: # sub_strings.add(sub_str) # ind += inc # inc = 1 # return len(sub_strings) # - # Let try it! s = '1001111011000010' lempel_ziv_complexity_cython(s) # 1 / 0 / 01 / 11 / 10 / 110 / 00 / 010 # %timeit lempel_ziv_complexity(s) # %timeit lempel_ziv_complexity_cython(s) lempel_ziv_complexity_cython('1010101010101010') # 1, 0, 10, 101, 01, 010, 1010 lempel_ziv_complexity_cython('1001111011000010000010') # 1, 0, 01, 11, 10, 110, 00, 010, 000 lempel_ziv_complexity_cython('100111101100001000001010') # 1, 0, 01, 11, 10, 110, 00, 010, 000, 0101 # Now for a test of the speed? 
for (r, name) in zip( [random_string, random_binary_sequence], ["random strings in A..Z", "random binary sequences"] ): print("\nFor {}...".format(name)) for n in [10, 100, 1000, 10000, 100000]: print(" of sizes {}, Lempel-Ziv complexity in Cython runs in:".format(n)) # %timeit lempel_ziv_complexity_cython(r(n)) # > $\implies$ Yay! It seems faster indeed! but only x2 times faster... # ---- # ## Numba implementation # As [this blog post](https://jakevdp.github.io/blog/2013/06/15/numba-vs-cython-take-2/) explains it, we can also try to use [Numba](http://Numba.PyData.org/) in a notebook cell. from numba import jit @jit def lempel_ziv_complexity_numba(sequence : str) -> int: """Lempel-Ziv complexity for a sequence, in Python code using numba.jit() for automatic speedup (hopefully).""" sub_strings = set() n : int= len(sequence) ind : int = 0 inc : int = 1 while True: if ind + inc > len(sequence): break sub_str : str = sequence[ind : ind + inc] if sub_str in sub_strings: inc += 1 else: sub_strings.add(sub_str) ind += inc inc = 1 return len(sub_strings) # Let try it! s = '1001111011000010' lempel_ziv_complexity_numba(s) # 1 / 0 / 01 / 1110 / 1100 / 0010 # %timeit lempel_ziv_complexity_numba(s) lempel_ziv_complexity_numba('1010101010101010') # 1, 0, 10, 101, 01, 010, 1010 lempel_ziv_complexity_numba('1001111011000010000010') # 1, 0, 01, 11, 10, 110, 00, 010, 000 9 lempel_ziv_complexity_numba('100111101100001000001010') # 1, 0, 01, 11, 10, 110, 00, 010, 000, 0101 # %timeit lempel_ziv_complexity_numba('100111101100001000001010') # > $\implies$ Well... It doesn't seem that much faster from the naive Python code. # > We specified the signature when calling [`@numba.jit`](http://numba.pydata.org/numba-doc/latest/user/jit.html), and used the more appropriate data structure (string is probably the smaller, numpy array are probably faster). # > But even these tricks didn't help that much. 
# # > I tested, and without specifying the signature, the fastest approach is using string, compared to using lists or numpy arrays. # > Note that the [`@jit`](http://numba.pydata.org/numba-doc/latest/user/jit.html)-powered function is compiled at runtime when first being called, so the signature used for the *first* call is determining the signature used by the compile function # ---- # ## Tests (2/2) # # To test more robustly, let us generate some (uniformly) random binary sequences. # + from numpy.random import binomial def bernoulli(p, size=1): """One or more samples from a Bernoulli of probability p.""" return binomial(1, p, size) # - bernoulli(0.5, 20) # That's probably not optimal, but we can generate a string with: ''.join(str(i) for i in bernoulli(0.5, 20)) def random_binary_sequence(n, p=0.5): """Uniform random binary sequence of size n, with rate of 0/1 being p.""" return ''.join(str(i) for i in bernoulli(p, n)) random_binary_sequence(50) random_binary_sequence(50, p=0.1) random_binary_sequence(50, p=0.25) random_binary_sequence(50, p=0.5) random_binary_sequence(50, p=0.75) random_binary_sequence(50, p=0.9) # And so, this function can test to check that the three implementations (naive, Cython-powered, Numba-powered) always give the same result. def tests_3_functions(n, p=0.5, debug=True): s = random_binary_sequence(n, p=p) c1 = lempel_ziv_complexity(s) if debug: print("Sequence s = {} ==> complexity C = {}".format(s, c1)) c2 = lempel_ziv_complexity_cython(s) c3 = lempel_ziv_complexity_numba(s) assert c1 == c2 == c3, "Error: the sequence {} gave different values of the Lempel-Ziv complexity from 3 functions ({}, {}, {})...".format(s, c1, c2, c3) return c1 tests_3_functions(5) tests_3_functions(20) tests_3_functions(50) tests_3_functions(500) tests_3_functions(5000) # ---- # ## Benchmarks # # On two example of strings (binary sequences), we can compare our three implementation. 
# %timeit lempel_ziv_complexity('100111101100001000001010') # %timeit lempel_ziv_complexity_cython('100111101100001000001010') # %timeit lempel_ziv_complexity_numba('100111101100001000001010') # %timeit lempel_ziv_complexity('10011110110000100000101000100100101010010111111011001111111110101001010110101010') # %timeit lempel_ziv_complexity_cython('10011110110000100000101000100100101010010111111011001111111110101001010110101010') # %timeit lempel_ziv_complexity_numba('10011110110000100000101000100100101010010111111011001111111110101001010110101010') # Let check the time used by all the three functions, for longer and longer sequences: # %timeit tests_3_functions(10, debug=False) # %timeit tests_3_functions(20, debug=False) # %timeit tests_3_functions(40, debug=False) # %timeit tests_3_functions(80, debug=False) # %timeit tests_3_functions(160, debug=False) # %timeit tests_3_functions(320, debug=False) def test_cython(n): s = random_binary_sequence(n) c = lempel_ziv_complexity_cython(s) return c # %timeit test_cython(10) # %timeit test_cython(20) # %timeit test_cython(40) # %timeit test_cython(80) # %timeit test_cython(160) # %timeit test_cython(320) # %timeit test_cython(640) # %timeit test_cython(1280) # %timeit test_cython(2560) # %timeit test_cython(5120) # %timeit test_cython(10240) # %timeit test_cython(20480) # ---- # ## Complexity ? # $\implies$ The function `lempel_ziv_complexity_cython` seems to be indeed (almost) linear in $n$, the length of the binary sequence $S$. # # But let check more precisely, as it could also have a complexity of $\mathcal{O}(n \log n)$. 
import matplotlib.pyplot as plt import seaborn as sns # %matplotlib inline sns.set(context="notebook", style="darkgrid", palette="hls", font="sans-serif", font_scale=1.4) import numpy as np import timeit # + sizes = np.array(np.trunc(np.logspace(1, 6, 30)), dtype=int) times = np.array([ timeit.timeit( stmt="lempel_ziv_complexity_cython(random_string({}))".format(n), globals=globals(), number=10, ) for n in sizes ]) # - plt.figure(figsize=(15, 10)) plt.plot(sizes, times, 'o-') plt.xlabel("Length $n$ of the binary sequence $S$") plt.ylabel(r"Time in $\mu\;\mathrm{s}$") plt.title("Time complexity of Lempel-Ziv complexity") plt.show() plt.figure(figsize=(15, 10)) plt.loglog(sizes, times, 'o-') plt.xlabel("Length $n$ of the binary sequence $S$") plt.ylabel(r"Time in $\mu\;\mathrm{s}$") plt.title("Time complexity of Lempel-Ziv complexity, loglog scale") plt.show() # It is linear in $\log\log$ scale, so indeed the algorithm seems to have a linear complexity. # # To sum-up, for a sequence $S$ of length $n$, it takes $\mathcal{O}(n)$ basic operations to compute its Lempel-Ziv complexity $\mathrm{Lempel}-\mathrm{Ziv}(S)$. # ---- # ## Conclusion # # - The Lempel-Ziv complexity is not too hard to implement, and it indeed represents a certain complexity of a binary sequence, capturing the regularity and reproducibility of the sequence. # # - Using the [Cython](http://Cython.org/) was quite useful to have a $\simeq \times 100$ speed up on our manual naive implementation ! # # - The algorithm is not easy to analyze, we have a trivial $\mathcal{O}(n^2)$ bound but experiments showed it is more likely to be $\mathcal{O}(n \log n)$ in the worst case, and $\mathcal{O}(n)$ in practice for "not too complicated sequences" (or in average, for random sequences). # ---- # ## (Experimental) [Julia](http://julialang.org) implementation # # I want to (quickly) try to see if I can use [Julia](http://julialang.org) to write a faster version of this function. 
# See [issue #1](https://github.com/Naereen/Lempel-Ziv_Complexity/issues/1). # + # %%time # %%script julia """Lempel-Ziv complexity for a sequence, in simple Julia code.""" function lempel_ziv_complexity(sequence) sub_strings = Set() n = length(sequence) ind = 1 inc = 1 while true if ind + inc > n break end sub_str = sequence[ind : ind + inc] if sub_str in sub_strings inc += 1 else push!(sub_strings, sub_str) ind += inc inc = 1 end end return length(sub_strings) end s = "1001111011000010" lempel_ziv_complexity(s) # 1 / 0 / 01 / 1110 / 1100 / 0010 M = 1000; N = 10000; for _ in 1:M s = join(rand(0:1, N)); lempel_ziv_complexity(s); end lempel_ziv_complexity(s) # 1 / 0 / 01 / 1110 / 1100 / 0010 # - # And to compare it fairly, let us use [Pypy](http://pypy.org) for comparison. # + # %%time # %%pypy def lempel_ziv_complexity(sequence): """Lempel-Ziv complexity for a binary sequence, in simple Python code.""" sub_strings = set() n = len(sequence) ind = 0 inc = 1 while True: if ind + inc > len(sequence): break sub_str = sequence[ind : ind + inc] if sub_str in sub_strings: inc += 1 else: sub_strings.add(sub_str) ind += inc inc = 1 return len(sub_strings) s = "1001111011000010" lempel_ziv_complexity(s) # 1 / 0 / 01 / 11 / 10 / 110 / 00 / 010 from random import random M = 1000 N = 10000 for _ in range(M): s = ''.join(str(int(random() < 0.5)) for _ in range(N)) lempel_ziv_complexity(s) # - # So we can check that on these 1000 random trials on strings of size 10000, the naive Julia version is slower than the naive Python version (executed by Pypy for speedup). # ---- # ## Ending notes # > Thanks for reading! # > My implementation is [now open-source and available on GitHub](https://github.com/Naereen/Lempel-Ziv_Complexity), on https://github.com/Naereen/Lempel-Ziv_Complexity. # # > It will be available from PyPi very soon, see https://pypi.python.org/pypi/lempel_ziv_complexity. 
# # > See [this repo on GitHub](https://github.com/Naereen/notebooks/) for more notebooks, or [on nbviewer.jupyter.org](https://nbviewer.jupyter.org/github/Naereen/notebooks/). # # > That's it for this demo! See you, folks!
Short_study_of_the_Lempel-Ziv_complexity.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="c1d10341-001" colab_type="text" # #PoliceBot # A tool that helps enforce CM object name conventions by checking names against a set of client-defined patterns, and emailing violations to appropriate agency teams on a daily basis. # # + [markdown] id="c1d10341-002" colab_type="text" # #License # # Copyright 2020 Google LLC, # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # # + [markdown] id="c1d10341-003" colab_type="text" # #Disclaimer # This is not an officially supported Google product. It is a reference implementation. There is absolutely NO WARRANTY provided for using this code. The code is Apache Licensed and CAN BE fully modified, white labeled, and disassembled by your team. # # This code generated (see starthinker/scripts for possible source): # - **Command**: "python starthinker_ui/manage.py colab" # - **Command**: "python starthinker/tools/colab.py [JSON RECIPE]" # # # + [markdown] id="c1d10341-004" colab_type="text" # #1. Install Dependencies # First install the libraries needed to execute recipes, this only needs to be done once, then click play. # # + id="c1d10341-005" colab_type="code" # !pip install git+https://github.com/google/starthinker # + [markdown] id="c1d10341-006" colab_type="text" # #2. 
Set Configuration # # This code is required to initialize the project. Fill in required fields and press play. # # 1. If the recipe uses a Google Cloud Project: # - Set the configuration **project** value to the project identifier from [these instructions](https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md). # # 1. If the recipe has **auth** set to **user**: # - If you have user credentials: # - Set the configuration **user** value to your user credentials JSON. # - If you DO NOT have user credentials: # - Set the configuration **client** value to [downloaded client credentials](https://github.com/google/starthinker/blob/master/tutorials/cloud_client_installed.md). # # 1. If the recipe has **auth** set to **service**: # - Set the configuration **service** value to [downloaded service credentials](https://github.com/google/starthinker/blob/master/tutorials/cloud_service.md). # # # + id="c1d10341-007" colab_type="code" from starthinker.util.configuration import Configuration CONFIG = Configuration( project="", client={}, service={}, user="/content/user.json", verbose=True ) # + [markdown] id="c1d10341-008" colab_type="text" # #3. Enter PoliceBot Recipe Parameters # 1. Add this card to a recipe and save it. # 1. Then click <strong>Run Now</strong> to deploy. # 1. Follow the <a href="https://docs.google.com/document/d/1euSZt5VFmaMfV-vShb6NH6LWfA7a5KSPpSl1hYeNlAA">instructions</a> for setup. # Modify the values below for your use case, can be done multiple times, then click play. # # + id="c1d10341-009" colab_type="code" FIELDS = { 'recipe_name': '', # Name of document to deploy to. } print("Parameters Set To: %s" % FIELDS) # + [markdown] id="c1d10341-010" colab_type="text" # #4. Execute PoliceBot # This does NOT need to be modified unless you are changing the recipe, click play. 
# # + id="c1d10341-011" colab_type="code" from starthinker.util.configuration import execute from starthinker.util.recipe import json_set_fields TASKS = [ { 'drive': { 'auth': 'user', 'hour': [ ], 'copy': { 'source': 'https://docs.google.com/spreadsheets/d/1dkESiK2s8YvdC03F3t4Jk_wvxJ0NMNk8CTGxO0HQk6I', 'destination': {'field': {'name': 'recipe_name', 'prefix': 'PoliceBot For ', 'kind': 'string', 'order': 1, 'description': 'Name of document to deploy to.', 'default': ''}} } } } ] json_set_fields(TASKS, FIELDS) execute(CONFIG, TASKS, force=True)
colabs/policebot.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # **[OFE-01]** 必要なモジュールをインポートします。 import tensorflow as tf import numpy as np import matplotlib.pyplot as plt import cPickle as pickle # **[OFE-02]** データファイル「ORENIST.data」から画像とラベルのデータを読み込みます。 with open('ORENIST.data', 'rb') as file: images, labels = pickle.load(file) # **[OFE-03]** 画像データのサンプルを表示します。 fig = plt.figure(figsize=(10,5)) for i in range(40): subplot = fig.add_subplot(4, 10, i+1) subplot.set_xticks([]) subplot.set_yticks([]) subplot.set_title('%d' % np.argmax(labels[i])) subplot.imshow(images[i].reshape(28,28), vmin=0, vmax=1, cmap=plt.cm.gray_r, interpolation='nearest') # **[OFE-04]** フィルターの情報を格納した多次元リストを作る関数を用意します。 def edge_filter(): filter0 = np.array( [[ 2, 1, 0,-1,-2], [ 3, 2, 0,-2,-3], [ 4, 3, 0,-3,-4], [ 3, 2, 0,-2,-3], [ 2, 1, 0,-1,-2]]) / 23.0 filter1 = np.array( [[ 2, 3, 4, 3, 2], [ 1, 2, 3, 2, 1], [ 0, 0, 0, 0, 0], [-1,-2,-3,-2,-1], [-2,-3,-4,-3,-2]]) / 23.0 filter_array = np.zeros([5,5,1,2]) filter_array[:,:,0,0] = filter0 filter_array[:,:,0,1] = filter1 return tf.constant(filter_array, dtype=tf.float32) # **[OFE-05]** 画像データにフィルターを適用する計算式を用意します。 # + x = tf.placeholder(tf.float32, [None, 784]) x_image = tf.reshape(x, [-1,28,28,1]) W_conv = edge_filter() h_conv = tf.abs(tf.nn.conv2d(x_image, W_conv, strides=[1,1,1,1], padding='SAME')) h_conv_cutoff = tf.nn.relu(h_conv-0.2) h_pool =tf.nn.max_pool(h_conv_cutoff, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME') # - # **[OFE-06]** セッションを用意して、Variable を初期化します。 sess = tf.Session() sess.run(tf.initialize_all_variables()) # **[OFE-07]** 最初の9個分の画像データに対して、畳み込みフィルターを適用した結果を計算します。 filter_vals, conv_vals = sess.run([W_conv, h_conv_cutoff], feed_dict={x:images[:9]}) # **[OFE-08]** 得られた結果を画像として表示します。 # # ここでは、オリジナルの画像と2種類のフィルターを適用したそれぞれの結果を表示しています。 # # 左端は、適用したフィルターを画像化したものです。 # + fig = 
plt.figure(figsize=(10,3)) for i in range(2): subplot = fig.add_subplot(3, 10, 10*(i+1)+1) subplot.set_xticks([]) subplot.set_yticks([]) subplot.imshow(filter_vals[:,:,0,i], cmap=plt.cm.gray_r, interpolation='nearest') v_max = np.max(conv_vals) for i in range(9): subplot = fig.add_subplot(3, 10, i+2) subplot.set_xticks([]) subplot.set_yticks([]) subplot.set_title('%d' % np.argmax(labels[i])) subplot.imshow(images[i].reshape((28,28)), vmin=0, vmax=1, cmap=plt.cm.gray_r, interpolation='nearest') subplot = fig.add_subplot(3, 10, 10+i+2) subplot.set_xticks([]) subplot.set_yticks([]) subplot.imshow(conv_vals[i,:,:,0], vmin=0, vmax=v_max, cmap=plt.cm.gray_r, interpolation='nearest') subplot = fig.add_subplot(3, 10, 20+i+2) subplot.set_xticks([]) subplot.set_yticks([]) subplot.imshow(conv_vals[i,:,:,1], vmin=0, vmax=v_max, cmap=plt.cm.gray_r, interpolation='nearest') # - # **[OFE-09]** フィルターに加えて、プーリング層を適用した結果を取得します。 pool_vals = sess.run(h_pool, feed_dict={x:images[:9]}) # **[OFE-10]** 得られた結果を画像として表示します。 # + fig = plt.figure(figsize=(10,3)) for i in range(2): subplot = fig.add_subplot(3, 10, 10*(i+1)+1) subplot.set_xticks([]) subplot.set_yticks([]) subplot.imshow(filter_vals[:,:,0,i], cmap=plt.cm.gray_r, interpolation='nearest') v_max = np.max(pool_vals) for i in range(9): subplot = fig.add_subplot(3, 10, i+2) subplot.set_xticks([]) subplot.set_yticks([]) subplot.set_title('%d' % np.argmax(labels[i])) subplot.imshow(images[i].reshape((28,28)), vmin=0, vmax=1, cmap=plt.cm.gray_r, interpolation='nearest') subplot = fig.add_subplot(3, 10, 10+i+2) subplot.set_xticks([]) subplot.set_yticks([]) subplot.imshow(pool_vals[i,:,:,0], vmin=0, vmax=v_max, cmap=plt.cm.gray_r, interpolation='nearest') subplot = fig.add_subplot(3, 10, 20+i+2) subplot.set_xticks([]) subplot.set_yticks([]) subplot.imshow(pool_vals[i,:,:,1], vmin=0, vmax=v_max, cmap=plt.cm.gray_r, interpolation='nearest')
Chapter04/ORENIST filter example.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # --- # + [markdown] origin_pos=0 # # 多GPU训练 # :label:`sec_multi_gpu` # # 到目前为止,我们讨论了如何在CPU和GPU上高效地训练模型,同时在 :numref:`sec_auto_para`中展示了深度学习框架如何在CPU和GPU之间自动地并行化计算和通信,还在 :numref:`sec_use_gpu`中展示了如何使用`nvidia-smi`命令列出计算机上所有可用的GPU。 # 但是我们没有讨论如何真正实现深度学习训练的并行化。 # 是否一种方法,以某种方式分割数据到多个设备上,并使其能够正常工作呢? # 本节将详细介绍如何从零开始并行地训练网络, # 这里需要运用小批量随机梯度下降算法(详见 :numref:`sec_minibatch_sgd`)。 # 后面我还讲介绍如何使用高级API并行训练网络(请参阅 :numref:`sec_multi_gpu_concise`)。 # # ## 问题拆分 # # 我们从一个简单的计算机视觉问题和一个稍稍过时的网络开始。 # 这个网络有多个卷积层和汇聚层,最后可能有几个全连接的层,看起来非常类似于LeNet :cite:`LeCun.Bottou.Bengio.ea.1998`或AlexNet :cite:`Krizhevsky.Sutskever.Hinton.2012`。 # 假设我们有多个GPU(如果是桌面服务器则有$2$个,AWS g4dn.12xlarge上有$4$个,p3.16xlarge上有$8$个,p2.16xlarge上有$16$个)。 # 我们希望以一种方式对训练进行拆分,为实现良好的加速比,还能同时受益于简单且可重复的设计选择。 # 毕竟,多个GPU同时增加了内存和计算能力。 # 简而言之,对于需要分类的小批量训练数据,我们有以下选择。 # # 第一种方法,在多个GPU之间拆分网络。 # 也就是说,每个GPU将流入特定层的数据作为输入,跨多个后续层对数据进行处理,然后将数据发送到下一个GPU。 # 与单个GPU所能处理的数据相比,我们可以用更大的网络处理数据。 # 此外,每个GPU占用的*显存*(memory footprint)可以得到很好的控制,虽然它只是整个网络显存的一小部分。 # # 然而,GPU的接口之间需要的密集同步可能是很难办的,特别是层之间计算的工作负载不能正确匹配的时候, # 还有层之间的接口需要大量的数据传输的时候(例如:激活值和梯度,数据量可能会超出GPU总线的带宽)。 # 此外,计算密集型操作的顺序对于拆分来说也是非常重要的,这方面的最好研究可参见 :cite:`Mirhoseini.Pham.Le.ea.2017`,其本质仍然是一个困难的问题,目前还不清楚研究是否能在特定问题上实现良好的线性缩放。 # 综上所述,除非存框架或操作系统本身支持将多个GPU连接在一起,否则不建议这种方法。 # # 第二种方法,拆分层内的工作。 # 例如,将问题分散到$4$个GPU,每个GPU生成$16$个通道的数据,而不是在单个GPU上计算$64$个通道。 # 对于全连接的层,同样可以拆分输出单元的数量。 # :numref:`fig_alexnet_original`描述了这种设计,其策略用于处理显存非常小(当时为2GB)的GPU。 # 当通道或单元的数量不太小时,使计算性能有良好的提升。 # 此外,由于可用的显存呈线性扩展,多个GPU能够处理不断变大的网络。 # # ![由于GPU显存有限,原有AlexNet设计中的模型并行](../img/alexnet-original.svg) # :label:`fig_alexnet_original` # # 然而,我们需要大量的同步或*屏障操作*(barrier operation),因为每一层都依赖于所有其他层的结果。 # 此外,需要传输的数据量也可能比跨GPU拆分层时还要大。 # 因此,基于带宽的成本和复杂性,我们同样不推荐这种方法。 # # 最后一种方法,跨多个GPU对数据进行拆分。 # 这种方式下,所有GPU尽管有不同的观测结果,但是执行着相同类型的工作。 # 在完成每个小批量数据的训练之后,梯度在GPU上聚合。 # 这种方法最简单,并可以应用于任何情况,同步只需要在每个小批量数据处理之后进行。 # 
也就是说,当其他梯度参数仍在计算时,完成计算的梯度参数就可以开始交换。 # 而且,GPU的数量越多,小批量包含的数据量就越大,从而就能提高训练效率。 # 但是,添加更多的GPU并不能让我们训练更大的模型。 # # ![在多个GPU上并行化。从左到右:原始问题、网络并行、分层并行、数据并行](../img/splitting.svg) # :label:`fig_splitting` # # :numref:`fig_splitting`中比较了多个GPU上不同的并行方式。 # 总体而言,只要GPU的显存足够大,数据并行是最方便的。 # 有关分布式训练分区的详细描述,请参见 :cite:`Li.Andersen.Park.ea.2014`。 # 在深度学习的早期,GPU的显存曾经是一个棘手的问题,然而如今除了非常特殊的情况,这个问题已经解决。 # 下面我们将重点讨论数据并行性。 # # ## 数据并行性 # # 假设一台机器有$k$个GPU。 # 给定需要训练的模型,虽然每个GPU上的参数值都是相同且同步的,但是每个GPU都将独立地维护一组完整的模型参数。 # 例如, :numref:`fig_data_parallel`演示了在$k=2$时基于数据并行方法训练模型。 # # ![利用两个GPU上的数据,并行计算小批量随机梯度下降](../img/data-parallel.svg) # :label:`fig_data_parallel` # # 一般来说,$k$个GPU并行训练过程如下: # # * 在任何一次训练迭代中,给定的随机的小批量样本都将被分成$k$个部分,并均匀地分配到GPU上。 # * 每个GPU根据分配给它的小批量子集,计算模型参数的损失和梯度。 # * 将$k$个GPU中的局部梯度聚合,以获得当前小批量的随机梯度。 # * 聚合梯度被重新分发到每个GPU中。 # * 每个GPU使用这个小批量随机梯度,来更新它所维护的完整的模型参数集。 # # # 在实践中请注意,当在$k$个GPU上训练时,需要扩大小批量的大小为$k$的倍数,这样每个GPU都有相同的工作量,就像只在单个GPU上训练一样。 # 因此,在16-GPU服务器上可以显著地增加小批量数据量的大小,同时可能还需要相应地提高学习率。 # 还请注意, :numref:`sec_batch_norm`中的批量规范化也需要调整,例如,为每个GPU保留单独的批量规范化参数。 # # 下面我们将使用一个简单网络来演示多GPU训练。 # # + origin_pos=2 tab=["pytorch"] # %matplotlib inline import torch from torch import nn from torch.nn import functional as F from d2l import torch as d2l # + [markdown] origin_pos=3 # ## [**简单网络**] # # 我们使用 :numref:`sec_lenet`中介绍的(稍加修改的)LeNet, # 从零开始定义它,从而详细说明参数交换和同步。 # # + origin_pos=5 tab=["pytorch"] # 初始化模型参数 scale = 0.01 W1 = torch.randn(size=(20, 1, 3, 3)) * scale b1 = torch.zeros(20) W2 = torch.randn(size=(50, 20, 5, 5)) * scale b2 = torch.zeros(50) W3 = torch.randn(size=(800, 128)) * scale b3 = torch.zeros(128) W4 = torch.randn(size=(128, 10)) * scale b4 = torch.zeros(10) params = [W1, b1, W2, b2, W3, b3, W4, b4] # 定义模型 def lenet(X, params): h1_conv = F.conv2d(input=X, weight=params[0], bias=params[1]) h1_activation = F.relu(h1_conv) h1 = F.avg_pool2d(input=h1_activation, kernel_size=(2, 2), stride=(2, 2)) h2_conv = F.conv2d(input=h1, weight=params[2], bias=params[3]) h2_activation = F.relu(h2_conv) h2 = 
F.avg_pool2d(input=h2_activation, kernel_size=(2, 2), stride=(2, 2)) h2 = h2.reshape(h2.shape[0], -1) h3_linear = torch.mm(h2, params[4]) + params[5] h3 = F.relu(h3_linear) y_hat = torch.mm(h3, params[6]) + params[7] return y_hat # 交叉熵损失函数 loss = nn.CrossEntropyLoss(reduction='none') # + [markdown] origin_pos=6 # ## 数据同步 # # 对于高效的多GPU训练,我们需要两个基本操作。 # 首先,我们需要[**向多个设备分发参数**]并附加梯度(`get_params`)。 # 如果没有参数,就不可能在GPU上评估网络。 # 第二,需要跨多个设备对参数求和,也就是说,需要一个`allreduce`函数。 # # + origin_pos=8 tab=["pytorch"] def get_params(params, device): new_params = [p.to(device) for p in params] for p in new_params: p.requires_grad_() return new_params # + [markdown] origin_pos=9 # 通过将模型参数复制到一个GPU。 # # + origin_pos=10 tab=["pytorch"] new_params = get_params(params, d2l.try_gpu(0)) print('b1 权重:', new_params[1]) print('b1 梯度:', new_params[1].grad) # + [markdown] origin_pos=11 # 由于还没有进行任何计算,因此权重参数的梯度仍然为零。 # 假设现在有一个向量分布在多个GPU上,下面的[**`allreduce`函数将所有向量相加,并将结果广播给所有GPU**]。 # 请注意,我们需要将数据复制到累积结果的设备,才能使函数正常工作。 # # + origin_pos=13 tab=["pytorch"] def allreduce(data): for i in range(1, len(data)): data[0][:] += data[i].to(data[0].device) for i in range(1, len(data)): data[i][:] = data[0].to(data[i].device) # + [markdown] origin_pos=14 # 通过在不同设备上创建具有不同值的向量并聚合它们。 # # + origin_pos=16 tab=["pytorch"] data = [torch.ones((1, 2), device=d2l.try_gpu(i)) * (i + 1) for i in range(2)] print('allreduce之前:\n', data[0], '\n', data[1]) allreduce(data) print('allreduce之后:\n', data[0], '\n', data[1]) # + [markdown] origin_pos=17 # ## 数据分发 # # 我们需要一个简单的工具函数,[**将一个小批量数据均匀地分布在多个GPU上**]。 # 例如,有两个GPU时,我们希望每个GPU可以复制一半的数据。 # 因为深度学习框架的内置函数编写代码更方便、更简洁,所以在$4 \times 5$矩阵上使用它进行尝试。 # # + origin_pos=19 tab=["pytorch"] data = torch.arange(20).reshape(4, 5) devices = [torch.device('cuda:0'), torch.device('cuda:1')] split = nn.parallel.scatter(data, devices) print('input :', data) print('load into', devices) print('output:', split) # + [markdown] origin_pos=20 # 为了方便以后复用,我们定义了可以同时拆分数据和标签的`split_batch`函数。 # # + origin_pos=22 
tab=["pytorch"] #@save def split_batch(X, y, devices): """将X和y拆分到多个设备上""" assert X.shape[0] == y.shape[0] return (nn.parallel.scatter(X, devices), nn.parallel.scatter(y, devices)) # + [markdown] origin_pos=23 # ## 训练 # # 现在我们可以[**在一个小批量上实现多GPU训练**]。 # 在多个GPU之间同步数据将使用刚才讨论的辅助函数`allreduce`和`split_and_load`。 # 我们不需要编写任何特定的代码来实现并行性。 # 因为计算图在小批量内的设备之间没有任何依赖关系,因此它是“自动地”并行执行。 # # + origin_pos=25 tab=["pytorch"] def train_batch(X, y, device_params, devices, lr): X_shards, y_shards = split_batch(X, y, devices) # 在每个GPU上分别计算损失 ls = [loss(lenet(X_shard, device_W), y_shard).sum() for X_shard, y_shard, device_W in zip( X_shards, y_shards, device_params)] for l in ls: # 反向传播在每个GPU上分别执行 l.backward() # 将每个GPU的所有梯度相加,并将其广播到所有GPU with torch.no_grad(): for i in range(len(device_params[0])): allreduce( [device_params[c][i].grad for c in range(len(devices))]) # 在每个GPU上分别更新模型参数 for param in device_params: d2l.sgd(param, lr, X.shape[0]) # 在这里,我们使用全尺寸的小批量 # + [markdown] origin_pos=26 # 现在,我们可以[**定义训练函数**]。 # 与前几章中略有不同:训练函数需要分配GPU并将所有模型参数复制到所有设备。 # 显然,每个小批量都是使用`train_batch`函数来处理多个GPU。 # 我们只在一个GPU上计算模型的精确度,而让其他GPU保持空闲,尽管这是相对低效的,但是使用方便且代码简洁。 # # + origin_pos=28 tab=["pytorch"] def train(num_gpus, batch_size, lr): train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) devices = [d2l.try_gpu(i) for i in range(num_gpus)] # 将模型参数复制到num_gpus个GPU device_params = [get_params(params, d) for d in devices] num_epochs = 10 animator = d2l.Animator('epoch', 'test acc', xlim=[1, num_epochs]) timer = d2l.Timer() for epoch in range(num_epochs): timer.start() for X, y in train_iter: # 为单个小批量执行多GPU训练 train_batch(X, y, device_params, devices, lr) torch.cuda.synchronize() timer.stop() # 在GPU0上评估模型 animator.add(epoch + 1, (d2l.evaluate_accuracy_gpu( lambda x: lenet(x, device_params[0]), test_iter, devices[0]),)) print(f'测试精度:{animator.Y[0][-1]:.2f},{timer.avg():.1f}秒/轮,' f'在{str(devices)}') # + [markdown] origin_pos=29 # 让我们看看[**在单个GPU上运行**]效果得有多好。 # 首先使用的批量大小是$256$,学习率是$0.2$。 # # + origin_pos=30 
tab=["pytorch"] train(num_gpus=1, batch_size=256, lr=0.2) # + [markdown] origin_pos=31 # 保持批量大小和学习率不变,并[**增加为2个GPU**],我们可以看到测试精度与之前的实验基本相同。 # 不同的GPU个数在算法寻优方面是相同的。 # 不幸的是,这里没有任何有意义的加速:模型实在太小了;而且数据集也太小了,在这个数据集中,我们实现的多GPU训练的简单方法受到了巨大的Python开销的影响。 # 在未来,我们将遇到更复杂的模型和更复杂的并行化方法。 # 尽管如此,让我们看看Fashion-MNIST数据集上会发生什么。 # # + origin_pos=32 tab=["pytorch"] train(num_gpus=2, batch_size=256, lr=0.2) # + [markdown] origin_pos=33 # ## 小结 # # * 有多种方法可以在多个GPU上拆分深度网络的训练。拆分可以在层之间、跨层或跨数据上实现。前两者需要对数据传输过程进行严格编排,而最后一种则是最简单的策略。 # * 数据并行训练本身是不复杂的,它通过增加有效的小批量数据量的大小提高了训练效率。 # * 在数据并行中,数据需要跨多个GPU拆分,其中每个GPU执行自己的前向传播和反向传播,随后所有的梯度被聚合为一,之后聚合结果向所有的GPU广播。 # * 小批量数据量更大时,学习率也需要稍微提高一些。 # # ## 练习 # # 1. 在$k$个GPU上进行训练时,将批量大小从$b$更改为$k \cdot b$,即按GPU的数量进行扩展。 # 1. 比较不同学习率时模型的精确度,随着GPU数量的增加学习率应该如何扩展? # 1. 实现一个更高效的`allreduce`函数用于在不同的GPU上聚合不同的参数?为什么这样的效率更高? # 1. 实现模型在多GPU下测试精度的计算。 # # + [markdown] origin_pos=35 tab=["pytorch"] # [Discussions](https://discuss.d2l.ai/t/2800) #
pytorch/chapter_computational-performance/multiple-gpus.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # --- # # Deep Q-Network implementation. # # This homework shamelessly demands you to implement DQN — an approximate Q-learning algorithm with experience replay and target networks — and see if it works any better this way. # # Original paper: # https://arxiv.org/pdf/1312.5602.pdf # **This notebook is the main notebook.** Another notebook is given for debug. (**homework_pytorch_main**). The tasks are similar and share most of the code. The main difference is in environments. In main notebook it can take some 2 hours for the agent to start improving so it seems reasonable to launch the algorithm on a simpler env first. In debug one it is CartPole and it will train in several minutes. # # **We suggest the following pipeline:** First implement debug notebook then implement the main one. # # **About evaluation:** All points are given for the main notebook with one exception: if agent fails to beat the threshold in main notebook you can get 1 pt (instead of 3 pts) for beating the threshold in debug notebook. # + import sys, os if 'google.colab' in sys.modules and not os.path.exists('.setup_complete'): # !wget -q https://raw.githubusercontent.com/yandexdataschool/Practical_RL/master/setup_colab.sh -O- | bash # !wget -q https://raw.githubusercontent.com/yandexdataschool/Practical_RL/master/week04_approx_rl/atari_wrappers.py # !wget -q https://raw.githubusercontent.com/yandexdataschool/Practical_RL/master/week04_approx_rl/utils.py # !wget -q https://raw.githubusercontent.com/yandexdataschool/Practical_RL/master/week04_approx_rl/replay_buffer.py # !wget -q https://raw.githubusercontent.com/yandexdataschool/Practical_RL/master/week04_approx_rl/framebuffer.py # !touch .setup_complete # This code creates a virtual display to draw game images on. # It will have no effect if your machine has a monitor. 
if type(os.environ.get("DISPLAY")) is not str or len(os.environ.get("DISPLAY")) == 0: # !bash ../xvfb start os.environ['DISPLAY'] = ':1' # - # __Frameworks__ - we'll accept this homework in any deep learning framework. This particular notebook was designed for PyTorch, but you find it easy to adapt it to almost any Python-based deep learning framework. import random import numpy as np import torch import utils import gym import numpy as np import matplotlib.pyplot as plt # %matplotlib inline # ### Let's play some old videogames # ![img](https://github.com/yandexdataschool/Practical_RL/raw/master/yet_another_week/_resource/nerd.png) # # This time we're gonna apply approximate Q-learning to an Atari game called Breakout. It's not the hardest thing out there, but it's definitely way more complex than anything we tried before. # ENV_NAME = "BreakoutNoFrameskip-v4" # ## Preprocessing (3 pts) # Let's see what observations look like. # + env = gym.make(ENV_NAME) env.reset() n_cols = 5 n_rows = 2 fig = plt.figure(figsize=(16, 9)) for row in range(n_rows): for col in range(n_cols): ax = fig.add_subplot(n_rows, n_cols, row * n_cols + col + 1) ax.imshow(env.render('rgb_array')) env.step(env.action_space.sample()) plt.show() # - # **Let's play a little.** # # Pay attention to zoom and fps args of play function. Control: A, D, space. # + # # Does not work in Colab. # # Use KeyboardInterrupt (Kernel → Interrupt in Jupyter) to continue. # from gym.utils.play import play # play(env=gym.make(ENV_NAME), zoom=5, fps=30) # - # ### Processing game image # # Raw Atari images are large, 210x160x3 by default. However, we don't need that level of detail in order to learn from them. # # We can thus save a lot of time by preprocessing game image, including # * Resizing to a smaller shape, 64x64 # * Converting to grayscale # * Cropping irrelevant image parts (top, bottom and edges) # # Also please keep one dimension for channel so that final shape would be 1x64x64. 
# # Tip: You can implement your own grayscale converter and assign a huge weight to the red channel. This dirty trick is not necessary but it will speed up learning. # + from gym.core import ObservationWrapper from gym.spaces import Box class PreprocessAtariObs(ObservationWrapper): def __init__(self, env): """A gym wrapper that crops, scales image into the desired shapes and grayscales it.""" ObservationWrapper.__init__(self, env) self.img_size = (1, 64, 64) self.observation_space = Box(0.0, 1.0, self.img_size) def _to_gray_scale(self, rgb, channel_weights=[0.8, 0.1, 0.1]): <YOUR CODE> def observation(self, img): """what happens to each observation""" # Here's what you need to do: # * crop image, remove irrelevant parts # * resize image to self.img_size # (Use imresize from any library you want, # e.g. opencv, PIL, keras. Don't use skimage.imresize # because it is extremely slow.) # * cast image to grayscale # * convert image pixels to (0,1) range, float32 type <YOUR CODE> return <YOUR CODE> # + import gym # spawn game instance for tests env = gym.make(ENV_NAME) # create raw env env = PreprocessAtariObs(env) observation_shape = env.observation_space.shape n_actions = env.action_space.n env.reset() obs, _, _, _ = env.step(env.action_space.sample()) # test observation assert obs.ndim == 3, "observation must be [channel, h, w] even if there's just one channel" assert obs.shape == observation_shape, obs.shape assert obs.dtype == 'float32' assert len(np.unique(obs)) > 2, "your image must not be binary" assert 0 <= np.min(obs) and np.max( obs) <= 1, "convert image pixels to [0,1] range" assert np.max(obs) >= 0.5, "It would be easier to see a brighter observation" assert np.mean(obs) >= 0.1, "It would be easier to see a brighter observation" print("Formal tests seem fine. 
Here's an example of what you'll get.") n_cols = 5 n_rows = 2 fig = plt.figure(figsize=(16, 9)) obs = env.reset() for row in range(n_rows): for col in range(n_cols): ax = fig.add_subplot(n_rows, n_cols, row * n_cols + col + 1) ax.imshow(obs[0, :, :], interpolation='none', cmap='gray') obs, _, _, _ = env.step(env.action_space.sample()) plt.show() # - # ### Wrapping. # **About the game:** You have 5 lives and get points for breaking the wall. Higher bricks cost more than the lower ones. There are 4 actions: start game (should be called at the beginning and after each life is lost), move left, move right and do nothing. There are some common wrappers used for Atari environments. # + import atari_wrappers def PrimaryAtariWrap(env, clip_rewards=True): assert 'NoFrameskip' in env.spec.id # This wrapper holds the same action for <skip> frames and outputs # the maximal pixel value of 2 last frames (to handle blinking # in some envs) env = atari_wrappers.MaxAndSkipEnv(env, skip=4) # This wrapper sends done=True when each life is lost # (not all the 5 lives that are givern by the game rules). # It should make easier for the agent to understand that losing is bad. env = atari_wrappers.EpisodicLifeEnv(env) # This wrapper laucnhes the ball when an episode starts. # Without it the agent has to learn this action, too. # Actually it can but learning would take longer. env = atari_wrappers.FireResetEnv(env) # This wrapper transforms rewards to {-1, 0, 1} according to their sign if clip_rewards: env = atari_wrappers.ClipRewardEnv(env) # This wrapper is yours :) env = PreprocessAtariObs(env) return env # - # **Let's see if the game is still playable after applying the wrappers.** # At playing the EpisodicLifeEnv wrapper seems not to work but actually it does (because after when life finishes a new ball is dropped automatically - it means that FireResetEnv wrapper understands that a new episode began). # + # # Does not work in Colab. 
# # Use KeyboardInterrupt (Kernel → Interrupt in Jupyter) to continue. # from gym.utils.play import play # def make_play_env(): # env = gym.make(ENV_NAME) # env = PrimaryAtariWrap(env) # # in PyTorch images have shape [c, h, w] instead of common [h, w, c] # env = atari_wrappers.AntiTorchWrapper(env) # return env # play(make_play_env(), zoom=10, fps=3) # - # ### Frame buffer # # Our agent can only process one observation at a time, so we gotta make sure it contains enough information to find optimal actions. For instance, agent has to react to moving objects so it must be able to measure object's velocity. # # To do so, we introduce a buffer that stores 4 last images. This time everything is pre-implemented for you, not really by the staff of the course :) # + from framebuffer import FrameBuffer def make_env(clip_rewards=True, seed=None): env = gym.make(ENV_NAME) # create raw env if seed is not None: env.seed(seed) env = PrimaryAtariWrap(env, clip_rewards) env = FrameBuffer(env, n_frames=4, dim_order='pytorch') return env env = make_env() env.reset() n_actions = env.action_space.n state_shape = env.observation_space.shape # + for _ in range(12): obs, _, _, _ = env.step(env.action_space.sample()) plt.figure(figsize=[12,10]) plt.title("Game image") plt.imshow(env.render("rgb_array")) plt.show() plt.figure(figsize=[15,15]) plt.title("Agent observation (4 frames top to bottom)") plt.imshow(utils.img_by_obs(obs, state_shape), cmap='gray') plt.show() # - # ## DQN as it is (4 pts) # ### Building a network # # We now need to build a neural network that can map images to state q-values. This network will be called on every agent's step so it better not be resnet-152 unless you have an array of GPUs. Instead, you can use strided convolutions with a small number of features to save time and memory. 
# # You can build any architecture you want, but for reference, here's something that will more or less work: # ![img](https://github.com/yandexdataschool/Practical_RL/raw/master/yet_another_week/_resource/dqn_arch.png) # **Dueling network: (+2 pts)** # $$Q_{\theta}(s, a) = V_{\eta}(f_{\xi}(s)) + A_{\psi}(f_{\xi}(s), a) - \frac{\sum_{a'}A_{\psi}(f_{\xi}(s), a')}{N_{actions}},$$ # where $\xi$, $\eta$, and $\psi$ are, respectively, the parameters of the # shared encoder $f_ξ$ , of the value stream $V_\eta$ , and of the advan # tage stream $A_\psi$; and $\theta = \{\xi, \eta, \psi\}$ is their concatenation. # # For the architecture on the image $V$ and $A$ heads can follow the dense layer instead of $Q$. Please don't worry that the model becomes a little bigger. import torch import torch.nn as nn device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # those who have a GPU but feel unfair to use it can uncomment: # device = torch.device('cpu') device def conv2d_size_out(size, kernel_size, stride): """ common use case: cur_layer_img_w = conv2d_size_out(cur_layer_img_w, kernel_size, stride) cur_layer_img_h = conv2d_size_out(cur_layer_img_h, kernel_size, stride) to understand the shape for dense layer's input """ return (size - (kernel_size - 1) - 1) // stride + 1 class DQNAgent(nn.Module): def __init__(self, state_shape, n_actions, epsilon=0): super().__init__() self.epsilon = epsilon self.n_actions = n_actions self.state_shape = state_shape # Define your network body here. 
Please make sure agent is fully contained here # nn.Flatten() can be useful <YOUR CODE> def forward(self, state_t): """ takes agent's observation (tensor), returns qvalues (tensor) :param state_t: a batch of 4-frame buffers, shape = [batch_size, 4, h, w] """ # Use your network to compute qvalues for given state qvalues = <YOUR CODE> assert qvalues.requires_grad, "qvalues must be a torch tensor with grad" assert ( len(qvalues.shape) == 2 and qvalues.shape[0] == state_t.shape[0] and qvalues.shape[1] == n_actions ) return qvalues def get_qvalues(self, states): """ like forward, but works on numpy arrays, not tensors """ model_device = next(self.parameters()).device states = torch.tensor(states, device=model_device, dtype=torch.float32) qvalues = self.forward(states) return qvalues.data.cpu().numpy() def sample_actions(self, qvalues): """pick actions given qvalues. Uses epsilon-greedy exploration strategy. """ epsilon = self.epsilon batch_size, n_actions = qvalues.shape random_actions = np.random.choice(n_actions, size=batch_size) best_actions = qvalues.argmax(axis=-1) should_explore = np.random.choice( [0, 1], batch_size, p=[1-epsilon, epsilon]) return np.where(should_explore, random_actions, best_actions) agent = DQNAgent(state_shape, n_actions, epsilon=0.5).to(device) # Now let's try out our agent to see if it raises any errors. def evaluate(env, agent, n_games=1, greedy=False, t_max=10000): """ Plays n_games full games. If greedy, picks actions as argmax(qvalues). Returns mean reward. """ rewards = [] for _ in range(n_games): s = env.reset() reward = 0 for _ in range(t_max): qvalues = agent.get_qvalues([s]) action = qvalues.argmax(axis=-1)[0] if greedy else agent.sample_actions(qvalues)[0] s, r, done, _ = env.step(action) reward += r if done: break rewards.append(reward) return np.mean(rewards) evaluate(env, agent, n_games=1) # ### Experience replay # For this assignment, we provide you with experience replay buffer. 
If you implemented experience replay buffer in last week's assignment, you can copy-paste it here **to get 2 bonus points**. # # ![img](https://github.com/yandexdataschool/Practical_RL/raw/master/yet_another_week/_resource/exp_replay.png) # #### The interface is fairly simple: # * `exp_replay.add(obs, act, rw, next_obs, done)` - saves (s,a,r,s',done) tuple into the buffer # * `exp_replay.sample(batch_size)` - returns observations, actions, rewards, next_observations and is_done for `batch_size` random samples. # * `len(exp_replay)` - returns number of elements stored in replay buffer. # + from replay_buffer import ReplayBuffer exp_replay = ReplayBuffer(10) for _ in range(30): exp_replay.add(env.reset(), env.action_space.sample(), 1.0, env.reset(), done=False) obs_batch, act_batch, reward_batch, next_obs_batch, is_done_batch = exp_replay.sample(5) assert len(exp_replay) == 10, "experience replay size should be 10 because that's what maximum capacity is" # - def play_and_record(initial_state, agent, env, exp_replay, n_steps=1): """ Play the game for exactly n_steps, record every (s,a,r,s', done) to replay buffer. Whenever game ends, add record with done=True and reset the game. It is guaranteed that env has done=False when passed to this function. PLEASE DO NOT RESET ENV UNLESS IT IS "DONE" :returns: return sum of rewards over time and the state in which the env stays """ s = initial_state sum_rewards = 0 # Play the game for n_steps as per instructions above <YOUR CODE> return sum_rewards, s # + # testing your code. exp_replay = ReplayBuffer(2000) state = env.reset() play_and_record(state, agent, env, exp_replay, n_steps=1000) # if you're using your own experience replay buffer, some of those tests may need correction. 
# just make sure you know what your code does assert len(exp_replay) == 1000, \ "play_and_record should have added exactly 1000 steps, " \ "but instead added %i" % len(exp_replay) is_dones = list(zip(*exp_replay._storage))[-1] assert 0 < np.mean(is_dones) < 0.1, \ "Please make sure you restart the game whenever it is 'done' and " \ "record the is_done correctly into the buffer. Got %f is_done rate over " \ "%i steps. [If you think it's your tough luck, just re-run the test]" % ( np.mean(is_dones), len(exp_replay)) for _ in range(100): obs_batch, act_batch, reward_batch, next_obs_batch, is_done_batch = exp_replay.sample(10) assert obs_batch.shape == next_obs_batch.shape == (10,) + state_shape assert act_batch.shape == (10,), \ "actions batch should have shape (10,) but is instead %s" % str(act_batch.shape) assert reward_batch.shape == (10,), \ "rewards batch should have shape (10,) but is instead %s" % str(reward_batch.shape) assert is_done_batch.shape == (10,), \ "is_done batch should have shape (10,) but is instead %s" % str(is_done_batch.shape) assert [int(i) in (0, 1) for i in is_dones], \ "is_done should be strictly True or False" assert [0 <= a < n_actions for a in act_batch], "actions should be within [0, n_actions)" print("Well done!") # - # ### Target networks # # We also employ the so called "target network" - a copy of neural network weights to be used for reference Q-values: # # The network itself is an exact copy of agent network, but it's parameters are not trained. Instead, they are moved here from agent's actual network every so often. # # $$ Q_{reference}(s,a) = r + \gamma \cdot \max _{a'} Q_{target}(s',a') $$ # # ![img](https://github.com/yandexdataschool/Practical_RL/raw/master/yet_another_week/_resource/target_net.png) target_network = DQNAgent(agent.state_shape, agent.n_actions, epsilon=0.5).to(device) # This is how you can load weights from agent into target network target_network.load_state_dict(agent.state_dict()) # ### Learning with... 
Q-learning # Here we write a function similar to `agent.update` from tabular q-learning. # Compute Q-learning TD error: # # $$ L = { 1 \over N} \sum_i [ Q_{\theta}(s,a) - Q_{reference}(s,a) ] ^2 $$ # # With Q-reference defined as # # $$ Q_{reference}(s,a) = r(s,a) + \gamma \cdot max_{a'} Q_{target}(s', a') $$ # # Where # * $Q_{target}(s',a')$ denotes Q-value of next state and next action predicted by __target_network__ # * $s, a, r, s'$ are current state, action, reward and next state respectively # * $\gamma$ is a discount factor defined two cells above. # # # __Note 1:__ there's an example input below. Feel free to experiment with it before you write the function. # # __Note 2:__ compute_td_loss is a source of 99% of bugs in this homework. If reward doesn't improve, it often helps to go through it line by line [with a rubber duck](https://rubberduckdebugging.com/). # # **Double DQN (+2 pts)** # # $$ Q_{reference}(s,a) = r(s, a) + \gamma \cdot # Q_{target}(s',argmax_{a'}Q_\theta(s', a')) $$ def compute_td_loss(states, actions, rewards, next_states, is_done, agent, target_network, gamma=0.99, check_shapes=False, device=device): """ Compute td loss using torch operations only. Use the formulae above. 
""" states = torch.tensor(states, device=device, dtype=torch.float32) # shape: [batch_size, *state_shape] actions = torch.tensor(actions, device=device, dtype=torch.int64) # shape: [batch_size] rewards = torch.tensor(rewards, device=device, dtype=torch.float32) # shape: [batch_size] # shape: [batch_size, *state_shape] next_states = torch.tensor(next_states, device=device, dtype=torch.float) is_done = torch.tensor( is_done.astype('float32'), device=device, dtype=torch.float32, ) # shape: [batch_size] is_not_done = 1 - is_done # get q-values for all actions in current states predicted_qvalues = agent(states) # shape: [batch_size, n_actions] # compute q-values for all actions in next states predicted_next_qvalues = target_network(next_states) # shape: [batch_size, n_actions] # select q-values for chosen actions predicted_qvalues_for_actions = predicted_qvalues[range(len(actions)), actions] # shape: [batch_size] # compute V*(next_states) using predicted next q-values next_state_values = <YOUR CODE> assert next_state_values.dim() == 1 and next_state_values.shape[0] == states.shape[0], \ "must predict one value per state" # compute "target q-values" for loss - it's what's inside square parentheses in the above formula. # at the last state use the simplified formula: Q(s,a) = r(s,a) since s' doesn't exist # you can multiply next state values by is_not_done to achieve this. 
target_qvalues_for_actions = <YOUR CODE> # mean squared error loss to minimize loss = torch.mean((predicted_qvalues_for_actions - target_qvalues_for_actions.detach()) ** 2) if check_shapes: assert predicted_next_qvalues.data.dim() == 2, \ "make sure you predicted q-values for all actions in next state" assert next_state_values.data.dim() == 1, \ "make sure you computed V(s') as maximum over just the actions axis and not all axes" assert target_qvalues_for_actions.data.dim() == 1, \ "there's something wrong with target q-values, they must be a vector" return loss # Sanity checks # + obs_batch, act_batch, reward_batch, next_obs_batch, is_done_batch = exp_replay.sample(10) loss = compute_td_loss(obs_batch, act_batch, reward_batch, next_obs_batch, is_done_batch, agent, target_network, gamma=0.99, check_shapes=True) loss.backward() assert loss.requires_grad and tuple(loss.data.size()) == (), \ "you must return scalar loss - mean over batch" assert np.any(next(agent.parameters()).grad.data.cpu().numpy() != 0), \ "loss must be differentiable w.r.t. network weights" assert np.all(next(target_network.parameters()).grad is None), \ "target network should not have grads" # - # ## Main loop (3 pts) # # **If deadline is tonight and it has not converged:** It is ok. Send the notebook today and when it converges send it again. # If the code is exactly the same points will not be discounted. # # It's time to put everything together and see if it learns anything. 
from tqdm import trange from IPython.display import clear_output import matplotlib.pyplot as plt seed = <YOUR CODE: your favourite random seed> random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) # + env = make_env(seed) state_shape = env.observation_space.shape n_actions = env.action_space.n state = env.reset() agent = DQNAgent(state_shape, n_actions, epsilon=1).to(device) target_network = DQNAgent(state_shape, n_actions).to(device) target_network.load_state_dict(agent.state_dict()) # - # Buffer of size $10^4$ fits into 5 Gb RAM. # # Larger sizes ($10^5$ and $10^6$ are common) can be used. It can improve the learning, but $10^4$ is quite enough. $10^2$ will probably fail learning. # + REPLAY_BUFFER_SIZE = 10**4 N_STEPS = 100 exp_replay = ReplayBuffer(REPLAY_BUFFER_SIZE) for i in trange(REPLAY_BUFFER_SIZE // N_STEPS): if not utils.is_enough_ram(min_available_gb=0.1): print(""" Less than 100 Mb RAM available. Make sure the buffer size in not too huge. Also check, maybe other processes consume RAM heavily. 
""" ) break play_and_record(state, agent, env, exp_replay, n_steps=N_STEPS) if len(exp_replay) == REPLAY_BUFFER_SIZE: break print(len(exp_replay)) # + timesteps_per_epoch = 1 batch_size = 16 total_steps = 3 * 10**6 decay_steps = 10**6 opt = torch.optim.Adam(agent.parameters(), lr=1e-4) init_epsilon = 1 final_epsilon = 0.1 loss_freq = 50 refresh_target_network_freq = 5000 eval_freq = 5000 max_grad_norm = 50 n_lives = 5 # - mean_rw_history = [] td_loss_history = [] grad_norm_history = [] initial_state_v_history = [] step = 0 # + import time def wait_for_keyboard_interrupt(): try: while True: time.sleep(1) except KeyboardInterrupt: pass # - state = env.reset() with trange(step, total_steps + 1) as progress_bar: for step in progress_bar: if not utils.is_enough_ram(): print('less that 100 Mb RAM available, freezing') print('make sure everything is ok and use KeyboardInterrupt to continue') wait_for_keyboard_interrupt() agent.epsilon = utils.linear_decay(init_epsilon, final_epsilon, step, decay_steps) # play _, state = play_and_record(state, agent, env, exp_replay, timesteps_per_epoch) # train <YOUR CODE: sample batch_size of data from experience replay> loss = <YOUR CODE: compute TD loss> loss.backward() grad_norm = nn.utils.clip_grad_norm_(agent.parameters(), max_grad_norm) opt.step() opt.zero_grad() if step % loss_freq == 0: td_loss_history.append(loss.data.cpu().item()) grad_norm_history.append(grad_norm.cpu()) if step % refresh_target_network_freq == 0: # Load agent weights into target_network <YOUR CODE> if step % eval_freq == 0: mean_rw_history.append(evaluate( make_env(clip_rewards=True, seed=step), agent, n_games=3 * n_lives, greedy=True) ) initial_state_q_values = agent.get_qvalues( [make_env(seed=step).reset()] ) initial_state_v_history.append(np.max(initial_state_q_values)) clear_output(True) print("buffer size = %i, epsilon = %.5f" % (len(exp_replay), agent.epsilon)) plt.figure(figsize=[16, 9]) plt.subplot(2, 2, 1) plt.title("Mean reward per life") 
plt.plot(mean_rw_history) plt.grid() assert not np.isnan(td_loss_history[-1]) plt.subplot(2, 2, 2) plt.title("TD loss history (smoothened)") plt.plot(utils.smoothen(td_loss_history)) plt.grid() plt.subplot(2, 2, 3) plt.title("Initial state V") plt.plot(initial_state_v_history) plt.grid() plt.subplot(2, 2, 4) plt.title("Grad norm history (smoothened)") plt.plot(utils.smoothen(grad_norm_history)) plt.grid() plt.show() # Agent is evaluated for 1 life, not for a whole episode of 5 lives. Rewards in evaluation are also truncated. Cuz this is what environment the agent is learning in and in this way mean rewards per life can be compared with initial state value # # **The goal is to get 15 points in the real env**. So 3 or better 4 points in the preprocessed one will probably be enough. You can interrupt learning then. # Final scoring is done on a whole episode with all 5 lives. final_score = evaluate( make_env(clip_rewards=False, seed=9), agent, n_games=30, greedy=True, t_max=10 * 1000 ) print('final score:', final_score) assert final_score >= 3, 'not as cool as DQN can' print('Cool!') # ## How to interpret plots: # # This aint no supervised learning so don't expect anything to improve monotonously. # * **TD loss** is the MSE between agent's current Q-values and target Q-values. It may slowly increase or decrease, it's ok. The "not ok" behavior includes going NaN or stayng at exactly zero before agent has perfect performance. # * **grad norm** just shows the intensivity of training. Not ok is growing to values of about 100 (or maybe even 50) though it depends on network architecture. # * **mean reward** is the expected sum of r(s,a) agent gets over the full game session. It will oscillate, but on average it should get higher over time (after a few thousand iterations...). # * In basic q-learning implementation it takes about 40k steps to "warm up" agent before it starts to get better. 
# * **Initial state V** is the expected discounted reward for an episode in the opinion of the agent. It should behave more smoothly than **mean reward**. It should get higher over time but sometimes can experience drawdowns because of the agent's overestimates.
# * **buffer size** - this one is simple. It should go up and cap at max size.
# * **epsilon** - agent's willingness to explore. If you see that the agent is already at 0.01 epsilon before its average reward is above 0 - it means you need to increase epsilon. Set it back to some 0.2 - 0.5 and decrease the pace at which it goes down.
# * Smoothing of plots is done with a Gaussian kernel
#
# At first your agent will lose quickly. Then it will learn to suck less and at least hit the ball a few times before it loses. Finally it will learn to actually score points.
#
# **Training will take time.** A lot of it actually. Probably you will not see any improvement during the first **150k** time steps (note that by default in this notebook the agent is evaluated every 5000 time steps).
#
# But hey, long training time isn't _that_ bad:
# ![img](https://github.com/yandexdataschool/Practical_RL/raw/master/yet_another_week/_resource/training.png)

# ## About hyperparameters:
#
# The task has something in common with supervised learning: loss is optimized through the buffer (instead of a train dataset). But the distribution of states and actions in the buffer **is not stationary** and depends on the policy that generated it. It can even happen that the mean TD error across the buffer is very low but the performance is extremely poor (imagine the agent collecting data to the buffer always manages to avoid the ball).
#
# * Total timesteps and training time: It seems to be so huge, but actually it is normal for RL.
#
# * $\epsilon$ decay schedule was taken from the original paper and is traditional for epsilon-greedy policies. At the beginning of the training the agent's greedy policy is poor, so many random actions should be taken.
# # * Optimizer: In the original paper RMSProp was used (they did not have Adam in 2013) and it can work not worse than Adam. For us Adam was default and it worked. # # * lr: $10^{-3}$ would probably be too huge # # * batch size: This one can be very important: if it is too small the agent can fail to learn. Huge batch takes more time to process. If batch of size 8 can not be processed on the hardware you use take 2 (or even 4) batches of size 4, divide the loss on them by 2 (or 4) and make optimization step after both backward() calls in torch. # # * target network update frequency: has something in common with learning rate. Too frequent updates can lead to divergence. Too rare can lead to slow leraning. For millions of total timesteps thousands of inner steps seem ok. One iteration of target network updating is an iteration of the (this time approximate) $\gamma$-compression that stands behind Q-learning. The more inner steps it makes the more accurate is the compression. # * max_grad_norm - just huge enough. In torch clip_grad_norm also evaluates the norm before clipping and it can be convenient for logging. # ### Video # + # Record sessions import gym.wrappers with gym.wrappers.Monitor(make_env(), directory="videos", force=True) as env_monitor: sessions = [evaluate(env_monitor, agent, n_games=n_lives, greedy=True) for _ in range(10)] # + # Show video. This may not work in some setups. If it doesn't # work for you, you can download the videos and view them locally. 
from pathlib import Path
from base64 import b64encode
from IPython.display import HTML

# Pick the most recently written .mp4 from the monitor's output directory
# (sorted() on paths orders them lexicographically, which matches the
# monitor's numbered file names).
video_paths = sorted([s for s in Path('videos').iterdir() if s.suffix == '.mp4'])
video_path = video_paths[-1]  # You can also try other indices

if 'google.colab' in sys.modules:
    # Colab cannot serve local files to the <video> tag, so inline the
    # file's bytes as a base64 data URL instead.
    # https://stackoverflow.com/a/57378660/1214547
    with video_path.open('rb') as fp:
        mp4 = fp.read()
    data_url = 'data:video/mp4;base64,' + b64encode(mp4).decode()
else:
    data_url = str(video_path)

HTML("""
<video width="640" height="480" controls>
  <source src="{}" type="video/mp4">
</video>
""".format(data_url))
# -

# ## Let's have a closer look at this.
#
# If average episode score is below 200 using all 5 lives, then probably DQN has not converged fully. But anyway let's make a more complete record of an episode.

# Roll out one episode with unclipped rewards and log per-step diagnostics
# (rewards, Monte-Carlo values 'v_mc', agent value estimates 'v_agent', ...).
eval_env = make_env(clip_rewards=False)
record = utils.play_and_log_episode(eval_env, agent)
print('total reward for life:', np.sum(record['rewards']))
for key in record:
    print(key)

# +
# Scatter the agent's state-value estimates against Monte-Carlo returns;
# the dashed x=y line marks a perfectly unbiased estimator.
fig = plt.figure(figsize=(5, 5))
ax = fig.add_subplot(1, 1, 1)

ax.scatter(record['v_mc'], record['v_agent'])
ax.plot(sorted(record['v_mc']), sorted(record['v_mc']),
        'black', linestyle='--', label='x=y')

ax.grid()
ax.legend()
ax.set_title('State Value Estimates')
ax.set_xlabel('Monte-Carlo')
ax.set_ylabel('Agent')

plt.show()
# -

# $\hat V_{Monte-Carlo}(s_t) = \sum_{\tau=t}^{episode~end} \gamma^{\tau-t} r_\tau$

# Is there a big bias? It's ok, anyway it works.

# ## Bonus I (2 pts)

# **1.** Plot several (say 3) states with high and low spreads of Q estimate by actions i.e.
# $$\max_a \hat Q(s,a) - \min_a \hat Q(s,a)$$
# Please take those states from different episodes to make sure that the states are really different.
#
# What should high and low spread mean at least in the world of perfect Q-functions?
#
# Comment the states you like most.
# # **2.** Plot several (say 3) states with high td-error and several states with high values of # $$| \hat V_{Monte-Carlo}(s) - \hat V_{agent}(s)|,$$ # $$\hat V_{agent}(s)=\max_a \hat Q(s,a).$$ Please take those states from different episodes to make sure that the states are really different. From what part (i.e. beginning, middle, end) of an episode did these states come from? # # Comment the states you like most. # + from utils import play_and_log_episode, img_by_obs <YOUR CODE> # - # ## Bonus II (1-5 pts). Get High Score! # # 1 point to you for each 50 points of your agent. Truncated by 5 points. Starting with 50 points, **not** 50 + threshold. # # One way is to train for several days and use heavier hardware (why not actually). # # Another way is to apply modifications (see **Bonus III**). # ## Bonus III (2+ pts). Apply modifications to DQN. # # For inspiration see [Rainbow](https://arxiv.org/abs/1710.02298) - a version of q-learning that combines lots of them. # # Points for Bonus II and Bonus III fully stack. So if modified agent gets score 250+ you get 5 pts for Bonus II + points for modifications. If the final score is 40 then you get the points for modifications. # # # Some modifications: # * [Prioritized experience replay](https://arxiv.org/abs/1511.05952) (5 pts for your own implementation, 3 pts for using a ready one) # * [double q-learning](https://arxiv.org/abs/1509.06461) (2 pts) # * [dueling q-learning](https://arxiv.org/abs/1511.06581) (2 pts) # * multi-step heuristics (see [Rainbow](https://arxiv.org/abs/1710.02298)) (3 pts) # * [Noisy Nets](https://arxiv.org/abs/1706.10295) (3 pts) # * [distributional RL](https://arxiv.org/abs/1707.06887)(distributional and distributed stand for different things here) (5 pts) # * Other modifications (2+ pts depending on complexity) # ## Bonus IV (4+ pts). Distributed RL. # # Solve the task in a distributed way. It can strongly speed up learning. See [article](https://arxiv.org/pdf/1602.01783.pdf) or some guides. 
# **As usual, bonus points for all the tasks fully stack.**
week04_approx_rl/homework_pytorch_main.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.8.1 64-bit (''pyUdemy'': conda)'
#     name: python38164bitpyudemyconda8c705f49a8e643418ce4b1ca64c8ab63
# ---

# +
# 'is' and 'is not': memory equality
# '==' and '!=': value equality

# Is: Identity operator (memory addresses)
# Value Equality: Operator to compare object values
# -

my_value1 = 10  # int(10)
my_value2 = 10.0  # float(10.0)

# 10 == 10.0 is True (numeric value equality), but the `is` check never
# fires: an int and a float are always distinct objects in memory.
if my_value1 == my_value2:
    print("my_value1 == my_value2")
if my_value1 is my_value2:
    print("my_value1 is my_value2")

my_list1 = [1, 2]
my_list2 = [1, 2]

# Two independently created lists compare equal by value but are
# different objects, so only the value-equality branch prints.
if my_list1 == my_list2:
    print("my_list1 == my_list2")
if my_list1 is my_list2:
    # Fixed message: previously printed "my_value1 is my_list2",
    # which did not match the comparison above.
    print("my_list1 is my_list2")
Chapter3_BasicFeatures/Logic/is_vs_equality.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="EEjL0YblACJl" colab_type="text" # ## > Import Packages # + id="Yp1QzZA2FqrR" colab_type="code" colab={} executionInfo={"status": "ok", "timestamp": 1600709140508, "user_tz": -330, "elapsed": 4151, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggy_o7pC97iMLwReJFws779DMXX4Bt_gerr7_ka=s64", "userId": "05011419419690803092"}} ## Import packages from scipy.io import loadmat from sklearn import preprocessing from tabulate import tabulate from sklearn.multiclass import OneVsRestClassifier from sklearn.metrics import confusion_matrix,classification_report from sklearn.model_selection import cross_val_score, GridSearchCV import matplotlib.patches as mpatches from matplotlib import pyplot as plt from skimage.color import label2rgb from sklearn.svm import SVC from sklearn import metrics from sklearn import svm import pandas as pd import numpy as np import statistics import math import time import sys ## Import DL import keras from keras.layers.core import Dense, Dropout, Activation # Types of layers to be used in our model from keras.utils import np_utils from keras.preprocessing.image import ImageDataGenerator from keras.layers import Dense, Conv2D, MaxPool2D , Conv1D, Flatten, MaxPooling1D from keras.models import Sequential # + id="QEPgmFP3FWIf" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} executionInfo={"status": "ok", "timestamp": 1600709216295, "user_tz": -330, "elapsed": 79911, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggy_o7pC97iMLwReJFws779DMXX4Bt_gerr7_ka=s64", "userId": "05011419419690803092"}} outputId="16f7e2f8-e560-4979-c7da-38a1fc966eb8" ## Mounting Google Drive from google.colab import drive drive.mount('/content/drive') # 
+ [markdown] id="udbKmgi3AJj_" colab_type="text" # ## > Load Data # + id="v529PL_eqbXK" colab_type="code" colab={} i=1 # + id="4Dw0glz2GTzR" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 357} executionInfo={"status": "error", "timestamp": 1600709233362, "user_tz": -330, "elapsed": 4667, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggy_o7pC97iMLwReJFws779DMXX4Bt_gerr7_ka=s64", "userId": "05011419419690803092"}} outputId="30325872-2494-43cf-d92e-5f01e367f23c" ## Load Data img = loadmat('/content/drive/My Drive/Major_Project/Data/PaviaU.mat') img_gt = loadmat('/content/drive/My Drive/Major_Project/Data/PaviaU_gt.mat') # img = loadmat('/content/drive/My Drive/Major_Project/Data/Indian_Pines.mat') # img_gt = loadmat('/content/drive/My Drive/Major_Project/Data/Indian_Pines_gt.mat') img_dr = np.load('/content/drive/My Drive/Major_Project/Test_Results3/test3_reduced_img_25.npy') # img_dr = np.load('/content/drive/My Drive/Major_Project/Supervised_Results/Indian_Pines/reduced_img_f_dr_25.npy') # img_dr = np.load('/content/drive/My Drive/Major_Project/unSupervised_Results/Indian_Pines/reduced_img_f_dr_28.npy') img = img['paviaU'] gt = img_gt['paviaU_gt'] # img = img['indian_pines_corrected'] # gt = img_gt['indian_pines_gt'] height, width, bands = img.shape[0], img.shape[1], img.shape[2] # img = np.reshape(img, [height*width, bands]) # img_gt = np.reshape(gt, [height*width,]) # plt.figure(2) # plt.plot(img[0,]) # plt.title('Spectral Response Curve') # plt.xlabel('Band Number') # plt.ylabel('Reflectance') # plt.savefig('SRC.png',dpi=300) # num_classes = len(np.unique(gt)) # mymap = plt.get_cmap('parula', 10) # fig, ax = plt.figure() # heatmap = ax.pcolor(data, cmap=parula) #legend # cbar = plt.colorbar(heatmap) # plt.figure() fig, (ax1, ax) = plt.subplots(nrows=1, ncols=2) # plt.imshow(gt,cmap='viridis', interpolation=None) lab = ['Background', 'Asphalt - 6631', 'Meadows - 18649', 'Gravel - 2099', 'Trees - 
3064', 'Painted metal sheets - 1345', 'Bare Soil - 5029', 'Bitumen - 1330', 'Self-Blocking Bricks - 3682','Shadows - 947'] ax1.imshow(img[:,:,50], cmap='gray') ax1.set_axis_off() cax = ax.imshow(g t, interpolation='nearest', cmap=parula_map) ax.set_axis_off() # fig.add_axes(ax1) # fig.add_axes(ax) t = np.linspace(0.5,8.5,10) print(t.shape) # Add colorbar, make sure to specify tick locations to match desired ticklabels cbar = fig.colorbar(cax, ticks=t) cbar.ax.set_yticklabels(lab, va="center") # vertically oriented colorbar # plt.colorbar() plt.savefig('PaviaU_label.png',dpi=300, bbox_inches='tight') #reshaping image to vector img = np.reshape(img, [height*width, bands]) img_gt = np.reshape(gt, [height*width,]) #checking background pixels - they will be omitted later from training data background = img_gt[img_gt == 0] #sanity checks print("Number of Label Classes: ", num_classes) print("Image size: ", img.shape) # print("Reduced Image size: ", img_dr.shape) print("Label size: ", img_gt.shape) print("Background Pixels: ", background.shape) print("\n Class distribution: => ") print(tabulate(np.unique(img_gt, return_counts=True), tablefmt="grid")) # plt.figure(1) # plt.imshow(gt) # + id="uxziwToiS5N9" colab_type="code" colab={} # + id="dOpmGW-CCqt0" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 717} executionInfo={"status": "ok", "timestamp": 1599929922318, "user_tz": -330, "elapsed": 2738, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggy_o7pC97iMLwReJFws779DMXX4Bt_gerr7_ka=s64", "userId": "05011419419690803092"}} outputId="65718fe6-3f2a-40e7-8f83-41521cb38766" ## Load Data img = loadmat('/content/drive/My Drive/Major_Project/Data/Indian_Pines.mat') img_gt = loadmat('/content/drive/My Drive/Major_Project/Data/Indian_Pines_gt.mat') # img_dr = np.load('/content/drive/My Drive/Major_Project/Test_Results3/test3_reduced_img_25.npy') # img_dr = np.load('/content/drive/My 
Drive/Major_Project/Supervised_Results/Indian_Pines/reduced_img_f_dr_25.npy') # img_dr = np.load('/content/drive/My Drive/Major_Project/unSupervised_Results/Indian_Pines/reduced_img_f_dr_28.npy') img = img['indian_pines_corrected'] gt = img_gt['indian_pines_gt'] height, width, bands = img.shape[0], img.shape[1], img.shape[2] num_classes = len(np.unique(gt)) # mymap = plt.get_cmap('parula', 10) # fig, ax = plt.figure() # heatmap = ax.pcolor(data, cmap=parula) #legend # cbar = plt.colorbar(heatmap) # plt.figure() fig, ax = plt.subplots() lab = [ 'Alfalfa - 46', 'Corn-notill - 1428', 'Corn-mintill - 830', 'Corn - 237', 'Grass-pasture - 483', 'Grass-trees - 730', 'Grass-pasture-mowed - 28', 'Hay-windrowed - 478', 'Oats - 20', 'Soybean-notill - 972', 'Soybean-mintill - 2455', 'Soybean-clean - 593', 'Wheat - 205', 'Woods - 1265', 'Buildings-Grass-Trees-Drives - 386', 'Stone-Steel-Towers - 93'] cax = ax.imshow(gt, interpolation='nearest', cmap=parula_map) ax.set_axis_off() fig.add_axes(ax) t = np.arange(1)+0.5 # Add colorbar, make sure to specify tick locations to match desired ticklabels cbar = fig.colorbar(cax, ticks=t) cbar.ax.set_yticklabels(lab) # vertically oriented colorbar # plt.colorbar() plt.savefig('PaviaU_label.png',dpi=300, bbox_inches='tight') #reshaping image to vector img = np.reshape(img, [height*width, bands]) img_gt = np.reshape(gt, [height*width,]) #checking background pixels - they will be omitted later from training data background = img_gt[img_gt == 0] #sanity checks print("Number of Label Classes: ", num_classes) print("Image size: ", img.shape) # print("Reduced Image size: ", img_dr.shape) print("Label size: ", img_gt.shape) print("Background Pixels: ", background.shape) print("\n Class distribution: => ") print(tabulate(np.unique(img_gt, return_counts=True), tablefmt="grid")) # plt.figure(1) # plt.imshow(gt) plt.figure(2) plt.plot(img[0,]) plt.title('Spectral Response Curve') plt.xlabel('Wavelenth Number') plt.ylabel('Reflectance') 
plt.savefig('SRC.png',dpi=300) # + id="d2AKTnND71xH" colab_type="code" colab={} from matplotlib.colors import LinearSegmentedColormap cm_data = [[0.2081, 0.1663, 0.5292], [0.2116238095, 0.1897809524, 0.5776761905], [0.212252381, 0.2137714286, 0.6269714286], [0.2081, 0.2386, 0.6770857143], [0.1959047619, 0.2644571429, 0.7279], [0.1707285714, 0.2919380952, 0.779247619], [0.1252714286, 0.3242428571, 0.8302714286], [0.0591333333, 0.3598333333, 0.8683333333], [0.0116952381, 0.3875095238, 0.8819571429], [0.0059571429, 0.4086142857, 0.8828428571], [0.0165142857, 0.4266, 0.8786333333], [0.032852381, 0.4430428571, 0.8719571429], [0.0498142857, 0.4585714286, 0.8640571429], [0.0629333333, 0.4736904762, 0.8554380952], [0.0722666667, 0.4886666667, 0.8467], [0.0779428571, 0.5039857143, 0.8383714286], [0.079347619, 0.5200238095, 0.8311809524], [0.0749428571, 0.5375428571, 0.8262714286], [0.0640571429, 0.5569857143, 0.8239571429], [0.0487714286, 0.5772238095, 0.8228285714], [0.0343428571, 0.5965809524, 0.819852381], [0.0265, 0.6137, 0.8135], [0.0238904762, 0.6286619048, 0.8037619048], [0.0230904762, 0.6417857143, 0.7912666667], [0.0227714286, 0.6534857143, 0.7767571429], [0.0266619048, 0.6641952381, 0.7607190476], [0.0383714286, 0.6742714286, 0.743552381], [0.0589714286, 0.6837571429, 0.7253857143], [0.0843, 0.6928333333, 0.7061666667], [0.1132952381, 0.7015, 0.6858571429], [0.1452714286, 0.7097571429, 0.6646285714], [0.1801333333, 0.7176571429, 0.6424333333], [0.2178285714, 0.7250428571, 0.6192619048], [0.2586428571, 0.7317142857, 0.5954285714], [0.3021714286, 0.7376047619, 0.5711857143], [0.3481666667, 0.7424333333, 0.5472666667], [0.3952571429, 0.7459, 0.5244428571], [0.4420095238, 0.7480809524, 0.5033142857], [0.4871238095, 0.7490619048, 0.4839761905], [0.5300285714, 0.7491142857, 0.4661142857], [0.5708571429, 0.7485190476, 0.4493904762], [0.609852381, 0.7473142857, 0.4336857143], [0.6473, 0.7456, 0.4188], [0.6834190476, 0.7434761905, 0.4044333333], [0.7184095238, 
0.7411333333, 0.3904761905], [0.7524857143, 0.7384, 0.3768142857], [0.7858428571, 0.7355666667, 0.3632714286], [0.8185047619, 0.7327333333, 0.3497904762], [0.8506571429, 0.7299, 0.3360285714], [0.8824333333, 0.7274333333, 0.3217], [0.9139333333, 0.7257857143, 0.3062761905], [0.9449571429, 0.7261142857, 0.2886428571], [0.9738952381, 0.7313952381, 0.266647619], [0.9937714286, 0.7454571429, 0.240347619], [0.9990428571, 0.7653142857, 0.2164142857], [0.9955333333, 0.7860571429, 0.196652381], [0.988, 0.8066, 0.1793666667], [0.9788571429, 0.8271428571, 0.1633142857], [0.9697, 0.8481380952, 0.147452381], [0.9625857143, 0.8705142857, 0.1309], [0.9588714286, 0.8949, 0.1132428571], [0.9598238095, 0.9218333333, 0.0948380952], [0.9661, 0.9514428571, 0.0755333333], [0.9763, 0.9831, 0.0538]] parula_map = LinearSegmentedColormap.from_list('parula', cm_data, N=10) # For use of "viscm view" # + id="O2Arhajz4o8Z" colab_type="code" colab={} _parula_data = [[0.2081, 0.1663, 0.5292], [0.2116238095, 0.1897809524, 0.5776761905], [0.212252381, 0.2137714286, 0.6269714286], [0.2081, 0.2386, 0.6770857143], [0.1959047619, 0.2644571429, 0.7279], [0.1707285714, 0.2919380952, 0.779247619], [0.1252714286, 0.3242428571, 0.8302714286], [0.0591333333, 0.3598333333, 0.8683333333], [0.0116952381, 0.3875095238, 0.8819571429], [0.0059571429, 0.4086142857, 0.8828428571], [0.0165142857, 0.4266, 0.8786333333], [0.032852381, 0.4430428571, 0.8719571429], [0.0498142857, 0.4585714286, 0.8640571429], [0.0629333333, 0.4736904762, 0.8554380952], [0.0722666667, 0.4886666667, 0.8467], [0.0779428571, 0.5039857143, 0.8383714286], [0.079347619, 0.5200238095, 0.8311809524], [0.0749428571, 0.5375428571, 0.8262714286], [0.0640571429, 0.5569857143, 0.8239571429], [0.0487714286, 0.5772238095, 0.8228285714], [0.0343428571, 0.5965809524, 0.819852381], [0.0265, 0.6137, 0.8135], [0.0238904762, 0.6286619048, 0.8037619048], [0.0230904762, 0.6417857143, 0.7912666667], [0.0227714286, 0.6534857143, 0.7767571429], [0.0266619048, 
0.6641952381, 0.7607190476], [0.0383714286, 0.6742714286, 0.743552381], [0.0589714286, 0.6837571429, 0.7253857143], [0.0843, 0.6928333333, 0.7061666667], [0.1132952381, 0.7015, 0.6858571429], [0.1452714286, 0.7097571429, 0.6646285714], [0.1801333333, 0.7176571429, 0.6424333333], [0.2178285714, 0.7250428571, 0.6192619048], [0.2586428571, 0.7317142857, 0.5954285714], [0.3021714286, 0.7376047619, 0.5711857143], [0.3481666667, 0.7424333333, 0.5472666667], [0.3952571429, 0.7459, 0.5244428571], [0.4420095238, 0.7480809524, 0.5033142857], [0.4871238095, 0.7490619048, 0.4839761905], [0.5300285714, 0.7491142857, 0.4661142857], [0.5708571429, 0.7485190476, 0.4493904762], [0.609852381, 0.7473142857, 0.4336857143], [0.6473, 0.7456, 0.4188], [0.6834190476, 0.7434761905, 0.4044333333], [0.7184095238, 0.7411333333, 0.3904761905], [0.7524857143, 0.7384, 0.3768142857], [0.7858428571, 0.7355666667, 0.3632714286], [0.8185047619, 0.7327333333, 0.3497904762], [0.8506571429, 0.7299, 0.3360285714], [0.8824333333, 0.7274333333, 0.3217], [0.9139333333, 0.7257857143, 0.3062761905], [0.9449571429, 0.7261142857, 0.2886428571], [0.9738952381, 0.7313952381, 0.266647619], [0.9937714286, 0.7454571429, 0.240347619], [0.9990428571, 0.7653142857, 0.2164142857], [0.9955333333, 0.7860571429, 0.196652381], [0.988, 0.8066, 0.1793666667], [0.9788571429, 0.8271428571, 0.1633142857], [0.9697, 0.8481380952, 0.147452381], [0.9625857143, 0.8705142857, 0.1309], [0.9588714286, 0.8949, 0.1132428571], [0.9598238095, 0.9218333333, 0.0948380952], [0.9661, 0.9514428571, 0.0755333333], [0.9763, 0.9831, 0.0538]] _viridis_data = [[0.267004, 0.004874, 0.329415], [0.268510, 0.009605, 0.335427], [0.269944, 0.014625, 0.341379], [0.271305, 0.019942, 0.347269], [0.272594, 0.025563, 0.353093], [0.273809, 0.031497, 0.358853], [0.274952, 0.037752, 0.364543], [0.276022, 0.044167, 0.370164], [0.277018, 0.050344, 0.375715], [0.277941, 0.056324, 0.381191], [0.278791, 0.062145, 0.386592], [0.279566, 0.067836, 0.391917], [0.280267, 
0.073417, 0.397163], [0.280894, 0.078907, 0.402329], [0.281446, 0.084320, 0.407414], [0.281924, 0.089666, 0.412415], [0.282327, 0.094955, 0.417331], [0.282656, 0.100196, 0.422160], [0.282910, 0.105393, 0.426902], [0.283091, 0.110553, 0.431554], [0.283197, 0.115680, 0.436115], [0.283229, 0.120777, 0.440584], [0.283187, 0.125848, 0.444960], [0.283072, 0.130895, 0.449241], [0.282884, 0.135920, 0.453427], [0.282623, 0.140926, 0.457517], [0.282290, 0.145912, 0.461510], [0.281887, 0.150881, 0.465405], [0.281412, 0.155834, 0.469201], [0.280868, 0.160771, 0.472899], [0.280255, 0.165693, 0.476498], [0.279574, 0.170599, 0.479997], [0.278826, 0.175490, 0.483397], [0.278012, 0.180367, 0.486697], [0.277134, 0.185228, 0.489898], [0.276194, 0.190074, 0.493001], [0.275191, 0.194905, 0.496005], [0.274128, 0.199721, 0.498911], [0.273006, 0.204520, 0.501721], [0.271828, 0.209303, 0.504434], [0.270595, 0.214069, 0.507052], [0.269308, 0.218818, 0.509577], [0.267968, 0.223549, 0.512008], [0.266580, 0.228262, 0.514349], [0.265145, 0.232956, 0.516599], [0.263663, 0.237631, 0.518762], [0.262138, 0.242286, 0.520837], [0.260571, 0.246922, 0.522828], [0.258965, 0.251537, 0.524736], [0.257322, 0.256130, 0.526563], [0.255645, 0.260703, 0.528312], [0.253935, 0.265254, 0.529983], [0.252194, 0.269783, 0.531579], [0.250425, 0.274290, 0.533103], [0.248629, 0.278775, 0.534556], [0.246811, 0.283237, 0.535941], [0.244972, 0.287675, 0.537260], [0.243113, 0.292092, 0.538516], [0.241237, 0.296485, 0.539709], [0.239346, 0.300855, 0.540844], [0.237441, 0.305202, 0.541921], [0.235526, 0.309527, 0.542944], [0.233603, 0.313828, 0.543914], [0.231674, 0.318106, 0.544834], [0.229739, 0.322361, 0.545706], [0.227802, 0.326594, 0.546532], [0.225863, 0.330805, 0.547314], [0.223925, 0.334994, 0.548053], [0.221989, 0.339161, 0.548752], [0.220057, 0.343307, 0.549413], [0.218130, 0.347432, 0.550038], [0.216210, 0.351535, 0.550627], [0.214298, 0.355619, 0.551184], [0.212395, 0.359683, 0.551710], [0.210503, 0.363727, 
0.552206], [0.208623, 0.367752, 0.552675], [0.206756, 0.371758, 0.553117], [0.204903, 0.375746, 0.553533], [0.203063, 0.379716, 0.553925], [0.201239, 0.383670, 0.554294], [0.199430, 0.387607, 0.554642], [0.197636, 0.391528, 0.554969], [0.195860, 0.395433, 0.555276], [0.194100, 0.399323, 0.555565], [0.192357, 0.403199, 0.555836], [0.190631, 0.407061, 0.556089], [0.188923, 0.410910, 0.556326], [0.187231, 0.414746, 0.556547], [0.185556, 0.418570, 0.556753], [0.183898, 0.422383, 0.556944], [0.182256, 0.426184, 0.557120], [0.180629, 0.429975, 0.557282], [0.179019, 0.433756, 0.557430], [0.177423, 0.437527, 0.557565], [0.175841, 0.441290, 0.557685], [0.174274, 0.445044, 0.557792], [0.172719, 0.448791, 0.557885], [0.171176, 0.452530, 0.557965], [0.169646, 0.456262, 0.558030], [0.168126, 0.459988, 0.558082], [0.166617, 0.463708, 0.558119], [0.165117, 0.467423, 0.558141], [0.163625, 0.471133, 0.558148], [0.162142, 0.474838, 0.558140], [0.160665, 0.478540, 0.558115], [0.159194, 0.482237, 0.558073], [0.157729, 0.485932, 0.558013], [0.156270, 0.489624, 0.557936], [0.154815, 0.493313, 0.557840], [0.153364, 0.497000, 0.557724], [0.151918, 0.500685, 0.557587], [0.150476, 0.504369, 0.557430], [0.149039, 0.508051, 0.557250], [0.147607, 0.511733, 0.557049], [0.146180, 0.515413, 0.556823], [0.144759, 0.519093, 0.556572], [0.143343, 0.522773, 0.556295], [0.141935, 0.526453, 0.555991], [0.140536, 0.530132, 0.555659], [0.139147, 0.533812, 0.555298], [0.137770, 0.537492, 0.554906], [0.136408, 0.541173, 0.554483], [0.135066, 0.544853, 0.554029], [0.133743, 0.548535, 0.553541], [0.132444, 0.552216, 0.553018], [0.131172, 0.555899, 0.552459], [0.129933, 0.559582, 0.551864], [0.128729, 0.563265, 0.551229], [0.127568, 0.566949, 0.550556], [0.126453, 0.570633, 0.549841], [0.125394, 0.574318, 0.549086], [0.124395, 0.578002, 0.548287], [0.123463, 0.581687, 0.547445], [0.122606, 0.585371, 0.546557], [0.121831, 0.589055, 0.545623], [0.121148, 0.592739, 0.544641], [0.120565, 0.596422, 0.543611], 
[0.120092, 0.600104, 0.542530], [0.119738, 0.603785, 0.541400], [0.119512, 0.607464, 0.540218], [0.119423, 0.611141, 0.538982], [0.119483, 0.614817, 0.537692], [0.119699, 0.618490, 0.536347], [0.120081, 0.622161, 0.534946], [0.120638, 0.625828, 0.533488], [0.121380, 0.629492, 0.531973], [0.122312, 0.633153, 0.530398], [0.123444, 0.636809, 0.528763], [0.124780, 0.640461, 0.527068], [0.126326, 0.644107, 0.525311], [0.128087, 0.647749, 0.523491], [0.130067, 0.651384, 0.521608], [0.132268, 0.655014, 0.519661], [0.134692, 0.658636, 0.517649], [0.137339, 0.662252, 0.515571], [0.140210, 0.665859, 0.513427], [0.143303, 0.669459, 0.511215], [0.146616, 0.673050, 0.508936], [0.150148, 0.676631, 0.506589], [0.153894, 0.680203, 0.504172], [0.157851, 0.683765, 0.501686], [0.162016, 0.687316, 0.499129], [0.166383, 0.690856, 0.496502], [0.170948, 0.694384, 0.493803], [0.175707, 0.697900, 0.491033], [0.180653, 0.701402, 0.488189], [0.185783, 0.704891, 0.485273], [0.191090, 0.708366, 0.482284], [0.196571, 0.711827, 0.479221], [0.202219, 0.715272, 0.476084], [0.208030, 0.718701, 0.472873], [0.214000, 0.722114, 0.469588], [0.220124, 0.725509, 0.466226], [0.226397, 0.728888, 0.462789], [0.232815, 0.732247, 0.459277], [0.239374, 0.735588, 0.455688], [0.246070, 0.738910, 0.452024], [0.252899, 0.742211, 0.448284], [0.259857, 0.745492, 0.444467], [0.266941, 0.748751, 0.440573], [0.274149, 0.751988, 0.436601], [0.281477, 0.755203, 0.432552], [0.288921, 0.758394, 0.428426], [0.296479, 0.761561, 0.424223], [0.304148, 0.764704, 0.419943], [0.311925, 0.767822, 0.415586], [0.319809, 0.770914, 0.411152], [0.327796, 0.773980, 0.406640], [0.335885, 0.777018, 0.402049], [0.344074, 0.780029, 0.397381], [0.352360, 0.783011, 0.392636], [0.360741, 0.785964, 0.387814], [0.369214, 0.788888, 0.382914], [0.377779, 0.791781, 0.377939], [0.386433, 0.794644, 0.372886], [0.395174, 0.797475, 0.367757], [0.404001, 0.800275, 0.362552], [0.412913, 0.803041, 0.357269], [0.421908, 0.805774, 0.351910], [0.430983, 
0.808473, 0.346476], [0.440137, 0.811138, 0.340967], [0.449368, 0.813768, 0.335384], [0.458674, 0.816363, 0.329727], [0.468053, 0.818921, 0.323998], [0.477504, 0.821444, 0.318195], [0.487026, 0.823929, 0.312321], [0.496615, 0.826376, 0.306377], [0.506271, 0.828786, 0.300362], [0.515992, 0.831158, 0.294279], [0.525776, 0.833491, 0.288127], [0.535621, 0.835785, 0.281908], [0.545524, 0.838039, 0.275626], [0.555484, 0.840254, 0.269281], [0.565498, 0.842430, 0.262877], [0.575563, 0.844566, 0.256415], [0.585678, 0.846661, 0.249897], [0.595839, 0.848717, 0.243329], [0.606045, 0.850733, 0.236712], [0.616293, 0.852709, 0.230052], [0.626579, 0.854645, 0.223353], [0.636902, 0.856542, 0.216620], [0.647257, 0.858400, 0.209861], [0.657642, 0.860219, 0.203082], [0.668054, 0.861999, 0.196293], [0.678489, 0.863742, 0.189503], [0.688944, 0.865448, 0.182725], [0.699415, 0.867117, 0.175971], [0.709898, 0.868751, 0.169257], [0.720391, 0.870350, 0.162603], [0.730889, 0.871916, 0.156029], [0.741388, 0.873449, 0.149561], [0.751884, 0.874951, 0.143228], [0.762373, 0.876424, 0.137064], [0.772852, 0.877868, 0.131109], [0.783315, 0.879285, 0.125405], [0.793760, 0.880678, 0.120005], [0.804182, 0.882046, 0.114965], [0.814576, 0.883393, 0.110347], [0.824940, 0.884720, 0.106217], [0.835270, 0.886029, 0.102646], [0.845561, 0.887322, 0.099702], [0.855810, 0.888601, 0.097452], [0.866013, 0.889868, 0.095953], [0.876168, 0.891125, 0.095250], [0.886271, 0.892374, 0.095374], [0.896320, 0.893616, 0.096335], [0.906311, 0.894855, 0.098125], [0.916242, 0.896091, 0.100717], [0.926106, 0.897330, 0.104071], [0.935904, 0.898570, 0.108131], [0.945636, 0.899815, 0.112838], [0.955300, 0.901065, 0.118128], [0.964894, 0.902323, 0.123941], [0.974417, 0.903590, 0.130215], [0.983868, 0.904867, 0.136897], [0.993248, 0.906157, 0.143936]] from matplotlib.colors import ListedColormap cmaps = {} for (name, data) in (('viridis', _viridis_data), ('parula', _parula_data)): cmaps[name] = ListedColormap(data, name=name,N=10) 
viridis = cmaps['viridis']
parula_D = cmaps['parula']

# + id="sVw50N1qmR2w" colab_type="code" colab={"base_uri": "https://localhost:8080/"}
a = np.arange(10)
print(a)

# + id="H3kqcEIhWlJj" colab_type="code" colab={"base_uri": "https://localhost:8080/"}
import numpy as np
import matplotlib.pyplot as plt

mat = np.random.random((10, 10))
plt.imshow(mat, origin="lower", cmap='gray', interpolation='nearest')
plt.colorbar()
plt.show()

# + id="2cHnYsAPdbZm" colab_type="code" colab={"base_uri": "https://localhost:8080/"}
np.save('/content/drive/My Drive/Major_Project/Data/Img.npy', img)

# + [markdown] id="PCU8f7EkAe1x" colab_type="text"
# ## > Preprocess

# + id="R0yTck9qdvNF" colab_type="code" colab={"base_uri": "https://localhost:8080/"}
## Preprocess
img_dr = img

# separate foreground and background (label 0 is the background class)
img_fg = img[img_gt != 0]
img_bg = img[img_gt == 0]
img_dr_fg = img_dr[img_gt != 0]
img_dr_bg = img_dr[img_gt == 0]
img_gt_fg = img_gt[img_gt != 0]
img_gt_bg = img_gt[img_gt == 0]

# shuffle data with seed 10 (fixed seed keeps the split reproducible)
shufflePermutation = np.random.RandomState(seed=10).permutation(len(img_fg))
img_fg = img_fg[shufflePermutation]
img_dr_fg = img_dr_fg[shufflePermutation]
img_gt_fg = img_gt_fg[shufflePermutation]

# define train/test split ratio
split_ratio = 0.7
split = math.ceil(img_fg.shape[0] * split_ratio)

## split into training and testing
# original dimension image
img_train = img_fg[:(split + 1), :]
img_gt_train = img_gt_fg[:(split + 1)]
img_test = img_fg[(split + 1):, :]
img_gt_test = img_gt_fg[(split + 1):]
# reduced dimension image
img_dr_train = img_dr_fg[:(split + 1), :]
img_dr_test = img_dr_fg[(split + 1):, :]

# plt.hist(img_train[700,])
# plt.show()
print(tabulate(np.unique(img_gt_train, return_counts=True), tablefmt="grid"))
print(tabulate(np.unique(img_gt_test, return_counts=True), tablefmt="grid"))

plt.figure()
plt.plot(img_train[:, 1])
plt.title('fid 1')
plt.show()

## normalize data
min_max_scaler = preprocessing.MinMaxScaler()
# original dimension image
img_try = min_max_scaler.fit_transform(img.astype('float32'))
img_train = preprocessing.normalize(img_train.astype('float32'))
# OR
plt.figure()
plt.plot(img_train[:, 1])
plt.title('fid 1')
plt.show()
img_train = min_max_scaler.fit_transform(img_train.astype('float32'))
# preprocessing.scale(img_train.astype('float32'))
# plt.figure()
# plt.hist(img_train[700,])
# plt.show()
img_test = preprocessing.normalize(img_test.astype('float32'))
# img_test = min_max_scaler.fit_transform(img_test.astype('float32'))
# OR preprocessing.normalize(img_test.astype('float32'))
# OR preprocessing.scale(img_test.astype('float32'))
# reduced dimension image
img_dr_train = min_max_scaler.fit_transform(img_dr_train.astype('float32'))
img_dr_test = min_max_scaler.fit_transform(img_dr_test.astype('float32'))

# for result of plot before and after reduction
# img_try_fg = img_try[img_gt != 0]
# plt.plot(img_dr_fg[2000])
# plt.figure()
# plt.plot(img_try_fg[2000])
# print(img_gt_fg[2000])

# sanity checks
print("Train vector size: ", img_train.shape)
print("Test vector size: ", img_test.shape)
print("Reduced Train vector size: ", img_dr_train.shape)
print("Reduced Test vector size: ", img_dr_test.shape)
print("Label train vector size: ", img_gt_train.shape)
print("Label test vector size: ", img_gt_test.shape)

# + id="QveTxMtN5gp2" colab_type="code" colab={"base_uri": "https://localhost:8080/"}
plt.hist(img_train[700, ])
plt.show()

# + [markdown] id="ALwLjMdEdZKd" colab_type="text"
# ## > Functions

# + [markdown] id="wE-F-1U_fEnn" colab_type="text"
# ### > SVM

# + [markdown] id="a0j-jB8i91KH" colab_type="text"
# > Model

# + id="y01nlkeidb7E" colab_type="code" colab={}
## svm
from warnings import simplefilter
from sklearn.exceptions import ConvergenceWarning
simplefilter("ignore", category=ConvergenceWarning)


def svmFit(xtrain, ytrain, C=1, gamma='scale', kernel='rbf', grid=True):
    """Fit an SVM classifier and report the wall-clock fitting time.

    Args:
        xtrain, ytrain: training feature vectors and labels.
        C, gamma, kernel: SVC hyper-parameters used when grid=False.
            Defaults added so `svmFit(x, y, grid=True)` can be called
            without them (the grid branch never used them anyway).
        grid: when True, run a 5-fold GridSearchCV over a fixed grid
            instead of fitting a single SVC.

    Returns:
        The fitted estimator (sklearn SVC or GridSearchCV).
    """
    if grid:
        # define params and grid
        # NOTE(review): the passed C/gamma/kernel are intentionally ignored
        # in this branch -- the search grid below is fixed.
        params_grid = [{'decision_function_shape': ['ovr'],
                        'max_iter': [1000],
                        'kernel': ['poly'],
                        'gamma': [0.005, 1e-3],
                        'C': [1, 10, 100, 1000]}]
        svmModel = GridSearchCV(SVC(), params_grid, cv=5,
                                return_train_score=True)
    else:
        # define params
        svmModel = svm.SVC(decision_function_shape='ovr', max_iter=-1,
                           gamma=gamma, kernel=kernel, C=C)

    # time the fit so long runs are visible in the log
    start_time = time.time()
    svmModel.fit(xtrain, ytrain)
    # end time - display estimated time duration for fitting
    duration = time.time() - start_time
    print("\n --- %s seconds --- \n" % duration)

    return svmModel


# + [markdown] id="x90xtwvw92tE" colab_type="text"
# > Results

# + id="UOHWe-GReQZi" colab_type="code" colab={}
## svm results
def svmResults(xtrain, ytrain, xtest, ytest, svmModel):
    """Print train/test accuracy, kappa scores, confusion matrix and
    classification report for a fitted SVM.

    Side effect: appends a row to the module-level `table`, reading the
    module-level sweep variables C/gamma/kernel -- NOTE(review): this only
    records meaningful values when called from the hyper-parameter sweep
    cells; confirm before calling it elsewhere.
    """
    # get predicted labels for train and test data
    img_gt_train_predicted_svm = svmModel.predict(xtrain)
    img_gt_predicted_svm = svmModel.predict(xtest)
    test_shape = img_gt_predicted_svm.shape[0]

    # comparison table - make table to display predicted and true labels
    # label_table_svm = np.concatenate(
    #     (img_gt_predicted_svm.reshape((test_shape, 1)),
    #      ytest.reshape((test_shape, 1))), axis=1)

    # compute train & test accuracy
    train_score = svmModel.score(xtrain, ytrain)
    test_score = svmModel.score(xtest, ytest)
    train_kappa = metrics.cohen_kappa_score(ytrain, img_gt_train_predicted_svm)
    test_kappa = metrics.cohen_kappa_score(ytest, img_gt_predicted_svm)

    # print test and train accuracy
    print("Report for Classifier: \n")
    print("Training set score for SVM: %f" % train_score)
    print("Testing set score for SVM: %f" % test_score)
    print("Train Kappa score for SVM: %f" % train_kappa)
    print("Test Kappa score for SVM: %f" % test_kappa)
    print("\n")

    # print confusion matrix and classification report
    print(confusion_matrix(ytest, img_gt_predicted_svm))
    print("\n")
    print(classification_report(ytest, img_gt_predicted_svm))

    # print comparison table (first 10)
    # headers = ["Predicted Label", "Ground Truth Label"]
    # print(tabulate(label_table_svm[1:10], headers, tablefmt="grid"))

    table.append([C, gamma, kernel, train_score, test_score,
                  train_kappa, test_kappa])


# + [markdown] id="HAIH0--Q8Lf6" colab_type="text"
# ### > CNN

# + [markdown] id="cw113ZFq_6hj" colab_type="text"
# > Model

# + id="wRZGhZuW8UsM" colab_type="code" colab={}
## CNN model
def cnnFit(r, xtrain, ytrain_ohe, xtest, ytest_ohe):
    """Build, fit and evaluate a small 1D CNN on spectral vectors.

    Args:
        r: iteration index (unused inside, kept for the caller's loop).
        xtrain, xtest: 2D arrays (samples x bands), reshaped internally
            to (samples, bands, 1).
        ytrain_ohe, ytest_ohe: one-hot encoded labels.

    Returns:
        [model, accuracy]: the fitted model and its test-set accuracy.
    """
    # define params, preprocess
    verbose, epochs, batch_size = 0, 10, 32
    # assumes each sample has 200 spectral bands -- TODO confirm against data
    n_timesteps, n_features, n_outputs = 200, 1, ytrain_ohe.shape[1]
    xtrain = np.reshape(xtrain, [xtrain.shape[0], xtrain.shape[1], 1])
    xtest = np.reshape(xtest, [xtest.shape[0], xtest.shape[1], 1])

    # define model
    model = Sequential()
    model.add(Conv1D(filters=64, kernel_size=3, activation='relu',
                     input_shape=(n_timesteps, n_features)))
    model.add(Conv1D(filters=64, kernel_size=3, activation='relu'))
    # model.add(Dropout(0.5))
    model.add(MaxPooling1D(pool_size=2))
    model.add(Flatten())
    model.add(Dense(100, activation='relu'))
    model.add(Dense(n_outputs, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam',
                  metrics=['accuracy'])
    model.summary()

    # time the fit
    start_time = time.time()
    model.fit(xtrain, ytrain_ohe, epochs=epochs, batch_size=batch_size,
              verbose=verbose)
    # end time - display estimated time duration for fitting
    print("--- %s seconds ---" % (time.time() - start_time))

    # evaluate model
    _, accuracy = model.evaluate(xtest, ytest_ohe, batch_size=batch_size,
                                 verbose=0)
    return [model, accuracy]


# + [markdown] id="wCrTOBIM_9ts" colab_type="text"
# > Results

# + id="l9sHnF_l__AF" colab_type="code" colab={}
def cnnResults(scores, cnnModel, cnnScore, xtrain, ytrain_ohe, xtest,
               ytest_ohe):
    """Summarize the CNN runs and print a full test-set report.

    Args:
        scores: per-iteration accuracy scores (percent).
        cnnModel: the best fitted model from the iterations.
        cnnScore: the best (test) score, in percent.
        xtrain/xtest: 2D feature arrays; reshaped to 3D here.
        ytrain_ohe/ytest_ohe: one-hot encoded labels.
    """
    # derive iter number and scores
    sc = [[i] for i in scores]
    # generalized: iteration count follows len(scores) instead of a
    # hard-coded 5
    it = [[i] for i in range(1, len(scores) + 1)]
    # insert in table as columns side by side
    table = np.concatenate((it, sc), axis=1)
    # print summary table
    print(tabulate(table, ["Iteration", "Score"], tablefmt="github"))
    # print mean and std of scores
    m, s = statistics.mean(scores), statistics.stdev(scores)
    print('\n Mean Accuracy: %.3f%% Standard Deviation: (+/-%.3f)' % (m, s))

    # reshaping to the (samples, bands, 1) layout the model expects
    xtrain = np.reshape(xtrain, [xtrain.shape[0], xtrain.shape[1], 1])
    xtest = np.reshape(xtest, [xtest.shape[0], xtest.shape[1], 1])

    # print train & test accuracy
    print("Report for CNN Classifier: \n")
    # BUG FIX: evaluate the training set against the TRAINING labels
    # (original passed ytest_ohe) and unpack (loss, accuracy) correctly --
    # evaluate() returns metrics, not predictions.
    _, train_score = cnnModel.evaluate(xtrain, ytrain_ohe, verbose=0)
    print("Training set score CNN: %f" % train_score)
    print("Testing set score for CNN: %f" % cnnScore)

    # BUG FIX: confusion matrix / classification report need class labels;
    # derive predicted and true labels via argmax instead of feeding the
    # scalar loss (and the previously undefined `ytest`) into them.
    ypredicted_cnn = np.argmax(cnnModel.predict(xtest), axis=1)
    ytest_labels = np.argmax(ytest_ohe, axis=1)

    # print confusion matrix and classification report
    print(confusion_matrix(ytest_labels, ypredicted_cnn))
    print("\n")
    print(classification_report(ytest_labels, ypredicted_cnn))


# + [markdown] id="B2l8AvyAFe8u" colab_type="text"
# ## > Original Dimension - Training

# + [markdown] id="IORNE6EN6GBj" colab_type="text"
# ### > SVM

# + [markdown] id="73jOCYelFm9e" colab_type="text"
# > SVM - Model

# + id="3wlZOKTM9x57" colab_type="code" colab={}
table = []
table_header = ["C", "Gamma", "Kernel", "Train_Score", "Test_Score",
                "Train_Kappa", "Test_Kappa"]

# redirect stdout to a log file for the duration of the sweep
orig_stdout = sys.stdout
f = open('/content/drive/My Drive/Major_Project/unSupervised_Results/Indian_Pines/out.txt', 'w')
sys.stdout = f

for C in [1, 5, 10, 50, 100, 500, 1000]:
    for gamma in [0.5, 0.1, 0.05, 0.005, 'scale']:
        for kernel in ['rbf', 'poly']:
            print("-------------------------------------------------------------")
            svmModel = svmFit(img_train, img_gt_train, C, gamma, kernel,
                              grid=False)
            svmResults(img_train, img_gt_train, img_test, img_gt_test,
                       svmModel)
            print("-------------------------------------------------------------")
            del svmModel

df = pd.DataFrame(table, columns=table_header)
full_path = '/content/drive/My Drive/Major_Project/unSupervised_Results/Indian_Pines/Classification.csv'
df.to_csv(full_path, index=False, header=True)
print(tabulate(table, table_header, tablefmt="github"))

sys.stdout = orig_stdout
f.close()

# + id="AoaeRMfedn_2" colab_type="code" colab={"base_uri": "https://localhost:8080/"}
# svm - w/o grid Search - model
svmModel = svmFit(img_train, img_gt_train, 500, 0.09, 'poly', grid=False)
# svm - grid Search - Results
# svmModel_grid = svmFit(img_train, img_gt_train, grid = True)
# print(svmModel_grid.cv_results_)

# + [markdown]
# id="lpoP_mB6JaGr" colab_type="text"
# > SVM - Results

# + id="_ekqzRzcfu3Z" colab_type="code" colab={"base_uri": "https://localhost:8080/"}
# svm w/o gridsearch - Results
svmResults(img_train, img_gt_train, img_test, img_gt_test, svmModel)

# choose best estimator svm model from grid
# svmModel_grid = svmModel_grid.best_estimator_
# svm - gridsearch - Results
# svmResults(img_train, img_gt_train, img_test, img_gt_test, svmModel_grid)

# + [markdown] id="ZLbE_2hO6OVf" colab_type="text"
# ### > CNN

# + [markdown] id="8YrgH0SQ98rA" colab_type="text"
# > CNN - Model

# + id="QsrpKD3_-W5C" colab_type="code" colab={}
# one hot encoding - eliminating the background class, i.e. 16 classes now
img_gt_train_ohe = keras.utils.to_categorical(img_gt_train - 1)
img_gt_test_ohe = keras.utils.to_categorical(img_gt_test - 1)

# sanity check
print(img_gt_train_ohe.shape)

# collect the scores of 5 independent fits; keep the best model
scores = list()
best_score = 0

for r in range(5):
    # fit and evaluate one model
    [model, score] = cnnFit(r, img_train, img_gt_train_ohe,
                            img_test, img_gt_test_ohe)
    score = score * 100.0
    # remember the best of the 5 iterations
    if score > best_score:
        best_cnnModel = model
        best_score = score
    # if score>temp_score:
    #     best_cnnModel = model
    #     best_score = score
    # else:
    print('>#%d: %.3f' % (r + 1, score))
    scores.append(score)

# + [markdown] id="_GUwtHQMBixx" colab_type="text"
# > CNN - Results

# + id="OLWsCduyBlmg" colab_type="code" colab={}
cnnResults(scores, best_cnnModel, best_score, img_train, img_gt_train_ohe,
           img_test, img_gt_test_ohe)

# + [markdown] id="ilRIslSMJE7i" colab_type="text"
# ## > Reduced Dimension - Training

# + [markdown] id="rjncLgHO6RPo" colab_type="text"
# ### > SVM

# + [markdown] id="EAgh7XA83obM" colab_type="text"
# > SVM - Model

# + id="DXqlRSvAIIFu" colab_type="code" colab={"base_uri": "https://localhost:8080/"}
table = []
table_header = ["C", "Gamma", "Kernel", "Train_Score", "Test_Score",
                "Train_Kappa", "Test_Kappa"]

# log the whole sweep to a file instead of the notebook output
orig_stdout = sys.stdout
f = open('/content/drive/My Drive/Major_Project/unSupervised_Results/Indian_Pines/out_DR.txt', 'w')
sys.stdout = f

for C in [1, 5, 10, 50, 100, 500, 1000]:
    for gamma in [0.5, 0.1, 0.05, 0.005, 'scale']:
        for kernel in ['rbf', 'poly']:
            print("-------------------------------------------------------------")
            svmModel_dr = svmFit(img_dr_train, img_gt_train, C, gamma,
                                 kernel, grid=False)
            svmResults(img_dr_train, img_gt_train, img_dr_test, img_gt_test,
                       svmModel_dr)
            print("-------------------------------------------------------------")
            del svmModel_dr

df = pd.DataFrame(table, columns=table_header)
full_path = '/content/drive/My Drive/Major_Project/unSupervised_Results/Indian_Pines/Classification_DR.csv'
df.to_csv(full_path, index=False, header=True)
print(tabulate(table, table_header, tablefmt="github"))

sys.stdout = orig_stdout
f.close()

# + id="CGpT2HxkrpMq" colab_type="code" colab={"base_uri": "https://localhost:8080/"}
# svm - w/o gridSearch - model
svmModel_dr = svmFit(img_dr_train, img_gt_train, 500, 0.09, 'poly',
                     grid=False)
# svm - gridSearch - Results
# svmModel_dr_grid = svmFit(img_dr_train, img_gt_train, grid = True)

# + [markdown] id="zuUyqbyfy9vG" colab_type="text"
# > SVM - Results

# + id="sAnh8m7hz5nO" colab_type="code" colab={"base_uri": "https://localhost:8080/"}
# svm w/o gridsearch - Results
svmResults(img_dr_train, img_gt_train, img_dr_test, img_gt_test, svmModel_dr)
# choose best estimator svm model from grid
# svmModel_dr_grid = svmModel_dr_grid.best_estimator_
# svm - gridsearch - Results
# svmResults(img_dr_train, img_gt_train, img_dr_test, img_gt_test, svmModel_dr_grid)

# + [markdown] id="81WunQEt6dW4" colab_type="text"
# ### > CNN

# + [markdown] id="zyY3xawlHP39" colab_type="text"
# > CNN - Model

# + id="IfCjOcdP-Lm7" colab_type="code" colab={}
# one hot encoding - eliminating the background class, i.e. 16 classes now
img_gt_train_ohe = keras.utils.to_categorical(img_gt_train - 1)
img_gt_test_ohe = keras.utils.to_categorical(img_gt_test - 1)

# sanity check
print(img_gt_train_ohe.shape)

# collect the scores of 5 independent fits; keep the best model
scores_dr = list()
best_score_dr = 0

for r in range(5):
    # fit and evaluate one model on the reduced-dimension data
    [model, score] = cnnFit(r, img_dr_train, img_gt_train_ohe,
                            img_dr_test, img_gt_test_ohe)
    score = score * 100.0
    # remember the best of the 5 iterations
    if score > best_score_dr:
        best_cnnModel_dr = model
        best_score_dr = score
    print('>#%d: %.3f' % (r + 1, score))
    scores_dr.append(score)

# + [markdown] id="vM6MWoYBHVMC" colab_type="text"
# > CNN - Results

# + id="QsexTZRXHXX6" colab_type="code" colab={}
cnnResults(scores_dr, best_cnnModel_dr, best_score_dr, img_dr_train,
           img_gt_train_ohe, img_dr_test, img_gt_test_ohe)

# + id="2jrpBnMWnt0D" colab_type="code" colab={"base_uri": "https://localhost:8080/"}
# img_test_new = np.concatenate((img_test, img_bg))
# img_gt_test_new = np.concatenate((ypredicted_svm, img_gt_bg))
# svmScore_new = svmModel.score(img_test_new, img_gt_test++_new)
# print(svmScore_new)
import numpy as np

a = np.load('/content/drive/My Drive/Major_Project/Supervised_Results/reduced_img_f_dr_20.npy')
print(a.shape)
Class_img.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] colab_type="text" id="view-in-github"
# <a href="https://colab.research.google.com/github/NeuromatchAcademy/course-content/blob/master/tutorials/W1D2_ModelingPractice/student/W1D2_Tutorial2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] colab_type="text"
# # Neuromatch Academy: Week 1, Day 2, Tutorial 2
# # Modeling Practice: Model implementation and evaluation
# __Content creators:__ <NAME>, <NAME>, <NAME>
#
# __Content reviewers:__ <NAME>, <NAME>, <NAME>, <NAME>, <NAME>

# + [markdown] colab_type="text"
# ---
# # Tutorial objectives
#
# We are investigating a simple phenomenon, working through the 10 steps of modeling ([Blohm et al., 2019](https://doi.org/10.1523/ENEURO.0352-19.2019)) in two notebooks:
#
# **Framing the question**
#
# 1. finding a phenomenon and a question to ask about it
# 2. understanding the state of the art
# 3. determining the basic ingredients
# 4. formulating specific, mathematically defined hypotheses
#
# **Implementing the model**
#
# 5. selecting the toolkit
# 6. planning the model
# 7. implementing the model
#
# **Model testing**
#
# 8. completing the model
# 9. testing and evaluating the model
#
# **Publishing**
#
# 10. publishing models
#
# We did steps 1-5 in Tutorial 1 and will cover steps 6-10 in Tutorial 2 (this notebook).
# + [markdown] colab_type="text" # # Setup # # # + cellView="both" colab={} colab_type="code" import numpy as np import matplotlib.pyplot as plt from scipy import stats from scipy.stats import gamma from IPython.display import YouTubeVideo # + cellView="form" colab={} colab_type="code" # @title Figure settings import ipywidgets as widgets # %config InlineBackend.figure_format = 'retina' plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/nma.mplstyle") # + cellView="form" colab={} colab_type="code" # @title Helper functions def my_moving_window(x, window=3, FUN=np.mean): """ Calculates a moving estimate for a signal Args: x (numpy.ndarray): a vector array of size N window (int): size of the window, must be a positive integer FUN (function): the function to apply to the samples in the window Returns: (numpy.ndarray): a vector array of size N, containing the moving average of x, calculated with a window of size window There are smarter and faster solutions (e.g. using convolution) but this function shows what the output really means. This function skips NaNs, and should not be susceptible to edge effects: it will simply use all the available samples, which means that close to the edges of the signal or close to NaNs, the output will just be based on fewer samples. By default, this function will apply a mean to the samples in the window, but this can be changed to be a max/min/median or other function that returns a single numeric value based on a sequence of values. 
""" # if data is a matrix, apply filter to each row: if len(x.shape) == 2: output = np.zeros(x.shape) for rown in range(x.shape[0]): output[rown, :] = my_moving_window(x[rown, :], window=window, FUN=FUN) return output # make output array of the same size as x: output = np.zeros(x.size) # loop through the signal in x for samp_i in range(x.size): values = [] # loop through the window: for wind_i in range(int(1 - window), 1): if ((samp_i + wind_i) < 0) or (samp_i + wind_i) > (x.size - 1): # out of range continue # sample is in range and not nan, use it: if not(np.isnan(x[samp_i + wind_i])): values += [x[samp_i + wind_i]] # calculate the mean in the window for this point in the output: output[samp_i] = FUN(values) return output def my_plot_percepts(datasets=None, plotconditions=False): if isinstance(datasets, dict): # try to plot the datasets # they should be named... # 'expectations', 'judgments', 'predictions' plt.figure(figsize=(8, 8)) # set aspect ratio = 1? not really plt.ylabel('perceived self motion [m/s]') plt.xlabel('perceived world motion [m/s]') plt.title('perceived velocities') # loop through the entries in datasets # plot them in the appropriate way for k in datasets.keys(): if k == 'expectations': expect = datasets[k] plt.scatter(expect['world'], expect['self'], marker='*', color='xkcd:green', label='my expectations') elif k == 'judgments': judgments = datasets[k] for condition in np.unique(judgments[:, 0]): c_idx = np.where(judgments[:, 0] == condition)[0] cond_self_motion = judgments[c_idx[0], 1] cond_world_motion = judgments[c_idx[0], 2] if cond_world_motion == -1 and cond_self_motion == 0: c_label = 'world-motion condition judgments' elif cond_world_motion == 0 and cond_self_motion == 1: c_label = 'self-motion condition judgments' else: c_label = f"condition [{condition:d}] judgments" plt.scatter(judgments[c_idx, 3], judgments[c_idx, 4], label=c_label, alpha=0.2) elif k == 'predictions': predictions = datasets[k] for condition in 
np.unique(predictions[:, 0]): c_idx = np.where(predictions[:, 0] == condition)[0] cond_self_motion = predictions[c_idx[0], 1] cond_world_motion = predictions[c_idx[0], 2] if cond_world_motion == -1 and cond_self_motion == 0: c_label = 'predicted world-motion condition' elif cond_world_motion == 0 and cond_self_motion == 1: c_label = 'predicted self-motion condition' else: c_label = f"condition [{condition:d}] prediction" plt.scatter(predictions[c_idx, 4], predictions[c_idx, 3], marker='x', label=c_label) else: print("datasets keys should be 'hypothesis', \ 'judgments' and 'predictions'") if plotconditions: # this code is simplified but only works for the dataset we have: plt.scatter([1], [0], marker='<', facecolor='none', edgecolor='xkcd:black', linewidths=2, label='world-motion stimulus', s=80) plt.scatter([0], [1], marker='>', facecolor='none', edgecolor='xkcd:black', linewidths=2, label='self-motion stimulus', s=80) plt.legend(facecolor='xkcd:white') plt.show() else: if datasets is not None: print('datasets argument should be a dict') raise TypeError def my_plot_stimuli(t, a, v): plt.figure(figsize=(10, 6)) plt.plot(t, a, label='acceleration [$m/s^2$]') plt.plot(t, v, label='velocity [$m/s$]') plt.xlabel('time [s]') plt.ylabel('[motion]') plt.legend(facecolor='xkcd:white') plt.show() def my_plot_motion_signals(): dt = 1 / 10 a = gamma.pdf(np.arange(0, 10, dt), 2.5, 0) t = np.arange(0, 10, dt) v = np.cumsum(a * dt) fig, [ax1, ax2] = plt.subplots(nrows=1, ncols=2, sharex='col', sharey='row', figsize=(14, 6)) fig.suptitle('Sensory ground truth') ax1.set_title('world-motion condition') ax1.plot(t, -v, label='visual [$m/s$]') ax1.plot(t, np.zeros(a.size), label='vestibular [$m/s^2$]') ax1.set_xlabel('time [s]') ax1.set_ylabel('motion') ax1.legend(facecolor='xkcd:white') ax2.set_title('self-motion condition') ax2.plot(t, -v, label='visual [$m/s$]') ax2.plot(t, a, label='vestibular [$m/s^2$]') ax2.set_xlabel('time [s]') ax2.set_ylabel('motion') 
ax2.legend(facecolor='xkcd:white') plt.show() def my_plot_sensorysignals(judgments, opticflow, vestibular, returnaxes=False, addaverages=False, integrateVestibular=False, addGroundTruth=False): if addGroundTruth: dt = 1 / 10 a = gamma.pdf(np.arange(0, 10, dt), 2.5, 0) t = np.arange(0, 10, dt) v = a wm_idx = np.where(judgments[:, 0] == 0) sm_idx = np.where(judgments[:, 0] == 1) opticflow = opticflow.transpose() wm_opticflow = np.squeeze(opticflow[:, wm_idx]) sm_opticflow = np.squeeze(opticflow[:, sm_idx]) if integrateVestibular: vestibular = np.cumsum(vestibular * .1, axis=1) if addGroundTruth: v = np.cumsum(a * dt) vestibular = vestibular.transpose() wm_vestibular = np.squeeze(vestibular[:, wm_idx]) sm_vestibular = np.squeeze(vestibular[:, sm_idx]) X = np.arange(0, 10, .1) fig, my_axes = plt.subplots(nrows=2, ncols=2, sharex='col', sharey='row', figsize=(15, 10)) fig.suptitle('Sensory signals') my_axes[0][0].plot(X, wm_opticflow, color='xkcd:light red', alpha=0.1) my_axes[0][0].plot([0, 10], [0, 0], ':', color='xkcd:black') if addGroundTruth: my_axes[0][0].plot(t, -v, color='xkcd:red') if addaverages: my_axes[0][0].plot(X, np.average(wm_opticflow, axis=1), color='xkcd:red', alpha=1) my_axes[0][0].set_title('optic-flow in world-motion condition') my_axes[0][0].set_ylabel('velocity signal [$m/s$]') my_axes[0][1].plot(X, sm_opticflow, color='xkcd:azure', alpha=0.1) my_axes[0][1].plot([0, 10], [0, 0], ':', color='xkcd:black') if addGroundTruth: my_axes[0][1].plot(t, -v, color='xkcd:blue') if addaverages: my_axes[0][1].plot(X, np.average(sm_opticflow, axis=1), color='xkcd:blue', alpha=1) my_axes[0][1].set_title('optic-flow in self-motion condition') my_axes[1][0].plot(X, wm_vestibular, color='xkcd:light red', alpha=0.1) my_axes[1][0].plot([0, 10], [0, 0], ':', color='xkcd:black') if addaverages: my_axes[1][0].plot(X, np.average(wm_vestibular, axis=1), color='xkcd:red', alpha=1) my_axes[1][0].set_title('vestibular signal in world-motion condition') if addGroundTruth: 
my_axes[1][0].plot(t, np.zeros(100), color='xkcd:red') my_axes[1][0].set_xlabel('time [s]') if integrateVestibular: my_axes[1][0].set_ylabel('velocity signal [$m/s$]') else: my_axes[1][0].set_ylabel('acceleration signal [$m/s^2$]') my_axes[1][1].plot(X, sm_vestibular, color='xkcd:azure', alpha=0.1) my_axes[1][1].plot([0, 10], [0, 0], ':', color='xkcd:black') if addGroundTruth: my_axes[1][1].plot(t, v, color='xkcd:blue') if addaverages: my_axes[1][1].plot(X, np.average(sm_vestibular, axis=1), color='xkcd:blue', alpha=1) my_axes[1][1].set_title('vestibular signal in self-motion condition') my_axes[1][1].set_xlabel('time [s]') if returnaxes: return my_axes else: plt.show() def my_threshold_solution(selfmotion_vel_est, threshold): is_move = (selfmotion_vel_est > threshold) return is_move def my_moving_threshold(selfmotion_vel_est, thresholds): pselfmove_nomove = np.empty(thresholds.shape) pselfmove_move = np.empty(thresholds.shape) prop_correct = np.empty(thresholds.shape) pselfmove_nomove[:] = np.NaN pselfmove_move[:] = np.NaN prop_correct[:] = np.NaN for thr_i, threshold in enumerate(thresholds): # run my_threshold that the students will write: try: is_move = my_threshold(selfmotion_vel_est, threshold) except Exception: is_move = my_threshold_solution(selfmotion_vel_est, threshold) # store results: pselfmove_nomove[thr_i] = np.mean(is_move[0:100]) pselfmove_move[thr_i] = np.mean(is_move[100:200]) # calculate the proportion classified correctly: # (1-pselfmove_nomove) + () # Correct rejections: p_CR = (1 - pselfmove_nomove[thr_i]) # correct detections: p_D = pselfmove_move[thr_i] # this is corrected for proportion of trials in each condition: prop_correct[thr_i] = (p_CR + p_D) / 2 return [pselfmove_nomove, pselfmove_move, prop_correct] def my_plot_thresholds(thresholds, world_prop, self_prop, prop_correct): plt.figure(figsize=(12, 8)) plt.title('threshold effects') plt.plot([min(thresholds), max(thresholds)], [0, 0], ':', color='xkcd:black') plt.plot([min(thresholds), 
max(thresholds)], [0.5, 0.5], ':', color='xkcd:black') plt.plot([min(thresholds), max(thresholds)], [1, 1], ':', color='xkcd:black') plt.plot(thresholds, world_prop, label='world motion condition') plt.plot(thresholds, self_prop, label='self motion condition') plt.plot(thresholds, prop_correct, color='xkcd:purple', label='correct classification') plt.xlabel('threshold') plt.ylabel('proportion correct or classified as self motion') plt.legend(facecolor='xkcd:white') plt.show() def my_plot_predictions_data(judgments, predictions): # conditions = np.concatenate((np.abs(judgments[:, 1]), # np.abs(judgments[:, 2]))) # veljudgmnt = np.concatenate((judgments[:, 3], judgments[:, 4])) # velpredict = np.concatenate((predictions[:, 3], predictions[:, 4])) # self: # conditions_self = np.abs(judgments[:, 1]) veljudgmnt_self = judgments[:, 3] velpredict_self = predictions[:, 3] # world: # conditions_world = np.abs(judgments[:, 2]) veljudgmnt_world = judgments[:, 4] velpredict_world = predictions[:, 4] fig, [ax1, ax2] = plt.subplots(nrows=1, ncols=2, sharey='row', figsize=(12, 5)) ax1.scatter(veljudgmnt_self, velpredict_self, alpha=0.2) ax1.plot([0, 1], [0, 1], ':', color='xkcd:black') ax1.set_title('self-motion judgments') ax1.set_xlabel('observed') ax1.set_ylabel('predicted') ax2.scatter(veljudgmnt_world, velpredict_world, alpha=0.2) ax2.plot([0, 1], [0, 1], ':', color='xkcd:black') ax2.set_title('world-motion judgments') ax2.set_xlabel('observed') ax2.set_ylabel('predicted') plt.show() # + cellView="form" colab={} colab_type="code" # @title Data retrieval import os fname="W1D2_data.npz" if not os.path.exists(fname): # !wget https://osf.io/c5xyf/download -O $fname filez = np.load(file=fname, allow_pickle=True) judgments = filez['judgments'] opticflow = filez['opticflow'] vestibular = filez['vestibular'] # + [markdown] colab_type="text" # --- # # Section 6: Model planning # + cellView="form" colab={} colab_type="code" # @title Video 6: Planning video = 
YouTubeVideo(id='dRTOFFigxa0', width=854, height=480, fs=1) print(f"Video available at https://youtube.com/watch?v={video.id}") video # + [markdown] colab_type="text" # # **Goal:** Identify the key components of the model and how they work together. # # Our goal all along has been to model our perceptual estimates of sensory data. # Now that we have some idea of what we want to do, we need to line up the components of the model: what are the input and output? Which computations are done and in what order? # # Our model will have: # * **inputs**: the values the system has available - this can be broken down in _data:_ the sensory signals, _parameters:_ the threshold and the window sizes for filtering # * **outputs**: these are the predictions our model will make - for this tutorial these are the perceptual judgments on each trial in m/s, just like the judgments participants made. # * **model functions**: A set of functions that perform the hypothesized computations. # # We will define a set of functions that take our data and some parameters as input, can run our model, and output a prediction for the judgment data. # # **Recap of what we've accomplished so far:** # # To model perceptual estimates from our sensory data, we need to # 1. _integrate:_ to ensure sensory information are in appropriate units # 2. _filter:_ to reduce noise and set timescale # 3. _threshold:_ to model detection # # This will be done with these operations: # 1. _integrate:_ `np.cumsum()` # 2. _filter:_ `my_moving_window()` # 3. _threshold:_ `if` with a comparison (`>` or `<`) and `else` # # **_Planning our model:_** # # We will now start putting all the pieces together. 
# Normally you would sketch this yourself, but here is an overview of how the
# functions comprising the model are going to work:
#
# ![model functions purpose](https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W1D2_ModelingPractice/static/NMA-W1D2-fig05.png)
#
# Below is the main function with a detailed explanation of what the function
# is supposed to do, exactly what input is expected, and what output will be
# generated.
#
# The model is not complete, so it only returns nans (**n**ot-**a**-**n**umber)
# for now. However, this outlines how most model code works: it gets some
# measured data (the sensory signals) and a set of parameters as input, and as
# output returns a prediction on other measured data (the velocity judgments).
#
# The goal of this function is to define the top level of a simulation model
# which:
# * receives all input
# * loops through the cases
# * calls functions that computes predicted values for each case
# * outputs the predictions

# + [markdown] colab_type="text"
# **Main model function**

# + cellView="both" colab={} colab_type="code"
def my_train_illusion_model(sensorydata, params):
  """
  Generate output predictions of perceived self-motion and perceived
  world-motion velocity based on input visual and vestibular signals.

  Args:
    sensorydata: (dict) dictionary with two named entries:
      opticflow: (numpy.ndarray of float) NxM array with N trials on rows
        and M visual signal samples in columns
      vestibular: (numpy.ndarray of float) NxM array with N trials on rows
        and M vestibular signal samples in columns
    params: (dict) dictionary with named entries:
      threshold: (float) vestibular threshold for credit assignment
      filterwindows: (list of int) determines the strength of filtering for
        the vestibular and visual signals, respectively
      integrate (bool): whether to integrate the vestibular signals, will
        be set to True if absent
      FUN (function): function used in the filter, will be set to np.mean
        if absent
      samplingrate (float): the number of samples per second in the
        sensory data, will be set to 10 if absent

  Returns:
    dict with two entries:
      selfmotion: (numpy.ndarray) vector array of length N, with
        predictions of perceived self motion
      worldmotion: (numpy.ndarray) vector array of length N, with
        predictions of perceived world motion
  """
  # sanitize input a little: fill in the optional parameter entries with
  # their documented defaults ('threshold' and 'filterwindows' are required
  # and are NOT defaulted here)
  if not('FUN' in params.keys()):
    params['FUN'] = np.mean
  if not('integrate' in params.keys()):
    params['integrate'] = True
  if not('samplingrate' in params.keys()):
    params['samplingrate'] = 10

  # number of trials:
  ntrials = sensorydata['opticflow'].shape[0]

  # set up variables to collect output (one prediction per trial)
  selfmotion = np.empty(ntrials)
  worldmotion = np.empty(ntrials)

  # loop through trials, predicting each one independently
  for trialN in range(ntrials):
    # these are our sensory variables (inputs)
    vis = sensorydata['opticflow'][trialN, :]
    ves = sensorydata['vestibular'][trialN, :]

    # generate output predicted perception for this trial:
    selfmotion[trialN],\
      worldmotion[trialN] = my_perceived_motion(vis=vis, ves=ves, params=params)

  return {'selfmotion': selfmotion, 'worldmotion': worldmotion}


# here is a mock version of my_perceived motion.
# so you can test my_train_illusion_model() def my_perceived_motion(*args, **kwargs): return [np.nan, np.nan] # let's look at the preditions we generated for two sample trials (0,100) # we should get a 1x2 vector of self-motion prediction and another # for world-motion sensorydata={'opticflow': opticflow[[0, 100], :0], 'vestibular': vestibular[[0, 100], :0]} params={'threshold': 0.33, 'filterwindows': [100, 50]} my_train_illusion_model(sensorydata=sensorydata, params=params) # + [markdown] colab_type="text" # We've also completed the `my_perceived_motion()` function for you below. Follow this example to complete the template for `my_selfmotion()` and `my_worldmotion()`. Write out the inputs and outputs, and the steps required to calculate the outputs from the inputs. # # **Perceived motion function** # + cellView="both" colab={} colab_type="code" # Full perceived motion function def my_perceived_motion(vis, ves, params): """ Takes sensory data and parameters and returns predicted percepts Args: vis (numpy.ndarray) : 1xM array of optic flow velocity data ves (numpy.ndarray) : 1xM array of vestibular acceleration data params : (dict) dictionary with named entries: see my_train_illusion_model() for details Returns: [list of floats] : prediction for perceived self-motion based on vestibular data, and prediction for perceived world-motion based on perceived self-motion and visual data """ # estimate self motion based on only the vestibular data # pass on the parameters selfmotion = my_selfmotion(ves=ves, params=params) # estimate the world motion, based on the selfmotion and visual data # pass on the parameters as well worldmotion = my_worldmotion(vis=vis, selfmotion=selfmotion, params=params) return [selfmotion, worldmotion] # + [markdown] colab_type="text" # ## TD 6.1: Formulate purpose of the self motion function # # Now we plan out the purpose of one of the remaining functions. 
# **Only name input arguments, write help text and comments, _no code_.** The
# goal of this exercise is to make writing the code (in Micro-tutorial 7)
# much easier. Based on our work before the break, you should now be able to
# answer these questions for each function:
#
# * what (sensory) data is necessary?
# * what parameters does the function need, if any?
# * which operations will be performed on the input?
# * what is the output?
#
# The number of arguments is correct.

# + [markdown] colab_type="text"
# **Template calculate self motion**
#
# Name the _input arguments_, complete the _help text_, and add _comments_ in
# the function below to describe the inputs, the outputs, and operations
# using elements from the recap at the top of this notebook (or from
# micro-tutorials 3 and 4 in part 1), in order to plan out the function.
# Do not write any code.

# + cellView="both" colab={} colab_type="code"
# Student template: only the documentation is meant to be filled in here;
# `output` is intentionally left undefined until the function is implemented
# in Micro-tutorial 7.
def my_selfmotion(arg1, arg2):
  """
  Short description of the function

  Args:
    argument 1: explain the format and content of the first argument
    argument 2: explain the format and content of the second argument

  Returns:
    what output does the function generate?

  Any further description?
  """
  ##################################################
  # what operations do we perform on the input?
  # use the elements from micro-tutorials 3, 4, and 5
  # 1.
  # 2.
  # 3.
  # 4.
  # what output should this function produce?
  ##################################################
  return output

# + [markdown] cellView="both" colab={} colab_type="text"
# [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W1D2_ModelingPractice/solutions/W1D2_Tutorial2_Solution_06ea80b7.py)
#
#

# + [markdown] colab_type="text"
# **Template calculate world motion**
#
# We have drafted the help text and written comments in the function below
# that describe the inputs, the outputs, and operations we use to estimate
# world motion, based on the recap above.
# + cellView="both" colab={} colab_type="code" # World motion function def my_worldmotion(vis, selfmotion, params): """ Estimates world motion based on the visual signal, the estimate of Args: vis (numpy.ndarray): 1xM array with the optic flow signal selfmotion (float): estimate of self motion params (dict): dictionary with named entries: see my_train_illusion_model() for details Returns: (float): an estimate of world motion in m/s """ ################################################## # 1. running window function # 2. take final value # 3. subtract selfmotion from value # return final value ################################################## return output # + [markdown] colab_type="text" # --- # # Section 7: Model implementation # + cellView="form" colab={} colab_type="code" # @title Video 7: Implementation video = YouTubeVideo(id='DMSIt7t-LO8', width=854, height=480, fs=1) print(f"Video available at https://youtube.com/watch?v={video.id}") video # + [markdown] colab_type="text" # # **Goal:** We write the components of the model in actual code. # # For the operations we picked, there function ready to use: # * integration: `np.cumsum(data, axis=1)` (axis=1: per trial and over samples) # * filtering: `my_moving_window(data, window)` (window: int, default 3) # * take last `selfmotion` value as our estimate # * threshold: if (value > thr): <operation 1> else: <operation 2> # # # + [markdown] colab_type="text" # ## TD 7.1: Write code to estimate self motion # # Use the operations to finish writing the function that will calculate an estimate of self motion. Fill in the descriptive list of items with actual operations. Use the function for estimating world-motion below, which we've filled for you! 
# # + [markdown] colab_type="text" # ### Exercise 1: finish self motion function # + cellView="both" colab={} colab_type="code" def my_selfmotion(ves, params): """ Estimates self motion for one vestibular signal Args: ves (numpy.ndarray): 1xM array with a vestibular signal params (dict) : dictionary with named entries: see my_train_illusion_model() for details Returns: (float) : an estimate of self motion in m/s """ ################################################## ## TODO for students: fill in ... in code below # Fill out function and remove raise NotImplementedError("Student exercise: estimate my_selfmotion") ################################################## # 1. integrate vestibular signal: ves = np.cumsum(ves * (1 / params['samplingrate'])) # 2. running window function to accumulate evidence: selfmotion = ... # 3. take final value of self-motion vector as our estimate selfmotion = ... # 4. compare to threshold. Hint the threshodl is stored in # params['threshold'] # if selfmotion is higher than threshold: return value # if it's lower than threshold: return 0 if ...: selfmotion = ... return selfmotion # + [markdown] cellView="both" colab={} colab_type="text" # [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W1D2_ModelingPractice/solutions/W1D2_Tutorial2_Solution_4c0b8958.py) # # # + [markdown] colab_type="text" # ### Interactive Demo: Unit testing # # Testing if the functions you wrote do what they are supposed to do is important, and known as 'unit testing'. Here we will simplify this for the `my_selfmotion()` function, by allowing varying the threshold and window size with a slider, and seeing what the distribution of self-motion estimates looks like. # + cellView="form" colab={} colab_type="code" #@title #@markdown Make sure you execute this cell to enable the widget! 
def refresh(threshold=0, windowsize=100):
  # build a parameter set from the slider values and run my_selfmotion()
  # on every trial's vestibular signal
  params = {'samplingrate': 10, 'FUN': np.mean}
  params['filterwindows'] = [windowsize, 50]
  params['threshold'] = threshold
  selfmotion_estimates = np.empty(200)
  # get the estimates for each trial:
  for trial_number in range(200):
    ves = vestibular[trial_number, :]
    selfmotion_estimates[trial_number] = my_selfmotion(ves, params)
  # show the distribution of per-trial estimates
  plt.figure()
  plt.hist(selfmotion_estimates, bins=20)
  plt.xlabel('self-motion estimate')
  plt.ylabel('frequency')
  plt.show()


_ = widgets.interact(refresh, threshold=(-1, 2, .01), windowsize=(1, 100, 1))

# + [markdown] colab_type="text"
# **Estimate world motion**
#
# We have completed the `my_worldmotion()` function for you below.
#
#

# + cellView="both" colab={} colab_type="code"
# World motion function
def my_worldmotion(vis, selfmotion, params):
  """
  Estimates world motion from the optic flow signal and the estimate of
  self motion.

  Args:
    vis (numpy.ndarray): 1xM array with the optic flow signal
    selfmotion (float): estimate of self motion
    params (dict): dictionary with named entries:
      see my_train_illusion_model() for details

  Returns:
    (float): an estimate of world motion in m/s
  """
  # running average to smooth/accumulate sensory evidence
  visualmotion = my_moving_window(vis, window=params['filterwindows'][1], FUN=np.mean)

  # take final value
  visualmotion = visualmotion[-1]

  # NOTE(review): earlier drafts describe this step as "subtract selfmotion",
  # but the code adds it — presumably because optic flow is motion relative
  # to the observer, so adding self motion recovers world motion. Confirm
  # the sign convention against the data.
  worldmotion = visualmotion + selfmotion

  # return final value
  return worldmotion

# + [markdown] colab_type="text"
# ---
# # Section 8: Model completion

# + cellView="form" colab={} colab_type="code"
# @title Video 8: Completion
video = YouTubeVideo(id='EM-G8YYdrDg', width=854, height=480, fs=1)
print(f"Video available at https://youtube.com/watch?v={video.id}")
video

# + [markdown] colab_type="text"
#
# **Goal:** Make sure the model can speak to the hypothesis. Eliminate all
# the parameters that do not speak to the hypothesis.
#
# Now that we have a working model, we can keep improving it, but at some
# point we need to decide that it is finished.
# Once we have a model that displays the properties of a system we are
# interested in, it should be possible to say something about our hypothesis
# and question. Keeping the model simple makes it easier to understand the
# phenomenon and answer the research question. Here that means that our model
# should have illusory perception, and perhaps make similar judgments to
# those of the participants, but not much more.
#
# To test this, we will run the model, store the output and plot the models'
# perceived self motion over perceived world motion, like we did with the
# actual perceptual judgments (it even uses the same plotting function).
#
# ## TD 8.1: See if the model produces illusions

# + cellView="form" colab={} colab_type="code"
# @markdown Run to plot model predictions of motion estimates
# prepare to run the model again:
data = {'opticflow': opticflow, 'vestibular': vestibular}
params = {'threshold': 0.6, 'filterwindows': [100, 50], 'FUN': np.mean}
modelpredictions = my_train_illusion_model(sensorydata=data, params=params)

# process the data to allow plotting: copy the trial-info columns 0-2 from
# the judgments array and put the model estimates in columns 3 (self motion)
# and 4 (world motion)
predictions = np.zeros(judgments.shape)
predictions[:, 0:3] = judgments[:, 0:3]
predictions[:, 3] = modelpredictions['selfmotion']
# world motion is sign-flipped here — presumably to match the judgment
# convention; confirm against the data loader
predictions[:, 4] = modelpredictions['worldmotion'] * -1
my_plot_percepts(datasets={'predictions': predictions}, plotconditions=True)

# + [markdown] colab_type="text"
# **Questions:**
#
# * How does the distribution of data points compare to the plot in TD 1.2
#   or in TD 7.1?
# * Did you expect to see this?
# * Where do the model's predicted judgments for each of the two conditions
#   fall?
# * How does this compare to the behavioral data?
#
# However, the main observation should be that **there are illusions**: the
# blue and red data points are mixed in each of the two clusters of data
# points. This means the model can help us understand the phenomenon.
# + [markdown] colab_type="text"
# ---
# # Section 9: Model evaluation

# + cellView="form" colab={} colab_type="code"
# @title Video 9: Evaluation
video = YouTubeVideo(id='bWLFyobm4Rk', width=854, height=480, fs=1)
print(f"Video available at https://youtube.com/watch?v={video.id}")
video

# + [markdown] colab_type="text"
#
# **Goal:** Once we have finished the model, we need a description of how
# good it is. The question and goals we set in micro-tutorial 1 and 4 help
# here. There are multiple ways to evaluate a model. Aside from the obvious
# fact that we want to get insight into the phenomenon that is not directly
# accessible without the model, we always want to quantify how well the
# model agrees with the data.
#
# **Quantify model quality with $R^2$**
#
# Let's look at how well our model matches the actual judgment data.

# + cellView="form" colab={} colab_type="code"
# @markdown Run to plot predictions over data
my_plot_predictions_data(judgments, predictions)

# + [markdown] colab_type="text"
# When model predictions are correct, the red points in the figure above
# should lie along the identity line (a dotted black line here). Points off
# the identity line represent model prediction errors. While in each plot we
# see two clusters of dots that are fairly close to the identity line, there
# are also two clusters that are not. For the trials that those points
# represent, the model has an illusion while the participants don't or vice
# versa.
#
# We will use a straightforward, quantitative measure of how good the model
# is: $R^2$ (pronounced: "R-squared"), which can take values between 0 and
# 1, and expresses how much variance is explained by the relationship
# between two variables (here the model's predictions and the actual
# judgments). It is also called
# [coefficient of determination](https://en.wikipedia.org/wiki/Coefficient_of_determination),
# and is calculated here as the square of the correlation coefficient (r or
# $\rho$). Just run the chunk below:

# + cellView="form" colab={} colab_type="code"
# @markdown Run to calculate R^2
# pool both motion columns (3: self motion, 4: world motion); columns 1-2
# hold the condition values, taken as absolute magnitudes
conditions = np.concatenate((np.abs(judgments[:, 1]), np.abs(judgments[:, 2])))
veljudgmnt = np.concatenate((judgments[:, 3], judgments[:, 4]))
velpredict = np.concatenate((predictions[:, 3], predictions[:, 4]))

# how much of the judgments the experimental conditions alone explain
slope, intercept, r_value,\
  p_value, std_err = stats.linregress(conditions, veljudgmnt)
print(f"conditions -> judgments R^2: {r_value ** 2:0.3f}")

# how much of the judgments the model predictions explain
slope, intercept, r_value,\
  p_value, std_err = stats.linregress(veljudgmnt, velpredict)
print(f"predictions -> judgments R^2: {r_value ** 2:0.3f}")

# + [markdown] colab_type="text"
# These $R^2$s express how well the experimental conditions explain the
# participants judgments and how well the models predicted judgments explain
# the participants judgments.
#
# You will learn much more about model fitting, quantitative model
# evaluation and model comparison tomorrow!
#
# Perhaps the $R^2$ values don't seem very impressive, but the judgments
# produced by the participants are explained by the model's predictions
# better than by the actual conditions. In other words: in a certain
# percentage of cases the model tends to have the same illusions as the
# participants.

# + [markdown] colab_type="text"
# ## TD 9.1 Varying the threshold parameter to improve the model
#
# In the code below, see if you can find a better value for the threshold
# parameter, to reduce errors in the models' predictions.
#
# **Testing thresholds**

# + [markdown] colab_type="text"
#
# ### Interactive Demo: optimizing the model

# + cellView="form" colab={} colab_type="code"
#@title
#@markdown Make sure you execute this cell to enable the widget!
data = {'opticflow': opticflow, 'vestibular': vestibular}


def refresh(threshold=0, windowsize=100):
  # set parameters according to sliders:
  params = {'samplingrate': 10, 'FUN': np.mean}
  params['filterwindows'] = [windowsize, 50]
  params['threshold'] = threshold

  modelpredictions = my_train_illusion_model(sensorydata=data, params=params)

  # assemble predictions in the same layout as the judgments array
  # (trial info in columns 0-2, self motion in 3, world motion in 4)
  predictions = np.zeros(judgments.shape)
  predictions[:, 0:3] = judgments[:, 0:3]
  predictions[:, 3] = modelpredictions['selfmotion']
  predictions[:, 4] = modelpredictions['worldmotion'] * -1

  # plot the predictions:
  my_plot_predictions_data(judgments, predictions)

  # calculate R2 between predicted and actual velocity judgments
  veljudgmnt = np.concatenate((judgments[:, 3], judgments[:, 4]))
  velpredict = np.concatenate((predictions[:, 3], predictions[:, 4]))
  slope, intercept, r_value,\
    p_value, std_err = stats.linregress(veljudgmnt, velpredict)
  print(f"predictions -> judgments R^2: {r_value ** 2:0.3f}")


_ = widgets.interact(refresh, threshold=(-1, 2, .01), windowsize=(1, 100, 1))

# + [markdown] colab_type="text"
# Varying the parameters this way, allows you to increase the models'
# performance in predicting the actual data as measured by $R^2$. This is
# called model fitting, and will be done better in the coming weeks.

# + [markdown] colab_type="text"
# ## TD 9.2: Credit assignment of self motion
#
# When we look at the figure in **TD 8.1**, we can see a cluster does seem
# very close to (1,0), just like in the actual data. The cluster of points
# at (1,0) are from the case where we conclude there is no self motion, and
# then set the self motion to 0. That value of 0 removes a lot of noise from
# the world-motion estimates, and all noise from the self-motion estimate.
# In the other case, where there is self motion, we still have a lot of
# noise (see also micro-tutorial 4).
#
# Let's change our `my_selfmotion()` function to return a self motion of 1
# when the vestibular signal indicates we are above threshold, and 0 when we
# are below threshold. Edit the function here.
# + [markdown] colab_type="text"
# ### Exercise 2: function for credit assignment of self motion

# + cellView="both" colab={} colab_type="code"
def my_selfmotion(ves, params):
  """
  Estimates self motion for one vestibular signal

  Args:
    ves (numpy.ndarray): 1xM array with a vestibular signal
    params (dict): dictionary with named entries:
      see my_train_illusion_model() for details

  Returns:
    (float): an estimate of self motion in m/s
  """
  # integrate signal (samples are 1/samplingrate s apart):
  ves = np.cumsum(ves * (1 / params['samplingrate']))

  # use running window to accumulate evidence:
  selfmotion = my_moving_window(ves, window=params['filterwindows'][0], FUN=params['FUN'])

  # take the final value as our estimate:
  selfmotion = selfmotion[-1]

  ###########################################################################
  # Exercise: Complete credit assignment. Remove the next line to test your
  # function
  raise NotImplementedError("Modify with credit assignment")
  ###########################################################################

  # compare to threshold, set to 0 if lower
  if selfmotion < params['threshold']:
    selfmotion = 0
  else:
    selfmotion = ...

  return selfmotion


# Use the updated function to run the model and plot the data
# Uncomment below to test your function
data = {'opticflow': opticflow, 'vestibular': vestibular}
params = {'threshold': 0.33, 'filterwindows': [100, 50], 'FUN': np.mean}
# modelpredictions = my_train_illusion_model(sensorydata=data, params=params)
predictions = np.zeros(judgments.shape)
predictions[:, 0:3] = judgments[:, 0:3]
predictions[:, 3] = modelpredictions['selfmotion']
predictions[:, 4] = modelpredictions['worldmotion'] * -1
# my_plot_percepts(datasets={'predictions': predictions}, plotconditions=False)

# + [markdown] colab={} colab_type="text"
# [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W1D2_ModelingPractice/solutions/W1D2_Tutorial2_Solution_97a9e346.py)
#
# *Example output:*
#
# <img alt='Solution hint' align='left' width=560 height=560 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W1D2_ModelingPractice/static/W1D2_Tutorial2_Solution_97a9e346_0.png>
#
#

# + [markdown] colab_type="text"
# That looks much better, and closer to the actual data. Let's see if the
# $R^2$ values have improved. Use the optimal values for the threshold and
# window size that you found previously.
#
# ### Interactive Demo: evaluating the model

# + cellView="form" colab={} colab_type="code"
#@title
#@markdown Make sure you execute this cell to enable the widget!
data = {'opticflow': opticflow, 'vestibular': vestibular}


def refresh(threshold=0, windowsize=100):
  # set parameters according to sliders:
  params = {'samplingrate': 10, 'FUN': np.mean}
  params['filterwindows'] = [windowsize, 50]
  params['threshold'] = threshold

  modelpredictions = my_train_illusion_model(sensorydata=data, params=params)

  # assemble predictions in the same layout as the judgments array
  predictions = np.zeros(judgments.shape)
  predictions[:, 0:3] = judgments[:, 0:3]
  predictions[:, 3] = modelpredictions['selfmotion']
  predictions[:, 4] = modelpredictions['worldmotion'] * -1

  # plot the predictions:
  my_plot_predictions_data(judgments, predictions)

  # calculate R2 between predicted and actual velocity judgments
  veljudgmnt = np.concatenate((judgments[:, 3], judgments[:, 4]))
  velpredict = np.concatenate((predictions[:, 3], predictions[:, 4]))
  slope, intercept, r_value,\
    p_value, std_err = stats.linregress(veljudgmnt, velpredict)
  # NOTE(review): this label prints "R2" while the equivalent earlier cells
  # print "R^2" — consider unifying the label text
  print(f"predictions -> judgments R2: {r_value ** 2:0.3f}")


_ = widgets.interact(refresh, threshold=(-1, 2, .01), windowsize=(1, 100, 1))

# + [markdown] colab_type="text"
# While the model still predicts velocity judgments better than the
# conditions (i.e. the model predicts illusions in somewhat similar cases),
# the $R^2$ values are a little worse than those of the simpler model.
# What's really going on is that the same set of points that were model
# prediction errors in the previous model are also errors here. All we have
# done is reduce the spread.

# + [markdown] colab_type="text"
# **Interpret the model's meaning**
#
# Here's what you should have learned from modeling the train illusion:
#
# 1. A noisy, vestibular, acceleration signal can give rise to illusory
#    motion.
# 2. However, disambiguating the optic flow by adding the vestibular signal
#    simply adds a lot of noise. This is not a plausible thing for the brain
#    to do.
# 3. Our other hypothesis - credit assignment - is more qualitatively
#    correct, but our simulations were not able to match the frequency of
#    the illusion on a trial-by-trial basis.
# # We decided that for now we have learned enough, so it's time to write it up. # # + [markdown] colab_type="text" # --- # # Section 10: Model publication! # + cellView="form" colab={} colab_type="code" # @title Video 10: Publication video = YouTubeVideo(id='zm8x7oegN6Q', width=854, height=480, fs=1) print(f"Video available at https://youtube.com/watch?v={video.id}") video # + [markdown] colab_type="text" # # **Goal:** In order for our model to impact the field, it needs to be accepted by our peers, and order for that to happen it matters how the model is published. # + [markdown] colab_type="text" # ## TD 10.1: Write a summary of the project # # Here we will write up our model, by answering the following questions: # * **What is the phenomena**? Here summarize the part of the phenomena which your model addresses. # * **What is the key scientific question?**: Clearly articulate the question which your model tries to answer. # * **What was our hypothesis?**: Explain the key relationships which we relied on to simulate the phenomena. # * **How did your model work?** Give an overview of the model, it's main components, and how the model works. ''Here we ... '' # * **What did we find? Did the model work?** Explain the key outcomes of your model evaluation. # * **What can we conclude?** Conclude as much as you can _with reference to the hypothesis_, within the limits of the model. # * **What did you learn? What is left to be learned?** Briefly argue the plausibility of the approach and what you think is _essential_ that may have been left out. # # ### Guidance for the future # There are good guidelines for structuring and writing an effective paper (e.g., [<NAME>, 2017](https://doi.org/10.1371/journal.pcbi.1005619)), all of which apply to papers about models. There are some extra considerations when publishing a model. 
# In general, you should explain each of the steps in the paper:
#
# **Introduction:** Steps 1 & 2 (maybe 3)
#
# **Methods:** Steps 3-7, 9
#
# **Results:** Steps 8 & 9, going back to 1, 2 & 4
#
# In addition, you should provide a visualization of the model, and upload
# the code implementing the model and the data it was trained and tested on
# to a repository (e.g. GitHub and OSF).
#
# The audience for all of this should be experimentalists, as they are the
# ones who can test predictions made by your model and collect new data.
# This way your models can impact future experiments, and that future data
# can then be modeled (see modeling process schematic below). Remember your
# audience - it is _always_ hard to clearly convey the main points of your
# work to others, especially if your audience doesn't necessarily create
# computational models themselves.
#
# ![how-to-model process from Blohm et al 2019](https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W1D2_ModelingPractice/static/NMA-W1D2-fig06.png)
#
# ### Suggestion
#
# For every modeling project, a very good exercise in this is to _**first**_
# write a short, 100-word abstract of the project plan and expected impact,
# like the summary you wrote. This forces focussing on the main points:
# describing the relevance, question, model, answer and what it all means
# very succinctly. This allows you to decide to do this project or not
# **before you commit time writing code for no good purpose**. Notice that
# this is really what we've walked you through carefully in this tutorial! :)
#

# + [markdown] colab_type="text"
# ---
# # Summary
# Congratulations! You have finished Day 2 of NMA! In this tutorial, we
# worked through the remaining steps of the modeling process.
#
# - We identified the key components of the model, and examined how they work together (step 6)
# - We implemented the model (step 7), and completed it (step 8)
# - We tested and evaluated our model (step 9), and finally
# - We learned how to publish our model in order to increase its visibility amongst our peers
#
# ## Post-script
#
# Note that the model we built here was extremely simple and used artificial data on purpose. It allowed us to go through all the steps of building a model, and hopefully you noticed that it is not always a linear process, you will go back to different steps if you hit a roadblock somewhere.
#
# However, if you're interested in how to actually approach modeling a similar phenomenon in a probabilistic way, we encourage you to read the paper by [Dokka et. al., 2019](https://doi.org/10.1073/pnas.1820373116), where the authors model how judgments of heading direction are influenced by objects that are also moving.

# + [markdown] colab_type="text"
# ---
# # Reading
#
# <NAME>, <NAME>, <NAME> (2020). _A How-to-Model Guide for Neuroscience_ eNeuro, 7(1). https://doi.org/10.1523/ENEURO.0352-19.2019
#
# <NAME>, <NAME>, <NAME>, DeAngelis GC, Angelaki DE (2019). _Causal inference accounts for heading perception in the presence of object motion._ PNAS, 116(18):9060-9065. https://doi.org/10.1073/pnas.1820373116
#
# <NAME>, DeAngelis GC, <NAME>, Angelaki DE, Pouget A (2014). _Optimal Multisensory Decision-Making in a Reaction-Time Task._ eLife, 3:e03005. https://doi.org/10.7554/eLife.03005
#
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME> (2014). _Direction detection thresholds of passive self-motion in artistic gymnasts._ Exp Brain Res, 232:1249–1258. https://doi.org/10.1007/s00221-014-3841-0
#
# <NAME>, <NAME> (2017). _Ten simple rules for structuring papers._ PLOS Comput Biol 13(9): e1005619. https://doi.org/10.1371/journal.pcbi.1005619
#
# <NAME>, <NAME> (2012).
_Stimulus Meanings Alter Illusory Self-Motion (Vection) - Experimental Examination of the Train Illusion._ Seeing Perceiving, 25(6):631-45. https://doi.org/10.1163/18784763-00002394 #
tutorials/W1D2_ModelingPractice/student/W1D2_Tutorial2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Gender classification from voice features: load the dataset, encode the
# label column, inspect one feature, then train and evaluate an
# L2-regularized logistic regression model.
import pandas as pd
import matplotlib.pyplot as plt

# load and inspect the data
data = pd.read_csv('./Dataset/gender_voice_weka_dataset.csv')

data.head()

data.shape

data.describe()

# encode the string labels as integers so sklearn can use them as targets
from sklearn.preprocessing import LabelEncoder
labelEncoder = LabelEncoder()
data['label'] = labelEncoder.fit_transform(data['label'].astype(str))
data.head()

# visualize one feature (mean frequency) split by class
data.boxplot(by='label', column=['meanfreq'], grid=False, figsize=(10, 8))
plt.show()

# hold out 20% of the rows for testing (fixed seed for reproducibility)
from sklearn.model_selection import train_test_split
X = data.drop('label', axis=1)
Y = data['label']
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=0)

x_train.shape, y_train.shape

x_test.shape, y_test.shape

# fit an L2-regularized logistic regression
from sklearn.linear_model import LogisticRegression
logistic_model = LogisticRegression(penalty='l2', solver='liblinear')
logistic_model.fit(x_train, y_train)
y_pred = logistic_model.predict(x_test)

# evaluate: confusion matrix, training accuracy, per-class test report
from sklearn.metrics import confusion_matrix
# Keep the result in its own variable: assigning it to the name
# `confusion_matrix` would shadow the imported function and make this
# cell fail with a TypeError when re-run in a notebook session.
cm = confusion_matrix(y_test, y_pred)
print(cm)

print('Training score : ', logistic_model.score(x_train, y_train))

from sklearn.metrics import classification_report
print('Testing report:')
print(classification_report(y_test, y_pred))
Logistic_Regression.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] pycharm={"name": "#%% md\n"} # # Model Verification # + [markdown] pycharm={"name": "#%% md\n"} # This notebook is used to verify that the model is encoded correctly according to the model conceptualisation. The verification procedures used in this notebook are taken from the [Agent based modelling for Socio-technical systems](https://www.ieeexplore.ieee.org/abstract/document/5874914/?casa_token=2LsDXcW6KKYAAAAA:<KEY>) # + [markdown] pycharm={"name": "#%% md\n"} # ## 1. Tracing the agent behaviour # - # Tracing the agent behavior was integrated into the encoding stage of the modeling. While model implementation, the agent behavior of each agent group was traced and verified with the conceptualization. The implementation is done in python using tracking variables and text prompts for agent interaction. # # Example: # * Printing 'agent created' while agent initialisation # * Printing 'asset initialised' for asset initialisation # * Printing 'demand updated' when ToD is implemented and demand is revised # # These tracing flags and call-outs were used at the initial stage and were eventually removed after successful implementation of the model. # + [markdown] pycharm={"name": "#%% md\n"} # ## 2.Single agent testing # - # The single-agent testing consists of sanity checks to ensure all agent methods are functional and behave as per conceptualization. 
# + pycharm={"name": "#%%\n"} import datetime import matplotlib.pyplot as plt from model.model_code import * # + pycharm={"name": "#%%\n"} model = EnergyCommunity() # + pycharm={"name": "#%%\n"} agent = Residential(unique_id=model.next_id(), member_name='hh1_consumption [kWh]', member_type=MemberType.CONSUMER, demand_flexibility=None, asset_list=None, model=model) # + pycharm={"name": "#%%\n"} fig, ax = plt.subplots(figsize=(10, 5)) for i in np.arange(3): demand = agent.get_demand_schedule() date = datetime.datetime.strptime(agent.date, '%Y-%m-%d') date += datetime.timedelta(days=1) agent.date = date.strftime('%Y-%m-%d') ax.plot(demand.to_list(), label=agent.date) plt.xlabel('Time blocks of 15 minutes') plt.ylabel('Electricity Demand kWh') plt.title('Energy demand of a residential agent') leg = ax.legend() plt.tight_layout() plt.savefig('figures_verification/single_agent_testing1.png', dpi=300) # + [markdown] pycharm={"name": "#%% md\n"} # As it can be seen above, the residential agent is successfully initialised. The above plot shows electricity demand of a Residential agent for 3 days. # + [markdown] pycharm={"name": "#%% md\n"} # A similar verification is carried out for the non-residential agent. 
# + pycharm={"name": "#%%\n"}
# Creating the agent
# Same sanity check as above, but for a non-residential (school) consumer.
agent = NonResidential(unique_id=model.next_id(),
                       member_name='school_mbo',
                       member_type=MemberType.CONSUMER,
                       demand_flexibility=None,
                       asset_list=None,
                       model=model)

# + pycharm={"name": "#%%\n"}
# Plot the school's demand schedule for three days (two days apart).
fig, ax = plt.subplots(figsize=(10, 5))
for _ in range(3):
    # As in the residential test: keep the date the schedule belongs to for
    # the legend BEFORE advancing the agent's clock.
    demand = agent.get_demand_schedule()
    label_date = agent.date
    date = datetime.datetime.strptime(agent.date, '%Y-%m-%d')
    date += datetime.timedelta(days=2)
    agent.date = date.strftime('%Y-%m-%d')
    ax.plot(demand.to_list(), label=label_date)
plt.xlabel('Time blocks of 15 minutes')
# NOTE(review): the axis label and title were swapped in the original cell
# (compare the residential cell above); fixed here.
plt.ylabel('Electricity Demand kWh')
plt.title('Energy demand of a non-residential agent (School)')
leg = ax.legend()
plt.tight_layout()
plt.savefig('figures_verification/single_agent_testing2.png', dpi=300)

# + [markdown] pycharm={"name": "#%% md\n"}
# As it can be seen above, the non-residential agent i.e. school is successfully initialised. The above plot shows electricity demand of the agent for 3 days which represents the input demand in the model.
#

# + [markdown] pycharm={"name": "#%% md\n"}
# ## 3.
Interaction testing in a minimal model
# -

# The interaction testing is done by checking interaction between community members and assets

# + pycharm={"name": "#%%\n"}
# Create an agent with asset
# A prosumer school agent owning one solar asset. Capacity/efficiency/price
# units are whatever the Asset/Solar classes define — TODO confirm in
# model.model_code.
asset = [{'agent_type': Asset, 'asset_type': Solar, 'capacity': 10, 'efficiency': 0.20, 'price': 0.15}]
agent = NonResidential(unique_id=model.next_id(), member_name='school_mbo', member_type=MemberType.PROSUMER, demand_flexibility=None, asset_list=asset, model=model)

# + pycharm={"name": "#%%\n"}
# The member agent instantiated its asset agents; grab the (only) solar asset.
asset = agent.assets[0]

# + pycharm={"name": "#%%\n"}
# Plot demand (top) and generation (bottom) for three consecutive days.
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(10, 5), sharex=True)
for i in np.arange(3):
    # Advance the member's date by one day and keep the asset's clock in sync
    # before fetching the schedules, so demand and generation refer to the
    # same (labelled) day.
    date = datetime.datetime.strptime(agent.date, '%Y-%m-%d')
    date += datetime.timedelta(days=1)
    agent.date = date.strftime('%Y-%m-%d')
    asset.date = agent.date
    demand = agent.get_demand_schedule()
    generation = asset.generate_supply_schedule()
    fig.suptitle('Generation and Supply curve for agent')
    ax1.plot(demand.to_list(), label=agent.date)
    ax2.plot(generation.to_list(), label=agent.date)
ax1.set_ylabel('Electricity Demand kWh')
ax2.set_ylabel('Electricity Generation kWh')
ax2.set_xlabel('Time blocks of 15 minutes')
leg = ax1.legend()
plt.tight_layout()
plt.savefig('figures_verification/interaction_testing.png', dpi=300)

# + [markdown] pycharm={"name": "#%% md\n"}
# As shown in the plots above, member agent initialised an asset agent. Plots above show the demand and generation for an agent for three different days.

# + [markdown] pycharm={"name": "#%% md\n"}
# ## 4. Multi-agent testing

# + [markdown] pycharm={"name": "#%% md\n"}
# In the multi-agent testing, a minimalistic model is initialised to verify the agent interaction.
# + pycharm={"name": "#%%\n"} # initialise model model = EnergyCommunity() # + pycharm={"name": "#%%\n"} # model.all_agents[NonResidential] = model.all_agents[NonResidential][1:] agent1 = model.all_agents[NonResidential][0] agent2 = model.all_agents[NonResidential][1] agent3 = model.all_agents[NonResidential][2] # + pycharm={"name": "#%%\n"} model.step() # + pycharm={"name": "#%%\n"} fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(10, 10), sharex=True, sharey=False) for i in np.arange(1): plt.title('Multi-agent testing') ax1.plot(agent2.demand_schedule.to_list(), label='scheduled demand') ax1.plot(agent2.demand_realized.to_list(), label='realised demand') ax1.legend() ax1.set_title('agent 1') ax2.plot(agent3.demand_schedule.to_list(), label='scheduled demand') ax2.plot(agent3.demand_realized.to_list(), label='realised demand') ax2.legend() ax2.set_title('agent 2') ax2.set_xlabel('Time blocks of 15 minutes') plt.savefig('figures_verification/multi_agent_testing.png', dpi=300) # + [markdown] pycharm={"name": "#%% md\n"} # Above graph shows demand schedule for two agents. Scheduled demand is the energy consumption by the agent whereas realised demand is the actual demand for the agent after reducing the captive generation. It can be seen that agent 1 has a generation asset whereas agent 2 does not have any generation asset. Thus, this can be concluded that the agents are interacting and behaving as per the conceptualisation in a minimalistic model setup.
evidence_files/model_verification.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="4DXwxt9r1bx_" # ## A first (actually second) encounter with Keras # # `Keras` is a high-level Python library for **deep learning**: Keras is an API wrapper for `TensorFlow`, which is the **backend engine** that performs low-level computations (e.g. tensor products, convolutions, etc.). # # [Don't worry if there are many terms that you don't fully understand now: we will go over these concepts repeatedly during the course, with increasing levels of detail] # # Keras allows easy and fast deployment of neural networks models: the code chunk below imports `Keras` functions that define a specific neural network architecture: # # - `Sequential()`: a network made up of a sequence of successive layers # - `Dense()`: fully-connected (dense) layers # - the type of **activation functions** to be used in each layer # - the number of units in each layer # # + id="-uUdGW-D1ayA" from keras.models import Sequential from keras.layers import Dense, Activation,Conv2D,MaxPooling2D,Flatten,Dropout # + id="iV5ShyTb98uU" model = Sequential() model.add(Dense(64, activation='relu', input_dim=50)) #input shape of 50 model.add(Dense(28, activation='relu')) #input shape of 50 model.add(Dense(10, activation='softmax')) # + [markdown] id="6wsTLk6GEqNR" # ## Model definition # # As we saw above, the first step is to choose the neural network architecture and define the deep learning model. # # We first use the `Sequential()` function to select the Keras API for the construction of deep learning models. Sequential models are a stack of layers, each with one input and one output tensor. 
# # The function `add()` will then allow us to add subsequent layers to our deep learning model, by specifying the type of layer # + id="6AkjMRyaNK8Y" model = Sequential() # + [markdown] id="ZuZ5apaYPhCM" # ### Dense layer # # The first very common type of layer is the **Fully Connected** (dense) layer: we can specify the number of units in the layer and the activation function to use. # + id="_81JIwrxPgQX" model.add(Dense(units = 32, activation='relu', input_shape=(12,12,3))) ##input shape: tensor size e.g. 12 x 12 pixels x 3 channels (RGB) # + [markdown] id="4vjeG4RIPaK8" # ### Convolutional layer # # Another common type of layers is the **convolutional layer** where we specify the number of filters, the size of the filter (kernel) and the activation function. # # + id="CFQbEniLPYS7" model.add(Conv2D(filters = 8, kernel_size = (3, 3), activation='relu')) # + [markdown] id="mraG7PM1S0Ea" # ### MaxPooling layer # # MaxPooling layers downsample the input representation by taking the maximum value over the window defined by pool size for each dimension along the features axis ($\rightarrow$ dimensionality reduction) # # + id="pkJ0AGASSzHZ" model.add(MaxPooling2D(pool_size=(2, 2))) # + [markdown] id="gxsTd-EyVy24" # ### Dropout layer # # The **dropout layer** randomly sets input units to 0 with a specified frequency (`rate` argument) at each step during training, which helps prevent overfitting. 
# + id="d_Vtwu-3VcwA" model.add(Dropout(rate = 0.5)) # + [markdown] id="hF2wo-KpeOhp" # We can get an overview of the defined deep learning model by using the `model.summary()` function: # + id="JJrocEIkZBPU" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1631711831391, "user_tz": -120, "elapsed": 197, "user": {"displayName": "<NAME>", "photoUrl": "<KEY>", "userId": "09590256246541826137"}} outputId="2dc370d4-27ff-42f0-e333-f47275a93672" print(model.summary()) # + [markdown] id="u1jau0_BeY5B" # We see that we have a large number of parameters to train in this model: # # - 4 parameters (3 channels + bias term) per 32 units in the Dense layer $\rightarrow$ 128 parameters # - for convolutional layer number of parameter is computed as: # - number_parameters = out_channels * (in_channels * kernel_h * kernel_w + 1) # - the "+ 1" is for bias term # - `out_channels` = number of units in current layers = 8 # - `in_channels` = number of units in previous layer = 32 # - `kernel_h` = `kernel_w` = 3 # - 8 * (32 * 3 * 3 + 1) = 2312 # # + [markdown] id="CmhJQx3OXs6d" # ## Compiling the model # # Once the model architecture has been defined, you go on compiling the model by setting up relevant configurations: **loss function**, **optimizer**, etc. 
# + id="4h2t5ivJEI6J" model.compile(loss='mean_squared_error', optimizer='adam') # + [markdown] id="BoSoa-PphdjJ" # The next steps would be to: # # - fit the built and compiled model # - evaluate the model performance # # You can find the Keras code for these two steps below: since we currently haven't loaded any data to work on, we commented out these lines of code, for us to see and discuss the syntax # + id="ObGoYi48hqMa" # model.fit(X_train, X_train, batch_size=32, epochs=10, validation_data=(x_val, y_val)) # score = model.evaluate(x_test, y_test, batch_size=32) # + [markdown] id="3y4cx-02hm6T" # ## An example with simple linear regression # # We now use Keras to fit a simple linear regression model: # # $$ # y = \mu + \beta x + e # $$ # # we start by generating data for the feature *x* and the target variable *y* (continuous) # + id="MF_wy4MciQHA" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1631711978529, "user_tz": -120, "elapsed": 247, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg7jR0XlLn_xe6aJQRo3t-i8pd96QODPh7zNo_l=s64", "userId": "09590256246541826137"}} outputId="7443cefd-71c4-44a0-fb0e-86653a0eef52" import numpy as np import matplotlib.pyplot as plt x = np.linspace(1,2,200) ## generate 200 samples with values between 1 and 2 print(x.shape) y = x*4 + np.random.randn(*x.shape) * 0.3 ## *x.shape is the number of random numbers to generate (* because x.shape is a tuple) # + [markdown] id="WwVguveFkz4A" # We now build the neural networks model by specifying one Dense layer with one single unit. The activation function is `linear` (identity function). 
the input dimension is 1 (one feature)

# + id="2IIPVkGMkuua"
# One unit, one input, identity activation: exactly y = mu + beta * x.
model = Sequential()
model.add(Dense(1, input_dim=1, activation='linear'))

# + [markdown] id="8NTEzG-SlTyi"
# We then compile the model, selecting *stochastic gradient descent* as optimizer and *mean square error* as loss function

# + id="ZarF5DOTkxKU"
model.compile(optimizer='sgd', loss='mse', metrics=['mse'])

# + id="0ZxIjWMdlY63"
print(model.summary())

# + [markdown] id="_0LqhG-UlbsP"
# We here have two parameters to train: the intercept $\mu$ and the slope $\beta$

# + [markdown] id="-08cpPgbmrla"
# We are now ready to fit our simple linear regression model with Keras. We define epochs=30 (30 iterations of optimization)

# + id="SBYLRGn3lsNX"
# shuffle=False keeps the sample order fixed across epochs.
history = model.fit(x,y, epochs=30, shuffle=False)

# + [markdown] id="XjqiaLe0pbEB"
# Finally, we are ready to evaluate the fitted deep learning model.
# First, we look at predicted vs observed values: using the fitted model, we predict values for our 200 generated samples

# + id="9JCA-6JgoV8R"
predict = model.predict(x)
y_hat = predict.reshape(200,)  # flatten the (200, 1) prediction matrix to a vector
print(y.shape)
print(y_hat.shape)
np.corrcoef(y,y_hat)

# + [markdown] id="twrvpVMRzgw_"
# ### Question
#
# Why do you think that we have such high predictive accuracy?

# + id="K1JchT4zhlFE"
plt.scatter(y_hat, y)
plt.show()

# + [markdown] id="_Seiszuwq3A5"
# We can plot the decay of the loss over epochs, using results saved in the `history` object

# + id="GvtM5m56mX3n"
plt.plot(history.history['loss'])

# + [markdown] id="RDdf-AQ5q-cT"
# We can also retrieve the weights (coefficients) estimated in the final layer:

# + id="MDY2BIpgoQdL"
# Returns [kernel, bias] — the fitted slope and intercept of the regression.
model.get_weights()

# + [markdown] id="huMD0ZmkoygY"
# ## Exercise 2.1 [optional]: do-it-together
#
# Using the `scikitlearn` dataset `diabetes`, you should fit a simple linear regression model with **Keras**:
#
# - select one feature from the data (age, sex, bmi etc.)
# - build your Keras model
# - compile the model
# - fit the model
# - evaluate the model
#
# We have prepared the data loading step for you:

# + id="CypetAKGrUwq"
import pandas as pd
import sklearn.datasets

diabetes = sklearn.datasets.load_diabetes()
diabetes.data = pd.DataFrame(diabetes.data, columns=diabetes.feature_names) #converting numpy array -> pandas DataFrame
diabetes.target = pd.Series(diabetes.target)

# + id="bnUXPvVUryGn"
print(diabetes.DESCR)

# + id="bi7iWJJTrqLd"
diabetes.data ## features

# + id="L2OfxaI6ru_5"
diabetes.target ## quantitative measure of disease progression

# + [markdown] id="lYSoICBFswA7"
# Now it's our turn to play a bit with basic Keras: let's enjoy it!
lab_day1/day1_code02 keras basics [EXERCISE].ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # データサイエンス100本ノック(構造化データ加工編) - R

# ## はじめに
# - 初めに以下のセルを実行してください
# - 必要なライブラリのインポートとデータベース(PostgreSQL)からのデータ読み込みを行います
# - 利用が想定されるライブラリは以下セルでインポートしています
# - その他利用したいライブラリがあれば適宜インストールしてください(!マークに続けてOSコマンドを入力することで、任意のubuntu Linuxコマンドが入力可能)
# - 名前、住所等はダミーデータであり、実在するものではありません

# +
# Load the R libraries used throughout the exercises.
require('RPostgreSQL')
require('tidyr')
require('dplyr')
require('stringr')
require('caret')
require('lubridate')
require('rsample')
require('recipes')

# Connection parameters come from PG_* environment variables; host 'db' is
# presumably the docker-compose service name — confirm against the compose file.
host <- 'db'
port <- Sys.getenv()["PG_PORT"]
dbname <- Sys.getenv()["PG_DATABASE"]
user <- Sys.getenv()["PG_USER"]
password <- Sys.getenv()["PG_PASSWORD"]
con <- dbConnect(PostgreSQL(), host=host, port=port, dbname=dbname, user=user, password=password)

# Read each source table into its own data frame for the exercises below.
df_customer <- dbGetQuery(con,"SELECT * FROM customer")
df_category <- dbGetQuery(con,"SELECT * FROM category")
df_product <- dbGetQuery(con,"SELECT * FROM product")
df_receipt <- dbGetQuery(con,"SELECT * FROM receipt")
df_store <- dbGetQuery(con,"SELECT * FROM store")
df_geocode <- dbGetQuery(con,"SELECT * FROM geocode")
# -

# # 演習問題

# ---
# > R-001: レシート明細データフレーム(df_receipt)から全項目の先頭10件を表示し、どのようなデータを保有しているか目視で確認せよ。

# ---
# > R-002: レシート明細データフレーム(df_receipt)から売上日(sales_ymd)、顧客ID(customer_id)、商品コード(product_cd)、売上金額(amount)の順に列を指定し、10件表示させよ。

# ---
# > R-003: レシート明細データフレーム(df_receipt)から売上日(sales_ymd)、顧客ID(customer_id)、商品コード(product_cd)、売上金額(amount)の順に列を指定し、10件表示させよ。ただし、sales_ymdはsales_dateに項目名を変更しながら抽出すること。

# ---
# > R-004: レシート明細データフレーム(df_receipt)から売上日(sales_ymd)、顧客ID(customer_id)、商品コード(product_cd)、売上金額(amount)の順に列を指定し、以下の条件を満たすデータを抽出せよ。
# > - 顧客ID(customer_id)が"CS018205000001"

# ---
# > R-005: レシート明細データフレーム(df_receipt)から売上日(sales_ymd)、顧客ID(customer_id)、商品コード(product_cd)、売上金額(amount)の順に列を指定し、以下の条件を満たすデータを抽出せよ。
# > - 顧客ID(customer_id)が"CS018205000001"
# > -
売上金額(amount)が1,000以上 # --- # > R-006: レシート明細データフレーム(receipt)から売上日(sales_ymd)、顧客ID(customer_id)、商品コード(product_cd)、売上数量(quantity)、売上金額(amount)の順に列を指定し、以下の条件を満たすデータを抽出せよ。 # > - 顧客ID(customer_id)が"CS018205000001" # > - 売上金額(amount)が1,000以上または売上数量(quantity)が5以上 # --- # > R-007: レシート明細のデータフレーム(df_receipt)から売上日(sales_ymd)、顧客ID(customer_id)、商品コード(product_cd)、売上金額(amount)の順に列を指定し、以下の条件を満たすデータを抽出せよ。 # > - 顧客ID(customer_id)が"CS018205000001" # > - 売上金額(amount)が1,000以上2,000以下 # --- # > R-008: レシート明細データフレーム(df_receipt)から売上日(sales_ymd)、顧客ID(customer_id)、商品コード(product_cd)、売上金額(amount)の順に列を指定し、以下の条件を満たすデータを抽出せよ。 # > - 顧客ID(customer_id)が"CS018205000001" # > - 商品コード(product_cd)が"P071401019"以外 # --- # > R-009: 以下の処理において、出力結果を変えずにORをANDに書き換えよ。 # # ` # df_store %>% # filter(!(prefecture_cd == "13" | floor_area > 900)) # ` # --- # > R-010: 店舗データフレーム(df_store)から、店舗コード(store_cd)が"S14"で始まるものだけ全項目抽出し、10件だけ表示せよ。 # --- # > R-011: 顧客データフレーム(df_customer)から顧客ID(customer_id)の末尾が1のものだけ全項目抽出し、10件だけ表示せよ。 # --- # > R-012: 店舗データフレーム(df_store)から横浜市の店舗だけ全項目表示せよ。 # --- # > R-013: 顧客データフレーム(df_customer)から、ステータスコード(status_cd)の先頭がアルファベットのA〜Fで始まるデータを全項目抽出し、10件だけ表示せよ。 # --- # > R-014: 顧客データフレーム(df_customer)から、ステータスコード(status_cd)の末尾が数字の1〜9で終わるデータを全項目抽出し、10件だけ表示せよ。 # --- # > R-015: 顧客データフレーム(df_customer)から、ステータスコード(status_cd)の先頭がアルファベットのA〜Fで始まり、末尾が数字の1〜9で終わるデータを全項目抽出し、10件だけ表示せよ。 # --- # > R-016: 店舗データフレーム(df_store)から、電話番号(tel_no)が3桁-3桁-4桁のデータを全項目表示せよ。 # --- # > R-017: 顧客データフレーム(df_customer)を生年月日(birth_day)で高齢順にソートし、先頭10件を全項目表示せよ。 # --- # > R-018: 顧客データフレーム(df_customer)を生年月日(birth_day)で若い順にソートし、先頭10件を全項目表示せよ。 # --- # > R-019: レシート明細データフレーム(df_receipt)に対し、1件あたりの売上金額(amount)が高い順にランクを付与し、先頭10件を抽出せよ。項目は顧客ID(customer_id)、売上金額(amount)、付与したランクを表示させること。なお、売上金額(amount)が等しい場合は同一順位を付与するものとする。 # --- # > R-020: レシート明細データフレーム(df_receipt)に対し、1件あたりの売上金額(amount)が高い順にランクを付与し、先頭10件を抽出せよ。項目は顧客ID(customer_id)、売上金額(amount)、付与したランクを表示させること。なお、売上金額(amount)が等しい場合でも別順位を付与すること。 # --- # > R-021: レシート明細データフレーム(df_receipt)に対し、件数をカウントせよ。 # --- 
# > R-022: レシート明細データフレーム(df_receipt)の顧客ID(customer_id)に対し、ユニーク件数をカウントせよ。 # --- # > R-023: レシート明細データフレーム(df_receipt)に対し、店舗コード(store_cd)ごとに売上金額(amount)と売上数量(quantity)を合計せよ。 # --- # > R-024: レシート明細データフレーム(df_receipt)に対し、顧客ID(customer_id)ごとに最も新しい売上日(sales_ymd)を求め、10件表示せよ。 # --- # > R-025: レシート明細データフレーム(df_receipt)に対し、顧客ID(customer_id)ごとに最も古い売上日(sales_ymd)を求め、10件表示せよ。 # --- # > R-026: レシート明細データフレーム(df_receipt)に対し、顧客ID(customer_id)ごとに最も新しい売上日(sales_ymd)と古い売上日を求め、両者が異なるデータを10件表示せよ。 # --- # > R-027: レシート明細データフレーム(df_receipt)に対し、店舗コード(store_cd)ごとに売上金額(amount)の平均を計算し、降順でTOP5を表示せよ。 # --- # > R-028: レシート明細データフレーム(df_receipt)に対し、店舗コード(store_cd)ごとに売上金額(amount)の中央値を計算し、降順でTOP5を表示せよ。 # --- # > R-029: レシート明細データフレーム(df_receipt)に対し、店舗コード(store_cd)ごとに商品コード(product_cd)の最頻値を求めよ。 # --- # > R-030: レシート明細データフレーム(df_receipt)に対し、店舗コード(store_cd)ごとに売上金額(amount)の標本分散を計算し、降順でTOP5を表示せよ。 # --- # > R-031: レシート明細データフレーム(df_receipt)に対し、店舗コード(store_cd)ごとに売上金額(amount)の標本標準偏差を計算し、降順でTOP5を表示せよ。 # --- # > R-032: レシート明細データフレーム(df_receipt)の売上金額(amount)について、25%刻みでパーセンタイル値を求めよ。 # --- # > R-033: レシート明細データフレーム(df_receipt)に対し、店舗コード(store_cd)ごとに売上金額(amount)の平均を計算し、330以上のものを抽出せよ。 # --- # > R-034: レシート明細データフレーム(df_receipt)に対し、顧客ID(customer_id)ごとに売上金額(amount)を合計して全顧客の平均を求めよ。ただし、顧客IDが"Z"から始まるのものは非会員を表すため、除外して計算すること。 # --- # > R-035: レシート明細データフレーム(df_receipt)に対し、顧客ID(customer_id)ごとに売上金額(amount)を合計して全顧客の平均を求め、平均以上に買い物をしている顧客を抽出せよ。ただし、顧客IDが"Z"から始まるのものは非会員を表すため、除外して計算すること。なお、データは10件だけ表示させれば良い。 # --- # > R-036: レシート明細データフレーム(df_receipt)と店舗データフレーム(df_store)を内部結合し、レシート明細データフレームの全項目と店舗データフレームの店舗名(store_name)を10件表示させよ。 # --- # > R-037: 商品データフレーム(df_product)とカテゴリデータフレーム(df_category)を内部結合し、商品データフレームの全項目とカテゴリデータフレームの小区分名(category_small_name)を10件表示させよ。 # --- # > R-038: 顧客データフレーム(df_customer)とレシート明細データフレーム(df_receipt)から、各顧客ごとの売上金額合計を求めよ。ただし、買い物の実績がない顧客については売上金額を0として表示させること。また、顧客は性別コード(gender_cd)が女性(1)であるものを対象とし、非会員(顧客IDが'Z'から始まるもの)は除外すること。なお、結果は10件だけ表示させれば良い。 # --- # > R-039: 
レシート明細データフレーム(df_receipt)から売上日数の多い顧客の上位20件と、売上金額合計の多い顧客の上位20件を抽出し、完全外部結合せよ。ただし、非会員(顧客IDが'Z'から始まるもの)は除外すること。 # --- # > R-040: 全ての店舗と全ての商品を組み合わせると何件のデータとなるか調査したい。店舗(df_store)と商品(df_product)を直積した件数を計算せよ。 # --- # > R-041: レシート明細データフレーム(df_receipt)の売上金額(amount)を日付(sales_ymd)ごとに集計し、前日からの売上金額増減を計算せよ。なお、計算結果は10件表示すればよい。 # --- # > R-042: レシート明細データフレーム(df_receipt)の売上金額(amount)を日付(sales_ymd)ごとに集計し、各日付のデータに対し、1日前、2日前、3日前のデータを結合せよ。結果は10件表示すればよい。 # --- # > R-043: レシート明細データフレーム(df_receipt)と顧客データフレーム(df_customer)を結合し、性別(gender)と年代(ageから計算)ごとに売上金額(amount)を合計した売上サマリデータフレーム(df_sales_summary)を作成せよ。性別は0が男性、1が女性、9が不明を表すものとする。 # > # > ただし、項目構成は年代、女性の売上金額、男性の売上金額、性別不明の売上金額の4項目とすること(縦に年代、横に性別のクロス集計)。また、年代は10歳ごとの階級とすること。 # --- # > R-044: 前設問で作成した売上サマリデータフレーム(df_sales_summary)は性別の売上を横持ちさせたものであった。このデータフレームから性別を縦持ちさせ、年代、性別コード、売上金額の3項目に変換せよ。ただし、性別コードは男性を'00'、女性を'01'、不明を'99'とする。 # --- # > R-045: 顧客データフレーム(df_customer)の生年月日(birth_day)は日付型でデータを保有している。これをYYYYMMDD形式の文字列に変換し、顧客ID(customer_id)とともに抽出せよ。データは10件を抽出すれば良い。 # --- # > R-046: 顧客データフレーム(df_customer)の申し込み日(application_date)はYYYYMMDD形式の文字列型でデータを保有している。これを日付型に変換し、顧客ID(customer_id)とともに抽出せよ。データは10件を抽出すれば良い。 # --- # > R-047: レシート明細データフレーム(df_receipt)の売上日(sales_ymd)はYYYYMMDD形式の数値型でデータを保有している。これを日付型に変換し、レシート番号(receipt_no)、レシートサブ番号(receipt_sub_no)とともに抽出せよ。データは10件を抽出すれば良い。 # --- # > R-048: レシート明細データフレーム(df_receipt)の売上エポック秒(sales_epoch)は数値型のUNIX秒でデータを保有している。これを日付型に変換し、レシート番号(receipt_no)、レシートサブ番号(receipt_sub_no)とともに抽出せよ。データは10件を抽出すれば良い。 # --- # > R-049: レシート明細データフレーム(df_receipt)の売上エポック秒(sales_epoch)を日付型に変換し、「年」だけ取り出してレシート番号(receipt_no)、レシートサブ番号(receipt_sub_no)とともに抽出せよ。データは10件を抽出すれば良い。 # --- # > R-050: レシート明細データフレーム(df_receipt)の売上エポック秒(sales_epoch)を日付型に変換し、「月」だけ取り出してレシート番号(receipt_no)、レシートサブ番号(receipt_sub_no)とともに抽出せよ。なお、「月」は0埋め2桁で取り出すこと。データは10件を抽出すれば良い。 # --- # > R-051: レシート明細データフレーム(df_receipt)の売上エポック秒を日付型に変換し、「日」だけ取り出してレシート番号(receipt_no)、レシートサブ番号(receipt_sub_no)とともに抽出せよ。なお、「日」は0埋め2桁で取り出すこと。データは10件を抽出すれば良い。 # --- # > R-052: 
レシート明細データフレーム(df_receipt)の売上金額(amount)を顧客ID(customer_id)ごとに合計の上、売上金額合計に対して2,000円以下を0、2,000円より大きい金額を1に2値化し、顧客ID、売上金額合計とともに10件表示せよ。ただし、顧客IDが"Z"から始まるのものは非会員を表すため、除外して計算すること。 # --- # > R-053: 顧客データフレーム(df_customer)の郵便番号(postal_cd)に対し、東京(先頭3桁が100〜209のもの)を1、それ以外のものを0に2値化せよ。さらにレシート明細データフレーム(df_receipt)と結合し、全期間において買い物実績のある顧客数を、作成した2値ごとにカウントせよ。 # --- # > R-054: 顧客データフレーム(df_customer)の住所(address)は、埼玉県、千葉県、東京都、神奈川県のいずれかとなっている。都道府県毎にコード値を作成し、顧客ID、住所とともに抽出せよ。値は埼玉県を11、千葉県を12、東京都を13、神奈川県を14とすること。結果は10件表示させれば良い。 # --- # > R-055: レシート明細データフレーム(df_receipt)の売上金額(amount)を顧客ID(customer_id)ごとに合計し、その合計金額の四分位点を求めよ。その上で、顧客ごとの売上金額合計に対して以下の基準でカテゴリ値を作成し、顧客ID、売上金額合計とともに表示せよ。カテゴリ値は上から順に1〜4とする。結果は10件表示させれば良い。 # > # > - 最小値以上第一四分位未満 # > - 第一四分位以上第二四分位未満 # > - 第二四分位以上第三四分位未満 # > - 第三四分位以上 # --- # > R-056: 顧客データフレーム(df_customer)の年齢(age)をもとに10歳刻みで年代を算出し、顧客ID(customer_id)、生年月日(birth_day)とともに抽出せよ。ただし、60歳以上は全て60歳代とすること。年代を表すカテゴリ名は任意とする。先頭10件を表示させればよい。 # --- # > R-057: 前問題の抽出結果と性別(gender)を組み合わせ、新たに性別×年代の組み合わせを表すカテゴリデータを作成せよ。組み合わせを表すカテゴリの値は任意とする。先頭10件を表示させればよい。 # --- # > R-058: 顧客データフレーム(df_customer)の性別コード(gender_cd)をダミー変数化し、顧客ID(customer_id)とともに抽出せよ。結果は10件表示させれば良い。 # --- # > R-059: レシート明細データフレーム(df_receipt)の売上金額(amount)を顧客ID(customer_id)ごとに合計し、売上金額合計を平均0、標準偏差1に標準化して顧客ID、売上金額合計とともに表示せよ。標準化に使用する標準偏差は、不偏標準偏差と標本標準偏差のどちらでも良いものとする。ただし、顧客IDが"Z"から始まるのものは非会員を表すため、除外して計算すること。結果は10件表示させれば良い。 # --- # > R-060: レシート明細データフレーム(df_receipt)の売上金額(amount)を顧客ID(customer_id)ごとに合計し、売上金額合計を最小値0、最大値1に正規化して顧客ID、売上金額合計とともに表示せよ。ただし、顧客IDが"Z"から始まるのものは非会員を表すため、除外して計算すること。結果は10件表示させれば良い。 # --- # > R-061: レシート明細データフレーム(df_receipt)の売上金額(amount)を顧客ID(customer_id)ごとに合計し、売上金額合計を常用対数化(底=10)して顧客ID、売上金額合計とともに表示せよ(ただし、顧客IDが"Z"から始まるのものは非会員を表すため、除外して計算すること)。結果は10件表示させれば良い。 # --- # > R-062: レシート明細データフレーム(df_receipt)の売上金額(amount)を顧客ID(customer_id)ごとに合計し、売上金額合計を自然対数化(底=e)して顧客ID、売上金額合計とともに表示せよ。ただし、顧客IDが"Z"から始まるのものは非会員を表すため、除外して計算すること。結果は10件表示させれば良い。 # --- # > R-063: 商品データフレーム(df_product)の単価(unit_price)と原価(unit_cost)から、各商品の利益額を算出せよ。結果は10件表示させれば良い。 # 
--- # > R-064: 商品データフレーム(df_product)の単価(unit_price)と原価(unit_cost)から、各商品の利益率の全体平均を算出せよ。 ただし、単価と原価にはNULLが存在することに注意せよ。 # --- # > R-065: 商品データフレーム(df_product)の各商品について、利益率が30%となる新たな単価を求めよ。ただし、1円未満は切り捨てること。そして結果を10件表示させ、利益率がおよそ30%付近であることを確認せよ。ただし、単価(unit_price)と原価(unit_cost)にはNULLが存在することに注意せよ。 # --- # > R-066: 商品データフレーム(df_product)の各商品について、利益率が30%となる新たな単価を求めよ。今回は、1円未満を四捨五入すること(0.5については偶数方向の丸めで良い)。そして結果を10件表示させ、利益率がおよそ30%付近であることを確認せよ。ただし、単価(unit_price)と原価(unit_cost)にはNULLが存在することに注意せよ。 # --- # > R-067: 商品データフレーム(df_product)の各商品について、利益率が30%となる新たな単価を求めよ。今回は、1円未満を切り上げること。そして結果を10件表示させ、利益率がおよそ30%付近であることを確認せよ。ただし、単価(unit_price)と原価(unit_cost)にはNULLが存在することに注意せよ。 # --- # > R-068: 商品データフレーム(df_product)の各商品について、消費税率10%の税込み金額を求めよ。1円未満の端数は切り捨てとし、結果は10件表示すれば良い。ただし、単価(unit_price)にはNULLが存在することに注意せよ。 # --- # > R-069: レシート明細データフレーム(df_receipt)と商品データフレーム(df_product)を結合し、顧客毎に全商品の売上金額合計と、カテゴリ大区分(category_major_cd)が"07"(瓶詰缶詰)の売上金額合計を計算の上、両者の比率を求めよ。抽出対象はカテゴリ大区分"07"(瓶詰缶詰)の購入実績がある顧客のみとし、結果は10件表示させればよい。 # --- # > R-070: レシート明細データフレーム(df_receipt)の売上日(sales_ymd)に対し、顧客データフレーム(df_customer)の会員申込日(application_date)からの経過日数を計算し、顧客ID(customer_id)、売上日、会員申込日とともに表示せよ。結果は10件表示させれば良い(なお、sales_ymdは数値、application_dateは文字列でデータを保持している点に注意)。 # --- # > R-071: レシート明細データフレーム(df_receipt)の売上日(sales_ymd)に対し、顧客データフレーム(df_customer)の会員申込日(application_date)からの経過月数を計算し、顧客ID(customer_id)、売上日、会員申込日とともに表示せよ。結果は10件表示させれば良い(なお、sales_ymdは数値、application_dateは文字列でデータを保持している点に注意)。1ヶ月未満は切り捨てること。 # --- # > R-072: レシート明細データフレーム(df_receipt)の売上日(sales_ymd)に対し、顧客データフレーム(df_customer)の会員申込日(application_date)からの経過年数を計算し、顧客ID(customer_id)、売上日、会員申込日とともに表示せよ。結果は10件表示させれば良い(なお、sales_ymdは数値、application_dateは文字列でデータを保持している点に注意)。1年未満は切り捨てること。 # --- # > R-073: レシート明細データフレーム(df_receipt)の売上日(sales_ymd)に対し、顧客データフレーム(df_customer)の会員申込日(application_date)からのエポック秒による経過時間を計算し、顧客ID(customer_id)、売上日、会員申込日とともに表示せよ。結果は10件表示させれば良い(なお、sales_ymdは数値、application_dateは文字列でデータを保持している点に注意)。なお、時間情報は保有していないため各日付は0時0分0秒を表すものとする。 # --- # > R-074: 
レシート明細データフレーム(df_receipt)の売上日(sales_ymd)に対し、当該週の月曜日からの経過日数を計算し、顧客ID、売上日、当該週の月曜日付とともに表示せよ。結果は10件表示させれば良い(なお、sales_ymdは数値でデータを保持している点に注意)。 # --- # > R-075: 顧客データフレーム(df_customer)からランダムに1%のデータを抽出し、先頭から10件データを抽出せよ。 # --- # > R-076: 顧客データフレーム(df_customer)から性別(gender_cd)の割合に基づきランダムに10%のデータを層化抽出し、性別ごとに件数を集計せよ。 # --- # > R-077: レシート明細データフレーム(df_receipt)の売上金額(amount)を顧客単位に合計し、合計した売上金額の外れ値を抽出せよ。ただし、顧客IDが"Z"から始まるのものは非会員を表すため、除外して計算すること。なお、ここでは外れ値を平均から3σ以上離れたものとする。結果は10件表示させれば良い。 # --- # > R-078: レシート明細データフレーム(df_receipt)の売上金額(amount)を顧客単位に合計し、合計した売上金額の外れ値を抽出せよ。ただし、顧客IDが"Z"から始まるのものは非会員を表すため、除外して計算すること。なお、ここでは外れ値を第一四分位と第三四分位の差であるIQRを用いて、「第一四分位数-1.5×IQR」よりも下回るもの、または「第三四分位数+1.5×IQR」を超えるものとする。結果は10件表示させれば良い。 # --- # > R-079: 商品データフレーム(df_product)の各項目に対し、欠損数を確認せよ。 # --- # > R-080: 商品データフレーム(df_product)のいずれかの項目に欠損が発生しているレコードを全て削除した新たなdf_product_1を作成せよ。なお、削除前後の件数を表示させ、前設問で確認した件数だけ減少していることも確認すること。 # --- # > R-081: 単価(unit_price)と原価(unit_cost)の欠損値について、それぞれの平均値で補完した新たなdf_product_2を作成せよ。なお、平均値について1円未満は四捨五入とし、0.5については偶数寄せでかまわない。補完実施後、各項目について欠損が生じていないことも確認すること。 # --- # > R-082: 単価(unit_price)と原価(unit_cost)の欠損値について、それぞれの中央値で補完した新たなdf_product_3を作成せよ。なお、中央値について1円未満は四捨五入とし、0.5については偶数寄せでかまわない。補完実施後、各項目について欠損が生じていないことも確認すること。 # --- # > R-083: 単価(unit_price)と原価(unit_cost)の欠損値について、各商品の小区分(category_small_cd)ごとに算出した中央値で補完した新たなdf_product_4を作成せよ。なお、中央値について1円未満は四捨五入とし、0.5については偶数寄せでかまわない。補完実施後、各項目について欠損が生じていないことも確認すること。 # --- # > R-084: 顧客データフレーム(df_customer)の全顧客に対し、全期間の売上金額に占める2019年売上金額の割合を計算せよ。ただし、販売実績のない場合は0として扱うこと。そして計算した割合が0超のものを抽出せよ。 結果は10件表示させれば良い。また、作成したデータにNAやNANが存在しないことを確認せよ。 # --- # > R-085: 郵便番号(postal_cd)を用いて経度緯度変換用データフレーム(df_geocode)を紐付け、新たなdf_customer_1を作成せよ。ただし、複数紐づく場合は経度(longitude)、緯度(latitude)それぞれ平均を算出すること。 # --- # > R-086: 
前設問で作成した緯度経度つき顧客データフレーム(df_customer_1)に対し、申込み店舗コード(application_store_cd)をキーに店舗データフレーム(df_store)と結合せよ。そして申込み店舗の緯度(latitude)・経度情報(longitude)と顧客の緯度・経度を用いて距離(km)を求め、顧客ID(customer_id)、顧客住所(address)、店舗住所(address)とともに表示せよ。計算式は簡易式で良いものとするが、その他精度の高い方式を利用したライブラリを利用してもかまわない。結果は10件表示すれば良い。 # $$ # 緯度(ラジアン):\phi \\ # 経度(ラジアン):\lambda \\ # 距離L = 6371 * arccos(sin \phi_1 * sin \phi_2 # + cos \phi_1 * cos \phi_2 * cos(\lambda_1 − \lambda_2)) # $$ # --- # > R-087: 顧客データフレーム(df_customer)では、異なる店舗での申込みなどにより同一顧客が複数登録されている。名前(customer_name)と郵便番号(postal_cd)が同じ顧客は同一顧客とみなし、1顧客1レコードとなるように名寄せした名寄顧客データフレーム(df_customer_u)を作成せよ。ただし、同一顧客に対しては売上金額合計が最も高いものを残すものとし、売上金額合計が同一もしくは売上実績の無い顧客については顧客ID(customer_id)の番号が小さいものを残すこととする。 # --- # > R-088: 前設問で作成したデータを元に、顧客データフレームに統合名寄IDを付与したデータフレーム(df_customer_n)を作成せよ。ただし、統合名寄IDは以下の仕様で付与するものとする。 # > - 重複していない顧客:顧客ID(customer_id)を設定 # > - 重複している顧客:前設問で抽出したレコードの顧客IDを設定 # --- # > R-089: 売上実績のある顧客に対し、予測モデル構築のため学習用データとテスト用データに分割したい。それぞれ8:2の割合でランダムにデータを分割せよ。 # --- # > R-090: レシート明細データフレーム(df_receipt)は2017年1月1日〜2019年10月31日までのデータを有している。売上金額(amount)を月次で集計し、学習用に12ヶ月、テスト用に6ヶ月のモデル構築用データを3セット作成せよ。 # --- # > R-091: 顧客データフレーム(df_customer)の各顧客に対し、売上実績のある顧客数と売上実績のない顧客数が1:1となるようにアンダーサンプリングで抽出せよ。 # --- # > R-092: 顧客データフレーム(df_customer)では、性別に関する情報が非正規化の状態で保持されている。これを第三正規化せよ。 # --- # > R-093: 商品データフレーム(df_product)では各カテゴリのコード値だけを保有し、カテゴリ名は保有していない。カテゴリデータフレーム(df_category)と組み合わせて非正規化し、カテゴリ名を保有した新たな商品データフレームを作成せよ。 # --- # > R-094: 先に作成したカテゴリ名付き商品データを以下の仕様でファイル出力せよ。なお、出力先のパスはdata配下とする。 # > - ファイル形式はCSV(カンマ区切り) # > - ヘッダ有り # > - 文字コードはUTF-8 # --- # > R-095: 先に作成したカテゴリ名付き商品データを以下の仕様でファイル出力せよ。なお、出力先のパスはdata配下とする。 # > - ファイル形式はCSV(カンマ区切り) # > - ヘッダ有り # > - 文字コードはCP932 # --- # > R-096: 先に作成したカテゴリ名付き商品データを以下の仕様でファイル出力せよ。なお、出力先のパスはdata # 配下とする。 # > - ファイル形式はCSV(カンマ区切り) # > - ヘッダ無し # > - 文字コードはUTF-8 # --- # > R-097: 先に作成した以下形式のファイルを読み込み、データフレームを作成せよ。また、先頭10件を表示させ、正しくとりまれていることを確認せよ。 # > - ファイル形式はCSV(カンマ区切り) # > - ヘッダ有り # > - 文字コードはUTF-8 # --- # > R-098: 
先に作成した以下形式のファイルを読み込み、データフレームを作成せよ。また、先頭10件を表示させ、正しくとりまれていることを確認せよ。 # > - ファイル形式はCSV(カンマ区切り) # > - ヘッダ無し # > - 文字コードはUTF-8 # --- # > R-099: 先に作成したカテゴリ名付き商品データを以下の仕様でファイル出力せよ。なお、出力先のパスはdata配下とする。 # > - ファイル形式はTSV(タブ区切り) # > - ヘッダ有り # > - 文字コードはUTF-8 # --- # > R-100: 先に作成した以下形式のファイルを読み込み、データフレームを作成せよ。また、先頭10件を表示させ、正しくとりまれていることを確認せよ。 # > - ファイル形式はTSV(タブ区切り) # > - ヘッダ有り # > - 文字コードはUTF-8 # # これで100本終わりです。おつかれさまでした!
docker/work/preprocess_knock_R.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

text = 'أنا والمعلم'
print(text.split(' '))

# +
import time

from sagas.nlu.nlu_tools import NluTools
from sagas.nlu.google_translator import translate


def marks(t):
    """Format a pronunciation list as a comma-prefixed suffix; '' when the list is empty."""
    if len(t) > 0:
        return ',' + ' '.join(t)[1:]
    return ''


def process(source, target, text, ips_idx=0):
    """Translate `text` as a whole sentence and then word by word, printing each result.

    Args:
        source: source-language code (e.g. 'ar').
        target: target-language code (e.g. 'en').
        text: the sentence to translate.
        ips_idx: pronunciation index, kept for the commented-out debug prints.
    """
    options = set(['get_pronounce'])
    # options.add('get_pronounce')
    res, t = translate(text, source=source, target=target,
                       trans_verbose=False, options=options)
    # print(res, text, t[ips_idx])
    print('✁', '%s(%s %s)' % (text, res, ''.join(t)))
    # Per-word pass: translate each whitespace-delimited token separately.
    for sent in text.split(' '):
        res, t = translate(sent, source=source, target=target,
                           trans_verbose=False, options=options)
        # print(res, sent, t[ips_idx])
        print('%s(%s%s)' % (sent, res, marks(t)), end=" ")
        time.sleep(0.05)
    print('.')


text = 'أنا والمعلم'
target = 'en'
process('ar', target, text)
NluTools().say(text, 'ar')


# +
def analyse_ar(text):
    """Translate an Arabic sentence to English and speak it aloud."""
    target = 'en'
    process('ar', target, text)
    NluTools().say(text, 'ar')


analyse_ar('أنا طالب جامعي صيني.')
# -

from sagas.nlu.google_translator import get_word_map, translate

sents = 'I am a teacher.'
r, _ = translate(sents, source='en', target='ar')
print(r)
analyse_ar(r)

# +
from sagas.nlu.google_translator import get_word_map, translate
from sagas.nlu.corenlp_helper import CoreNlp, CoreNlpViz, get_nlp


def tra(sents):
    """Translate an English sentence into Arabic, echoing the translation."""
    r, _ = translate(sents, source='en', target='ar')
    print(r)
    return r


# Visualize the dependency parse of an Arabic sentence, annotated with an
# Arabic->English word map.
ana = lambda sents: CoreNlpViz(shape='ellipse', size='8,5', fontsize=20).analyse(
    sents, get_nlp('ar'), get_word_map('ar', 'en', sents))
# Translate English input to Arabic first, then analyse the Arabic.
ana_en = lambda sents: ana(tra(sents))
# ana=lambda sents: CoreNlpViz(shape='ellipse', size='8,5', fontsize=20).analyse(sents, get_nlp('ar'))
ana('أنا طالب جامعي صيني')
# -

ana_en('you are a teacher')

ana('انا معلم')

text = 'انا معلم'
rs = {}
for sent in text.split(' '):
    print(sent)
    rs[sent] = '%s\n(%s)' % (sent, 'x')
for sent in text.split(' '):
    print(rs[sent])

ana_en('I am from Japan')
# ana_en('Full support for Arabic')

r = ana_en('I support you')
display(r)


def ana_s(sents):
    """Analyse each sentence in turn: print it, then display its parse."""
    for sent in sents:
        print(sent)
        r = ana(sent)
        display(r)


ana_s([
    'أنا أدعمك',
    'انا معلم',
    'انا من اليابان',
])
notebook/procs-ar.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: conda_python3
#     language: python
#     name: conda_python3
# ---

# <h2>Quadratic Regression Dataset - Linear Regression vs XGBoost</h2>
#
# Model is trained with XGBoost installed in notebook instance
#
# In the later examples, we will train using SageMaker's XGBoost algorithm.
#
# Training on SageMaker takes several minutes (even for simple dataset).
#
# If algorithm is supported on Python, we will try them locally on notebook instance
#
# This allows us to quickly learn an algorithm, understand tuning options and then finally train on SageMaker Cloud
#
# In this exercise, let's compare XGBoost and Linear Regression for Quadratic regression dataset

# Install xgboost in notebook instance.

#### Command to install xgboost
# !pip install xgboost==0.90

# +
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error, mean_absolute_error

# XGBoost
import xgboost as xgb

# Linear Regression
from sklearn.linear_model import LinearRegression
# -

df = pd.read_csv('quadratic_all.csv')

df.head()

plt.plot(df.x, df.y, label='Target')
plt.grid(True)
plt.xlabel('Input Feature')
plt.ylabel('Target')
plt.legend()
plt.title('Quadratic Regression Dataset')
plt.show()

# +
train_file = 'quadratic_train.csv'
validation_file = 'quadratic_validation.csv'

# Specify the column names as the file does not have column header
df_train = pd.read_csv(train_file, names=['y', 'x'])
df_validation = pd.read_csv(validation_file, names=['y', 'x'])
# -

df_train.head()

df_validation.head()

plt.scatter(df_train.x, df_train.y, label='Training', marker='.')
plt.scatter(df_validation.x, df_validation.y, label='Validation', marker='.')
plt.grid(True)
plt.xlabel('Input Feature')
plt.ylabel('Target')
plt.title('Quadratic Regression Dataset')
plt.legend()
plt.show()

# +
X_train = df_train.iloc[:, 1:]        # Features: 1st column onwards
y_train = df_train.iloc[:, 0].ravel()  # Target: 0th column

X_validation = df_validation.iloc[:, 1:]
y_validation = df_validation.iloc[:, 0].ravel()
# -

# Create an instance of XGBoost Regressor
# XGBoost Training Parameter Reference:
#   https://github.com/dmlc/xgboost/blob/master/doc/parameter.md
regressor = xgb.XGBRegressor()

regressor

regressor.fit(X_train, y_train,
              eval_set=[(X_train, y_train), (X_validation, y_validation)])

eval_result = regressor.evals_result()

training_rounds = range(len(eval_result['validation_0']['rmse']))

# Training vs validation RMSE per boosting round.
plt.scatter(x=training_rounds, y=eval_result['validation_0']['rmse'], label='Training Error')
plt.scatter(x=training_rounds, y=eval_result['validation_1']['rmse'], label='Validation Error')
plt.grid(True)
plt.xlabel('Iteration')
plt.ylabel('RMSE')
plt.title('Training Vs Validation Error')
plt.legend()
plt.show()

xgb.plot_importance(regressor)
plt.show()

# ## Validation Dataset Compare Actual and Predicted

result = regressor.predict(X_validation)

result[:5]

plt.title('XGBoost - Validation Dataset')
plt.scatter(df_validation.x, df_validation.y, label='actual', marker='.')
plt.scatter(df_validation.x, result, label='predicted', marker='.')
plt.grid(True)
plt.legend()
plt.show()

# RMSE Metrics
print('XGBoost Algorithm Metrics')
mse = mean_squared_error(df_validation.y, result)
print(" Mean Squared Error: {0:.2f}".format(mse))
print(" Root Mean Square Error: {0:.2f}".format(mse**.5))

# Residual
# Over prediction and Under Prediction needs to be balanced
# Training Data Residuals
residuals = df_validation.y - result
plt.hist(residuals)
plt.grid(True)
plt.xlabel('Actual - Predicted')
plt.ylabel('Count')
plt.title('XGBoost Residual')
plt.axvline(color='r')
plt.show()

# +
# Count number of values greater than zero and less than zero
value_counts = (residuals > 0).value_counts(sort=False)
print(' Under Estimation: {0}'.format(value_counts[True]))
print(' Over Estimation: {0}'.format(value_counts[False]))
# -
# Plot for entire dataset
plt.plot(df.x, df.y, label='Target')
plt.plot(df.x, regressor.predict(df[['x']]), label='Predicted')
plt.grid(True)
plt.xlabel('Input Feature')
plt.ylabel('Target')
plt.legend()
plt.title('XGBoost')
plt.show()

# ## Linear Regression Algorithm

lin_regressor = LinearRegression()

lin_regressor.fit(X_train, y_train)

# Compare Weights assigned by Linear Regression.
#
# Original Function: 5*x**2 -23*x + 47 + some noise
#
# Linear Regression Function: -15.08 * x + 709.86
#
# Linear Regression Coefficients and Intercepts are not close to actual

lin_regressor.coef_

lin_regressor.intercept_

result = lin_regressor.predict(df_validation[['x']])

plt.title('LinearRegression - Validation Dataset')
plt.scatter(df_validation.x, df_validation.y, label='actual', marker='.')
plt.scatter(df_validation.x, result, label='predicted', marker='.')
plt.grid(True)
plt.legend()
plt.show()

# RMSE Metrics
print('Linear Regression Metrics')
mse = mean_squared_error(df_validation.y, result)
print(" Mean Squared Error: {0:.2f}".format(mse))
print(" Root Mean Square Error: {0:.2f}".format(mse**.5))

# Residual
# Over prediction and Under Prediction needs to be balanced
# Training Data Residuals
residuals = df_validation.y - result
plt.hist(residuals)
plt.grid(True)
plt.xlabel('Actual - Predicted')
plt.ylabel('Count')
plt.title('Linear Regression Residual')
plt.axvline(color='r')
plt.show()

# +
# Count number of values greater than zero and less than zero
value_counts = (residuals > 0).value_counts(sort=False)
print(' Under Estimation: {0}'.format(value_counts[True]))
print(' Over Estimation: {0}'.format(value_counts[False]))
# -

# Plot for entire dataset
plt.plot(df.x, df.y, label='Target')
plt.plot(df.x, lin_regressor.predict(df[['x']]), label='Predicted')
plt.grid(True)
plt.xlabel('Input Feature')
plt.ylabel('Target')
plt.legend()
plt.title('LinearRegression')
plt.show()

# Linear Regression is showing clear symptoms of under-fitting
#
# Input Features are not sufficient to capture complex relationship

# <h2>Your Turn</h2>
# You can correct this under-fitting issue by adding relavant features.
#
# 1. What feature will you add and why?
# 2. Complete the code and Test
# 3. What performance do you see now?

# Specify the column names as the file does not have column header
df_train = pd.read_csv(train_file, names=['y', 'x'])
df_validation = pd.read_csv(validation_file, names=['y', 'x'])
df = pd.read_csv('quadratic_all.csv')

# # Add new features

# +
# Place holder to add new features to df_train, df_validation and df
# if you need help, scroll down to see the answer

# Add your code
# +
X_train = df_train.iloc[:, 1:]        # Features: 1st column onwards
y_train = df_train.iloc[:, 0].ravel()  # Target: 0th column

X_validation = df_validation.iloc[:, 1:]
y_validation = df_validation.iloc[:, 0].ravel()
# -

lin_regressor.fit(X_train, y_train)

# Original Function: -23*x + 5*x**2 + 47 + some noise (rewritten with x term first)

lin_regressor.coef_

lin_regressor.intercept_

result = lin_regressor.predict(X_validation)

plt.title('LinearRegression - Validation Dataset')
plt.scatter(df_validation.x, df_validation.y, label='actual', marker='.')
plt.scatter(df_validation.x, result, label='predicted', marker='.')
plt.grid(True)
plt.legend()
plt.show()

# +
# RMSE Metrics
print('Linear Regression Metrics')
mse = mean_squared_error(df_validation.y, result)
print(" Mean Squared Error: {0:.2f}".format(mse))
print(" Root Mean Square Error: {0:.2f}".format(mse**.5))
print("***You should see an RMSE score of 30.45 or less")
# -

df.head()

# Plot for entire dataset
plt.plot(df.x, df.y, label='Target')
plt.plot(df.x, lin_regressor.predict(df[['x', 'x2']]), label='Predicted')
plt.grid(True)
plt.xlabel('Input Feature')
plt.ylabel('Target')
plt.legend()
plt.title('LinearRegression')
plt.show()

# ## Solution for under-fitting
#
# add a new X**2 term to the dataframe
#
# syntax:
#
# df_train['x2'] = df_train['x']**2
#
# df_validation['x2'] = df_validation['x']**2
#
# df['x2'] = df['x']**2
# ### Tree Based Algorithms have a lower bound and upper bound for predicted values


# True Function
def quad_func(x):
    """Ground-truth quadratic used to generate the dataset (plus noise)."""
    return 5*x**2 - 23*x + 47


# +
# X is outside range of training samples
# New Feature: Adding X^2 term
X = np.array([-100, -25, 25, 1000, 5000])
y = quad_func(X)
df_tmp = pd.DataFrame({'x': X, 'y': y, 'x2': X**2})
df_tmp['xgboost'] = regressor.predict(df_tmp[['x']])
df_tmp['linear'] = lin_regressor.predict(df_tmp[['x', 'x2']])
# -

df_tmp

plt.scatter(df_tmp.x, df_tmp.y, label='Actual', color='r')
plt.plot(df_tmp.x, df_tmp.linear, label='LinearRegression')
plt.plot(df_tmp.x, df_tmp.xgboost, label='XGBoost')
plt.legend()
plt.xlabel('X')
plt.ylabel('y')
plt.title('Input Outside Range')
plt.show()

# X is inside range of training samples
X = np.array([-15, -12, -5, 0, 1, 3, 5, 7, 9, 11, 15, 18])
y = quad_func(X)
df_tmp = pd.DataFrame({'x': X, 'y': y, 'x2': X**2})
df_tmp['xgboost'] = regressor.predict(df_tmp[['x']])
df_tmp['linear'] = lin_regressor.predict(df_tmp[['x', 'x2']])

df_tmp

# XGBoost Predictions have an upper bound and lower bound
# Linear Regression Extrapolates
plt.scatter(df_tmp.x, df_tmp.y, label='Actual', color='r')
plt.plot(df_tmp.x, df_tmp.linear, label='LinearRegression')
plt.plot(df_tmp.x, df_tmp.xgboost, label='XGBoost')
plt.legend()
plt.xlabel('X')
plt.ylabel('y')
plt.title('Input within range')
plt.show()

# <h2>Summary</h2>
# 1. In this exercise, we compared performance of XGBoost model and Linear Regression on a quadratic dataset
# 2. The relationship between input feature and target was non-linear.
# 3. XGBoost handled it pretty well; whereas, linear regression was under-fitting
# 4. To correct the issue, we had to add additional features for linear regression
# 5. With this change, linear regression performed much better
#
# XGBoost can detect patterns involving non-linear relationship; whereas, algorithms like linear regression may need complex feature engineering
xgboost/LinearAndQuadraticFunctionRegression/quadratic_xgboost_localmode.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="cedf868076a2" # ##### Copyright 2020 The Cirq Developers # + cellView="form" id="906e07f6e562" #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + [markdown] id="291eb7f565e0" # # Quantum approximate optimization algorithm for the Ising model # + [markdown] id="4dec45d973fc" # <table class="tfo-notebook-buttons" align="left"> # <td> # <a target="_blank" href="https://quantumai.google/cirq/tutorials/educators/qaoa_ising"><img src="https://quantumai.google/site-assets/images/buttons/quantumai_logo_1x.png" />View on QuantumAI</a> # </td> # <td> # <a target="_blank" href="https://colab.research.google.com/github/quantumlib/Cirq/blob/master/docs/tutorials/educators/qaoa_ising.ipynb"><img src="https://quantumai.google/site-assets/images/buttons/colab_logo_1x.png" />Run in Google Colab</a> # </td> # <td> # <a target="_blank" href="https://github.com/quantumlib/Cirq/blob/master/docs/tutorials/educators/qaoa_ising.ipynb"><img src="https://quantumai.google/site-assets/images/buttons/github_logo_1x.png" />View source on GitHub</a> # </td> # <td> # <a href="https://storage.googleapis.com/tensorflow_docs/Cirq/docs/tutorials/educators/qaoa_ising.ipynb"><img src="https://quantumai.google/site-assets/images/buttons/download_icon_1x.png" />Download notebook</a> # </td> # 
# </table>

# + [markdown] id="gAxVvd1LF3l4"
# This notebook provides an introduction to the Quantum Approximate Optimization Algorithm (QAOA) using Cirq. The presentation mostly follows [Farhi et al](https://arxiv.org/abs/1411.4028). We will show how to construct the QAOA circuit and use it to solve optimization problems.

# + id="RlJBDvNgC00H"
try:
    import cirq
except ImportError:
    print("installing cirq...")
    # !pip install cirq --quiet
    print("installed cirq.")

# + id="FTrmLyq4C2gf"
import cirq
import numpy as np
import sympy
import matplotlib.pyplot as plt

# + [markdown] id="70311a0a937e"
# ## Description of the QAOA

# + [markdown] id="8A7a3jcql1l5"
# Suppose we have a function $C(z)$ which we want to minimize. Here, $z$ denotes a collection of variables $z = z_1,z_2,\ldots, z_n$, where each $z_j$ can be either $+1$ or $-1$. (The important thing here is that each $z_j$ has *two possible values*. By convention we choose those values to be $\pm 1$). The QAOA is a general-purpose algorithm whose goal is to produce an assignment of the $z_j$ that gives a relatively low value of $C(z)$. It's not guaranteed to give the lowest possible value of $C(z)$ (hence "approximate" in the name) except in a particular limit which we will discuss.
#
# The QAOA acts on $n$ qubits where, as you might guess, each qubit represents one of the variables $z_1, ..., z_n$, and the $2^n$ states of the computational basis correspond to the $2^n$ possible assignments of the $z$ variables. To be more specific, let's agree that the value of $z_j$ corresponds to the measurement outcome of the Pauli-$Z$ operator on the $j$th qubit. (There is a potential confusion here because the state $| 0 \rangle$ corresponds to $z = +1$, while the state $| 1\rangle$ corresponds to $z=-1$. This is unfortunate, but is something that we'll just have to deal with.)

# + [markdown] id="q4F7ccpiGOn4"
# The QAOA is fairly simple to explain, though the reasons behind why it works are not obvious at first glance.
As usual, we begin with all of our qubits initialized in the $|0\rangle$ state. The first step is to act with $H^{\otimes n}$, the Hadamard operator on each qubit. This prepares an equal superposition of all bitstrings, i.e., an equal superposition of all possible $z$ assignments: # # $$ # H^{\otimes n} |0^n\rangle =\frac{1}{2^{n/2}} \sum_{z \in \{0,1\}^n} |z\rangle. # $$ # # This should be thought of as the "real" initial state of the algorithm (as opposed to thinking of the $|0\rangle$ state as the initial state). The point of the remaining steps is to affect the amplitudes such that those with small $C(z)$ values grow while those with large $C(z)$ values shrink. In this way, when we measure the qubits we'll be more likely to find a bitstring with a small value of $C(z)$. # + [markdown] id="AW1bxjmdHw44" # The meat of the algorithm relies on the following unitary operator: # # $$ # U(\gamma, C) = e^{i \pi \gamma C(Z) / 2} . # $$ # # This operator deserves some explanation. First, $\gamma$ is a parameter which we will later treat as a variational parameter, adjusting its value to produce the best possible result. $C$ here is the function we are trying to minimize, and the notation $C(Z)$ is supposed to tell you to plug in the Pauli-$Z$ operator for each qubit in place of the argument $z$. For example, if # # $$ # C(z) = 3z_1 z_2 - z_2z_3 + z_4 , # $$ # # then # # $$ # C(Z) = 3Z_1 Z_2 - Z_2Z_3 + Z_4. # $$ # # It doesn't look like much happened, but the point here is that $C(z)$ is a number while $C(Z)$ is a matrix. That matrix is diagonal in the computational basis, and those diagonal entries represent all the possible values of $C(z)$. # # After acting with $H^{\otimes n}$, we act with $U(C, \gamma)$. The result is still a sum over all possible bit-strings, but now the coefficients are complex phases which depend on $C$. 
At this point there is still an equal probability to measure any particular string, though, because Born's rule only depends on the square of the amplitude. So, the algorithm is not done yet. # + [markdown] id="ocxry8MKJFeD" # The next step of the algorithm is to act with the unitary operator # # $$ # U(\beta,B) = e^{i\pi\beta B/2},~~~ B = \sum_{j=1}^n X_j , # $$ # # where $\beta$ is another variational parameter. Since the Pauli-$X$ operators on each qubit commute with each other, we can alternatively write this as # # $$ # U(\beta, B) = \prod_{j=1}^n e^{i\pi\beta X_j/2} . # $$ # # So this is just a rotation of each qubit around the $X$-axis on the Bloch sphere by an amount determined by $\beta$. This operation is _not_ diagonal in the computational basis, and the resulting state will not be an equal superposition over all bitstrings. So after this step there will be constructive and destructive interference, which hopefully leads to enhancement of states corresponding to small values of $C$. This $U(\beta, B)$ is sometimes called a "mixing" operation. Note that, up to an inconsequential global phase, we can also write # # $$ # U(\beta, B) = \prod_{j=1}^n X_j^{\beta} . # $$ # + [markdown] id="RFgdxbSlLh0y" # The total circuit consists of repeating the previous two steps a total of $p\geq 1$ times, where the choice of $p$ is up to you. The parameters $\gamma$ and $\beta$ can be chosen independently at each step. So at the conclusion of the circuit, the state of the qubits is # # $$ # |\gamma,\beta\rangle = U(\beta_p,B)U(\gamma_p,C)\cdots U(\beta_1,B)U(\gamma_1,C)H^{\otimes n}|0^n\rangle. # $$ # # If we choose $\gamma$ and $\beta$ so that the expectation value # # $$ # F(\gamma,\beta) = \langle \gamma,\beta|C(Z)|\gamma,\beta\rangle # $$ # # is minimized, then measuring the state $|\gamma,\beta\rangle$ in the computational basis gives us a good candidate bitstring for the minimum of $C(z)$. That's the whole thing! 
# + [markdown] id="MK02MlydMUqL" # In summary we have to perform the following tasks in order to implement the QAOA: # # 1. Figure out out to perform the $U(\gamma, C)$ operation in Cirq for our choice of $C$. # 2. Create a quantum circuit alternating $U(\gamma, C)$ and $U(\beta, B)$ operations as many times as desired. # 3. Find the optimal value of the variational parameters in our circuit. # 4. Measure the output of our circuit. # + [markdown] id="ae8fb76ad322" # ## Toy problem: Ground state of the Ising model # + [markdown] id="X15yPl_KQ20Z" # The Ising Model defines the energy function # # $$ # E = -\sum_{\langle i,j \rangle} Z_i Z_j - \sum_i h_i Z_i, # $$ # # where the notation $\langle i,j\rangle$ means a sum over all nearest-neighbor pairs. The picture here is that the qubits live on the vertices of a graph, and the edges of the graph define which qubits are neighbors. We'll just take out graph to be a rectangular lattice with some number of rows and some number of columns. The numbers $h_i$ have the physical interpretation of an external magnetic field. # # We are interested in finding a low-lying state of the Ising Model, by which I mean a state that has a relatively low amount of energy. This is a difficult problem in general. The pairwise interaction terms would tell you that neighboring qubits should be in the same state to lower the energy, while the magnetic field terms tell you that a given qubit wants to point "in the same direction" as its local field, and the strength of that preference depends on the magnitude of the field. These two different kinds of pressure are not always in agreement! # # This type of problem is a perfect candidate for the QAOA, where we use the energy $E$ as our cost function $C$. # + [markdown] id="377aad71d045" # ### Implementing $U(\gamma, C)$ # + [markdown] id="VFwmWPf7D057" # The first thing we need to do is create the operation $U(\gamma, C)$ where $C$ is equal to the Ising model energy function. 
# The first thing to note is that, since all of the terms in the energy commute, we can decompose this operation as
#
# $$
# U(\gamma, C) = \prod_{\langle i,j\rangle}e^{-i\pi\gamma Z_iZ_j/2} \prod_i e^{-i\pi \gamma h_i Z_i/2}.
# $$
#
# This requires that we have the two-qubit gate $\exp(-i\pi\gamma ZZ/2)$ at our disposal. In matrix form, this is
#
# $$
# \begin{align}
# \exp(-i \pi\gamma Z\otimes Z/2) = \begin{bmatrix}
# e^{-i\pi \gamma/2} & 0 & 0 & 0\\
# 0 & e^{i\pi \gamma/2} & 0 & 0\\
# 0 & 0 & e^{i\pi \gamma/2} & 0 \\
# 0 & 0 & 0 & e^{-i\pi \gamma/2}
# \end{bmatrix}
# \end{align}
# $$
#
# Cirq has a built-in gate `cirq.ZZ` which is equivalent to this once you account for a global phase.

# + id="3CZsDwTlDy_h"
"""Example of using the ZZ gate."""
# Get two qubits.
a = cirq.NamedQubit("a")
b = cirq.NamedQubit("b")

# Pick a value for gamma.
gamma = 0.3

# Display the circuit.
circuit = cirq.Circuit(cirq.ZZ(a, b)**gamma)
print(circuit)

# + [markdown] id="WvFVcT2de0V6"
# We should also check that the unitary is what we expect.

# + id="CazWJFM5eGgB"
"""Check the unitary of the ZZ gate."""
# Unitary we want to implement.
expected_unitary = np.array([
    [np.exp(-1j * np.pi * gamma / 2), 0, 0, 0],
    [0, np.exp(1j * np.pi * gamma / 2), 0, 0],
    [0, 0, np.exp(1j * np.pi * gamma / 2), 0],
    [0, 0, 0, np.exp(-1j * np.pi * gamma / 2)]
])

# Unitary we are implementing.
actual_unitary = cirq.unitary(circuit)

# Check that they are equal up to global phase.
cirq.testing.assert_allclose_up_to_global_phase(
    actual_unitary, expected_unitary, atol=1e-5
)

# + [markdown] id="cy1ns-RLMzp3"
# The magnetic field terms can be handled in a similar way. The single-qubit unitary
#
# $$
# \exp(-i\pi \gamma hZ/2) = \begin{bmatrix}
# e^{-i\pi \gamma h/2} & 0 \\
# 0 & e^{i\pi \gamma h/2}
# \end{bmatrix}
# $$
#
# is equivalent to `cirq.Z**(h*gamma)` up to global phase.

# + id="4e25d4a9e5fd"
"""Example of using the Z gate."""
# Value of the external magnetic field.
h = 1.3

# Display the circuit.
circuit = cirq.Circuit(cirq.Z(a)**(gamma * h))
print(circuit)

# + [markdown] id="1e735a3a1797"
# As before, we can check this unitary is what we expect.

# + id="7b78132a891e"
"""Check the unitary of the Z gate."""
# Unitary we want to implement.
expected_unitary = np.array([
    [np.exp(-1j * np.pi * gamma * h / 2), 0],
    [0, np.exp(1j * np.pi * gamma * h / 2)]
])

# Unitary we are implementing.
actual_unitary = cirq.unitary(circuit)

# Check that they are equal up to global phase.
cirq.testing.assert_allclose_up_to_global_phase(
    expected_unitary, actual_unitary, atol=1e-5
)

# + [markdown] id="51cff3fcb275"
# Now that we know how to implement both $Z_i Z_j$ and $Z_i$, we can implement the full $U(\gamma, C)$ operator.

# + [markdown] id="1b6e1cf7ec1b"
# ### Exercise: More general two-qubit gate

# + [markdown] id="esgOuHF_FT_2"
# The Ising Model is particularly simple because the nearest-neighbor interaction $Z_i Z_j$ is already given in terms of a product of Pauli matrices. But suppose instead that the cost function was a sum of terms that looked like
#
# $$
# C(z_i,z_j) = \begin{cases}
# c_{00} \text{ if } z_i =1,~z_j=1,\\
# c_{01} \text{ if } z_i =1,~z_j=-1,\\
# c_{10} \text{ if } z_i =-1,~z_j=1,\\
# c_{11} \text{ if } z_i =-1,~z_j=-1
# \end{cases}
# $$
#
# for some numbers $c_{ab}$. How would you make the analogous two-qubit gate for this case? You can either make a custom gate from scratch, or build a solution from the standard elementary gates.

# + id="d33731f88da5"
# Your code here!

# + [markdown] id="0d66998d0966"
# ### Implementing the full circuit

# + [markdown] id="uaDb6B_jPgrb"
# We will focus on the Ising model on a rectangular lattice with an arbitrary number of rows and columns. Here are some things to think about:
#
# 1. `cirq.GridQubit`s are natural because our qubits actually do live on a grid. Cirq does not care what kind of qubit you make, though.
# 2. It's a good idea to define separate functions to place the C and B layers for the circuit.
# Really these should be generators that yield the required gates.
# 3. You might consider wrapping everything inside a class. We won't do that here, but if you want to play around with different numbers of rows/columns or different numbers of B/C layers it can be convenient.
#
# First, we'll define the basic parameters of our model and the generators for the different layers.

# + id="NPeFsF6yFvGr"
"""Define problem parameters and get a set of GridQubits."""
# Set the dimensions of the grid.
n_cols = 3
n_rows = 3

# Set the value of the external magnetic field at each site.
h = 0.5 * np.ones((n_rows, n_cols))

# Arranging the qubits in a list-of-lists like this makes them easy to refer to later.
qubits = [[cirq.GridQubit(i, j) for j in range(n_cols)] for i in range(n_rows)]


# + id="2b287acf9127"
def gamma_layer(gamma_value, h):
    """Generator for U(gamma, C) layer of QAOA

    Yields a ZZ interaction for every nearest-neighbor pair (down and right
    neighbors only, so each edge is emitted once) and a Z rotation scaled by
    the local field for every site.

    Args:
        gamma: Float variational parameter for the circuit
        h: Array of floats of external magnetic field values
    """
    for i in range(n_rows):
        for j in range(n_cols):
            if i < n_rows - 1:
                yield cirq.ZZ(qubits[i][j], qubits[i + 1][j])**gamma_value
            if j < n_cols - 1:
                yield cirq.ZZ(qubits[i][j], qubits[i][j + 1])**gamma_value
            yield cirq.Z(qubits[i][j])**(gamma_value * h[i, j])


# + id="7782b09bd0dc"
def beta_layer(beta_value):
    """Generator for U(beta, B) layer (mixing layer) of QAOA"""
    for row in qubits:
        for qubit in row:
            yield cirq.X(qubit)**beta_value


# + [markdown] id="M-3Tg-_dlOn0"
# Let's test these functions by constructing the circuit. Try making a circuit with different numbers of layers. How would you automatically make a circuit with a specified number of layers? Make sure the parameters of these layers are distinct `sympy.Symbol`s for later optimization. Print the circuit to see that it's doing what you want it to do.

# + id="4ZmjE6pmtUoG"
"""Create the QAOA circuit."""
# Use sympy.Symbols for the 𝛾 and β parameters.
gamma = sympy.Symbol("𝛄")
beta = sympy.Symbol("β")

# Start in the H|0> state.
qaoa = cirq.Circuit(cirq.H.on_each(qubits))

# Your code here!

# Display the QAOA circuit.
qaoa

# + [markdown] id="VEAt5QZvtPu_"
# #### Solution

# + [markdown] id="7zWHPT1ktlUk"
# We'll just illustrate the solution for a single $C$ layer and a single $B$ layer.

# + id="lHjIRxL13nXP"
"""Create the QAOA circuit."""
# Use sympy.Symbols for the 𝛾 and β parameters.
gamma = sympy.Symbol("𝛄")
beta = sympy.Symbol("β")

# Start in the H|0> state.
qaoa = cirq.Circuit(cirq.H.on_each(qubits))

# Implement the U(gamma, C) operator.
qaoa.append(gamma_layer(gamma, h))

# Implement the U(beta, B) operator.
qaoa.append(beta_layer(beta), strategy=cirq.InsertStrategy.NEW_THEN_INLINE)

# Display the QAOA circuit.
qaoa

# + [markdown] id="9fc72a4fb3d3"
# ### Computing the energy

# + [markdown] id="3HtlMxa6QpVo"
# To train the QAOA circuit (that is, find the optimal values of the parameters) we're going to need to be able to compute the expectation value of the Ising model energy.
#
# If we were using real hardware, the only way to compute the expectation value of the energy would be to estimate it by sampling. Using a simulator we can alternatively compute the wavefunction and then calculate the expectation value from that. Not only does this save us from having to worry about statistical error, it also tends to be faster than simulating the sampling process.
#
# > We divide the total energy by the number of qubits because we expect the energy to scale with the size of the system.


# + id="-9etj1AeK6dG"
def energy_from_wavefunction(wf, h):
    """Computes the energy-per-site of the Ising model directly from a given
    wavefunction.

    Args:
        wf: Array of size 2**(n_rows * n_cols) specifying the wavefunction.
        h: Array of shape (n_rows, n_cols) giving the magnetic field values.

    Returns:
        energy: Float equal to the expectation value of the energy per site
    """
    n_sites = n_rows * n_cols

    # Z is an array of shape (n_sites, 2**n_sites). Each row consists of the
    # 2**n_sites non-zero entries in the operator that is the Pauli-Z matrix on
    # one of the qubits times the identities on the other qubits. The
    # (i*n_cols + j)th row corresponds to qubit (i,j).
    Z = np.array([(-1)**(np.arange(2**n_sites) >> i)
                  for i in range(n_sites - 1, -1, -1)])

    # Create the operator corresponding to the interaction energy summed over all
    # nearest-neighbor pairs of qubits
    ZZ_filter = np.zeros_like(wf, dtype=float)
    for i in range(n_rows):
        for j in range(n_cols):
            if i < n_rows - 1:
                ZZ_filter += Z[i * n_cols + j] * Z[(i + 1) * n_cols + j]
            if j < n_cols - 1:
                ZZ_filter += Z[i * n_cols + j] * Z[i * n_cols + (j + 1)]

    energy_operator = -ZZ_filter - h.reshape(n_sites).dot(Z)

    # Expectation value of the energy divided by the number of sites
    return np.sum(np.abs(wf)**2 * energy_operator) / n_sites


# + [markdown] id="fjFPEQuyvxjR"
# We'll also need a helper function that computes the expected value of the energy given some parameters of the QAOA.


# + id="XOYLY_u5K7z0"
def energy_from_params(gamma_value, beta_value, qaoa, h):
    """Returns the energy given values of the parameters."""
    sim = cirq.Simulator()
    params = cirq.ParamResolver({"𝛄": gamma_value, "β": beta_value})
    wf = sim.simulate(qaoa, param_resolver=params).final_state_vector
    return energy_from_wavefunction(wf, h)


# + [markdown] id="909ff1474e87"
# ### Optimizing the parameters

# + [markdown] id="r-CjbPwkRI_I"
# Now we need to figure out the best values of $\gamma$ and $\beta$ by minimizing the expectation value of the energy. We'll start by doing a brute-force search of the parameter space for illustrative purposes.

# + id="hM2Zd_kTI578"
"""Do a grid search over values of 𝛄 and β."""
# Set the grid size and range of parameters.
grid_size = 50
gamma_max = 2
beta_max = 2

# Do the grid search.
energies = np.zeros((grid_size, grid_size)) for i in range(grid_size): for j in range(grid_size): energies[i, j] = energy_from_params( i * gamma_max / grid_size, j * beta_max / grid_size, qaoa, h ) # + [markdown] id="b9b6bb9ad449" # We can visualize the energy landscape as follows. # + id="AFP2Ofi0KTfq" """Plot the energy as a function of the parameters 𝛄 and β found in the grid search.""" plt.ylabel(r"$\gamma$") plt.xlabel(r"$\beta$") plt.title("Energy as a function of parameters") plt.imshow(energies, extent=(0, beta_max, gamma_max, 0)) plt.colorbar(); # + [markdown] id="1pWhCblZASbb" # We see that the energy function has a number of interesting properties. First, note that the function is periodic in $\beta$ and $\gamma$ with shorter periods than one might naively expect given the definition of the gates. The details of why that's true will take us away from the main content of this tutorial, but it's a good thing to understand so that the parameter space can be efficiently truncated. # # The other main thing to notice is that there are many local minima and maxima. This makes it challenging to use gradient-based methods for optimization, which we'll explicitly see next. Part of the challenge for algorithms of this type is finding efficient ways to optimize the parameters. # + [markdown] id="0888d57915c6" # #### Gradient descent # + [markdown] id="9MwLJ0LCC0un" # For practice let's try to minimize the expectation value of the energy using gradient descent. We know that there are local minima that we might get stuck in, depending on initialization, but it's still a worthwhile exercise. # # The first step is to define a function which approximates the gradient of the energy. We'll do this by symmetric difference, i.e., $f'(x) \approx (f(x+\epsilon)-f(x-\epsilon))/(2\epsilon)$. You should experiment with different values of $\epsilon$ as well as different formulas for the gradient. 
# + id="_Ge8sxrab6hV"
def gradient_energy(gamma, beta, qaoa, h):
    """Uses a symmetric difference to calculate the gradient.

    Approximates each partial derivative of the expected energy via the
    central difference f'(x) ≈ (f(x + eps) - f(x - eps)) / (2 * eps).

    Args:
        gamma: Current value of the 𝛄 parameter.
        beta: Current value of the β parameter.
        qaoa: The parameterized QAOA circuit to simulate.
        h: The grid of magnetic-field coefficients passed through to the
            energy evaluation.

    Returns:
        Tuple (grad_g, grad_b) with the 𝛄- and β-components of the gradient.
    """
    eps = 10**-3  # Try different values of the discretization parameter

    # Gamma-component of the gradient
    grad_g = energy_from_params(gamma + eps, beta, qaoa, h)
    grad_g -= energy_from_params(gamma - eps, beta, qaoa, h)
    grad_g /= 2 * eps

    # Beta-component of the gradient
    grad_b = energy_from_params(gamma, beta + eps, qaoa, h)
    grad_b -= energy_from_params(gamma, beta - eps, qaoa, h)
    grad_b /= 2 * eps

    return grad_g, grad_b


# + [markdown] id="xaRHKo2LDwdm"
# Now we'll implement a gradient descent algorithm that minimizes the energy. Note that it will get stuck in local minima depending on the initialization.

# + id="BYcCDiYDB8ef"
"""Run a simple gradient descent optimizer."""
gamma, beta = 0.2, 0.7  # Try different initializations
eta = 10**-2  # Try adjusting the learning rate.

# Perform gradient descent for a given number of steps.
num_steps = 150
for i in range(num_steps + 1):
    # Compute the gradient.
    grad_g, grad_b = gradient_energy(gamma, beta, qaoa, h)

    # Update the parameters.
    gamma -= eta * grad_g
    beta -= eta * grad_b

    # Status updates.
    if not i % 25:
        print("Step: {} Energy: {}".format(i, energy_from_params(gamma, beta, qaoa, h)))

# Fixed: the original passed four arguments (gamma, beta, qaoa, h) to a
# two-placeholder format string; the extra arguments were silently ignored.
print("\nLearned gamma: {}\nLearned beta: {}".format(gamma, beta))

# + [markdown] id="d27161500fd6"
# ### Getting the approximate solutions

# + [markdown] id="6UpLOKeso7uo"
# We've optimized our parameters. How well did we do?

# + [markdown] id="_Pv7GZX0EJdr"
# For a $3\times 3$ grid we have $9$ qubits and $12$ interacting nearest-neighbor pairs. If all of the qubits are in the $|0\rangle$ state or all are in the $|1\rangle$ state, then the energy-per-qubit is $-12/9 = -1.33$ at zero external magnetic field $h$, and will be close to that if the magnetic field is small. Notice that the QAOA algorithm we analyzed above is __not__ getting close to that ground state. Is this a problem?
#
# Well, not really.
The QAOA algorithm still succeeds if we can find the ground state after a small number of measurements. The QAOA prepares a certain state which is a linear combination of the ground state and many other states. When we measure the qubits, we find the ground-state configuration with some probability. If that probability is relatively large, then after a reasonably small number of measurements we'll locate the ground state. # # Practically speaking, this means we should measure the state prepared by the QAOA several times and record the lowest-energy state we find. The QAOA can be successful by biasing these measurements toward the ground state, even if they do not produce the ground state with $100\%$ probability. # + [markdown] id="9epP9VB8GGgD" # Let's make a copy of our qaoa circuit for measurement purposes and attach a measurement gate to each qubit. # + id="bIoIBl-aSjKE" """Add measurements to the QAOA circuit.""" measurement_circuit = qaoa.copy() measurement_circuit.append(cirq.measure(*[qubit for row in qubits for qubit in row], key="m")) measurement_circuit # + [markdown] id="_OOqzrQwGTJZ" # Now we'll measure the output of the circuit repeatedly for a good set of angles $\gamma$ and $\beta$. Note that these are simply found from inspecting the above heatmap of the energy found via grid search. # + id="KbIu8eyNSK_t" """Sample from the QAOA circuit.""" num_reps = 1000 # Try different numbers of repetitions. gamma_value, beta_value = 0.2, 0.25 # Try different values of the parameters. # Sample from the circuit. simulator = cirq.Simulator() params = cirq.ParamResolver({"𝛄": gamma_value, "β": beta_value}) result = simulator.run(measurement_circuit, param_resolver = params, repetitions=num_reps) # + [markdown] id="EudMLjzNGadh" # Finally, we'll compute the energy for each of our measurement outcomes and look at the statistics. We start with a helper function which calculates the energy given a set of measurement outcomes. 
# + id="Oa6kAObJTZRi" def compute_energy(meas): """Returns the energy computed from measurements. Args: meas: Measurements/samples. """ Z_vals = 1 - 2 * meas.reshape(n_rows,n_cols) energy = 0 for i in range(n_rows): for j in range(n_cols): if i < n_rows - 1: energy -= Z_vals[i, j] * Z_vals[i + 1, j] if j < n_cols - 1: energy -= Z_vals[i, j] * Z_vals[i, j + 1] energy -= h[i, j] * Z_vals[i, j] return energy / (n_rows * n_cols) # + [markdown] id="kkUl5LYnG7E7" # Now we consider the 10 most common outputs of our measurements and compute the energies of those. # + id="t2SHZj_-TTFS" """Compute the energies of the most common measurement results.""" # Get a histogram of the measurement results. hist = result.histogram(key="m") # Consider the top 10 of them. num = 10 # Get the most common measurement results and their probabilities. configs = [c for c, _ in hist.most_common(num)] probs = [v / result.repetitions for _, v in hist.most_common(num)] # + [markdown] id="37ec9e5b702b" # We can now plot the probabilities of the most common measurement results as well as the energies associated with these results. # + id="6-jbvrc_WOgP" """Plot the most common measurement results and their energies.""" # Plot probabilities of the most common bitstrings. plt.title("Probability of {} Most Common Outputs".format(num)) plt.bar([x for x in range(len(probs))],probs) plt.show() meas = [[int(s) for s in "".join([str(b) for b in bin(k)[2:]]).zfill(n_rows * n_cols)] for k in configs] costs = [compute_energy(np.array(m)) for m in meas] # Plot energies of the most common bitstrings. plt.title("Energy of {} Most Common Outputs".format(num)) plt.bar([x for x in range(len(costs))], costs) plt.show() print("Fraction of outputs displayed: {}".format(np.sum(probs).round(2))) # + [markdown] id="4gj23-BnHEtS" # We see that, for a good choice of $\gamma$ and $\beta$, ground state is the most probable outcome. # # Try changing the values of $\gamma$ and $\beta$ away from the optimal ones. 
You'll see that this experiment no longer finds the ground state for us.

# + [markdown] id="aad607c31941"
# ### Exercise: Experiment with different numbers of layers

# + [markdown] id="iIUkW0w4FGfa"
# See if you can get closer to the true ground state (i.e., a larger fraction of measurements yielding the minimal energy) by adding more layers to the circuit.

# + [markdown] id="a34f4a88d8d9"
# ### Exercise: Try a different graph and/or different interaction strengths

# + [markdown] id="9JRyH-WDVfMp"
# Instead of a square lattice, you can try to formulate the Ising model on any graph you like. This just changes which qubits you link in the $U(\gamma, C)$ layer. Each edge of the graph could also come with a different interaction coefficient, so that instead of $\exp(i\pi \gamma Z_i Z_j/2)$ for that edge you would have $\exp(i\pi \gamma J_{ij} Z_i Z_j / 2)$ for some matrix $J_{ij}$ of coefficients. Note that you have to change both the $U(\gamma, C)$ layer and the definition of the energy function to make this work.

# + [markdown] id="6a635c04373d"
# ### Exercise: Repeat using sampling

# + [markdown] id="--aUfkiaUb3S"
# On real hardware we need to use sampling to estimate expectation values. Adjust your code so that sampling is used instead of wavefunction evaluation. How many samples do you need to take to get good results? Try different values.

# + [markdown] id="9f75ef0e4437"
# ### Exercise: Transverse field Ising model

# + [markdown] id="klGsAVyNtxl2"
# The Ising Model with transverse field replaces the $\sum h_i Z_i$ term with a $\sum h_i X_i$ term. Can we use the QAOA here as well? What are the differences? This is no longer a classical problem: in general the ground state will now be a superposition of elements of the computational basis. Can you make a circuit that prepares a state close to the ground state?
docs/tutorials/educators/qaoa_ising.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import matplotlib.pyplot as plt import pandas as pd import numpy as np import os df = pd.read_csv(os.path.join("..", "Resources", "diabetes.csv")) df.head() target = df["Outcome"] target_names = ["negative", "positive"] data = df.drop("Outcome", axis=1) feature_names = data.columns data.head() from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(data, target, random_state=42) # KNN classifier from sklearn.neighbors import KNeighborsClassifier model = KNeighborsClassifier() # Create the grid search estimator along with a parameter object containing the values to adjust. # Try adjusting n_neighbors with values of 1 through 19. Adjust leaf_size by using 10, 50, 100, and 500. # Include both uniform and distance options for weights. from sklearn.model_selection import GridSearchCV param_grid = { 'n_neighbors': [1, 3, 5, 7, 9, 11, 13, 15, 17, 19], 'weights': ['uniform', 'distance'], 'leaf_size': [10, 50, 100, 500] } grid_clf = GridSearchCV(model, param_grid, verbose=3) # Fit the model by using the grid search estimator. # This will take the KNN model and try each combination of parameters. grid_clf.fit(X_train, y_train) # List the best parameters for this dataset print(grid_clf.best_params_) # List the best score print(grid_clf.best_score_) # Create the parameter object for the randomized search estimator. # Try adjusting n_neighbors with values of 1 through 19. Adjust leaf_size by using a range from 1 to 500. # Include both uniform and distance options for weights. 
# Parameter distributions for the randomized search: odd neighbor counts
# 1..19, both weighting schemes, and every leaf size from 1 to 499.
param_grid = {
    'n_neighbors': np.arange(1,20,2),
    'weights': ['uniform', 'distance'],
    'leaf_size': np.arange(1, 500)
}
param_grid

# Create the randomized search estimator by using the KNN model and the
# parameter grid that you created.
# (NOTE(review): earlier comments said "logistic regression", but `model`
# is the KNeighborsClassifier defined above.)
from sklearn.model_selection import RandomizedSearchCV
random_clf = RandomizedSearchCV(model, param_grid, random_state=0, verbose=3)

# Fit the model by using the randomized search estimator.
# This will take the KNN model and a random sample of combinations of parameters.
random_clf.fit(X_train, y_train)

# List the best parameters for this dataset
print(random_clf.best_params_)

# List the best score (mean cross-validated accuracy of the best estimator)
print(random_clf.best_score_)

# Make predictions with the hypertuned model
predictions = random_clf.predict(X_test)

# Calculate the classification report
from sklearn.metrics import classification_report
print(classification_report(y_test, predictions, target_names=target_names))
01-Lesson-Plans/19-Supervised-Machine-Learning/2/Extra-Activities/02-Stu_Hyperparameters/Solved/Stu_Hyperparameters.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Lagrange interpolation # # Given $(n+1)$ distinct points $\{q_i\}_{i=0}^n$ in the interval $[0,1]$, # we define the *Lagrange interpolation* operator $\mathcal{L}^n$ the operator # $$ # \mathcal{L}^n : C^0([0,1]) \mapsto \mathcal{P}^n # $$ # which satisfies # $$ # (\mathcal{L}^n f)(q_i) = f(q_i), \qquad i=0,\dots,n. # $$ # # This operator is used to approximate the infinitely dimensional space $C^0([0,1])$ with a # finite dimensional one, $\mathcal{P}^n$, which is the space of polynomials of order n. # # Such a space has dimension $n+1$, and can be constructed using linear combinations of # monomials of order $\leq n$: # # $$ # \mathcal{P}^n = \text{span}\{p_i := x^i\}_{i=0}^{n} # $$ # # Let's start by importing the usual suspects: # %matplotlib inline from numpy import * from pylab import * # In what follows, we will plot several functions in the interval $[0,1]$, # so we start by defining a linear space used for plotting. As a good habit, # we choose a number of points which would generate intervals that are # exactly representable in terms of a binary base. # + ref = 1025 # So that x_i+1 - x_i is exactly representable in base 2 x = linspace(0,1,ref) n = 5 # Polynomials of order 5, with dimension 6 # We compute the basis of Pn row-wise. This is memory efficient if we want to access # all the values of the basis at once. 
Pn = zeros((ref, n+1)) for i in range(n+1): Pn[:, i] = x**i # The _ = in front of the plot command is only there to ignore the output of the plot # command figure(figsize=[10,10]) _ = plot(x, Pn) # - # If we want to construct the Lagrange interpolation of a given function on $n+1$ equispaced points in # $[0,1]$, then we are actively looking for an element of $\mathcal{P}^n$ that coincides with the function # at these given points. # # Given a basis $\{v_i\}_{i=0}^n$, any element of $\mathcal{P}^n$ can be written as a linear combination of # the basis, i.e., # # $$ # \forall u \in \mathcal{P}^n, \quad \exists! \{u^i\}_{i=0}^n \quad| \quad u(x) = \sum_{i=0}^n u^i v_i(x) # $$ # # in what follows, we'll use [Einstein summation convention](https://en.wikipedia.org/wiki/Einstein_notation), and # call $u$ both the function of $\mathcal{P}^n$, or the $R^{n+1}$ vector representing its coefficients. # # **Remark on the notation (advanced topic. Ignore if you don't understand it)** # # We use upper indices to indicate both "contravariant" coefficients and the *canonical basis of the dual space*, # i.e., the linear functionals in $(\mathcal{P}^n)^*$ such that # # $$ # (\mathcal{P}^n)^* := \text{span}\{v^i\}_{i=0}^{n} \qquad | \qquad v^i(v_j) = \delta^i_j\qquad i,j = 0, \ldots, n # $$ # # With this notation, we have that the coefficients of a polynomial are uniquely determined by # # $$ # u^i = v^i(u) # $$ # # where the $u$ on the right hand side is an element of $\mathcal{P}^n$ (not its coefficients). # # If we want to solve the interpolation problem above, then we need to find the coefficients $u^i$ of the # polynomial $u$ that interpolates $f$ at the points $q_i$: # # $$ # v_j(q_i) u^j = f(q_i) # $$ # # (Remember Einstein summation convention) # # This can be written as a linear problem $A u = F$, with system matrix $A_{ij} := v_j(q_i)$ and right # hand side $F_i = f(q_i)$. 
# + # The interpolation points q = linspace(0,1,n+1) A = zeros((n+1, n+1)) for j in range(n+1): A[:,j] = q**j # The interpolation function f = lambda x: sin(2*pi*x) # The right hand side F = f(q) # The coefficients u = linalg.solve(A, F) # Make a nice looking plot figure(figsize=[10,5]) _ = plot(x, f(x)) _ = plot(x, Pn.dot(u)) _ = plot(q, f(q), 'ro') # - # # Condition number of interpolation # # What is the condition number of this problem? # # Given a set of $n+1$ (distinct) points $\{q_i\}_{i=0}^{n}$, and a basis $\{p_i\}_{i=0}^{n}$ for the polynomial space $P^n([0,1])$ (that has dimension $n+1$), we would like to estimate the condition number of the interpolation problem, defined as # # > Given a function $g \in C^0([0,1])$, find the polynomial $p \in P^n([0,1])$ such that $p(q_i) = g(q_i)$ for all $i=0,\dots,n$. # # **Parameters of the problem: the points $q_i$** # # **Input of the problem: the function $g$** # # **Output of the problem: the polynomial $p$ in $P^n$, where $n=len(q)-1$** # # If we have a basis $v_i$ for $P^n$, any polynomial in $P^n$ can be written as # # $$p(x) = p^i v_i(x)$$ # # A possible algorithm for the interpolation problem then can obtained as: # # - Define a basis of $P^n$ (basis = set of $n+1$ linearly independent functions, whose linear combination covers the entire space $P^n$). For example: $p_i = x^i$ # - Evaluate the function we want to interpolate in $q_i$. 
Call the resulting vector $g_i: g(q_i)$ for $i=0,\dots,n$ # - Write $p(x) = p^i v_i$ (sum is implied for repeated indices), and impose that $p(q_i) = g(q_i)$, i.e.: $p^j v_j(q_i) = g(q_i)$ or: # - Construct the **interpolation matrix**: $A_{ij} = v_j(q_i)$ # - Solve the linear system $A_{ij} p^j = g(q_i)$ # # **NOTATION:** We indicate the coefficients of the inverse of the matrix with coefficients $A_{ij}$ using the following notation: $A^{ij} = (A^{-1})_{ij}$, that is, we define # # $$ A^{ij} A_{jk} = \delta^i_k$$ # # Where $\delta^i_k$ is one if $i = j$ and zero otherwise (the identity, or Kronecker delta). # # ## How do we estimate the absolute condition number of the problem? # # Given a perturbation function $\delta g$, the interpolation of $g+\delta g$ results in a perturbed polynomial $p+\delta p$, where, by linearity, $\delta p$ interpolates $\delta g$, i.e., $\delta p^i = A^{ij} \delta g(q_j)$. # # We would like to estimate # # $$ K_{abs} := \sup_{\delta g \in C^0([0,1])} \frac{\|\delta p\|_{\infty}}{\|\delta g\|_{\infty}} $$ # # where $\| v \|_{\infty} := \max_{x\in[0,1]} |v(x)|$ is the $L^\infty$ norm of the function $v$. # # We start by estimating the numerator: # # $$ \|\delta p\|_\infty = \|\delta p^i v_i \|_\infty \leq \max_i |\delta p^i| \quad \left\|\sum_i |v_i|\right\|_\infty # \leq \max_i |A^{ij}\delta g^j| \quad \left\|\sum_i |v_i|\right\|_\infty \leq C \|A\| \quad \max_i |\delta g^j| \quad \left\|\sum_i |v_i|\right\|_\infty$$ # # Now we observe that the second term is always bounded by $\max_{x\in[0,1]}|g(x)|$, i.e., # # $$ \sup_{\delta g \in C^0([0,1])} \frac{\max_i |\delta g^j|}{\|\delta g\|_\infty} \leq 1$$ # # and therefore: # # $$ K_{abs} := \sup_{\delta g \in C^0([0,1])} \frac{\|\delta p\|_{\infty}}{\|\delta g\|_{\infty}} \leq C \|A\| \quad \left\|\sum_i |v_i|\right\|_\infty$$. 
# # The condition number depends on three parts: # - the constant $C$, depending on the norm type we chose for $A$ and on $n$ # - the norm of the matrix $A$ # - the quantity $ \left\|\sum_i |v_i|\right\|_\infty$ # # Let's start by evaluating the norm of $A$ for the monomial basis, and the norm of the last term when we increase $n$: for i in range(3,15): qtmp = linspace(0,1,i) Atmp = zeros((i,i)) Ltmp = zeros((ref,i)) for j in range(i): Atmp[:,j] = qtmp**j Ltmp[:,j] = x**j lebesgue = max(sum(abs(Ltmp), axis=1)) print("Matrix condition number: ( n = ", i, "):", linalg.cond(Atmp), ", Basis condition number: ", lebesgue) # As we see, the condition number of this matrix explodes as n increases. Since the interpolation problem # reduces to solving the matrix constructed as $A_{ij} := p_j(x_i)$, one way to ensure a good condition number (of the matrix!) # is to choose the basis such that $A$ is the identity matrix, i.e., to choose the basis such that $v_j(x_i) = \delta_{ij}$. Such a basis is called the **Lagrange basis**, and it is constructed explicitly as: # # # $$ # l^n_i(x) := \prod_{j=0, j\neq i}^n \frac{(x-x_j)}{(x_i-x_j)} \qquad # i = 0, \dots, n # $$ # # With this basis, no matrix inversion is required, and we can simply write the Lagrange interpolation as # $$ # \mathcal{L}^n f := \sum_{i=0}^n f(x_i) l^n_i(x), # $$ # # > Given a set of $(n+1)$ distinct points # > $\{x_i\}_{i=0}^n$ , there exist a unique Lagrange interpolation of order # > $n$. # + Ln = zeros((ref, n+1)) for i in range(n+1): Ln[:,i] = product([ (x-q[j])/(q[i]-q[j]) for j in range(n+1) if j != i], axis=0) _ = plot(x,Ln) # - # Notice that we can also write `Ln` as the inverse of the matrix $A$, applied to `Pn.T`. 
# # Start by considering the Lagrange interpolation of the Lagrange basis $l_k$, i.e., # # $$p^i = A^{ij} l_k(q_j) = A^{ij} \delta_{jk} = A^{ik}$$ # # To plot these coefficients (i.e., to evaluate the polynomial with coefficients $A^{ik}$ at fixed $k$ in the points $x_i$), we simply multipy $A^{ik}$ with $v_k(x_m)$, i.e. `Pn[m,k]`, or `Ln[j,k] = P[j,i] * Ainv[i,k] # # In other words, the $i$-th row of the matrix $A^{-1}$, represent the monomial coefficients of the lagrange basis $l_k$: Ainv = linalg.inv(A) Ln2 = Pn.dot(Ainv) _ = plot(x, Ln2) # Now let's see how this influences the second part of the condition number, when we increase $n$: for i in range(3,15): qtmp = linspace(0,1,i) Atmp = zeros((i,i)) Ltmp = zeros((ref,i)) for j in range(i): Atmp[:,j] = qtmp**j Ltmp[:,j] = x**j Atmpinv = linalg.inv(Atmp) Ltmp = Ltmp.dot(Atmpinv) lebesgue = max(sum(abs(Ltmp), axis=1)) print("Basis condition number: ", lebesgue) # As you see, the global condition number is now much smaller. The identity has condition number 1, and the basis reaches a condition number of about 160. **DO NOT** use `A` to compute the Lagrange basis!!! In this way, you are polluting your basis with a bad condition number from the matrix. Construct explicitly the basis using the explicit construction. # # That construction suffers from the bad condition number of the substraction when the number of points is very large, but otherwise shows the same behaviour you see up here. # + y = Ln2.dot(f(q)) figure(figsize=[10,5]) _ = plot(x, f(x)) _ = plot(x, y) _ = plot(q, f(q), 'ro') # - # Let's try different functions: # + # A little "macro". This assumes Ln, q, and x are all defined # Notice: technically this is a python function. However, it # expects symbols and variables to be defined in the global scope # and this is **not** good programming style. It may be very # useful and fast at times, but try not to overdo it. # # I'd call it a function if internally it did not use any globally # defined variable. 
def my_plot(f):
    """Plot f, its Lagrange interpolant, and the interpolation points.

    NOTE: deliberately relies on the globals Ln, q, and x defined above
    (see the remark in the preceding cell) — this is notebook convenience,
    not good library style.
    """
    figure(figsize=[10,5])
    # Interpolant values on the plotting grid: Ln columns are the Lagrange
    # basis functions, f(q) are the interpolation coefficients.
    y = Ln.dot(f(q))
    _ = plot(x, f(x))
    _ = plot(x, y)
    _ = plot(q, f(q), 'ro')
    show()

my_plot(sin)
my_plot(cos)

# When we need something more complicated than simply cos, or sin,
# we can use "on the fly" function definitions, or lambda functions:
my_plot(lambda x: cos(2*pi*x))

# Lambda functions can be assigned too, for convenience and later
# reuse...
# The classic Runge function, rescaled to [0, 1]: a standard example where
# equispaced polynomial interpolation behaves badly as n grows.
runge = lambda x: 1.0/(1+50*(.5-x)**2)
my_plot(runge)

# Alternatively, you can define the function in the classical pythonic
# way:
def shifted_abs(x):
    """Return |x - 0.5| — non-smooth at 0.5, another hard case."""
    return abs(x-.5)

my_plot(shifted_abs)
# -

# Lebesgue function of the basis: sum over i of |l_i(x)| on the plot grid.
lebesgue = sum(abs(Ln), axis=1)
plot(x, lebesgue)
lebesgue.shape
slides/Lecture 05 - LH - LAB - Interpolation with python.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # load data import pandas as pd data = pd.read_csv(r'train.csv', index_col='Id') data_test = pd.read_csv(r'test.csv', index_col='Id') data.head() # - # ## Missing Values - Train Data # missing data total_missing = data.isnull().sum().sort_values(ascending=False) percent_missing = (data.isnull().sum() / data.isnull().count())*100 missing_data = pd.concat([total_missing, percent_missing], axis=1, keys=['Total', 'Percent']) missing_data.head(20) # ### Droping data columns with 15% missing value data = data.drop(['PoolQC', 'MiscFeature', 'Alley', 'Fence', 'FireplaceQu', 'LotFrontage'], axis=1) data.shape # #### We going to write a function To find missing values in each columns # missing values - function def missing_values(data, n): total_missing = data.isnull().sum().sort_values(ascending=False) percent_missing = (data.isnull().sum() / data.isnull().count())*100 missing_data = pd.concat([total_missing, percent_missing], axis=1, keys=['Total', 'Percent']) return missing_data.head(n) # missing data missing_values(data, 14) # ### Column - GarageType # NA - No Garage # # so we can replace NA with No Garage data.GarageType.unique() data.GarageType = data.GarageType.fillna('No Garage') missing_values(data, 13) # ### Column - GarageYrBlt data.GarageYrBlt.unique() # finding index of missing values idx = data[data['GarageYrBlt'].isnull()].index.tolist() data.iloc[idx].head(6) # #### Filling missing values in GarageYrBlt column data.GarageYrBlt = data.GarageYrBlt.fillna(method='bfill', axis=0) missing_values(data, 12) # ### Column - GarageFinish # NA - No Garage # # so we can replace NA with No Garage data.GarageFinish.unique() data.GarageFinish = data.GarageFinish.fillna('No Garage') missing_values(data, 11) # ### Column - GarageQual # NA - No Garage # # 
so we can replace NA with No Garage data.GarageQual.unique() data.GarageQual = data.GarageQual.fillna('No Garage') missing_values(data, 10) # ### Column - GarageCond # NA - No Garage # # so we can replace NA with No Garage data.GarageCond = data.GarageCond.fillna('No Garage') missing_values(data, 9) # ### Column - BsmtFinType2 # NA - No Basement # # so we can replace NA with No Basement data.BsmtFinType2.unique() data.BsmtFinType2 = data.BsmtFinType2.fillna('No Basement') missing_values(data, 8) # ### Column - BsmtExposure # NA - No Basement # # so we can replace NA with No Basement data.BsmtExposure.unique() data.BsmtExposure = data.BsmtExposure.fillna('No Basement') missing_values(data, 7) # ### Column - BsmtFinType1 # NA - No Basement # # so we can replace NA with No Basement data.BsmtFinType1.unique() data.BsmtFinType1 = data.BsmtFinType1.fillna('No Basement') missing_values(data, 6) # ### Column - BsmtQual # NA - No Basement # # so we can replace NA with No Basement data.BsmtQual.unique() data.BsmtQual = data.BsmtQual.fillna('No Basement') missing_values(data, 5) # ### Column - BsmtCond # NA - No Basement # # So we can replace NA with No Basement data.BsmtCond.unique() data.BsmtCond = data.BsmtCond.fillna('No Basement') missing_values(data, 4) # ### Column - MasVnrType # Cinder Block **NOT** found in the data set # # So we assuming that missing values are Cinder Blockes data.MasVnrType.unique() data.MasVnrType = data.MasVnrType.fillna('CBlock') missing_values(data, 3) # ### Column - MasVnrArea # missing values filling by using **back-fill** method data.MasVnrArea.unique() data.MasVnrArea = data.MasVnrArea.fillna(method='bfill', axis=0) missing_values(data, 2) # ### Column - Electrical # Missing value filling by using **back-fill** method data.Electrical.unique() data.Electrical = data.Electrical.fillna(method='bfill', axis=0) missing_values(data, 2) # ## Missing Values - Test Data missing_values(data_test, 34) # #### Droping columns with >15% missing values 
data_test = data_test.drop(['PoolQC', 'MiscFeature', 'Alley', 'Fence', 'FireplaceQu', 'LotFrontage'], axis=1) data.shape # missing data missing_values(data_test, 28) # ### Column - GarageQual # NA - No Garage # # So we can replace NA with No Garage data_test.GarageQual.unique() data_test.GarageQual = data_test.GarageQual.fillna('No Garage') missing_values(data_test, 27) # ### Column - GarageCond # NA - No Garage # # So we can replace NA with No Garage data_test.GarageCond.unique() data_test.GarageCond = data_test.GarageCond.fillna('No Garage') missing_values(data_test, 26) # ### Column - GarageFinish # NA - No Garage # # So we can replace NA with No Garage data_test.GarageFinish.unique() data_test.GarageFinish = data_test.GarageFinish.fillna('No Garage') missing_values(data_test, 25) # ### Column - GarageYrBlt # Replace missing values by **back-fill** method data_test.GarageYrBlt.unique() data_test.GarageYrBlt = data_test.GarageYrBlt.fillna(method='bfill', axis=0) missing_values(data_test, 24) # ### Column - GarageType # NA - No Garage # # So we can replace NA with No Garage data_test.GarageType.unique() data_test.GarageType = data_test.GarageType.fillna('No Garage') missing_values(data_test, 23) # ### Column - BsmtCond # NA - No Basement # # So we can replace NA with No Basement data_test.BsmtCond.unique() data_test.BsmtCond = data_test.BsmtCond.fillna('No Basement') missing_values(data_test, 22) # ### Column - BsmtQual # NA - No Basement # # So we can replace NA with No Basement data_test.BsmtQual.unique() data_test.BsmtQual = data_test.BsmtQual.fillna('No Basement') missing_values(data_test, 21) # ### Column - BsmtExposure # NA - No Basement # # So we can replace NA with No Basement data_test.BsmtExposure.unique() data_test.BsmtExposure = data_test.BsmtExposure.fillna('No Basement') missing_values(data_test, 20) # ### Column - BsmtFinType2 # NA - No Basement # # So we can replace NA with No Basement data_test.BsmtFinType2.unique() data_test.BsmtFinType2 = 
data_test.BsmtFinType2.fillna('No Basement') missing_values(data_test, 19) # ### Column - BsmtFinType1 # NA - No Basement # # So we can replace NA with No Basement data_test.BsmtFinType1.unique() data_test.BsmtFinType1 = data_test.BsmtFinType1.fillna('No Basement') missing_values(data_test, 18) # ### Column - MasVnrType # Cinder Block NOT found in the data set # # So we assuming that missing values are Cinder Blockes data_test.MasVnrType.unique() data_test.MasVnrType = data_test.MasVnrType.fillna('CBlock') missing_values(data_test, 17) # ### Column - MasVnrArea # Filling missing values with **back-fill** method data_test.MasVnrArea.unique() data_test.MasVnrArea = data_test.MasVnrArea.fillna(method='bfill', axis=0) missing_values(data_test, 16) # ### Column - MSZoning # Filling missing values with **back-fill** method data_test.MSZoning.unique() data_test.MSZoning = data_test.MSZoning.fillna(method='bfill', axis=0) missing_values(data_test, 15) # ### Column - Functional # Salvage only missing from list, so lets assuming that missing values is Salvage only data_test.Functional.unique() data_test.Functional = data_test.Functional.fillna('Sal') missing_values(data_test, 14) # ### Column - BsmtFullBath # Missing values filling by using **back-fill** method data_test.BsmtFullBath.unique() data_test.BsmtFullBath = data_test.BsmtFullBath.fillna(method='bfill', axis=0) missing_values(data_test, 13) # ### Column - BsmtHalfBath # Missing values filling by using **back-fill** method data_test.BsmtHalfBath.unique() data_test.BsmtHalfBath = data_test.BsmtHalfBath.fillna(method='bfill', axis=0) missing_values(data_test, 12) # ### Column - Utilities # Missing values filling by using **back-fill** method data_test.Utilities.unique() data_test.Utilities = data_test.Utilities.fillna(method='bfill', axis=0) missing_values(data_test, 11) # ### Column - Exterior2nd # Other is **NOT** in the list, so we assuming that Other is the missing value data_test.Exterior2nd.unique() 
data.Exterior2nd.unique() data_test.Exterior2nd = data_test.Exterior2nd.fillna(method='bfill', axis=0) missing_values(data_test, 10) # ### Column - Exterior1st # Missing value filling by using **back-fill** method data_test.Exterior1st.unique() data_test.Exterior1st = data_test.Exterior1st.fillna(method='bfill', axis=0) missing_values(data_test, 9) # ### Column - SaleType # Missing value filling by using **back-fill** method data_test.SaleType.unique() data_test.SaleType = data_test.SaleType.fillna(method='bfill', axis=0) missing_values(data_test, 8) # ### Column - TotalBsmtSF # Missing values filling by using **back-fill** method data_test.TotalBsmtSF.unique() data_test.TotalBsmtSF = data_test.TotalBsmtSF.fillna(method='bfill', axis=0) missing_values(data_test, 7) # ### Column - KitchenQual # Missing value filling by using **back-fill** method data_test.KitchenQual.unique() data_test.KitchenQual = data_test.KitchenQual.fillna(method='bfill', axis=0) missing_values(data_test, 6) # ### Column - BsmtUnfSF # Missing values filling by using **back-fill** method data_test.BsmtUnfSF.unique() data_test.BsmtUnfSF = data_test.BsmtUnfSF.fillna(method='bfill', axis=0) missing_values(data_test, 5) # ### Column - GarageCars # Missing values filling by using **back-fill** method data_test.GarageCars.unique() data_test.GarageCars = data_test.GarageCars.fillna(method='bfill', axis=0) missing_values(data_test, 4) # ### Column - BsmtFinSF2 # Missing values filling by using **back-fill** method data_test.BsmtFinSF2.unique() data_test.BsmtFinSF2 = data_test.BsmtFinSF2.fillna(method='bfill', axis=0) missing_values(data_test, 3) # ### Column - GarageArea # Missing values filling by using **back-fill** method data_test.GarageArea.unique() data_test.GarageArea = data_test.GarageArea.fillna(method='bfill', axis=0) missing_values(data_test, 2) # ### Column - BsmtFinSF1 # Missing values filling by using **back-fill** method data_test.BsmtFinSF1.unique() data_test.BsmtFinSF1 = 
# Backfill the remaining missing basement values.  `fillna` returns a new
# Series, so the result must be assigned back -- the previous bare call
# was a no-op and left the NaNs in place.
data_test['BsmtFinSF1'] = data_test['BsmtFinSF1'].fillna(method='bfill')

missing_values(data_test, 2)

# ### Find the Score of DataSet

from xgboost import XGBRegressor
from sklearn.model_selection import cross_val_score
import numpy as np

# function
def score_dataset(x, y, model=XGBRegressor()):
    """Return the 5-fold cross-validated RMSLE of `model` on (x, y).

    Categorical columns are label-encoded before scoring.  A copy of `x`
    is taken first so the caller's DataFrame is not mutated in place.
    """
    x = x.copy()  # avoid mutating the caller's frame via factorize below
    # label encoding for categoricals
    for colname in x.select_dtypes(['object', 'category']):
        x[colname], _ = x[colname].factorize()
    # RMSLE (Root Mean Squared Log Error)
    score = cross_val_score(model, x, y, cv=5, scoring='neg_mean_squared_log_error')
    score = -1 * score.mean()
    score = np.sqrt(score)
    return score

# **Find Score of DataSet after Data Cleaning**

# assigning variables
x = data.copy()
y = x.pop('SalePrice')

# score
score_dataset(x, y)

# # Deep Learning - Model

# +
import pandas as pd
from IPython.display import display

# Work on a copy: the previous `data_encoded = data` aliased the original
# frame, so the label encoding below silently overwrote `data` itself.
data_encoded = data.copy()

# label encoding for categoricals
for colname in data_encoded.select_dtypes(['object', 'category']):
    data_encoded[colname], _ = data_encoded[colname].factorize()

# Split features and Target
x_train = data_encoded.copy()
y_train = x_train.pop('SalePrice')

# Scale to [0, 1]
max_ = x_train.max(axis=0)
min_ = x_train.min(axis=0)
x_train_scaled = (x_train - min_) / (max_ - min_)
x_train = x_train_scaled
x_train.head()
# -

x_train.shape

# ### Sequential Layers

# +
from tensorflow import keras
from tensorflow.keras import layers

model = keras.Sequential([
    layers.Dense(512, activation='relu', input_shape=[73]),
    layers.Dropout(0.3),
    layers.BatchNormalization(),
    layers.Dense(1024, activation='relu'),
    layers.Dropout(0.3),
    layers.BatchNormalization(),
    layers.Dense(2048, activation='relu'),
    layers.Dropout(0.3),
    layers.BatchNormalization(),
    # NOTE(review): this second Dropout/BatchNormalization pair (with no
    # Dense layer between) looks like an accidental duplication -- kept
    # as-is so the architecture matches any already-saved model weights.
    layers.Dropout(0.3),
    layers.BatchNormalization(),
    layers.Dense(4096, activation='relu'),
    layers.Dropout(0.3),
    layers.BatchNormalization(),
    layers.Dense(8192, activation='relu'),
    layers.Dropout(0.3),
    layers.BatchNormalization(),
    layers.Dense(1)
])
# -

from tensorflow.keras import callbacks

# Stop training when `val_loss` (the default monitor) stops improving.
early_stopping = callbacks.EarlyStopping(
    min_delta=0.001,
    patience=20,
    restore_best_weights=True
)

model.compile(
    optimizer='adam',
    loss='mae'
)

history = model.fit(
    x_train, y_train,
    validation_split=0.2,
    batch_size=256,
    epochs=100,
    callbacks=[early_stopping],
    verbose=0
)

history_frame = pd.DataFrame(history.history)
history_frame.loc[:, ['loss', 'val_loss']].plot();
print("Minimum validation loss: {}".format(history_frame['val_loss'].min()))

# ### Save Model in predictHousePrices_deeplearning_model.h5 file

import os.path

# The existence check and the save call must refer to the SAME file: the
# original checked '...deepLearning...' (capital L) but saved
# '...deeplearning...', so the guard never detected the saved model.
MODEL_PATH = (
    '/home/naseem/My Project/predictHousePrices-DeepLearning-python/'
    'predictHousePrices_deeplearning_model.h5'
)
if os.path.isfile(MODEL_PATH) is False:
    model.save(MODEL_PATH)

# ### Load saved Deep-Learning Model

import tensorflow as tf

# load saved model
model = tf.keras.models.load_model(MODEL_PATH)

model.summary()
jupyter_notebook.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Linear Models # <div class="alert alert-success"> # Linear (regression) modelling is a method of predicting the value of an output value as a linear combination of weighted input values. # </div> # ## Linear Models - Overview # # In the simplest case, we are trying to fit a line. In this case, our model is of the form: # # $$ y = ax + b $$ # # In this equation above, we are trying to predict some data variable $y$, from some other data variable $x$, where $a$ and $b$ are parameters we need to figure out (learn), by fitting the model, and reflect the slope, and y-intercept, of the model (line) respectively. # # We need some procedure to go about finding $a$ and $b$. We will use OLS to do so - the values of $a$ and $b$ we want are those that fulfill the OLS solution - meaning the values that lead to the smallest distance between the predictions of the model, and our data. # # Note that to train this kind of model, you need data in which you know both $x$ and $y$ already, to train your model. This kind of model only applies to predicting values you have at least some information about. # # Having training data makes it a 'supervised' model, meaning that learning the prediction model is 'supervised' or guided by knowing some 'answers' to our prediction problem already, and the goal is to use this data to learn a model that can generalize to new data. # # This approach can also be generalized, including, for example, more features used to predict our output of interest. # # Therefore, we will rewrite our model, in the general form, as: # # $$ y = a_0 + a_1 x_1 + a_2 x_2 + ... 
+ a_n x_n + \epsilon $$
#
# In the equation above $a_0$ is the intercept (the same as $b$ from above), and $a_1$ to $a_n$ are $n$ parameters that we are trying to learn, as weights for data features $x_1$ to $x_n$. Our output variable (what we are trying to predict) is still $y$, and we've introduced $\epsilon$, which is the error, which basically captures unexplained variance.
#
# ### Linear Models Practice
#
# In the following, we will generate some data, with two features, that we'll call `d1` and `d2`.
#
# We will generate this data such that `d1` and `d2` are correlated. This means that they share some information, and so we can use this property to try and predict values of `d2` from `d1`, using a linear model to do so.
#
# This model, using the second notation from above, will be of the form:
#
# $$ d2 = a_0 + a_1 * d1 $$
#
# Where `a_0` and `a_1` are parameters of the model that we are trying to learn, reflecting the intercept and slope, respectively.

# +
# Imports
# %matplotlib inline

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# Statsmodels & patsy
import patsy
import statsmodels.api as sm
# -

# First, let's generate some example data to use.

# +
# Set random seed, for consistency simulating data
np.random.seed(21)

# Settings
corr = 0.75
covs = [[1, corr], [corr, 1]]
means = [0, 0]

# Generate the data
data = np.random.multivariate_normal(means, covs, 1000)
# -

# Check out the data we generated
plt.scatter(data[:, 0], data[:, 1], alpha=0.5);

# Put the data into a DataFrame
df = pd.DataFrame(data, columns=['d1', 'd2'])

# Have a quick look at the simulated data
df.head()

# Check the correlation between d1 & d2 (that it matches what was synthesized)
df.corr()

# ## Linear Models with Statsmodels & Patsy

# <div class="alert alert-success">
# Statsmodels is a module for statistical analyses in Python. Patsy is a useful package for describing and applying statistical models.
# </div> # # <div class="alert alert-info"> # The official documentation for # <a href="http://www.statsmodels.org/stable/index.html" class="alert-link">statsmodels</a> # and # <a href="https://patsy.readthedocs.io/en/latest/" class="alert-link">patsy</a>. # </div> # Patsy gives us an easy way to construct 'design matrices'. # # For our purpose, 'design matrices' are just organized matrices of our predictor and output variables. # # 'Predictors' refers to the features we want to predict from, and 'outputs' refers to the variables we want to predict. # Use patsy to organize our data into predictor and outputs # The string `d2 ~ d1` indicates to predict d2 as a function of d1 outcome, predictors = patsy.dmatrices('d2 ~ d1', df) # If you check the type of 'outcome' and 'predictors', you will find they are custom patsy objects, of type 'DesignMatrix'. # # If you print them out, you will see that they resemble pandas Series or DataFrames. # # You can think of them as customized dataframe-like objects for the specific purpose of being organized into matrices to be used for modeling. # # Next, we can use `statsmodels` to initialize an OLS model object. # Initialize an OLS model object # Note: This initializes the model, and provides the data # but does not actually compute the model yet model = sm.OLS(outcome, predictors) # Note that `statsmodels`, just like `sklearn` that we will encounter a bit later, uses an object-oriented approach. # # In this approach you initialize objects that store the data and methods together. This allows for an organized approach to storing and check data and parameters, and applying computations to them, such as fitting models. Outputs parameters of the model are also stored in the object, which can then also be used to make predictions. # Check the type of the model object we just created. 
# You can also explore, with tab-complete, what is available from this object
type(model)

# Finally, fit the model
results = model.fit()

# Check out the results
print(results.summary())

# ### Interpreting Outputs
#
# `statsmodels` gives us a lot of information!
#
# The top section is largely meta-data: it includes things like the model type, and time and date of us running it.
#
# It also includes the `R-squared`, which is an overall summary of the amount of variance the model is able to capture. R-squared values are bound between 0-1. An r-squared of ~0.5, that we have here, is quite a high value, suggesting a good model fit.
#
# The middle section is the actual model results.
#
# Each row reflects a parameter, and gives us its value (`coef`), the error (`std err`), the results of a statistical test regarding whether this parameter is a significant predictor of the output variable (`t`, with associated p-value as `P>|t|`), and the confidence interval of the parameters value (`[0.025` - `0.975]`).
#
# The last section includes some other tests that are run on the data. These can be used to check some properties of the input data, and to check assumptions of the model are met.

# ### Checking our Model
#
# In terms of the model itself, the most useful components are in the second row, in which the summary gives the parameter values, and p-values of our predictors, which in this case are 'Intercept', and 'd1'.
#
# From the results above, we can grab the values of the parameters, and obtain the following model:
#
# $$ d2 = -0.0116 + 0.7396 * d1 $$
#
# However, we should also keep in mind whether each parameter is significant. To check this, let's look at the statistical test that is reported that checks whether the parameter value is significant (as in, significantly different from zero). Using an alpha value of 0.05, in this case, the 'd1' parameter value is significant, but the 'Intercept' value is not. Since the parameter value for 'Intercept' is not significantly different from zero, we can decide not to include it in our final model.
#
# We therefore finish with the model:
# $$ d2 = 0.7396 * d1 $$
#
# With this model, it is promising that our value of $a_1$, of 0.7396, is very close to the correlation value of the data points, which we set at 0.75! This suggests our model is working well!

# ### Visualizing our Model
#
# Next, we can visualize our model, with our data.

# +
# Plot the original data (as before)
plt.scatter(df['d1'], df['d2'], alpha=0.3, label='Data');

# Generate and plot the model fit line
xs = np.arange(df['d1'].min(), df['d1'].max())
# Use the fitted slope (0.7396, from the OLS summary above) so the plotted
# line matches the reported model -- the previous hard-coded 0.7185 did not.
ys = 0.7396 * xs

plt.plot(xs, ys, '--k', linewidth=4, label='Model')

plt.xlabel('D1')
plt.ylabel('D2')
plt.legend();
# -

# ### Using multiple predictors
#
# The model above used only one predictor, fitting a straight line to the data. This is similar to previous examples we've seen and tried for fitting lines.
#
# We can also fit more than 1 predictor variable, and that is where the power and benefits of using `patsy` and `statsmodels` really comes through. We can use these tools to specify any models we want, including multiple predictors with different kinds of interactions between predictors, and these functions take care of fitting these models.
#
# To briefly explore this, let's now add a new variable to our dataframe, and fit an OLS model with two predictors.
#
# In this case, we will fit a model of the form:
#
# $$ d1 = a_0 + a_1 * d2 + a_2 * d3 $$

# Add a new column of data to df
df['d3'] = pd.Series(np.random.randn(1000), index=df.index)
df.head()

# Predict d1 from d2 and d3
outcome, predictors = patsy.dmatrices('d1 ~ d2 + d3', df)
model = sm.OLS(outcome, predictors)
results = model.fit()

# Check the model fit summary
print(results.summary())

# Note that in this case, we simulated the `d3` column with no relation to the `d1` values we were trying to predict, so the `d3` predictor isn't significant, and overall this bigger model doesn't explain any more variance of the data (the r-squared is no better).
#
#
# ### Conclusion
#
# `statsmodels` offers a powerful and general approach to fitting statistical models to data, investigating properties of these model fits, and comparing between models. You can further investigate how to include other features, such as interactions between input variables, and so on.

# ## Linear Regression with sklearn
#
# As we've already seen with the `OLS` tutorial, there are multiple ways to apply the same underlying computations.
#
# Another popular module that can be used for fitting models to data is `sklearn`.
#
# Here, for a quick demonstration and comparison, we will fit the `sklearn` implementation of Linear Regression models to our same data. The underlying computations are approximately the same, but as we can see, the API for using `sklearn` and the exact results are different.

# <div class="alert alert-info">
# Linear regression in
# <a href="http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html" class="alert-link">sklearn</a>.
# </div>

# Linear Models with sklearn
from sklearn import linear_model

# Convert data into arrays for easier use with sklearn
d1 = np.reshape(df.d1.values, [len(df.d1), 1])
d2 = np.reshape(df.d2.values, [len(df.d2), 1])
d3 = np.reshape(df.d3.values, [len(df.d3), 1])

# Initialize linear regression model
reg = linear_model.LinearRegression()

# Fit the linear regression model
reg.fit(d2, d1)  # d1 = a0 + a1*d2

# Check the results of this
# If you compare these to what we got with statsmodels above, they are indeed the same
print('The intercept value is: \t{:1.4f}'.format(reg.intercept_[0]))
print('The coefficient value is: \t{:1.4f}'.format(reg.coef_[0][0]))

# ### Using multiple predictors (in sklearn)

# Initialize and fit linear model
# d1 = a0 + a1*d2 + a2*d3
reg = linear_model.LinearRegression()
reg.fit(np.hstack([d2, d3]), d1)

# Check the results of this
# If you compare these to what we got with statsmodels above, they are indeed the same
print('Intercept: \t {:+1.4f}'.format(reg.intercept_[0]))
print('d2 value:\t {:+1.4f}'.format(reg.coef_[0][0]))
# coef_[0][1] is the coefficient of the SECOND predictor (d3); it was
# previously mislabelled as 'd2 value'.
print('d3 value:\t {:+1.4f}'.format(reg.coef_[0][1]))

# ### Conclusion
#
# The pattern of results with `sklearn` is about the same as before, though we can see there is some small differences in estimation. In general, if you have data organized into Dataframes, then `statsmodels` does offer a more direct way to apply statistical models, but `sklearn` does also offer a lot of useful functionality for model fitting & analysis.
14-LinearModels.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

from IPython.lib.deepreload import reload
# %load_ext autoreload
# %autoreload 2

# +
import re
import operator
from pathlib import Path

import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import pydicom

from pymedphys.dicom import depth_dose, profile

# +
# Beam / algorithm configuration used to locate the exported DICOM files.
algorithm = 'MC'
energy = '06MV'
field_size = '02x02'
dose_to = 'Dm'

filename = algorithm + energy + field_size + dose_to
filename

# +
if algorithm == 'MC':
    algorithm_full = 'Monte Carlo'
if algorithm == 'CC':
    algorithm_full = 'Collapsed Cone'

algorithm_full

# +
if dose_to == 'Dm':
    dose_to_what = 'Dose to Medium'
if dose_to == 'Dw':
    dose_to_what = 'Dose to Water'

dose_to_what

# +
ROOT_DIR = Path(r"S:\Physics\DoseCHECK\Commissioning\Inhomogeneities Results\Heterogeneous 1\DICOM Comparisons")
MONACO_DICOM_DIR = (ROOT_DIR.joinpath(r"DICOM Exports\Monaco")).joinpath(filename)
DOSECHECK_DICOM_DIR = (ROOT_DIR.joinpath(r"DICOM Exports\DoseCHECK")).joinpath(filename)
RESULTS = ROOT_DIR.joinpath(r"Results")
# -

monaco_dose = pydicom.read_file(str(MONACO_DICOM_DIR.joinpath('dose.dcm')), force=True)
dosecheck_dose = pydicom.read_file(str(DOSECHECK_DICOM_DIR.joinpath('dose.dcm')), force=True)
dicom_plan = pydicom.read_file(str(MONACO_DICOM_DIR.joinpath('plan.dcm')), force=True)

plane_dimensions = np.arange(-300,300,1)
depth_dimensions = np.arange(0,600,1)

# +
# Profiles at each material layer (depths in mm); division by 10 converts
# to Gy / 100 MU.
monaco_inplane_profile_fat = profile(plane_dimensions,25,'inplane',monaco_dose,dicom_plan) / 10
monaco_inplane_profile_lung = profile(plane_dimensions,75,'inplane',monaco_dose,dicom_plan) / 10
monaco_inplane_profile_bone = profile(plane_dimensions,125,'inplane',monaco_dose,dicom_plan) / 10
monaco_inplane_profile_air = profile(plane_dimensions,175,'inplane',monaco_dose,dicom_plan) / 10
monaco_inplane_profile_water = profile(plane_dimensions,225,'inplane',monaco_dose,dicom_plan) / 10

dosecheck_inplane_profile_fat = profile(plane_dimensions,25,'inplane',dosecheck_dose,dicom_plan) / 10
dosecheck_inplane_profile_lung = profile(plane_dimensions,75,'inplane',dosecheck_dose,dicom_plan) / 10
dosecheck_inplane_profile_bone = profile(plane_dimensions,125,'inplane',dosecheck_dose,dicom_plan) / 10
dosecheck_inplane_profile_air = profile(plane_dimensions,175,'inplane',dosecheck_dose,dicom_plan) / 10
dosecheck_inplane_profile_water = profile(plane_dimensions,225,'inplane',dosecheck_dose,dicom_plan) / 10

monaco_crossplane_profile_fat = profile(plane_dimensions,25,'crossplane',monaco_dose,dicom_plan) / 10
monaco_crossplane_profile_lung = profile(plane_dimensions,75,'crossplane',monaco_dose,dicom_plan) / 10
monaco_crossplane_profile_bone = profile(plane_dimensions,125,'crossplane',monaco_dose,dicom_plan) / 10
monaco_crossplane_profile_air = profile(plane_dimensions,175,'crossplane',monaco_dose,dicom_plan) / 10
monaco_crossplane_profile_water = profile(plane_dimensions,225,'crossplane',monaco_dose,dicom_plan) / 10

dosecheck_crossplane_profile_fat = profile(plane_dimensions,25,'crossplane',dosecheck_dose,dicom_plan) / 10
dosecheck_crossplane_profile_lung = profile(plane_dimensions,75,'crossplane',dosecheck_dose,dicom_plan) / 10
dosecheck_crossplane_profile_bone = profile(plane_dimensions,125,'crossplane',dosecheck_dose,dicom_plan) / 10
dosecheck_crossplane_profile_air = profile(plane_dimensions,175,'crossplane',dosecheck_dose,dicom_plan) / 10
dosecheck_crossplane_profile_water = profile(plane_dimensions,225,'crossplane',dosecheck_dose,dicom_plan) / 10
# -

monaco_DD = depth_dose(depth_dimensions,monaco_dose,dicom_plan) / 10
dosecheck_DD = depth_dose(depth_dimensions,dosecheck_dose,dicom_plan) / 10

# +
# Percentage differences, with Monaco taken as the reference.
DD_diff = 100 * (monaco_DD - dosecheck_DD) / monaco_DD

diff_inplane_fat = 100 * (monaco_inplane_profile_fat - dosecheck_inplane_profile_fat) / monaco_inplane_profile_fat
diff_inplane_lung = 100 * (monaco_inplane_profile_lung - dosecheck_inplane_profile_lung) / monaco_inplane_profile_lung
diff_inplane_bone = 100 * (monaco_inplane_profile_bone - dosecheck_inplane_profile_bone) / monaco_inplane_profile_bone
diff_inplane_air = 100 * (monaco_inplane_profile_air - dosecheck_inplane_profile_air) / monaco_inplane_profile_air
diff_inplane_water = 100 * (monaco_inplane_profile_water - dosecheck_inplane_profile_water) / monaco_inplane_profile_water

diff_crossplane_fat = 100 * (monaco_crossplane_profile_fat - dosecheck_crossplane_profile_fat) / monaco_crossplane_profile_fat
diff_crossplane_lung = 100 * (monaco_crossplane_profile_lung - dosecheck_crossplane_profile_lung) / monaco_crossplane_profile_lung
diff_crossplane_bone = 100 * (monaco_crossplane_profile_bone - dosecheck_crossplane_profile_bone) / monaco_crossplane_profile_bone
diff_crossplane_air = 100 * (monaco_crossplane_profile_air - dosecheck_crossplane_profile_air) / monaco_crossplane_profile_air
diff_crossplane_water = 100 * (monaco_crossplane_profile_water - dosecheck_crossplane_profile_water) / monaco_crossplane_profile_water

# +
plt.figure(1)

plt.subplot(321)
plt.plot(plane_dimensions,monaco_inplane_profile_fat,label='Monaco')
plt.plot(plane_dimensions,dosecheck_inplane_profile_fat,label='DoseCHECK')
plt.ylabel('Dose (Gy / 100 MU)')
plt.xlabel('Plane Position (mm)')
plt.title('Inplane Profiles in Fat Layer (depth = 25 mm)')
plt.legend(loc='lower left')

plt.subplot(322)
plt.plot(plane_dimensions,monaco_inplane_profile_lung,label='Monaco')
plt.plot(plane_dimensions,dosecheck_inplane_profile_lung,label='DoseCHECK')
plt.ylabel('Dose (Gy / 100 MU)')
plt.xlabel('Plane Position (mm)')
# The lung profile is sampled at 75 mm (see profile(...,75,...) above);
# the title previously said 25 mm.
plt.title('Inplane Profiles in Lung Layer (depth = 75 mm)')
plt.legend(loc='lower left')

plt.subplot(323)
plt.plot(plane_dimensions,monaco_inplane_profile_bone,label='Monaco')
plt.plot(plane_dimensions,dosecheck_inplane_profile_bone,label='DoseCHECK')
plt.ylabel('Dose (Gy / 100 MU)')
plt.xlabel('Plane Position (mm)')
plt.title('Inplane Profiles in Bone Layer (depth = 125 mm)')
plt.legend(loc='lower left')

plt.subplot(324)
plt.plot(plane_dimensions,monaco_inplane_profile_air,label='Monaco')
plt.plot(plane_dimensions,dosecheck_inplane_profile_air,label='DoseCHECK')
plt.ylabel('Dose (Gy / 100 MU)')
plt.xlabel('Plane Position (mm)')
plt.title('Inplane Profiles in Air Layer (depth = 175 mm)')
plt.legend(loc='lower left')

plt.subplot(325)
plt.plot(plane_dimensions,monaco_inplane_profile_water,label='Monaco')
plt.plot(plane_dimensions,dosecheck_inplane_profile_water,label='DoseCHECK')
plt.ylabel('Dose (Gy / 100 MU)')
plt.xlabel('Plane Position (mm)')
plt.title('Inplane Profiles in Water (depth = 225 mm)')
plt.legend(loc='lower left')

plt.subplots_adjust(top=4, bottom=0.5, left=0, right=2, hspace=0.25, wspace=0.25)

# 'Absolute' was previously misspelled 'Abdsolute'.
plt.suptitle(('Heterogeneous Phantom 1, ' + energy + ' ' + field_size + ' cm field, Absolute Doses for Monaco ' + algorithm_full + ' (' + dose_to_what + ') and DoseCHECK'),fontsize="x-large",x=1,y=4.2)

plt.savefig(RESULTS.joinpath(filename + f'_1.png'),bbox_inches='tight')

plt.figure(2)

plt.subplot(321)
plt.plot(plane_dimensions,monaco_crossplane_profile_fat,label='Monaco')
plt.plot(plane_dimensions,dosecheck_crossplane_profile_fat,label='DoseCHECK')
plt.ylabel('Dose (Gy / 100 MU)')
plt.xlabel('Plane Position (mm)')
plt.title('Crossplane Profiles in Fat Layer (depth = 25 mm)')
plt.legend(loc='lower left')

plt.subplot(322)
plt.plot(plane_dimensions,monaco_crossplane_profile_lung,label='Monaco')
plt.plot(plane_dimensions,dosecheck_crossplane_profile_lung,label='DoseCHECK')
plt.ylabel('Dose (Gy / 100 MU)')
plt.xlabel('Plane Position (mm)')
plt.title('Crossplane Profiles in Lung Layer (depth = 75 mm)')
plt.legend(loc='lower left')

plt.subplot(323)
plt.plot(plane_dimensions,monaco_crossplane_profile_bone,label='Monaco')
plt.plot(plane_dimensions,dosecheck_crossplane_profile_bone,label='DoseCHECK')
plt.ylabel('Dose (Gy / 100 MU)')
plt.xlabel('Plane Position (mm)')
plt.title('Crossplane Profiles in Bone Layer (depth = 125 mm)')
plt.legend(loc='lower left')

plt.subplot(324)
plt.plot(plane_dimensions,monaco_crossplane_profile_air,label='Monaco')
plt.plot(plane_dimensions,dosecheck_crossplane_profile_air,label='DoseCHECK')
plt.ylabel('Dose (Gy / 100 MU)')
plt.xlabel('Plane Position (mm)')
plt.title('Crossplane Profiles in Air Layer (depth = 175 mm)')
plt.legend(loc='lower left')

plt.subplot(325)
plt.plot(plane_dimensions,monaco_crossplane_profile_water,label='Monaco')
plt.plot(plane_dimensions,dosecheck_crossplane_profile_water,label='DoseCHECK')
plt.ylabel('Dose (Gy / 100 MU)')
plt.xlabel('Plane Position (mm)')
plt.title('Crossplane Profiles in Water (depth = 225 mm)')
plt.legend(loc='lower left')

plt.subplots_adjust(top=4, bottom=0.5, left=0, right=2, hspace=0.25, wspace=0.25)

plt.savefig(RESULTS.joinpath(filename + f'_2.png'),bbox_inches='tight')

plt.figure(3)

plt.subplot(111)
plt.plot(depth_dimensions,monaco_DD,label='Monaco')
plt.plot(depth_dimensions,dosecheck_DD,label='DoseCHECK')
plt.ylabel('Dose (Gy / 100 MU)')
plt.xlabel('Depth (mm)')
plt.title('Monaco and DoseCHECK Depth Doses')
plt.legend(loc='lower left')

plt.subplots_adjust(top=2, bottom=0.5, left=0, right=2, hspace=0.25, wspace=0.25)

plt.savefig(RESULTS.joinpath(filename + f'_3.png'),bbox_inches='tight')

plt.figure(4)

plt.suptitle('Heterogeneous Phantom 1, ' + energy + ' ' + field_size + ' cm field, % Dose Difference for Monaco ' + algorithm_full + ' (' + dose_to_what + ') and DoseCHECK',fontsize="x-large",x=1,y=2.2)

plt.subplot(131)
plt.plot(depth_dimensions,DD_diff,label='% Difference')
# Label corrected to match the computation above: 100 * (Monaco - DoseCHECK) / Monaco
plt.ylabel('100 * (Monaco - DoseCHECK) / Monaco')
plt.xlabel('Depth (mm)')
plt.title('% Difference, depth doses')
plt.legend(loc='lower left')
plt.ylim([-50, 50])

plt.subplot(132)
plt.plot(plane_dimensions,diff_inplane_fat,label='% Difference, Fat')
plt.plot(plane_dimensions,diff_inplane_lung,label='% Difference, Lung')
plt.plot(plane_dimensions,diff_inplane_bone,label='% Difference, Bone')
plt.plot(plane_dimensions,diff_inplane_air,label='% Difference, Air')
plt.plot(plane_dimensions,diff_inplane_water,label='% Difference, Water')
plt.ylabel('100 * (Monaco - DoseCHECK) / Monaco')
plt.xlabel('Plane Position (mm)')
plt.title('% Difference, inplane profiles')
plt.legend(loc='lower left')
plt.ylim([-50, 50])

plt.subplot(133)
plt.plot(plane_dimensions,diff_crossplane_fat,label='% Difference, Fat')
plt.plot(plane_dimensions,diff_crossplane_lung,label='% Difference, Lung')
plt.plot(plane_dimensions,diff_crossplane_bone,label='% Difference, Bone')
plt.plot(plane_dimensions,diff_crossplane_air,label='% Difference, Air')
plt.plot(plane_dimensions,diff_crossplane_water,label='% Difference, Water')
plt.ylabel('100 * (Monaco - DoseCHECK) / Monaco')
plt.xlabel('Plane Position (mm)')
plt.title('% Difference, crossplane profiles')
plt.legend(loc='lower left')
plt.ylim([-50, 50])

plt.subplots_adjust(top=2, bottom=0.5, left=0, right=2, hspace=0.25, wspace=0.33)

plt.savefig(RESULTS.joinpath(filename + f'_4.png'),bbox_inches='tight')

plt.show()
# -
examples/protyping/film-compare/Hetero1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # Sparameters SiPANN vs Lumerical FDTD
#
# Lets compare Lumerical 3D FDTD simulations with SiPANN models.
#
# The responses match quite well for different gaps and bend_radius. Therefore I feel comfortable using the compact models from SiPANN
#
#
# ## Coupler ring

# +
import numpy as np
import matplotlib.pyplot as plt

import gdslib as gl
import gdslib.simphony.components as gc
import gdslib.simphony as gs
import gdsfactory as gf
from gdslib.simphony.components.coupler_ring_fdtd import coupler_ring_fdtd

c = gf.components.coupler_ring()
c
# -

def compare_coupler_ring_sipann_fdtd(length_x, radius, gap):
    """Plot the FDTD and SiPANN coupler-ring S-parameters side by side."""
    m1 = coupler_ring_fdtd(length_x=length_x, radius=radius, gap=gap)
    m2 = gc.coupler_ring(length_x=length_x, radius=radius, gap=gap)
    wavelengths = np.linspace(1450, 1650, 1024) * 1e-9
    gs.plot_model(m1, wavelengths=wavelengths)
    gs.plot_model(m2, wavelengths=wavelengths)

length_x = 1
radius = 10
gap = 0.2
compare_coupler_ring_sipann_fdtd(length_x, radius, gap)
plt.ylim([-20, 0])

length_x = 3
radius = 10
gap = 0.2
compare_coupler_ring_sipann_fdtd(length_x, radius, gap)
plt.ylim([-20, 0])

length_x = 1
radius = 5
gap = 0.15
compare_coupler_ring_sipann_fdtd(length_x, radius, gap)

# +
length_x = 1
# Assign to `radius` directly: the original set `bend_radius = 5` but then
# passed `radius=radius`, silently reusing the stale value 10.
radius = 5
gap = 0.15

m1 = coupler_ring_fdtd(length_x=length_x, radius=radius, gap=gap)
m2 = gc.coupler_ring(length_x=length_x, radius=radius, gap=gap)

wavelengths = np.linspace(1450, 1650, 1024) * 1e-9
gs.plot_model(m1, wavelengths=wavelengths)
gs.plot_model(m2, wavelengths=wavelengths)

# +
length_x = 4
radius = 5  # same stale-`radius` fix as the previous cell
gap = 0.15

m1 = coupler_ring_fdtd(length_x=length_x, radius=radius, gap=gap)
m2 = gc.coupler_ring(length_x=length_x, radius=radius, gap=gap)

wavelengths = np.linspace(1450, 1650, 1024) * 1e-9
gs.plot_model(m1, wavelengths=wavelengths)
gs.plot_model(m2, wavelengths=wavelengths)
# -

# ## Directional Coupler

# +
import numpy as np
import gdsfactory as gf

import gdslib as gl
import gdslib.simphony as gs
from gdslib.simphony.components.coupler_fdtd import coupler_fdtd

coupler_layout = gf.components.coupler()
coupler_layout.plot()

# +
length = 20
gap = 0.224

# Build the layout with the requested length/gap and pass THAT layout to
# the FDTD model.  The original assigned it to a misspelled
# `coupler_laytout` and then simulated the earlier default layout.
coupler_layout = gf.components.coupler(length=length, gap=gap)

m1 = coupler_fdtd(c=coupler_layout)
m2 = gc.coupler(length=length, gap=gap)

wavelengths = np.linspace(1450, 1650, 1024) * 1e-9
gs.plot_model(m1, wavelengths=wavelengths)
gs.plot_model(m2, wavelengths=wavelengths)
# -

gs.plot_model(m1, wavelengths=wavelengths)
gs.plot_model(m2, wavelengths=wavelengths)
plt.ylim([-10, 0])

# +
length = 40
gap = 0.224
c = gf.components.coupler(length=length, gap=gap)

m1 = coupler_fdtd(c=c)
m2 = gc.coupler(length=length, gap=gap)

wavelengths = np.linspace(1450, 1650, 1024) * 1e-9
gs.plot_model(m1, wavelengths=wavelengths)
gs.plot_model(m2, wavelengths=wavelengths)
# -

gs.plot_model(m1, wavelengths=wavelengths)
gs.plot_model(m2, wavelengths=wavelengths)
plt.ylim([-10, 0])
docs/notebooks/03_components_SIPANN_vs_fdtd.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # "Title" # > "subtitle." # # - toc: true # - branch: master # - badges: false # - comments: false # - categories: [probability] # - hide: true # - search_exclude: false # - image: images/blog_posts/distributed_training.png # ## A Heading # # Start with second level headings, as above. # # Links [look like this](https://github.com/fastai/fastpages). # # And images like this: <img src="local_images/2019-02-25-distributed-joint-training/net_schematic.png" width="250" /> # # or like this: `![](my_icons/fastai_logo.png)` # # # Highlighted text `looks like this.` # # ## Collapsing Cells # + #hide_input # This hides the input but not the output of a cell # + #collapse-hide # This cell is collapsable and collapsed by default # + #collapse-show # This cell is collapsable and shown by default # - # ## Tweetcards # # Typing `> twitter: https://twitter.com/jakevdp/status/1204765621767901185?s=20` will render this: # # > twitter: https://twitter.com/jakevdp/status/1204765621767901185?s=20 # ## Youtube Videos # # Typing `> youtube: https://youtu.be/XfoYk_Z5AkI` will render this: # # # > youtube: https://youtu.be/XfoYk_Z5AkI # ## Boxes / Callouts # # Typing `> Warning: There will be no second warning!` will render this: # # # > Warning: There will be no second warning! # # # # Typing `> Important: Pay attention! It's important.` will render this: # # > Important: Pay attention! It's important. # # # # Typing `> Tip: This is my tip.` will render this: # # > Tip: This is my tip. # # # # Typing `> Note: Take note of this.` will render this: # # > Note: Take note of this. 
# # # # Typing `> Note: A doc link to [an example website: fast.ai](https://www.fast.ai/) should also work fine.` will render in the docs: # # > Note: A doc link to [an example website: fast.ai](https://www.fast.ai/) should also work fine. # ## Footnotes # # For example, here is a footnote {% fn 1 %}. # # And another {% fn 2 %} # # {{ 'This is the footnote.' | fndetail: 1 }} # {{ 'This is the other footnote. You can even have a [link](www.github.com)!' | fndetail: 2 }}
_notebooks/2017-02-27-template.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Predicting-Mutation-Effects # language: python # name: predicting-mutation-effects # --- # + [markdown] tags=[] # # PREDATOR: **PRED**icting the imp**A**ct of cancer somatic mu**T**ations on pr**O**tein-protein inte**R**actions # # - # ## OV # # <b><i class="fa fa-folder-o" area-hidden="true" style="color:#1976D2"> </i>&nbsp; File Location</b><br> # <p style="background:#F5F5F5; text-indent: 1em;"> # <code style="background:#F5F5F5; color:#404040; font-weight:bold; font-size:12px">C:\Users\ibrah\Documents\GitHub\Predicting-Mutation-Effects\src</code> # </p> # # <b><i class="far fa-file" area-hidden="true" style="color:#1976D2"> </i>&nbsp; File Name</b> # <p style="background:#F5F5F5; text-indent: 1em;"> # <code style="background:#F5F5F5; color:#404040; font-weight:bold; font-size:12px">PredatorStudy_OV.ipynb</code> # </p> # # <b><i class="far fa-calendar-alt" area-hidden="true" style="color:#1976D2"> </i>&nbsp; Last Edited</b> # <p style="background:#F5F5F5; text-indent: 1em;"> # <code style="background:#F5F5F5; color:#404040; font-weight:bold; font-size:12px">October 25th, 2021</code> # </p> # # <div class="alert alert-block" style="background-color: #F5F5F5; border: 1px solid; padding: 10px; border-color: #E0E0E0"> # <b><i class="fa fa-compass" aria-hidden="true" style="color:#404040"></i></b>&nbsp; <b style="color: #404040">Purpose </b> <br> # <div> # # - [x] Apply on Cancer Datasets # > OV # # * Target (Cancer) data: # - *OV_Interface.txt* # + # Common imports import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns import os import os.path as op import sys import random from pathlib import Path from pprint import pprint from IPython.display import display from tqdm.notebook import tqdm from helpers.helpers_predator.displayers import ( display_label_counts, 
display_labels, visualize_label_counts, display_data, ) from helpers.helpers_predator.visualizers import ( visualize_sampled_train_datasets_label_counts ) from helpers.helpers_predator.common import load_predator from helpers.helpers_predator.common import export_data # PATHS OV_PATH = Path(r"../../My-ELASPIC-Web-API/Elaspic_Results/Merged_Results/OV_Interface_2021-09-28.txt") PREDATOR_MODEL_PATH = Path(r"PredatorModels/PredatorModel_2021-10-24/04f37897/predator.pkl") PREDICTIONS_DATASETS_FOLDER_PATH = "../data/predictions_datasets/" # Reflect changes in the modules immediately. # %load_ext autoreload # %autoreload 2 # - # ## Load the Predator predator = load_predator(PREDATOR_MODEL_PATH) # ## Prediction TCGA on Cancer Dataset: OV predator.initialize_target_data_materials( tcga_code_path_pairs=[('ov', OV_PATH)] ) # + [markdown] tags=[] # #### TCGA Cancer Datasets # - # ##### OV display_data(predator.data_materials["ov"]) # + [markdown] tags=[] # #### Preprocessed TCGA Cancer Datasets # - # ##### OV display_data(predator.data_materials["target_ov_data"]) # + [markdown] tags=[] # ### Voting mode: `hard` # + tags=[] predator.predict(voting='hard') # - # Predictions for first 10 experiment. predator.predictions["ov"][:3] predator.predictions.plot_predictions_distributions("ov") # #### Predictions Post Processing # Post processing of predictions involves following steps: # ##### 1. Merging Predictions with SNV Data # The prediction column is merged with SNV data for each experiment. # # $\text{For each experiment } n: $ # # $$ # \textit{(Prediction Merged Data)}_n = \underbrace{[\textit{Predictions}_n]}_\text{0, 1 or "NoVote"} + # \underbrace{[\textit{Protein }] [\textit{Mutation }] [\textit{Interactor }]}_\text{Cancer Data Triplets} + \underbrace{[\textit{Features }] }_\text{Elaspic} # $$ # # + [markdown] tags=[] # ##### 2. Convert to 1-isomer: `Interactor_UniProt_ID` # - # $\textit{Interactor_UniProt_ID}$ column contains isomer proteins. 
Here, we convert them into primary isoform representation (i.e. without dashes). # | Interactor_UniProt_ID | # -------------- # | P38936 | # | P16473 | # | P16473-2 | # | P19793 | # ##### 3. Dropping Invalid Predictions # Entries which predicted as both `Decreasing` and `Increasing+NoEff` are dropped. # Due to having different features for the same $\textit{(protein, mutation, interactor)}$ triplet from ELASPIC, the triplet $\textit{(protein, mutation, interactor)}$ may be classified both 0 and 1. We drop such instances. # + tags=[] predator.predictions_post_process() # - display_data(predator.predictions["ov_predicted_datasets"][0]) predator.predictions.plot_distribution_valid_vs_invalid("ov") predator.predictions.plot_num_finalized_predictions("ov") # + tags=[] predator.prepare_ensemble_prediction_data() # - display_data(predator.predictions["ov_ensemble_prediction_data"]) display_data(predator.data_materials["ov"]) display_data(predator.data_materials["Xs_ov"][0]) predator.predictions.plot_ensemble_prediction_distribution("ov") ov_prediction_results_hard = predator.predictions["ov_prediction_results"] display_data(ov_prediction_results_hard) ov_ensemble_prediction_data_hard = predator.predictions["ov_ensemble_prediction_data"] ov_prediction_results_hard_no_votes_dropped = predator.predictions["ov_prediction_results_no_votes_dropped"] display_data(ov_prediction_results_hard_no_votes_dropped) visualize_label_counts(ov_prediction_results_hard_no_votes_dropped, 'Prediction') # + [markdown] tags=[] # ### Voting mode: `soft` # - predator.initialize_target_data_materials( tcga_code_path_pairs=[('ov', OV_PATH)] ) # + tags=[] predator.predict(voting='soft') # - predator.predictions.keys() # Predictions for first 10 experiment. predator.predictions["ov_prob"][:3] # #### Predictions Post Processing # Post processing of predictions involves following steps: # ##### 1. Merging Predictions with SNV Data # The prediction column is merged with SNV data for each experiment. 
# # $\text{For each experiment } n: $ # # $$ # \textit{(Prediction Merged Data)}_n = \underbrace{[\textit{Predictions}_n]}_\text{Probs Percentages} + # \underbrace{[\textit{Protein }] [\textit{Mutation }] [\textit{Interactor }]}_\text{Cancer Data Triplets} + \underbrace{[\textit{Features }] }_\text{Elaspic} # $$ # # + [markdown] tags=[] # ##### 2. Convert to 1-isomer: `Interactor_UniProt_ID` # - # $\textit{Interactor_UniProt_ID}$ column contains isomer proteins. Here, we convert them into primary isoform representation (i.e. without dashes). # | Interactor_UniProt_ID | # -------------- # | P38936 | # | P16473 | # | P16473-2 | # | P19793 | # ##### 3. Dropping Invalid Predictions # Entries whose predicted class-1 probability lies in both `Decreasing` and `Increasing+NoEff` are dropped. # Due to having different features for the same $\textit{(protein, mutation, interactor)}$ triplet from ELASPIC, the triplet $\textit{(protein, mutation, interactor)}$ may contain class-1 probability prediction of both lower than 0.50 and higher than 50. We drop such instances. 
# + tags=[] predator.predictions_post_process() # - predator.predictions.keys() display_data(predator.predictions["ov_predicted_probs_datasets"][0]) predator.predictions.plot_distribution_valid_vs_invalid("ov") predator.predictions.plot_num_finalized_predictions("ov") display_data(predator.predictions['ov_finalized_prediction_dataframes'][0]) # + tags=[] predator.prepare_ensemble_prediction_data() # - display_data(predator.predictions['ov_predictions_prob_data']) predator.predictions.plot_ensemble_prediction_distribution("ov") ov_prediction_results_soft = predator.predictions['ov_prediction_results'] display_data(ov_prediction_results_soft) ov_prediction_results_soft_no_votes_dropped = predator.predictions["ov_prediction_results_no_votes_dropped"] display_data(ov_prediction_results_soft_no_votes_dropped) visualize_label_counts(ov_prediction_results_soft_no_votes_dropped, 'Prediction') ov_ensemble_prediction_data_soft = predator.predictions["ov_ensemble_prediction_data"] ov_predictions_prob_data_soft = predator.predictions["ov_predictions_prob_data"] # ## Exporting Predictions # ov_prediction_results = ov_prediction_results_hard_no_votes_dropped ov_prediction_results = ov_prediction_results_soft_no_votes_dropped display_data(ov_prediction_results) predator.export_prediction( tcga="ov", data=ov_prediction_results, file_name="predictions", folder_path=PREDICTIONS_DATASETS_FOLDER_PATH, voting="soft", overwrite=False, file_extension='csv' ) # ---
src/trash/PredatorStudy_OV.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <img align="center" style="max-width: 1000px" src="banner.png"> # <img align="right" style="max-width: 200px; height: auto" src="hsg_logo.png"> # # ## Exercise 01 - "Python 101: Jupyter Notebooks and Python Basics" # # EMBA 58/59 - W8/3 - "AI Coding for Executives", University of St. Gallen # # The goal of the exercises below is to deepen your understanding of the Python language. In this Notebook, exercises are focused on our Python 101 Notebook. Feel free to use that Notebook as a reference, to search for help online, and to work together with others. If you have questions, please feel free to shout out! # # You don't have to work on these exercises in any specific order. # # In case you are wondering: yes, this Notebook is Halloween-themed. # + [markdown] id="-5omx0uRAhsA" # ## Basic Data Types # + [markdown] id="DQz2SFrMAhsA" # **1. Write a set of (or single) Python command(s) that compare the first and last character of a string.** # # > Write a set of Python commands that compare the first and last character of a string. In case both characters are the same print 'True' otherwise print 'False'. Test your statements one the strings `s1` and `s2` as defined below. # + id="jbjp549OAhsB" s1 = 'spooky' s2 = 'sweets' # *************************************************** # INSERT YOUR SOLUTION CODE HERE # *************************************************** # + [markdown] id="a-1ER6uJAhsB" # **2. Write a set of (or single) Python command(s) that determine the properties of a string.** # # > Write a set of Python commands that determine the number of characters of a string and whether the characters are all upper case. If the number of characters is between 5 and 12 characters and all upper case print 'True' otherwise print 'False'. 
Test your statements on the strings `s1`, `s2`, and `s3` as defined below. # + id="AjCBoIfjAhsB" s1 = 'Cat' s2 = 'RhinOzeRos' s3 = 'PYRRHULOXIA' # *************************************************** # INSERT YOUR SOLUTION CODE HERE # *************************************************** # + [markdown] id="eRfRyxG6AhsB" # **3. Write a set of (or single) Python command(s) that prints a very scary sentence.** (just imagine it's Halloween) # # > Write a set of Python commands that prints the scariest sentence that you could imagine. The sentence should include at least 3 of the following words 'tarantula', 'moonlight', 'supernatural', 'fog', 'owl', 'nightmare', or 'poltergeist'. # + id="ZQUYxlXYAhsB" # *************************************************** # INSERT YOUR SOLUTION CODE HERE # *************************************************** # + [markdown] id="ohwxx6eHAhsG" # ## Data Containers # + [markdown] id="WlFZF2pAAhsG" # # **1. Write a set of (or single) Python command(s) that determine the number of characters of a list element.** # # > Write a set of Python commands that determine the number of characters of the second element of a list. In case the element consists of more than 4 characters print 'True' otherwise print 'False'. Test your statements on lists `l1` and `l2` as defined below. # + id="tQeAxFjrAhsG" l1 = ['angel', 'nightmare', 'poltergeist'] l2 = ['darkness', 'king', 'fairy', 'owl'] # *************************************************** # INSERT YOUR SOLUTION CODE HERE # *************************************************** # + [markdown] id="V4DbzkdNAhsG" # **2. Write a set of (or single) Python command(s) that compares the elements of a list.** # # > Write a set of Python commands that compares the first and last elements of a list. In case both elements consist of the same characters print 'True' otherwise print 'False'. Test your statements on lists `l1` and `l2` as defined below. 
# + id="p522kJaIAhsG" l1 = ['BROOMSTICK', 'ghostly', 'mYstEriOUs', 'BROOMSTICK'] l2 = ['darkness', 'king', 'fairy', 'owl'] # *************************************************** # INSERT YOUR SOLUTION CODE HERE # *************************************************** # + [markdown] id="zivjjJ5wAhsG" # **3. Write a set of (or single) Python command(s) that removes elements of a list.** # # > Write a set of Python commands to print a specified list after removing the 0th, 2th, 3th, and 5th element. Test your statements on list `l` as defined below. # + id="nasnMpv0AhsG" l = ['BROOMSTICK', 'Happy', 'mYstEriOUs', 'BROOMSTICK', 'Halloween', 'Poltergeist'] # *************************************************** # INSERT YOUR SOLUTION CODE HERE # *************************************************** # + [markdown] id="jWmS6obEAhsL" # ## Fundamental Programming Structures # + [markdown] id="ray7ID4qAhsL" # **1. Write a Python loop that multiplies all elements of a list with 66.** # # > Write a Python loop that multiplies all elements of a list with `66`. The input list is given by `range(0, 10)` and its output should result in a list as denoted by: `[0, 66, 132, 198, 264, 330, 396, 462, 528, 594]`. # + id="iMayg5s5AhsL" # *************************************************** # INSERT YOUR SOLUTION CODE HERE # *************************************************** # + [markdown] id="tB019NXuAhsL" # **2. Write a Python loop that prints the numbers 1 to 10 backwards.** # # > Write a Python loop that prints the numbers 0 to 10 backwards. The output of the loop should print the following: `10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0`. # + id="1D7IJEGjAhsL" # *************************************************** # INSERT YOUR SOLUTION CODE HERE # *************************************************** # + [markdown] id="kZ3hue_mAhsM" # **3. 
Write a Python decision structure that prints all the numbers from 0 to 6 except 4 and 7.** # # > Write a Python decision structure that prints a number if it doesn't equal to 4 and 7. If the number equals 4 or 7 it should print 'forbidden number'. # + id="warjcIleAhsM" # *************************************************** # INSERT YOUR SOLUTION CODE HERE # *************************************************** # + [markdown] id="0Vt-agbKAhsM" # **4. Write a Python decision structure that evaluates if a number is a multiple of 5 and 7.** # # > Write a Python decision structure that evaluates if number is a multiple of 5 and 7. Hint: You may want to use Python's modulo operator (`%`) as part of your case evaluation. # + id="afbCNrCzAhsM" # *************************************************** # INSERT YOUR SOLUTION CODE HERE # *************************************************** # + [markdown] id="ZfyDnbbSAhsN" # ## Functions # + [markdown] id="IAiJdiumAhsN" # **1. Write a Python function to calculate the length of a string.** # # >Write a Python function named **"string_length"** to calculate the length of an arbitrary string. The function should take an arbitrary string as input and count the number of its characters. Test your function accordingly using various string values and print the results, e.g., input: 'Halloween', expected result: 9. # + id="PGLED_L4AhsN" # *************************************************** # INSERT YOUR SOLUTION CODE HERE # *************************************************** # + [markdown] id="OzCZMOjcAhsN" # **2. Write a Python program to get the largest number from a list.** # # >Write a Python function named **"max_num_in_list"** to get the largest number from a list. The function should take an arbitrary list of integer values as an input and should return the integer that corresponds to the highest value. Test your function accordingly using various string values and print the results, e.g., input: [1, 5, 8, 3], expected result: 8. 
# + id="DoeLXe7VAhsN" # *************************************************** # INSERT YOUR SOLUTION CODE HERE # *************************************************** # + [markdown] id="uJTHxzw5AhsN" # **3. Write a Python program to count the number of characters (character frequency) in a string.** # # >Write a Python function named **"char_frequency"** to count the number of distinct characters occurring in it. The function should take an arbitrary string as an input and should return the count of occurrence each individual character. Test your function accordingly using various string values and print the results, e.g., input: 'Happy Halllllloweeeeeen!', expected result: {'a': 2, ' ': 1, 'e': 6, 'H': 2, 'l': 6, 'o': 1, 'n': 1, 'p': 2, '!': 1, 'w': 1, 'y': 1}. # + id="4en4UDL-AhsN" # *************************************************** # INSERT YOUR SOLUTION CODE HERE # *************************************************** # + [markdown] id="wh5-01kFAhsN" # **4. Write a Python function that takes a list of words and returns the one exhibiting the most characters.** # # >Write a Python function named **find_longest_word** that takes a list of words and returns the length of the longest word in the list. The function should take an arbitrary list of string values (words) as an input and should return the word that exhibits the most characters. Test your function accordingly using various lists of string values and print the results, e.g., input: ['Happy', 'Halloween', '2018'], expected result: 'Halloween'. # + id="o0P6m0pTAhsN" # *************************************************** # INSERT YOUR SOLUTION CODE HERE # ***************************************************
resources/lab_01/exercises_lab01.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ![rmotr](https://user-images.githubusercontent.com/7065401/52071918-bda15380-2562-11e9-828c-7f95297e4a82.png) # <hr style="margin-bottom: 40px;"> # # <img src="https://user-images.githubusercontent.com/7065401/39118381-910eb0c2-46e9-11e8-81f1-a5b897401c23.jpeg" # style="width:300px; float: right; margin: 0 40px 40px 40px;"></img> # # # Numpy: Numeric computing library # # NumPy (Numerical Python) is one of the core packages for numerical computing in Python. Pandas, Matplotlib, Statmodels and many other Scientific libraries rely on NumPy. # # NumPy major contributions are: # # * Efficient numeric computation with C primitives # * Efficient collections with vectorized operations # * An integrated and natural Linear Algebra API # * A C API for connecting NumPy with libraries written in C, C++, or FORTRAN. # # Let's develop on efficiency. In Python, **everything is an object**, which means that even simple ints are also objects, with all the required machinery to make object work. We call them "Boxed Ints". In contrast, NumPy uses primitive numeric types (floats, ints) which makes storing and computation efficient. # <img src="https://docs.google.com/drawings/d/e/2PACX-1vTkDtKYMUVdpfVb3TTpr_8rrVtpal2dOknUUEOu85wJ1RitzHHf5nsJqz1O0SnTt8BwgJjxXMYXyIqs/pub?w=726&h=396" /> # # ![purple-divider](https://user-images.githubusercontent.com/7065401/52071927-c1cd7100-2562-11e9-908a-dde91ba14e59.png) # # ## Hands on! 
import sys import numpy as np # ## Basic Numpy Arrays np.array([1, 2, 3, 4]) a = np.array([1, 2, 3, 4]) b = np.array([0, .5, 1, 1.5, 2]) a[0], a[1] a[0:] a[1:3] a[1:-1] a[::2] b b[0], b[2], b[-1] b[[0, 2, -1]] # ![green-divider](https://user-images.githubusercontent.com/7065401/52071924-c003ad80-2562-11e9-8297-1c6595f8a7ff.png) # # ## Array Types a a.dtype b b.dtype np.array([1, 2, 3, 4], dtype=np.float) np.array([1, 2, 3, 4], dtype=np.int8) c = np.array(['a', 'b', 'c']) c.dtype d = np.array([{'a': 1}, sys]) d.dtype # ![green-divider](https://user-images.githubusercontent.com/7065401/52071924-c003ad80-2562-11e9-8297-1c6595f8a7ff.png) # # ## Dimensions and shapes A = np.array([ [1, 2, 3], [4, 5, 6] ]) A.shape A.ndim A.size B = np.array([ [ [12, 11, 10], [9, 8, 7], ], [ [6, 5, 4], [3, 2, 1] ] ]) B B.shape B.ndim B.size # If the shape isn't consistent, it'll just fall back to regular Python objects: C = np.array([ [ [12, 11, 10], [9, 8, 7], ], [ [6, 5, 4] ] ]) C.dtype C.shape C.size type(C[0]) # ![green-divider](https://user-images.githubusercontent.com/7065401/52071924-c003ad80-2562-11e9-8297-1c6595f8a7ff.png) # # ## Indexing and Slicing of Matrices # Square matrix A = np.array([ #. 0. 1. 2 [1, 2, 3], # 0 [4, 5, 6], # 1 [7, 8, 9] # 2 ]) A[1] A[1][0] # + # A[d1, d2, d3, d4] # - A[1, 0] A[0:2] A[:, :2] A[:2, :2] A[:2, 2:] A A[1] = np.array([10, 10, 10]) A A[2] = 99 A # ![green-divider](https://user-images.githubusercontent.com/7065401/52071924-c003ad80-2562-11e9-8297-1c6595f8a7ff.png) # # ## Summary statistics a = np.array([1, 2, 3, 4]) a.sum() a.mean() a.std() a.var() A = np.array([ [1, 2, 3], [4, 5, 6], [7, 8, 9] ]) A.sum() A.mean() A.std() A.sum(axis=0) A.sum(axis=1) A.mean(axis=0) A.mean(axis=1) A.std(axis=0) A.std(axis=1) # And [many more](https://docs.scipy.org/doc/numpy-1.13.0/reference/arrays.ndarray.html#array-methods)... 
# ![green-divider](https://user-images.githubusercontent.com/7065401/52071924-c003ad80-2562-11e9-8297-1c6595f8a7ff.png) # # ## Broadcasting and Vectorized operations a = np.arange(4) a a + 10 a * 10 a a += 100 a l = [0, 1, 2, 3] [i * 10 for i in l] a = np.arange(4) a b = np.array([10, 10, 10, 10]) b a + b a * b # ![green-divider](https://user-images.githubusercontent.com/7065401/52071924-c003ad80-2562-11e9-8297-1c6595f8a7ff.png) # # ## Boolean arrays # _(Also called masks)_ a = np.arange(4) a a[0], a[-1] a[[0, -1]] a[[True, False, False, True]] a a >= 2 a[a >= 2] a.mean() a[a > a.mean()] a[~(a > a.mean())] a[(a == 0) | (a == 1)] a[(a <= 2) & (a % 2 == 0)] A = np.random.randint(100, size=(3, 3)) A A[np.array([ [True, False, True], [False, True, False], [True, False, True] ])] A > 30 A[A > 30] # ![green-divider](https://user-images.githubusercontent.com/7065401/52071924-c003ad80-2562-11e9-8297-1c6595f8a7ff.png) # # ## Linear Algebra A = np.array([ [1, 2, 3], [4, 5, 6], [7, 8, 9] ]) B = np.array([ [6, 5], [4, 3], [2, 1] ]) A.dot(B) A @ B B.T A B.T @ A # ![green-divider](https://user-images.githubusercontent.com/7065401/52071924-c003ad80-2562-11e9-8297-1c6595f8a7ff.png) # # ## Size of objects in Memory # ### Int, floats # An integer in Python is > 24bytes sys.getsizeof(1) # Longs are even larger sys.getsizeof(10**100) # Numpy size is much smaller np.dtype(int).itemsize # Numpy size is much smaller np.dtype(np.int8).itemsize np.dtype(float).itemsize # ### Lists are even larger # A one-element list sys.getsizeof([1]) # An array of one element in numpy np.array([1]).nbytes # ### And performance is also important l = list(range(100000)) a = np.arange(100000) # %time np.sum(a ** 2) # %time sum([x ** 2 for x in l]) # ![green-divider](https://user-images.githubusercontent.com/7065401/52071924-c003ad80-2562-11e9-8297-1c6595f8a7ff.png) # # ## Useful Numpy functions # ### `random` np.random.random(size=2) np.random.normal(size=2) np.random.rand(2, 4) # --- # ### `arange` 
np.arange(10) np.arange(5, 10) np.arange(0, 1, .1) # --- # ### `reshape` np.arange(10).reshape(2, 5) np.arange(10).reshape(5, 2) # --- # ### `linspace` np.linspace(0, 1, 5) np.linspace(0, 1, 20) np.linspace(0, 1, 20, False) # --- # ### `zeros`, `ones`, `empty` np.zeros(5) np.zeros((3, 3)) np.zeros((3, 3), dtype=np.int) np.ones(5) np.ones((3, 3)) np.empty(5) np.empty((2, 2)) # --- # ### `identity` and `eye` np.identity(3) np.eye(3, 3) np.eye(8, 4) np.eye(8, 4, k=1) np.eye(8, 4, k=-3) "Hello World"[6] # ![purple-divider](https://user-images.githubusercontent.com/7065401/52071927-c1cd7100-2562-11e9-908a-dde91ba14e59.png)
pandas/freecodecamp/freecodecamp-intro-to-numpy-master/freecodecamp-intro-to-numpy-master/2. NumPy.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ### Binary Support Vector Machine
#
# &nbsp;
#
# SVM is where we really step into the door of machine learning. I personally think Andrew Ng's lectures did not explain very well on this topic. I strongly advise readers to use alternative materials or the website SVM tutorial. This site was created by a Pole developer called <NAME>. He dedicated to teach SVM from the very basic notation of vectors and LaGrangian to the hardcore Wolfe dual problem and SMO algorithm. It is very friendly for beginners who forget everything about high school math, haha. The free e-book he wrote is a must-read for more advanced techniques such as L1/L2 regularized soft margin, kernels, SMO and multiclass classification.
#
# Link to this awesome website
#
# https://www.svm-tutorial.com/
#
# For multiclass classification
#
# https://github.com/je-suis-tm/machine-learning/blob/master/multiclass%20support%20vector%20machine.ipynb

import cvxopt.solvers
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
from sklearn.svm import SVC
import os
os.chdir('d:/python/data')


#using official sklearn package with the same parameters
def skl_binary_svm(x_train,x_test,y_train,y_test,**kwargs):
    """Fit sklearn's SVC on the same 1-D feature and print train/test accuracy (%).

    Serves as the reference implementation to compare against binary_svm below.
    Inputs are 1-D pandas Series (the run section reduces features to one PCA
    component), hence the reshape(-1, 1) to make sklearn's expected 2-D X.
    """
    m=SVC(**kwargs).fit(np.array(x_train).reshape(-1, 1), \
                        np.array(y_train).ravel())
    train=m.score(np.array(x_train).reshape(-1, 1), \
                  np.array(y_train).ravel())*100
    test=m.score(np.array(x_test).reshape(-1, 1), \
                 np.array(y_test).ravel())*100
    print('\ntrain accuracy: %s'%(train)+'%')
    print('\ntest accuracy: %s'%(test)+'%')


#svm for binary classification
def binary_svm(x_train,x_test,y_train,y_test,
               kernel='linear',poly_constant=0.0,poly_power=1,gamma=5):
    """Hard-margin binary SVM solved as the Wolfe dual QP via cvxopt.

    Parameters
    ----------
    x_train, x_test : 1-D pandas Series of floats (single feature).
    y_train, y_test : 1-D pandas Series of labels, strictly -1.0 / +1.0
        (cvxopt requires float labels; see the run section).
    kernel : 'linear', 'polynomial', or anything else for gaussian/rbf.
    poly_constant, poly_power : polynomial kernel (x.x' + c)**p parameters.
    gamma : rbf width parameter, exp(-gamma * ||xi - xj||^2).

    Prints train/test accuracy and the recovered primal parameters w, b.
    """
    #this is outer product matrix
    #which is the combination of all inner products
    #alternatively,we can write outer product in
    #np.mat([np.dot(y_train[i],y_train[j])
    #for j in y_train.index for i in y_train.index]).reshape(
    #len(y_train),len(y_train))
    #or just np.mat(y_train).T*np.mat(y_train)
    y_product=np.outer(y_train,y_train)

    #using different kernels to map inner product to a higher dimension space
    #there are only three kernels here, which are linear, polynomial, gaussian
    if kernel=='linear':
        x_product=np.outer(x_train,x_train)
    elif kernel=='polynomial':
        arr=np.outer(x_train,x_train)
        # apply (x + c)**p elementwise over the flattened Gram matrix
        x_product=np.apply_along_axis(
            lambda x:(x+poly_constant)**poly_power,
            0,arr.ravel()).reshape(arr.shape)
    else:
        #gaussian/rbf kernel
        #map to infinite dimension space
        #be careful with the value of gamma
        #when gamma is too large, it could be overfitted
        #when gamma is too small, it could be underfitted
        #better to use gridsearch to find an optimal gamma
        # NOTE(review): np.mat is deprecated in modern NumPy — np.asarray
        # would be the drop-in replacement; confirm before changing.
        arr=np.mat(
            [i-j for j in x_train for i in x_train]).reshape(
            len(x_train),len(x_train))
        x_product=np.apply_along_axis(
            lambda x:np.exp(-1*gamma*(np.linalg.norm(x))**2),
            0,arr.ravel()).reshape(arr.shape)

    #plz refer to the following link
    #for how to solve wolfe dual problem in cvxopt
    # http://cvxopt.org/userguide/coneprog.html#quadratic-programming
    # Dual: minimize (1/2) a^T P a + q^T a  s.t.  G a <= h (alpha >= 0)
    # and A a = b (sum of alpha_i * y_i = 0).
    P=cvxopt.matrix(x_product*y_product)
    q=cvxopt.matrix(-1*np.ones(len(x_train)))
    G=cvxopt.matrix(np.diag(-1 * np.ones(len(x_train))))
    h=cvxopt.matrix(np.zeros(len(x_train)))
    A=cvxopt.matrix(y_train,(1,len(x_train)))
    b=cvxopt.matrix(0.0)
    solution=cvxopt.solvers.qp(P, q, G, h, A, b)

    # alpha are the dual variables; w = sum(alpha_i * y_i * x_i) recovers
    # the primal weight (a scalar here, since x is one-dimensional)
    alpha=pd.Series(solution['x'])
    w=np.sum(alpha*y_train*x_train)
    #here i am using prof <NAME>'s method of calculating b
    #alternatively, we can do a normal average of all value b
    #b=np.mean(y_train-w*x_train)
    # midpoint between the closest projected points of the two classes
    b=-(min(x_train[y_train==1.0]*w)+max(x_train[y_train==-1.0]*w))/2

    # sign(w*x + b) is the predicted label; count matches for accuracy
    print('\ntrain accuracy: %s'%(len(
        y_train[np.sign(
            np.multiply(w,x_train)+b)==y_train])/len(y_train)*100)+'%')
    print('\ntest accuracy: %s'%(len(
        y_test[np.sign(np.multiply(w,x_test)+b)==y_test])/len(y_test)*100)+'%')
    print('\nparameters w: %s'%(w))
    print('\nparameters b: %s'%(b))

# ### Run

df=pd.read_csv('iris.csv')

#the classification has to be float instead of int
#this is requested by cvxopt
#for a binary classification
#the value should be either -1.0 or 1.0
df['y']=np.select([df['type']=='Iris-setosa', \
                   df['type']=='Iris-versicolor', \
                   df['type']=='Iris-virginica'],[-1.0,0.0,1.0])

#for simplicity, let us make it a binary classification
df=df[df['y']!=0.0]

#for simplicity, let us reduce the dimension of x to 1
#reference to pca
# https://github.com/je-suis-tm/machine-learning/blob/master/principal%20component%20analysis.ipynb
high_dims=pd.concat([df[i] for i in df.columns if 'length' in i or 'width' in i],axis=1)
x=PCA(n_components=1).fit_transform(high_dims)
x=pd.Series([x[i].item() for i in range(len(x))])
y=df['y']

#train test split
x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.3)

#crucial!!!!
#or we would get errors in the next step
# (binary_svm indexes Series positionally via np.outer and boolean masks,
#  so the post-split indices must be reset to 0..n-1)
x_test.reset_index(inplace=True,drop=True)
y_test.reset_index(inplace=True,drop=True)
x_train.reset_index(inplace=True,drop=True)
y_train.reset_index(inplace=True,drop=True)

binary_svm(x_train,x_test,y_train,y_test)

skl_binary_svm(x_train,x_test,y_train,y_test,kernel='linear')
binary support vector machine.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Concept extraction from text
#
#
# ## 1. Loading text file into string
#
# ### Option 1. Downloading a wikipedia article's text

# +
from bs4 import BeautifulSoup
import requests

url = 'https://en.wikipedia.org/wiki/Star'

source = requests.get(url).text
soup = BeautifulSoup(source,'lxml')
text_set = soup.find_all(['p'])
## This will skip headings ('h2','h3') and lists that are made as links( 'li')
text_list = [p1.get_text() for p1 in text_set]
tags_list = [p1.name for p1 in text_set ]
# rawtxt is the single string all later cells operate on
rawtxt = ''.join(text_list)
print("length of material")
print(len(rawtxt))
print("Sample of text")
print(rawtxt[0:500])
# -

# #### Save rawtxt as is for later:

filename = 'starwiki.txt'
path_name = "C:/Users/Arati/Documents/personal docs/python_introduction_course/textdata/"
# NOTE(review): mode "a" appends, so re-running this cell duplicates the
# article in the file — presumably "w" (overwrite) was intended; confirm.
with open(path_name + filename,"a",encoding="utf-8") as myfile:
    myfile.write(rawtxt)
myfile.close()  # NOTE(review): redundant — the with-block already closed the file

# ### Option 2. Getting file from disk:

filename = 'Cognitive_Load_Theory.txt'
path_name = "C:/Users/Arati/Documents/personal docs/python_introduction_course/textdata/"
with open (path_name +filename, "r",encoding="utf-8") as myfile:
    rawtxt=myfile.read()
myfile.close()  # NOTE(review): redundant — the with-block already closed the file
#rawtxt = rawtxt.encode('ascii','ignore')

# ## Extracting list of concepts:
#
# ### Importing libraries and modules

from nltk import word_tokenize
from nltk.chunk import *
from nltk.chunk.util import *
from nltk.chunk.regexp import *
from nltk import Tree
import re
from nltk.corpus import stopwords
stop_words = set(stopwords.words('english'))
import nltk
# Lemmatizer used downstream; the two stemmers are instantiated but unused here
wnl = nltk.WordNetLemmatizer()
porter = nltk.PorterStemmer()
lancaster = nltk.LancasterStemmer()

# ### Sentence splitting

# +
from nltk.tokenize.punkt import PunktSentenceTokenizer, PunktTrainer

# Train an unsupervised Punkt sentence splitter on the document itself so
# abbreviations specific to this text are learned before splitting.
trainer = PunktTrainer()
trainer.INCLUDE_ALL_COLLOCS = True
trainer.train(rawtxt)

tokenizer = PunktSentenceTokenizer(trainer.get_params())
# sents (list of sentence strings) is the unit of analysis for all later cells
sents = tokenizer.tokenize(rawtxt)
print("Number of sentences in text "+str(len(sents)))
print(len(sents))
print("Sample of sentences:")
print(sents[0:5])
# -

# ### Token handling functions:
# 1. validchar(wrd): checks if token is a valid alphanumeric+hyphens word
# 2. lemmatize_by_pos(tag) lemmatizes token by part of speech
# 3. chunk_this(grammar_rule_key,sentence_tags) chunks a particular grammar rule key (see chunkrules)
# 4. eqn_label: extracts equation terms and replaces all occurences in text with a textkey, which is then treated as a noun phrase. Also updates equation dictionary
# 5. display_equation (displays equation term by key)
# 6.
# chunker: chunks each sentence by each chunking rule

# +
def validchar(wrd):
    """Return 1 if *wrd* contains only [0-9a-zA-Z_-] characters, else 0.

    Kept as 1/0 (not True/False) because callers test ``validchar(...) == 1``.
    """
    p = re.compile(r'[^0-9a-zA-Z_-]')
    # search() returns None only when no disallowed character is present
    if p.search(wrd) is None:
        return 1
    else:
        return 0


def lemmatize_by_pos(tag):
    """Lemmatize one ``(token, pos)`` pair with the WordNet lemmatizer.

    The Penn POS tag's first letter selects the WordNet POS; stopwords are
    passed through lowercased but unlemmatized. Returns ``(lemma, pos)``.
    """
    token = tag[0].lower()
    pos = tag[1]
    if token in stop_words:
        return (token, pos)
    if pos.startswith('J'):
        # adjective -> WordNet satellite-adjective form
        lemma = wnl.lemmatize(token, 's')
    elif pos.startswith('N'):
        # noun form
        lemma = wnl.lemmatize(token, 'n')
    elif pos.startswith('R'):
        # adverb
        lemma = wnl.lemmatize(token, 'r')
    elif pos.startswith('V'):
        # verb
        lemma = wnl.lemmatize(token, 'v')
    else:
        lemma = token
    return (lemma, pos)


# Global registry mapping equation-like token -> placeholder label
# ('equation1', 'equation2', ...); shared across all sentences.
eqn_dict = {}
eqn_count = 1


def eqn_label(tokens):
    """Replace equation-like tokens in *tokens* with stable placeholder labels.

    A token counts as equation-like when it is not purely alphanumeric,
    contains at least one math-ish character ([]{}+*^=_%$), and is longer
    than one character. Labels are registered in the global ``eqn_dict`` so
    the same equation gets the same label in every sentence. Mutates and
    returns *tokens*.

    BUGFIX: previously a newly seen equation was registered but only
    substituted into ``tokens`` on a *later* sighting (the substitution sat
    in the ``else`` branch), and even then only its first occurrence was
    replaced via ``tokens.index``. Now every occurrence of every known
    equation is replaced on every call.
    """
    global eqn_count
    EQNlist = [wrd for wrd in tokens
               if not(wrd.isalnum())
               and re.search(r'[\[\]\{\}\+*^=_%$]', wrd)
               and len(wrd) > 1]
    ## register equations and save to equation dictionary
    for eqn in EQNlist:
        if eqn not in eqn_dict:
            eqn_dict[eqn] = ''.join(['equation', str(eqn_count)])
            eqn_count = eqn_count + 1
    # replace every occurrence of every known equation with its label
    for idx, wrd in enumerate(tokens):
        if wrd in eqn_dict:
            tokens[idx] = eqn_dict[wrd]
    return tokens


# NOTE: kept only for backward compatibility — this snapshot is taken while
# eqn_dict is still empty. display_equation() below no longer relies on it.
inv_eqn_dict = dict([[value, key] for key, value in eqn_dict.items()])


def display_equation(reptokens):
    """Swap placeholder labels in *reptokens* back to their original equation text.

    Mutates and returns *reptokens*.

    BUGFIX: the old implementation read the module-level ``inv_eqn_dict``,
    which was built before any equation was registered and therefore always
    empty, making this function a no-op. The inverse mapping is now derived
    from the live ``eqn_dict`` at call time, and every occurrence (not just
    the first) is restored.
    """
    inverse = {label: eqn for eqn, label in eqn_dict.items()}
    for idx, wrd in enumerate(reptokens):
        if wrd in inverse:
            reptokens[idx] = inverse[wrd]
    return reptokens
# -

# Setting up chunking rules:
#
# Chunking done in batches to enable overlapping tokens to be extracted.
# + chunkrules = {} # Define chunking rules here: chunkrules['JJNP'] = r""" JJNP: {<RB.*>?<J.*>?<NN.*>{1,}} """ ## Examples: "reusable contactless stored value smart card" def chunk_this(grammar_rule_key,sentence_tags): setlist = [] cp = nltk.RegexpParser(chunkrules[grammar_rule_key]) J = cp.parse(sentence_tags) for i in range(len(J)): if not(isinstance(J[i],tuple)): if (J[i].label()==grammar_rule_key): setlist.append((' '.join([J[i][j][0] for j in range(len(J[i])) if (validchar(J[i][j][0])==1)]))) setlist = list(set(setlist)) setlist = [wrd.lower() for wrd in setlist if len(wrd)>0] return setlist def chunker(sentence_tags): return [chunk_this(key,sentence_tags) for key in chunkrules] # - # Process each sentence: # + # %%time sent_to_np = {} sent_to_ltags = {} sent_to_tags = {} for i in range(len(sents)): tokens = word_tokenize(sents[i]) reptokens = eqn_label(tokens) tags = nltk.pos_tag(reptokens) lemmatags = [lemmatize_by_pos(t) for t in tags] sent_to_np[i] = chunker(lemmatags) sent_to_ltags[i] = lemmatags sent_to_tags[i] = tags # - sent_to_np[0] # Flatten lists of lists containing chunks of different rules, dictionary of np to sent import itertools sent_to_npflat = {} np_to_sent = {} for key in sent_to_np: sent_to_npflat[key] = list(set((itertools.chain(*sent_to_np[key])))) for np in sent_to_npflat[key]: if np in np_to_sent: np_to_sent[np].append(key) else: np_to_sent[np]=[key] # ### Create dataframe with some metrics: # - Concept: concept phrase # - Occurence: list of sentences in which the phrase occurs # - Frequency: number of sentences in which the phrase occurs # - Mean: average of sentence numbers in the text in which the phrase occurs normalized to number of sentences # - Median: median of sentence numbers in the text in which the phrase occurs normalized to number of sentences. Lets us know if phrase occurs much more in the beginning of the text, or towards the end. can indicate how central the phrase is to the text. 
# - Sdev: standard deviation of the sentences in which the phrase occurs (indicates the dispersion of the phrase in the text) import numpy as num import pandas as pd Concept = pd.Series([key for (key,value) in np_to_sent.items()]) Occurence = pd.Series([num.array(value) for (key,value) in np_to_sent.items()]) Frequency = pd.Series([len(o) for o in Occurence]) Mean= pd.Series([num.mean(o) for o in Occurence])/len(sents) Median = pd.Series([num.median(o) for o in Occurence])/len(sents) Sdev = pd.Series([num.std(o) for o in Occurence])/len(sents) Conceptdata = pd.DataFrame({'Concept':Concept,'Occurence':Occurence,'Frequency':Frequency,'Mean':Mean,'Median':Median,'Sdev':Sdev}) Conceptdata.sort_values(by='Frequency',ascending=False).head(20) # ### Save as csv: Conceptdata.to_csv(filename[0:-4]+'.csv',sep=',') # ### Save dictionaries and dataframe to pickle file import pickle concepts = {'sents':sents,'rawtxt':rawtxt,'sent_to_npflat':sent_to_npflat,'sent_to_tags':sent_to_tags,'sent_to_ltags':sent_to_ltags,'np_to_sent':np_to_sent,'Conceptdata':Conceptdata} with open(filename[0:-4]+'concepts.pickle', 'wb') as f: pickle.dump(concepts, f) f.close()
concept_extraction/.ipynb_checkpoints/concept_extraction-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:glm_env]
#     language: python
#     name: conda-env-glm_env-py
# ---

# # Experimenting with VGAE Code
#
# Code source: https://github.com/DaehanKim/vgae_pytorch
# Paper reference: "Variational Graph Auto-Encoders" by <NAME> and <NAME>, 2016
#
# ## To figure out:
# - [ ] how do they pre-process their data? What form does their input data take?
# - [ ] how does GAE and GVAE work? can I implement?

# +
# # !pip install networkx
# # !pip install sklearn

# +
import torch
import torch.nn.functional as F
from torch.optim import Adam
from sklearn.metrics import roc_auc_score, average_precision_score
import scipy.sparse as sp
import numpy as np
import os
import time
from pyprojroot import here
import sys
import pickle as pkl
import networkx as nx

# from input_data import load_data
# from preprocessing import *
# import args
# import model
# -

root = here(project_files=[".here"])
sys.path.append(str(root))

# +
# print(root)
# -

def parse_index_file(filename):
    """Function builds a list of indices from a given filename.

    Args:
        filename (str): filename (including extension)

    Returns:
        index (list of int): list of indices from filename
    """
    # Use a context manager so the file handle is always closed; the
    # previous version opened the file without ever closing it.
    with open(filename) as fh:
        # .strip removes extra whitespace around each index
        return [int(line.strip()) for line in fh]


def load_data(dataset):
    """Function loads data from different citation network datasets.

    Assumes all datasets contain 4 files with extensions .x, .tx, .allx,
    .graph. This function extracts the data from the 4 files and uses it
    to generate an adjacency matrix and feature vectors for each node.
    The adjacency matrix is for one large citation network graph.

    Args:
        dataset (str): name of the dataset to load

    Returns:
        adj (scipy.sparse.csr_matrix): adjacency matrix of the citation graph
        features (scipy.sparse.lil_matrix): one feature vector per node
    """
    # load the data: x, tx, allx, graph
    names = ['x', 'tx', 'allx', 'graph']
    objects = []
    for name in names:
        with open("{}/data/ind.{}.{}".format(root, dataset, name), 'rb') as f:
            if sys.version_info > (3, 0):
                objects.append(pkl.load(f, encoding='latin1'))
            else:
                objects.append(pkl.load(f))

    # graph is a dict (default dict from collections module):
    # each key is a node, each value is a list of the adjacent nodes.
    # x, tx and allx are scipy compressed sparse row matrices with the
    # node features of the different corpus splits.
    x, tx, allx, graph = tuple(objects)

    # test_idx_reorder is the list of file indices out of order;
    # test_idx_range is the sorted list of file indices
    test_idx_reorder = parse_index_file("{}/data/ind.{}.test.index".format(root, dataset))
    test_idx_range = np.sort(test_idx_reorder)

    if dataset == 'citeseer':
        # Fix citeseer dataset (there are some isolated nodes in the graph)
        # Find isolated nodes, add them as zero-vecs into the right position
        test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder)+1)
        tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
        tx_extended[test_idx_range-min(test_idx_range), :] = tx
        tx = tx_extended

    # lil is a list of lists, another way to represent adjacency information
    features = sp.vstack((allx, tx)).tolil()
    # Put the test rows back into their original (file) order.
    features[test_idx_reorder, :] = features[test_idx_range, :]

    # build an adjacency matrix which is a compressed sparse row matrix
    adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))

    return adj, features

# + tags=[]
dataset = 'cora'
adj, features = load_data(dataset)
# adj is a sparse matrix (scipy datatype) that contains all of the
# information provided in graph, see above.

# +
# Store original adjacency matrix (without diagonal entries) for later.
# The paper assumes every diagonal entry is 1 (nodes are self-connected),
# so whatever is currently on the diagonal is stripped here.
adj_orig = adj
# .diagonal returns the values of the diagonal of adj_orig as an array;
# np.newaxis adds a dimension so it can seed a sparse diagonal matrix.
adj_orig = adj_orig - sp.dia_matrix((adj_orig.diagonal()[np.newaxis, :], [0]), shape=adj_orig.shape)
adj_orig.eliminate_zeros()

# +
def sparse_to_tuple(sparse_mx):
    """Function obtains the coordinates, values and shape from a sparse
    matrix required to build a COO matrix representation.

    Args:
        sparse_mx (COO matrix): The sparse matrix to be converted

    Returns:
        coords (numpy.ndarray): The coordinates of the values in the adjacency matrix
        values (numpy.ndarray): The entries in the adjacency matrix
        shape (tuple): The shape of the adjacency matrix
    """
    if not sp.isspmatrix_coo(sparse_mx):
        sparse_mx = sparse_mx.tocoo()
    coords = np.vstack((sparse_mx.row, sparse_mx.col)).transpose()
    values = sparse_mx.data
    shape = sparse_mx.shape
    return coords, values, shape


def preprocess_graph(adj):
    """Function takes adjacency matrix as input and returns the normalized
    adjacency matrix. The normalized adjacency matrix is symmetric and is
    normalized on a row-by-row basis.

    Args:
        adj (compressed sparse row matrix): adjacency matrix (raw)

    Returns:
        adj_normalized (tuple): the normalized adjacency matrix, given as a
            tuple containing (coords, values, shape) to be used to build a
            COO matrix
    """
    adj = sp.coo_matrix(adj)
    # Add self-connections: ones on the diagonal.
    adj_ = adj + sp.eye(adj.shape[0])
    rowsum = np.array(adj_.sum(1))
    degree_mat_inv_sqrt = sp.diags(np.power(rowsum, -0.5).flatten())
    # Symmetric normalization: A_norm = D^(-1/2) * (A + I) * D^(-1/2),
    # where D is the degree matrix of A + I.
    adj_normalized = adj_.dot(degree_mat_inv_sqrt).transpose().dot(degree_mat_inv_sqrt).tocoo()
    return sparse_to_tuple(adj_normalized)
# -

# Some preprocessing
adj_norm = preprocess_graph(adj_orig)

print(len(adj_norm))
# print(adj_norm[0]) # coords
# print(adj_norm[1]) # values
# print(adj_norm[2]) # shape
print(type(adj_norm[0])) # coords
print(type(adj_norm[1])) # values
print(type(adj_norm[2])) # shape

# + tags=[]
num_nodes = adj.shape[0]  # adj is still the scipy sparse adjacency matrix
print("num nodes", num_nodes)

features_coords, features_values, features_shape = sparse_to_tuple(features.tocoo())
print("feature coords", features_coords)
print("feature values", features_values)
print("feature shape", features_shape)

# shape is (num samples, num features)
num_features = features_shape[1]
print("num features", num_features)

features_nonzero = features_values.shape[0]
print("nonzero features", features_nonzero)
notebooks/emma_experiment.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Initial data analysis and Linear Regression # This assignment is dedicated to Linear regression. By focusing on prediction different features of football players you understand the mathematics behind it and see the usefulness of main data analysis libraries. # **Materials** # - [Documentation](http://docs.scipy.org/doc/) libraries Numpy and SciPy # - [Documentation](http://matplotlib.org/) library Matplotlib # - [Documentation](http://pandas.pydata.org/pandas-docs/stable/tutorials.html) library Pandas # - [Pandas Cheat Sheet](http://www.analyticsvidhya.com/blog/2015/07/11-steps-perform-data-analysis-pandas-python/) # - [Documentation](http://stanford.edu/~mwaskom/software/seaborn/) library Seaborn # # **Resources** # - In this notebook we will use *FIFA 19 complete player dataset* which is taken from [here](https://www.kaggle.com/karangadiya/fifa19) # ## Part 1. Initial data analysis with Pandas # Importing libraries. # + import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt import random # %matplotlib inline # - # Load the data. Table *data.csv* should be in the same directory as this notebook. data = pd.read_csv("data.csv", index_col='ID') # The first thing you need to do with a dataframe after loading is to look at first few records. This way you can make sure that you have parsed it correctly. Moreover, you can get acquainted with the data, look at the features and their type (categorical, numerical, text ...). # # They you may check whether the data has missing values inside. Depending on the problem type and percentage of missing values you can either fill them with some value or drop columns/rows having null values. # # After that you may want to look closer at some features. 
You can draw a histogram for defining a feature distribution (normal, power or some other). Also with the help of histogram you can find values which are really differ from the rest, we call them **outliers**. Histograms can be plotted by *hist* method of Pandas DataFrame. # # **Example 1** Let's look at first 5 rows of data using method *head* for DataFrame data. data.head() # Unfortunately the number of columns exceeds the maximum visible default value in Pandas. Use the magic line above to remove this restriction. pd.set_option('display.max_columns', None) data.head() # Much better now. # # **Example 2** Print total player number and top-10 columns containing the most number of null values. print(f"Total number of players in dataset {data.shape[0]}") # + # from tabulate import tabulate # top = 10 # print(tabulate( # sorted(list(zip(data.columns, data.isnull().sum(), data.isnull().sum() / data.shape[0] * 100)), key=lambda x: -x[2])[:top], # headers=['col_name', 'null_cnt', 'null_perc'])) # - # **Example 3**. Let's built a histogram of weight distribution in kgs from footbal players data. Follow steps: # - Extract weight value from string (column Weight). # - Convert *Weight* column to float type. # - Get rid of null values in weight column, use median column value instead of them. # - Convert pounds to kilograms # - Finally use method *hist* for DataFrame *data* with arguments *column=Weight* (we look at this feature distribution) print(f"Weight column type is '{data['Weight'].dtype}'") data['Weight_float'] = data['Height'].str.extract(r'([0-9]+)lbs').astype(float) data['Weight_float'].fillna(data['Weight_float'].median()) POUND_TO_KILO = 0.454 data['Weight_kg'] = data.apply(lambda row: row['Weight_float'] * POUND_TO_KILO, axis=1) data.hist(column='Weight_kg', bins=30) plt.show() # **Task 1 (1 point)**. Built a histogram of the height distribution in *meters* from footbal player data. Remember that height is in format *feet* '*inches*. 
Instead of filling null values with some constant just drop them. Use *.dropna* for specified column. # + def format(ht): # 7'11 ht = str(ht) ht_ = ht.split("'") ft_ = float(ht_[0]) try: in_ = float(ht_[1]) except IndexError: in_ = 0 cm = (30.48 * ft_) + (2.54 * in_) return cm data["Height"].apply(lambda x: format(x)) # - data['Height_cm'] = data['Height'].apply(lambda x:format(x)) data['Height_cm'].dropna(inplace=True) data.hist(column='Height_cm', bins=19) plt.show() data.head() # Effective way to visualize the relationship between two features is to draw a simple _scatter plot_. The position of each dot on the horizontal and vertical axis indicates values for an individual data point. # **Example 4.** Visualize the dependence of _Strength_ on _Weight_kg_. data.plot.scatter(x='Weight_kg', y='Strength') plt.title('Dependence of strength on weight') plt.show() # One more effective way of initial data analysis is to plot pairwise feature dependencies. That simply combines already considered Scatter plot and a histogram. We create $m \times m$ plots (_m_ is number of features) where pictures on diagonal represent **histograms** and outside the diagonal **scatter_matrix**. That can be done with the help of _scatter_matrix_ Pandas DataFrame method or _pairplot_ in Seaborn. # **Example 5.** # Illustrate pairwise dependencies between _ShortPassing_, _Dribbling_, _BallControl_ and _Strength_ features of footbal players. sns.pairplot(data[['ShortPassing', 'Dribbling', 'BallControl', 'Strength']]) # Histograms and scatter plots are good for continuous (numerical) features. Distribution of data by categorical features (that have a fixed number of possible values) can be represented with **bar charts**. # **Example 6.** Show distribution of players by age groups (under 20 yo. _young_, between 20-30 _mature_, over 30 yo. 
_masters_) # + data['age_group'] = data.apply(lambda x: 'young' if x['Age'] < 20 else 'mature' if x['Age'] <= 30 else 'masters', axis=1) distr = data.groupby('age_group').count().max(axis=1)[['young', 'mature', 'masters']] plt.bar(distr.index, distr.values) plt.ylabel('Number of players') plt.title('Distribution of players across age groups') plt.show() # - # Really often it is necessary to explore the distribution of some numerical feature based on the value of categorical one. Here comes the _boxplot_ of Seaborn library, which can show statistics of numerical features (mean, quantiles) by different value of categorical feature. Boxplot can also help to detect **outliers** - values that significantly differ from the rest. More detailed explanation [here](https://towardsdatascience.com/understanding-boxplots-5e2df7bcbd51). # **Example 7.** Show _SprintSpeed_ statistics across different age groups. # _Hint_: in order to prevent printing the service information and make our pictures more attractive we can write `;` in the end of last line. sns.boxplot(x='age_group', y='SprintSpeed', data=data); # ## Part 2. Minimizing Mean Squared Error. Linear Regression # We are going to predict target numerical variable $y$ for _n_ samples with the help of $x_1, x_2, ..., x_m$ _m_ features under the assumption of _liner dependence_ existence between features and target, i.e. # $$\hat{y} = w_0 + w_1 * x_1 + w_2 * x_2 + ... + w_m * x_m$$ # so that Mean Squared Error between $y$ and $\hat{y}$ was the lowest possible # $$MSE = \frac{1}{n}\sum_{i=1}^n {(y_i - \hat{y})}^2 -> min_{w_0, w_1, w_2, ...w_m}$$ # where $w_0$ is "free" weight component called **intercept** and $(w_1, w_2, ... w_n)$ is a **vector of coefficients**. # ### Part 2.1 Linear Regression with one variable # Just to understand the basic principles, let's try to predict _BallControl_ score based on the _Dribbling_ score for every player. Simple Linear Regression with one feature. 
# $$BallControl = w_0 + w_1 * Dribbling$$

# We are going to do real data science, aren't we? So let us split the available data into train and test samples. We let our model see only the train data, then we can measure it's quality on test sample.

# +
from sklearn.model_selection import train_test_split

# Fill the nulls with column means before splitting.
fill_values = {'BallControl': data['BallControl'].mean(),
               'Dribbling': data['Dribbling'].mean()}
data.fillna(fill_values, inplace=True)

X_train, X_test, y_train, y_test = train_test_split(
    data['Dribbling'].values, data['BallControl'].values, train_size=0.8)

# sklearn expects 2-D feature arrays of shape (n_samples, 1).
X_train = X_train.reshape(-1, 1)
X_test = X_test.reshape(-1, 1)
# -

y_train

# + active=""
# To illustrate the approach, let's use Ridge model from sklearn with _regularization_ param alpha=0. What does it mean and what it if for we will find out later on in this course. But for now I require avoiding regularization by setting regularization param to zero.
# -

# +
from sklearn.linear_model import Ridge

lr = Ridge(alpha=0)
lr.fit(X=X_train, y=y_train)
# -

print(f'w_0 = {lr.intercept_}, w_1 = {lr.coef_[0]}')

y_pred_train = lr.predict(X_train)
y_pred_test = lr.predict(X_test)

data['predicted_BallControl'] = lr.predict(data['Dribbling'].values.reshape(-1, 1))
data[['Name', 'Dribbling', 'BallControl', 'predicted_BallControl']].head()

# Right now we have predictions for train and test samples. How about measure the quality of the model?

# **Task 2 (0.5 point).** Write your own function for MSE calculation using the formula above.
Calculate train and test MSE, compare to built-in method (_sklearn.metrics.mean_squared_error_) def mse(y_true, y_pred): error = (np.sum(np.power(np.subtract(y_true, y_pred), 2))) / (y_pred.size) return error # + from sklearn.metrics import mean_squared_error assert round(mean_squared_error(y_train, y_pred_train), 9) == round(mse(y_train, y_pred_train), 9) assert round(mean_squared_error(y_test, y_pred_test), 9) == round(mse(y_test, y_pred_test), 9) # - print(f'Train MSE {mse(y_train, y_pred_train)}, test MSE {mse(y_test, y_pred_test)}') # **Task 3 (1.5 points).** Visualize the dependence of **test** _BallControl_ predictions and real _BallControl_ score on _Dribbling_ score. Don't forget to add axis and plot names! # + def plotter(): plt.plot(data['BallControl'], data['Dribbling'], marker='.', markeredgewidth=0.4, color='cyan', linestyle="None", label='Real BallControl') plt.plot(data['predicted_BallControl'], data['Dribbling'], color='green', label='predicted BallControl') plt.legend(bbox_to_anchor=(1, 1), loc='upper left') plt.xlabel("BallControl") plt.ylabel("Dribbling") plt.title('Dependence of test BallControl predictions and real BallControl score on Dribbling score\n') plt.show() plotter() # - # ### Part 2.2 Linear regression with many variables # **Task 4 (5 points).** Implement your own Linear Regression class for any number of input features and settable boolean parameter *fit_intercept*. # In this task you will work with _optimize_ module of [_scipy_](https://docs.scipy.org/doc/scipy/reference/) open-source library for mathematics, science, and engineering. You will need a function [_least_squares_](https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.least_squares.html) that finds a coefficients for linear regression by minimizing the sum of the squares of the residuals (which is equivalent to MSE minimizing). More information about least squares approach [here](https://en.wikipedia.org/wiki/Least_squares). 
<br><br> # Even though this function has many parameters, you need only a few of them to complete the task (the rest will be filled in with default values automatically). # - **fun** computes a vector of residuals given weights, features and target, we provide you a function template _compute_residuals_ # - **x0** this is an initial weights vector. You can either pass a vector of zeros[n_features] or fill in randomly. # - **args** are fixed arguments to _fun_ function (which we are not going to optimize). In that particular case you will need to pass X and y. # # # You can access optimized weights by accessing the field **.x** of object which returns by this function. # !!! IMPORTANT <br> # Please complete this assignment **without any cycles**. You may use the standard operations of matrix \ vector multiplication ans different statistic calculation with NumPy. Otherwise, your solution may not go through asserts. def compute_residuals(w, X, y): """ Compute residuals when predicting y_hat as matrix product of X and transposed w :param w: linear regression weights, numpy.ndarrya: float64[num_features] :param X: training features, numpy.ndarray: float64[num_samples, num_features] :param y: training target, numpy.ndarray: float64[num_samples] :returns: vector of residuals (y_i_hat - y_i) for each sample_i in X """ y_hat = X.dot(w.T) residuals = np.subtract(y_hat, y) return residuals # + from sklearn.base import BaseEstimator from sklearn.utils.validation import check_X_y, check_array, check_is_fitted from scipy.optimize import least_squares class LinearRegression(BaseEstimator): def __init__(self, fit_intercept=True): self.fit_intercept = fit_intercept def fit(self, X, y): """ fit model weights given input features and target :param X: training features, numpy.ndarray: numeric[num_samples, num_features] :param y: training target, numpy.ndarray: numeric[num_samples] :returns: linear predictor with fitted weights so that train MSE is the lowest possible :note: weights: 
numpy.ndarray: float64[num_features] stored as class field """ # Check that X and y have correct shape X, y = check_X_y(X, y) # Save train data information. Necessary for following the uniform API self.X_ = X self.y_ = y self.n_features_in_ = X.shape[1] # Copy arrays and cast them to uniform type X_train = X.astype('float64') y_train = y.astype('float64') # Add dummy column of ones to X_train if we want to train an intercept - last component of future weight vector if self.fit_intercept: X_train = np.column_stack((X_train, (np.ones(X_train.shape[0])))) # Your code here. # Just follow the suggested steps: create initial weights vector, # apply least_squares optimizer passing the parameters described above # and finally extract optimized weights. # Remember: you need to distinguish coefficients from intercept when fit_intercept=True initial_weights_vector = np.zeros(X_train.shape[1]) args = (X_train, y_train) all_w = (least_squares(fun=compute_residuals, x0=initial_weights_vector, args=args, kwargs={}).x) self.coef_ = np.delete(all_w, -1) self.intercept_ = np.delete((np.append(np.ndarray(1), all_w[-1])), 0) # :( # Return the classifier return self def predict(self, X): # Check is fit had been called check_is_fitted(self) # Input validation X = check_array(X) self.coef_ = self.coef_.reshape(-1, 1) return X.dot(self.coef_) + self.intercept_ # + #Testing area from sklearn.utils.estimator_checks import check_estimator from sklearn.linear_model import Ridge lr = LinearRegression() ridge = Ridge(alpha=0) lr_no_intercept = LinearRegression(fit_intercept=False) ridge_no_intercept = Ridge(alpha=0, fit_intercept=False) #Check compatibility with Sklearn framework and apply some spesific internal tests check_estimator(lr) check_estimator(lr_no_intercept) #Compare model accuracy with Ridge(0) from Sklearn data.fillna({'BallControl': data['BallControl'].mean() , 'Dribbling': data['Dribbling'].mean() , 'Strength': data['Strength'].mean()}, inplace=True) X_sample, y_sample = 
data[['Dribbling', 'Strength']], data['BallControl'] lr.fit(X_sample, y_sample) ridge.fit(X_sample, y_sample) assert np.allclose(lr.predict(X_sample), ridge.predict(X_sample), rtol=1e-03), "Your model with intercept not accurate enough!" lr_no_intercept.fit(X_sample, y_sample) ridge_no_intercept.fit(X_sample, y_sample) assert np.allclose(lr_no_intercept.predict(X_sample), ridge_no_intercept.predict(X_sample), rtol=1e-03), "Your model without intercept not accurate enough!" # - # Let's add more features in order to predict Dribbling score more accurately. features = ['BallControl', 'ShortPassing', 'Strength', 'Weight_float', 'Weight_kg'] target = 'Dribbling' for feat in features: data.fillna({feat: data[feat].mean()}, inplace=True) X_train, X_test, y_train, y_test = train_test_split(data[features].values, data[target].values, train_size=0.8, random_state=2) # + lr = Ridge(0) lr.fit(X=X_train, y=y_train) y_pred_train = lr.predict(X_train) y_pred_test = lr.predict(X_test) print(f'Train MSE {mean_squared_error(y_train, y_pred_train)}, test MSE {mean_squared_error(y_test, y_pred_test)}') # - print(f'w_0 = {lr.intercept_}, w_1, w_2, w_3, w_4, w_5 = {lr.coef_}') # That is not ok, two last weight components are too large, and they vary depending on the run! Although the result seems better our model would behave unexpectadly to the patterns in data it has never seen! Large weights and weights instability are the sign of [**overfitting**](https://en.wikipedia.org/wiki/Overfitting). <br><br> # According to the definition it is "_the production of an analysis that corresponds too closely or exactly to a particular set of data, and may therefore fail to fit additional data or predict future observations reliably_". But what does it actually mean? # Assume that we have a player whose weight in kg was calculated with some tiny error, let's say +=1g. 
player = data[features + [target]].iloc[0:2]
player['Predicted_dribbling'] = lr.predict(player[features].values)
player.head()

# Predictions are pretty good if the data is _pure_. Let's add some noise to _Weight_kg_ feature:

player['Weight_kg'] += [-0.001, 0.001]
player['Predicted_dribbling_with_error'] = lr.predict(player[features].values)
player.head()

# Predicted dribbling value has changed significantly! Look at how this tiny **1g** error leads to extremely big or small dribbling!

# The reason behind this strange unstable behaviour is **collinearity** between Weight and Weight_kg features, what means that Weight_kg can be linearly predicted from Weight. As a matter of fact they represent the same essense but in different scales. <br><br>
# **Multicollinearity** describes a more general case, when one feature can be predicted by linear combination of some other features.<br><br>
# Collinearity is really close related to **correlation** - degree to which a pair of variables are linearly related. Collinearity origins from Linear Algebra and Geometry whereas Correlation is a term from Statistics. Anyway all of this three terms refer to **linearly dependent features**, which is really bad for Linear Models.

# But why it is so bad? The main reason is that Linear Regression tries to capture the contribution of each feature to target _independently_, which obviously is not possible in terms of feature multicolliearity.<br><br>
# There are a whole bunch of really interesting thoughts that can help to capture the intuition behind it [here](https://stats.stackexchange.com/questions/1149/is-there-an-intuitive-explanation-why-multicollinearity-is-a-problem-in-linear-r). I'd citate one of the examples provided.<br><br>
# _Assume that two people collaborated and accomplished scientific discovery.
It is easy to tell their unique contributions (who did what) when two are totally different persons (one is theory guy and the other is good at experiment), while it is difficult to distinguish their unique influences (coefficients in regression) when they are twins acting similarly._ # There are a few approaches how to prevent overfitting and overcome multicollinearity. # - Drop features # - Combine features # - Regularization # # # Regularization is something we are going to speak about in the next modules. Combining features is problem-specific and could easily trigger a _holy_war_ due to ambiguity of approaches. Let's focus on simpliest - drop one of the features from the correlated pair.<br> # At first we need to define those pairs of features, **correlation matrix** comes to rescue! Each cell in the table shows the correlation between two variables. We use dataframe in-built method _corr_ in combination with seaborn _heatmap_. # + from seaborn import heatmap heatmap(data[features].corr(method='pearson'), center=0, square=True) plt.show() # + features = ['BallControl', 'ShortPassing', 'Strength', 'Weight_kg'] X_train, X_test, y_train, y_test = train_test_split(data[features].values, data[target].values, train_size=0.8, random_state=2) lr = Ridge(alpha=0) lr.fit(X=X_train, y=y_train) player['Predicted_dribbling_with_error'] = lr.predict(player[features].values) player.head() # - # ### Part 2.3 Putting all together # **Task 5 (up to 5 points).** Build a Linear Regression model for _Value_ prediction for every football player and validate it. You **have to** use either your custom Linear Regression class or `sklearn.linear_model.Ridge` with regularization param alpha=0. Steps you need to follow: # - Extract float number from _Value_ field in DataFrame (**0.5 points**) # - Сhoose more features that you expect to influence on player _Value_ (at least 10) # - Plot feature correlation matrix. 
(**0.5 points**) # - Drop features that are highly correlated with each other (_abs_(corr) > 0.9) one by one until no correlated pairs left. _Hint_: you may reuse code from Task_9 in HW_1 for automatic correlated pairs selection. (**1.5 points**) # - Split data into train/test with some proportion (**0.5 points**) # - Train a model on train dataset, make predictions both for train and test. (**0.5 points**) # - Measure the model quality in terms of MSE in train and test samples, (**0.5 points**) # - Write a short report about the work done. Why did you take these particular features? Can you find a logical explanation for high correlation of some of your features? Are you satisfied with the quality of predictions? etc. (**1 point**) # **Penalties** # - **-1 point** if used a different model besides custom Linear Regression or `sklearn.linear_model.Ridge` with regularization param alpha=0 # - **-0.5 points** if number of selected features BEFORE removal of linearly dependent ones is less than 10. # - **-0.5 points** if did not remove linearly dependent features before training the model. # + # Your code and a bunch of cool ideas here
second_week/seminar_and_homework.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Featurestore - Mars21
# ## Feature Store Delete
# <NAME> \
# <NAME>

from google.api_core import operations_v1
from google.cloud.aiplatform_v1beta1.types import io as io_pb2
from google.cloud.aiplatform_v1beta1.types.feature import Feature
from google.cloud.aiplatform_v1beta1 import FeaturestoreServiceClient
from google.cloud.aiplatform_v1beta1 import FeaturestoreOnlineServingServiceClient
from google.cloud.aiplatform_v1beta1.types import entity_type as entity_type_pb2
from google.cloud.aiplatform_v1beta1.types import featurestore as featurestore_pb2
from google.cloud.aiplatform_v1beta1.types import feature_selector as feature_selector_pb2
from google.cloud.aiplatform_v1beta1.types import featurestore_service as featurestore_service_pb2
from google.cloud.aiplatform_v1beta1.types import featurestore_online_service as featurestore_online_service_pb2

PROJECT_ID = "feature-store-mars21"  # Change to your project id
LOCATION = "us-central1"
API_ENDPOINT = LOCATION + "-aiplatform.googleapis.com"
FEATURESTORE_ID = "universe"
ENTITY = "customer"

# ## Define clients for FS admin and data management

# +
# Create admin_client for CRUD and data_client for reading feature values.
admin_client = FeaturestoreServiceClient(
    client_options={"api_endpoint": API_ENDPOINT})
data_client = FeaturestoreOnlineServingServiceClient(
    client_options={"api_endpoint": API_ENDPOINT})

# +
# Fully-qualified resource paths used by the calls below.
LOC_PATH = admin_client.common_location_path(PROJECT_ID, LOCATION)
FS_PATH = admin_client.featurestore_path(PROJECT_ID, LOCATION, FEATURESTORE_ID)
ENTITY_PATH = admin_client.entity_type_path(PROJECT_ID, LOCATION, FEATURESTORE_ID, ENTITY)
FEATURE_PATH = admin_client.feature_path(PROJECT_ID, LOCATION, FEATURESTORE_ID, ENTITY, '{}')

print("Location: \t", LOC_PATH)
print("Feature Store: \t", FS_PATH)
print("Entity: \t", ENTITY_PATH)
print("Feature: \t", FEATURE_PATH)
# -

# ## Delete Features , Entities and Feature Store

try:
    for entity_type in list(admin_client.list_entity_types(parent=FS_PATH)):
        for feature in list(admin_client.list_features(parent=entity_type.name)):
            admin_client.delete_feature(name=feature.name).result()  # delete feature
        #admin_client.delete_entity_type(name=entity_type.name).result() # delete entity
    #admin_client.delete_featurestore(name = FS_PATH) # delete store
except Exception as ex:
    print(ex)

try:
    # force=True removes the store together with any remaining child
    # entity types and features.
    print(admin_client.delete_featurestore(
        request=featurestore_service_pb2.DeleteFeaturestoreRequest(
            name=FS_PATH,
            force=True)).result())
except Exception as ex:
    print(ex)
feature-store/04-delete.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# BUG FIX: np / pd / plt were used throughout this notebook but never
# imported (the numpy import existed only inside the commented-out demo
# cell below); import them here so the cells run top-to-bottom.
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt

# +
# import numpy as np
# temp = 10.0*np.random.rand()-5.0
# temp,np.sqrt(4.0-temp ** 2)
# -

# ### Data generation (数据生成)
#
# Positive class: points near a circle of radius 2; negative class: points
# near a circle of radius 5.  Gaussian noise (sd 0.3) is added to x2, and
# even/odd indices pick the upper/lower half of the circle so the whole
# ring is covered.

# +
x1_pos = []
x2_pos = []
y_pos = []
for i in range(1000):
    temp = 4.0*np.random.rand()-2.0
    y_pos.append(1)
    x1_pos.append(temp)
    if i%2==0:
        x2_pos.append(np.sqrt(4.0-temp ** 2)+0.3*np.random.randn())
    elif i%2==1:
        x2_pos.append(-np.sqrt(4.0-temp ** 2)+0.3*np.random.randn())

x1_Neg = []
x2_Neg = []
y_Neg = []
for i in range(1000):
    temp = 10.0*np.random.rand()-5.0
    # BUG FIX: the negative class was also labelled 1, which made the
    # target column constant (all ones) and useless for classification;
    # label it 0 to match the "class0" legend in the plot below.
    y_Neg.append(0)
    x1_Neg.append(temp)
    if i%2==0:
        x2_Neg.append(np.sqrt(25.0-temp ** 2)+0.3*np.random.randn())
    elif i%2==1:
        x2_Neg.append(-np.sqrt(25.0-temp ** 2)+0.3*np.random.randn())
# -

plt.figure(figsize=(12,5))
plt.scatter(x1_pos,x2_pos,color='black',label="class1",alpha=0.5)
plt.scatter(x1_Neg,x2_Neg,color='red',label="class0",alpha=0.5)
plt.xlabel('x1')
plt.ylabel('x2')
plt.legend()
plt.grid()
plt.show()

Dict = {'x1':x1_pos+x1_Neg,'x2':x2_pos+x2_Neg,'y':y_pos+y_Neg}
DataTrain = pd.DataFrame(Dict)

# Held-out test split: same construction, 250 samples per class.
# +
x1_pos = []
x2_pos = []
y_pos = []
for i in range(250):
    temp = 4.0*np.random.rand()-2.0
    y_pos.append(1)
    x1_pos.append(temp)
    if i%2==0:
        x2_pos.append(np.sqrt(4.0-temp ** 2)+0.3*np.random.randn())
    elif i%2==1:
        x2_pos.append(-np.sqrt(4.0-temp ** 2)+0.3*np.random.randn())

x1_Neg = []
x2_Neg = []
y_Neg = []
for i in range(250):
    temp = 10.0*np.random.rand()-5.0
    # BUG FIX: same labelling error as the training split; use 0.
    y_Neg.append(0)
    x1_Neg.append(temp)
    if i%2==0:
        x2_Neg.append(np.sqrt(25.0-temp ** 2)+0.3*np.random.randn())
    elif i%2==1:
        x2_Neg.append(-np.sqrt(25.0-temp ** 2)+0.3*np.random.randn())
# -

Dict = {'x1':x1_pos+x1_Neg,'x2':x2_pos+x2_Neg,'y':y_pos+y_Neg}
DataTest = pd.DataFrame(Dict)

# ### model training

# $$\vec{x}\in\mathbb{R}^{1\times 2}$$
#
# $$W_1\in\mathbb{R}^{2\times 10}; b_1\in\mathbb{R}^{1\times 10}$$
#
# $$W_2\in\mathbb{R}^{10\times 2}; b_2\in\mathbb{R}^{1\times 2}$$
#
# $$z_1 = \sigma(\vec{x}W_1+b_1)$$
#
# $$z_2 = \sigma({z_1}W_2+b_2)$$
#
# $$\hat{y} = {\rm{softmax}}(z_2)$$
# ***********
# $$Loss(W_1,W_2,b_1,b_2) = -\frac{1}{N}\sum_{i=1}^{N}[y_i\log\hat{y_i}+(1-y_i)\log(1-\hat{y_i})]$$
#
# ***********
# $$k=0,1,2,...$$
#
# $$W_1^{k+1}=W_1^{k}-\alpha_k\frac{\partial Loss(W_1^{k},W_2^{k},b_1^{k},b_2^{k})}{\partial W_1}$$
#
# $$W_2^{k+1}=W_2^{k}-\alpha_k\frac{\partial Loss(W_1^{k},W_2^{k},b_1^{k},b_2^{k})}{\partial W_2}$$
#
# $$b_1^{k+1}=b_1^{k}-\alpha_k\frac{\partial Loss(W_1^{k},W_2^{k},b_1^{k},b_2^{k})}{\partial b_1}$$
#
# $$b_2^{k+1}=b_2^{k}-\alpha_k\frac{\partial Loss(W_1^{k},W_2^{k},b_1^{k},b_2^{k})}{\partial b_2}$$

# BUG FIX: `nn` was never imported anywhere in this notebook, and the class
# declaration referenced an undefined name `n`.
import torch.nn as nn


class DeepNeuralNetworkModel(nn.Module):  # BUG FIX: was `n.Module` (NameError)
    """Two-layer fully-connected network with sigmoid activations.

    Args:
        input_dim1: input feature size of layer 1 (2 for this notebook).
        output_dim1: output size of layer 1; must equal ``input_dim2``.
        input_dim2: input size of layer 2.
        output_dim2: final output size (2 classes here).

    NOTE(review): the markdown above applies softmax to z2, but forward()
    returns sigmoid(z2); softmax is presumably applied by the loss — confirm.
    """

    def __init__(self, input_dim1, output_dim1, input_dim2, output_dim2):
        # output_dim1 == input_dim2 is assumed so layer 1 feeds layer 2.
        super(DeepNeuralNetworkModel, self).__init__()
        # Fully connected layer 1
        self.FC_layer1 = nn.Linear(input_dim1, output_dim1)
        # Fully connected layer 2
        self.FC_layer2 = nn.Linear(input_dim2, output_dim2)
        # shared sigmoid activation
        self.act_sig = nn.Sigmoid()

    def forward(self, x):
        """Forward pass: x -> sigmoid(FC1) -> sigmoid(FC2)."""
        z1_ = self.FC_layer1(x)
        z1 = self.act_sig(z1_)
        # BUG FIX: the second layer must be FC_layer2; the original reused
        # FC_layer1, which fails whenever input_dim1 != output_dim1 and
        # never uses the second layer's weights at all.
        z2_ = self.FC_layer2(z1)
        z2 = self.act_sig(z2_)
        return z2
机器学习/算法/.ipynb_checkpoints/学习模型--DNN-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] deletable=true editable=true slideshow={"slide_type": "slide"} # ### Import the required modules # + deletable=true editable=true slideshow={"slide_type": "fragment"} from IPython.core.interactiveshell import InteractiveShell InteractiveShell.ast_node_interactivity = "all" import pandas as pd from sklearn.feature_extraction.text import CountVectorizer # + [markdown] deletable=true editable=true slideshow={"slide_type": "slide"} # ### Text data and how to make it ready for Machine Learning # + [markdown] deletable=true editable=true # #### A very small test text dataset # + deletable=true editable=true test_text_data = ['Check out this link', 'Lets go get a drink', 'This is the best video you will ever see'] # + [markdown] deletable=true editable=true # #### Instantiate `CountVectorizer` # + deletable=true editable=true vectorizer = CountVectorizer() # + [markdown] deletable=true editable=true # #### Fitting our data # + deletable=true editable=true vectorizer.fit(test_text_data) # + [markdown] deletable=true editable=true # #### Let's look at the vectorized word tokens # + deletable=true editable=true vectorizer.get_feature_names() # + [markdown] deletable=true editable=true # #### Transform to Document Term Matrix # + deletable=true editable=true dtm = vectorizer.transform(test_text_data) dtm # + deletable=true editable=true pd.DataFrame(dtm.toarray(), columns=vectorizer.get_feature_names()) # + [markdown] deletable=true editable=true # #### Test a new record against the bag of word # + deletable=true editable=true test_record = ['This is amazing. 
Check it out.'] # + deletable=true editable=true test_dtm = vectorizer.transform(test_record) # + deletable=true editable=true pd.DataFrame(test_dtm.toarray(), columns=vectorizer.get_feature_names()) # + [markdown] deletable=true editable=true # ### Real Dataset # # Dataset based on - # * University of Michigan Sentiment Analysis competition on Kaggle # * Twitter Sentiment Corpus by <NAME>(Sentiment140) # # # #### Read in the dataset # + deletable=true editable=true data = pd.read_table('tweets.tsv', usecols=[0,1]) # + [markdown] deletable=true editable=true # ### Examine the dataset # + deletable=true editable=true data.head() # + deletable=true editable=true data.shape # + deletable=true editable=true data.info() # + deletable=true editable=true data.Sentiment.value_counts() # + [markdown] deletable=true editable=true # #### Take out features and labels from the data # + deletable=true editable=true X = data.SentimentText y = data.Sentiment print(X.shape) print(y.shape) # + [markdown] deletable=true editable=true # #### Instantiate `CountVectorizer` # + deletable=true editable=true vectorizer = CountVectorizer() # + [markdown] deletable=true editable=true # #### Fit it with the text data # + deletable=true editable=true vectorizer.fit(X) # + [markdown] deletable=true editable=true # #### Transform the data to a document term matrix # + deletable=true editable=true X_dtm = vectorizer.transform(X) # + deletable=true editable=true X_dtm # + deletable=true editable=true
Code/Section 4/Making your Bag of Words ready.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### predictions on test data and on complete scan of patient #2: # import libraries: import pydicom import numpy as np import matplotlib.pyplot as plt from matplotlib.image import imsave import pandas as pd from tensorflow.keras.models import load_model from tensorflow.keras.utils import plot_model from PIL import Image import imageio import pickle import re # + # define metrics for model import: def diceCoef(y_true, y_pred, smooth=1.): y_true_f = K.flatten(y_true) y_pred_f = K.flatten(y_pred) intersection = K.sum(y_true_f * y_pred_f) return (2. * intersection + smooth) / ( K.sum(y_true_f) + K.sum(y_pred_f) + smooth) def diceCoefLoss(y_true, y_pred): return (1-diceCoef(y_true, y_pred)) def jaccardDistance(y_true, y_pred, smooth=1): intersection = sum(abs(y_true * y_pred), axis=-1) sum_ = sum(abs(y_true) + abs(y_pred), axis=-1) jac = (intersection + smooth) / (sum_ - intersection + smooth) return (1-jac) * smooth def f1Score(y_true, y_pred): true_positives = sum(round(clip(y_true * y_pred, 0, 1))) possible_positives = sum(round(clip(y_true, 0, 1))) predicted_positives = sum(round(clip(y_pred, 0, 1))) precision = true_positives / (predicted_positives + epsilon()) recall = true_positives / (possible_positives + epsilon()) f1_val = 2*(precision*recall)/(precision+recall+epsilon()) return f1_val # - # load saved model: model = load_model('/Users/krystanafoh/final_project_documentation/FINALmodel/model_best_128big_patients_2.h5', custom_objects={'diceCoefLoss': diceCoefLoss, 'diceCoef': diceCoef, 'jaccardDistance' : jaccardDistance, 'f1Score': f1Score}) # #### import test data: data = np.load('./xytts_patients_128big/X_test_19_patients_128big.pkl', allow_pickle=True) masks = np.load('./xytts_patients_128big/y_test_19_patients_128big.pkl', 
allow_pickle=True) # predictions on test data: preds = model.predict(data) # reshape to plot: temp = preds[130].reshape(128, 128) tempX = data[130].reshape(128, 128) tempy = masks[130].reshape(128,128) # + # plotting predicted masks examples in comparison to ground truth: f = plt.figure() f.add_subplot(1,2, 1) plt.title('original scan with ground truth') plt.imshow(tempX) plt.imshow(tempy,cmap=plt.cm.bone, alpha=0.2 ) f.add_subplot(1,2, 2) plt.title('mask/ground truth') plt.imshow(tempy,cmap=plt.cm.bone, alpha=0.5 ) plt.show(block=True) f = plt.figure() f.add_subplot(1,2, 1) plt.title('original scan with predicted mask') plt.imshow(tempX) plt.imshow(temp, cmap=plt.cm.bone, alpha=0.2) f.add_subplot(1,2, 2) plt.title('predicted mask') plt.imshow(temp, cmap=plt.cm.bone, alpha=0.5) plt.show(block=True) # - # #### predict on patient #2: # load dataframe of patient 2: dfp2 = pd.read_pickle('./patient2.hdf') # only original scans: df_origp2 = dfp2['original'] # cut off suffix, to list: def cut_suffix(df): list_short = [] for i in range(len(df)): short = df.iloc[i].rsplit('_', 1)[0] list_short.append(short) return list_short list_shortp2 = cut_suffix(df_origp2) # + # sort natural def atoi(text): return int(text) if text.isdigit() else text def natural_keys(text): ''' sorts in human order ''' return [ atoi(c) for c in re.split(r'(\d+)', text) ] list_shortp2.sort(key=natural_keys) # - # #### preprocessing for model input: # filepaths to arrays in list: def to_arrays(list): """reads in files from paths in df, converts them to pixel_arrays, returns list of pixel arrays""" im_list = [] for i in list: ds = pydicom.read_file(i) data = ds.pixel_array im_list.append(data) return im_list list_arrayp2 = to_arrays(list_shortp2) # downsample all images in list: def downsampling(list): """downsamples images to 128*128 pixels. 
Returns list of images.""" ds_img_list = [] for array in list: array = array[::4, ::4] ds_img_list.append(array) return ds_img_list list_dsp2 = downsampling(list_arrayp2) # + # to array of arrays: def to_array_array(list_of_arrays:list): """creates array of arrays, returns array.""" array = np.asanyarray(list_of_arrays) return array array_arrayp2 = to_array_array(list_dsp2) # - # normalize: def normalize(array): """Normalize the array between -1000 and 400 (Hounsfield)""" min = -1000 max = 400 array[array < min] = min array[array > max] = max array = (array - min) / (max - min) # hö array = array.astype("float32") return array # keras documentation array_normp2 = normalize(array_arrayp2) #reshape array: patient2 = array_normp2.reshape(172, 128, 128, -1) # #### predict on scans of patient 2: preds_p2 = model.predict(patient2) # #### get original masks as ground truths to plot in contrast: # df_masksp2 = dfp2['mask'] # #### preprocess: # cut off suffix, to list: def cut_suffix(df): list_short = [] for i in range(len(df)): short = df.iloc[i].rsplit('_', 1)[0] list_short.append(short) return list_short masks_p2 = cut_suffix(df_masksp2) # + # sort natural def atoi(text): return int(text) if text.isdigit() else text def natural_keys(text): ''' sorts in human order ''' return [ atoi(c) for c in re.split(r'(\d+)', text) ] masks_p2.sort(key=natural_keys) # - mask_arraysp2 = to_arrays(masks_p2) mask_arraysp2 = downsampling(mask_arraysp2) masks_arrayp2 = to_array_array(mask_arraysp2) # reshape: masks_p2 = masks_arrayp2.reshape(172, 128, 128, -1) # #### plot example predictions on test data: # plot only downscaled original image: # reshape to plot: temppred = preds_p2[120].reshape(128, 128) tempXp2 = patient2[120].reshape(128, 128) tempyp2 = masks_p2[120].reshape(128,128) # + # plotting predicted masks examples in comparison to ground truth: f = plt.figure() f.add_subplot(1,2, 1) plt.title('original scan with ground truth') plt.imshow(tempXp2) 
plt.imshow(tempyp2,cmap=plt.cm.bone, alpha=0.2 ) f.add_subplot(1,2, 2) plt.title('mask/ground truth') plt.imshow(tempyp2,cmap=plt.cm.bone, alpha=0.5 ) plt.show(block=True) f = plt.figure() f.add_subplot(1,2, 1) plt.title('original scan with predicted mask') plt.imshow(tempXp2, cmap=plt.cm.bone) plt.imshow(temppred, cmap=plt.cm.bone, alpha=0.2) f.add_subplot(1,2, 2) plt.title('predicted mask') plt.imshow(temppred, cmap=plt.cm.bone, alpha=0.5) plt.show(block=True) # - # #### make gif of orginal scansd of patient 2 with predicted masks: # + # merge scans and predicted masks: gif_pics_p2 = [] for i in range(len(patient2)): temppred = preds_p2[i].reshape(128, 128) tempXp2 = patient2[i].reshape(128, 128) pic = tempXp2 + temppred * 0.3 gif_pics_p2.append(pic) # - # save merged images as png's: i = 1 lengths = [] for pic in gif_pics_p2: image = Image.fromarray(pic) name = f'{i}_picgif_neu.png' imsave(name , image) lengths.append(i) i += 1 # + # create gif and save to disk: images = [] for i in range(1, 172): filename = f'./{i}_picgif_neu.png' #.format(i) images.append(imageio.imread(filename)) imageio.mimsave('animated_mask_patient_neu.gif', images, fps=20)
predictions.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.9.6 64-bit (''base'': conda)' # name: python3 # --- # --- # author: <NAME> (<EMAIL>) # --- # We're going to use fake data here, but you can replace our fake data with your real data below. # You will need not only the samples but also the known population standard deviations. # + cell_id="00005-d53be040-6048-48b6-8478-2ccdc1333f12" deepnote_cell_type="code" tags=[] sample1 = [ 5, 8, 10, 3, 6, 2] sample2 = [13, 20, 16, 12, 18, 15] population1_sd = 2.4 population2_sd = 3 # - # We must compute the sizes and means of the two samples. import numpy as np n1 = len(sample1) n2 = len(sample2) sample1_mean = np.mean(sample1) sample2_mean = np.mean(sample2) # + [markdown] cell_id="00004-8a4ce426-a40f-4c95-aa1d-b6293c282237" deepnote_cell_type="markdown" tags=[] # We choose a value $0 \le \alpha \le 1$ as the probability of a Type I error # (a false positive, finding we should reject $H_0$ when it’s actually true). # We will use $\alpha=0.05$ in this example. # # ### Two-tailed test # # In a two-tailed test, the null hypothesis is that the difference is zero, # $H_0: \bar{x} - \bar{x}' = 0$. We compute a test statistic and $p$-value as # follows. # + cell_id="00000-c918c8a0-a876-46a9-90d4-687b38a7f05c" deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=439 execution_start=1628704638613 source_hash="e7b143a6" tags=[] from scipy import stats test_statistic = ( (sample1_mean - sample2_mean) / np.sqrt(population1_sd**2/n1 + population2_sd**2/n2) ) 2*stats.norm.sf(abs(test_statistic)) # two-tailed p-value # + [markdown] cell_id="00006-716aefbb-701e-4d34-9dc9-dc234a2b55f0" deepnote_cell_type="markdown" tags=[] # Our p-value is less than $\alpha$, so we have sufficient evidence to reject the null hypothesis. # The difference between the means is significantly different from zero. 
# # ### Right-tailed test # # In the right-tailed test, the null hypothesis is $H_0: \bar{x} - \bar{x}' \le 0$. # That is, we are testing whether the difference is greater than zero. # # The code is very similar to the previous, except only in computing the $p$-value. # We repeat the code that's in common, to make it easier to copy and paste the examples. # + cell_id="00007-6165fc59-b7ce-48eb-887d-fbf0ee97e36e" deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=5 execution_start=1628704705604 source_hash="a55e314a" tags=[] from scipy import stats test_statistic = ( (sample1_mean - sample2_mean) / np.sqrt(population1_sd**2/n1 + population2_sd**2/n2) ) stats.norm.sf(test_statistic) # right-tailed p-value # + [markdown] cell_id="00008-232ebbf6-6906-4ae0-aa95-d281e3e192e9" deepnote_cell_type="markdown" tags=[] # Our $p$-value is greater than $\alpha$, so we do not have sufficient evidence to # reject the null hypothesis. We would continue to assume that the difference in # means is less than or equal to zero. # # ### Left-tailed test # # In a left-tailed test, the null hypothesis is $H_0: \bar{x} - \bar{x}' \ge 0$. # That is, we are testing whether the difference is less than zero. # # The code is very similar to the previous, except only in computing the $p$-value. # We repeat the code that's in common, to make it easier to copy and paste the examples. # + cell_id="00009-9b050b94-acb6-4883-be72-08941697f4cd" deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=10 execution_start=1628704803869 source_hash="4b5f7ca2" tags=[] from scipy import stats test_statistic = ( (sample1_mean - sample2_mean) / np.sqrt(population1_sd**2/n1 + population2_sd**2/n2) ) stats.norm.sf(-test_statistic) # left-tailed p-value # + [markdown] cell_id="00010-b2e29cae-e293-456f-bb27-7b6f220fe893" deepnote_cell_type="markdown" tags=[] # Our $p$-value is less than $\alpha$, so we have sufficient evidence to reject # the null hypothesis. 
The difference between the means is significantly less than zero.
database/tasks/How to do a hypothesis test for the difference between means when both population variances are known/Python, using SciPy.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: py_37_env # language: python # name: py_37_env # --- # # refget-py tutorial import refget from refget import trunc512_digest # Show some results for sequence digests: trunc512_digest('ACGT') trunc512_digest('TCGA') trunc512_digest('ACGT', 26) # ## Use a database # # Now, instantiate a RefDB object. You have to provide a database where you will store lookup values. For a demo, you can also use a basic dictionary as a lookup database, but this will obviously not persist. # # Seed our database with a few pre-existing entries: # + local_lookup_dict = { trunc512_digest('ACGT'): "ACGT", trunc512_digest('TCGA'): "TCGA" } rgdb_local = refget.RefDB(local_lookup_dict) # - # Retrieve sequences using the checksum rgdb_local.refget(trunc512_digest('TCGA')) # We can also add new sequences into the database: rgdb_local.refget(trunc512_digest('TCGATCGA')) # This sequence is not found in our database yet checksum = rgdb_local.load_seq("TCGATCGA") # So, let's add it into database rgdb_local.refget(checksum) # This time it returns # ## Switching to a Redis back-end # Using a dict as a database will not persist. Let's instead use a redis back-end. If you're running a local redis server, you can use that as a back-end. 
First, start up a server like this: # # ``` # docker run --rm --network='host' --workdir="`pwd`" redis:5.0.5 redis-server # ``` # Then you can instantiate a new RefDB object that uses it like this: rgdb = refget.RefDB(refget.RedisDict()) # ## Database insertion # # Insert a sequence into the database, then retrieve it via checksum checksum = rgdb.load_seq("GGAA") rgdb.refget(checksum) # ## Insert and retrieve a sequence collection (fasta file) fa_file = "demo_fasta/demo.fa" checksum, content = rgdb.load_fasta(fa_file) # Here we retrieve all the sequences in the fasta file: rgdb.refget(checksum) # If you want it in fasta format there's a helper function for that: print(rgdb.fasta_fmt(rgdb.refget(checksum))) # You can limit recursion to get just the checksums for individual sequences, rather than the sequences themselves: rgdb.refget(checksum, reclimit=1) # The individual sequences are also retrievable independently because each sequence from the fasta file is stored as a primary unit. Test some single-sequence lookups from the database: rgdb.refget(content["chr1"]) rgdb.refget(trunc512_digest('ACGT')) # Now if we kill that object and create a new object using the same redis back-end, the data persists because it's stored in the redis back-end: rgdb = None rgdb = refget.RefDB(refget.RedisDict()) rgdb.refget(checksum) checksum rgdb.refget("3912dddce432f3085c6b4f72a644c4c4c73f07215a9679ce") # # Using MongoDB backend # import refget from refget import trunc512_digest from mongodict import MongoDict import pymongo pymongo.Connection = lambda host, port, **kwargs: pymongo.MongoClient(host=host, port=port) from platform import python_version python_version() my_dict = MongoDict(host='localhost', port=27017, database='my_dict', collection='store') my_dict rgdb = refget.RefDB(my_dict) fa_file = "demo_fasta/demo.fa" checksum, content = rgdb.load_fasta(fa_file) rgdb.refget(checksum) rgdb.refget(checksum, reclimit=1) fa_object = refget.parse_fasta(fa_file) content_checksums = 
{} for k in fa_object.keys(): seq = str(fa_object[k]) content_checksums[k] = {'length': len(seq), 'seq': rgdb.load_seq(seq)} content_checksums collection_string = ";".join(["{}:{}/{}".format(name, value["length"], value["seq"]) for name, value in content_checksums.items()]) collection_string
demo.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + raw_mimetype="text/restructuredtext" active="" # .. _networkset: # | # | # Download This Notebook: :download:`NetworkSet.ipynb` # - # # NetworkSet # # # ## Introduction # # # # The [NetworkSet](../api/networkSet.rst) object represents an unordered set of networks. It # provides methods iterating and slicing the set, sorting by datetime, calculating statistical quantities, and displaying uncertainty bounds on plots. # # ## Creating a [NetworkSet](../api/networkSet.rst) # Lets take a look in the `data/` folder, there are some redundant measurements of a network called `ro`, which is a *radiating open* waveguide. # + active="" # ls data/ro* # # -a---- 14/02/2021 12:35 8031 ro,1.s1p # -a---- 14/02/2021 12:35 8030 ro,2.s1p # -a---- 14/02/2021 12:35 8031 ro,3.s1p # -a---- 14/02/2021 12:35 46592 ro_spreadsheet.xls # + [markdown] raw_mimetype="text/markdown" # The files `ro,1.s1p` , `ro,2.s1p`, ... are redundant measurements on # which we would like to calculate statistics using the [NetworkSet](../api/networkSet.rst) # class. # # A [NetworkSet](../api/networkSet.rst) is created from a list or dict of # [Network](../api/network.rst)'s. So first we need to load all of the # touchstone files into `Networks`. This can be done quickly with # `rf.read_all`, The argument `contains` is used to load only files # which match a given substring. 
# + import skrf as rf rf.read_all(rf.data.pwd, contains='ro') # - # This can be passed directly to the [NetworkSet](../api/networkSet.rst) constructor, # + from skrf import NetworkSet ro_dict = rf.read_all(rf.data.pwd, contains='ro') ro_ns = NetworkSet(ro_dict, name='ro set') ro_ns # - # A NetworkSet can also be constructed directly from a dir with `NetworkSet.from_dir()` or from a zipfile of touchstones through the class method `NetworkSet.from_zip()`. # ## Accessing Network Methods # The [Network](../api/network.rst) elements in a [NetworkSet](../api/networkSet.rst) can be accessed like the elements of list, ro_ns[0] # Most [Network](../api/network.rst) methods are also methods of # [NetworkSet](../api/networkSet.rst). These methods are called on each # [Network](../api/network.rst) element individually. For example to # plot the log-magnitude of the s-parameters of each Network. # + # %matplotlib inline from pylab import * import skrf as rf rf.stylely() ro_ns.plot_s_db() # - # ## Statistical Properties # # # Statistical quantities can be calculated by accessing # properties of the NetworkSet. To calculate the complex # average of the set, access the `mean_s` property ro_ns.mean_s # + raw_mimetype="text/restructuredtext" active="" # # .. note:: # # Because the statistical operator methods are generated upon initialization # their API is not explicitly documented in this manual. # - # # The naming convention of the statistical operator properties are `NetworkSet.{function}_{parameter}`, where `function` is the name of the # statistical function, and `parameter` is the Network parameter to operate # on. These methods return a [Network](../api/network.rst) object, so they can be # saved or plotted in the same way as you would with a Network. 
# To plot the log-magnitude of the complex mean response ro_ns.mean_s.plot_s_db(label='ro') # Or to plot the standard deviation of the complex s-parameters, ro_ns.std_s.plot_s_re(y_label='Standard Deviations') # Using these properties it is possible to calculate statistical quantities on the scalar # components of the complex network parameters. To calculate the # mean of the phase component, ro_ns.mean_s_deg.plot_s_re() # ## Plotting Uncertainty Bounds # # # Uncertainty bounds can be plotted through the methods ro_ns.plot_uncertainty_bounds_s_db() ro_ns.plot_uncertainty_bounds_s_deg() # + raw_mimetype="text/restructuredtext" active="" # .. note:: # # The uncertainty bounds plotted above are calculated **after** # the complex number has been projected onto the specified scalar component. # Thus, the first plot represents uncerainty in the magnitude component **only**. # - # # ## Reading and Writing # To write all [Network](../api/network.rst)s of a [NetworkSet](../api/networkSet.rst) out to individual touchstones, ro_ns.write_touchstone(dir='data/') # For temporary data storage, [NetworkSet](../api/networkSet.rst)s can be saved and read from disk # using the functions `rf.read` and `rf.write` # # rf.write('ro set.ns', ro_ns) ro_ns = rf.read('ro set.ns') ro_ns # ## Export to Excel, csv, or html # [NetworkSet](../api/networkSet.rst)s can also be exported to other filetypes. The format of the output; real/imag, mag/phase is adjustable, as is the output type; csv, excel, html. For example to export mag/phase for each network into an Excel spreadsheet for your boss[s] ro_ns.write_spreadsheet('data/ro_spreadsheet.xls', form='db') # More info on this can be found in the function, `skrf.io.general.network_2_spreadsheet`
doc/source/tutorials/NetworkSet.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Doc2Vec Searching of Lang Database # # In this recipe, I will demonstrate the use of the `gensim` package and a simple `doc2vec` model trained on stories from the Lang coloured fairy books to support semantic retrieval of fairy stories. # # The approach can be summarised as follows: # # - generate a vocabulary of terms representative of the search corpus; # - generate a vector space where each dimension is a word in the vocabulary; # - generate a vector for each document or search phrase; # - retrieve documents based on similarity between document vector and search phrase vector. # # The following recipe is inspired by [How to make a search engine on Movies Description](https://github.com/ppontisso/Text-Search-Engine-using-Doc2Vec-and-TF-IDF/blob/master/notebook.ipynb). # # See also https://www.kaggle.com/hgilles06/a-doc2vec-search-engine-cord19-new-version for ideas on a possible graphical user interface. 
# ## Connecting to the Database # # We're going to work with our Lang fairy story database, so let's set up a connection to it: # + from sqlite_utils import Database db_name = "demo.db" db = Database(db_name) # - # Let's remind ourselves of the database structure: print(db.schema) # Recall that we can perform a full text search: # + #q = 'king "three sons" gold' q = 'hansel witch' _q = f'SELECT title FROM books_fts WHERE books_fts MATCH {db.quote(q)} ;' for row in db.query(_q): print(row["title"]) # - # We can randomly sample a selection of rows with a query of the following form: # + # Via https://gist.github.com/alecco/9976dab8fda8256ed403054ed0a65d7b _q_random_sample = """ SELECT * FROM books WHERE rowid IN (SELECT rowid FROM books WHERE title NOT LIKE "Preface" ORDER BY random() LIMIT {}); """ for row in db.query(_q_random_sample.format(5)): print(row["title"]) # - # ## Simple Model # # We could use an off-the-shelf model to process documents, or we can train our own model from our own documents so that the word vectors are aligned to our dataset. In a large corpus, we can train on a sample of documents if they are representative of the whole. # # If we train against the whole dataset, we can search into the dataset directly from the model. If train the model on a partial collection, then we can only compare search phrases and documents that we have generated vectors for. # # To create the model, it helps if we clean the documents, e.g. 
by decasing, and removing punctuation: # + from gensim.parsing.preprocessing import preprocess_string from gensim.parsing.preprocessing import strip_tags, strip_punctuation, strip_numeric, remove_stopwords def clean_text(text): """Generate a cleaned, tokenised version of a text.""" CUSTOM_FILTERS = [lambda x: x.lower(), strip_tags, strip_punctuation, strip_numeric, remove_stopwords] return preprocess_string(text, CUSTOM_FILTERS) # - # Apply the cleaning function to the text on the way in to creating the training corpus: # + sample_corpus = db.query(_q_random_sample.format(9999)) sample_docs = [(clean_text(r['text']), f"{r['book']}::{r['title']}", #create a unique tag r['title']) for r in sample_corpus] # For the first doc, preview the first 5 cleaned words and title sample_docs[0][0][:5], sample_docs[0][1], sample_docs[0][2] # + # The gensim model needs named tuples # including at least a words and tags dimension # Naively we can just use a document index count as the tag from collections import namedtuple StoryDoc = namedtuple('StoryDoc', 'words tags title') sample_docs_training = [] for i, sample_doc in enumerate(sample_docs): sample_docs_training.append(StoryDoc(sample_doc[0], [sample_doc[1]], # This must be a list sample_doc[2])) # + from gensim.models import Doc2Vec # Define the parameters for building the model. # We can also pass a list of documents # via the first "documents" parameter # and the model will be trained against those. # Alternatively, create an empty model and train it later. 
model = Doc2Vec(
    # dm: training algorithm;
    # 1: distributed memory/PV-DM;
    # 0: distributed bag of words (PV-DBOW)
    dm=1,
    # vector_size: size of feature vectors
    vector_size=300,
    # window: max dist between current & predicted word
    window=10,
    # hs: 1: hierarchical softmax;
    # hs: 0 : negative sampling if negative
    hs=0,
    # min_count: ignore words w/ lower frequency
    # There is a risk to setting this too high
    # particularly if a search term is likely unique,
    # as it might be with a name. On the other hand,
    # for such situations, a simple search might be better?
    min_count=1,
    # sample: randomly downsample hi-frequency words
    # useful range: (0, 1e-5)
    sample=1e-5,
)
# -

# The model is built around a vocabulary extracted from the training document corpus.

# Build the model vocabulary
model.build_vocab(sample_docs_training)

# We can now train the model (this may take some time for a large corpus):

# It would be useful if we could display a progress bar for this
model.train(sample_docs_training,
            total_examples=model.corpus_count,
            epochs=100,
            start_alpha=0.01,
            end_alpha=0.01)

# Rather than creating a model each time we want to use it, we can save the model and then load it as required:

# +
# Save a model
model.save("lang_model.gensim")

# Load in a model
model = Doc2Vec.load("lang_model.gensim")
# -

# To retrieve a document matching a search phrase, we need to encode the search phrase and then try to find a matching document:

# +
search_phrase = """ hansel and his sister were cast out by their wicked stepmother and went into forest and met an evil witch """

# Preprocess the search phrase
tokens = clean_text(search_phrase)
tokens
# -

# Generate a vector for the tokens:

# Generate the vector representation from the model
search_vector = model.infer_vector(tokens, alpha=0.001, steps = 50)

# We can now search for related documents from the original training set based on how well their vectors match the vector generated for the search phrase:

# +
# Find the top 10 matches
matches = model.docvecs.most_similar([search_vector], topn=10)

# To rank every document from the training corpus
# set: topn=model.docvecs.count

# The response gives the original training document ids and match scores
matches
# -

# Let's try another one:

# +
search_phrase = """ a poor orphan girl lives with her wicked stepmother and sisters but then her fairy godmother appears and she goes to a ball and leaves at midnight but loses her slipper then finally marries the prince """

# Preprocess the search phrase
tokens = clean_text(search_phrase)

search_vector = model.infer_vector(tokens, alpha=0.01, steps = 50)
model.docvecs.most_similar([search_vector], topn=10)
# -

# Note that the result is stochastic (has a random element) in the way that the search vector is inferred: if you rerun the query, you will likely generate a different search vector. As a consequence, the search results returned are likely to differ in their order and match scores each time the query is run.

# ## Creating a Search Tool
#
# The next step is to register a custom SQLite function that will generate a vector for a search term and return matching records on that basis.
# +
from vtfunc import TableFunction


class SQLite_doc2Vec_Model_Search(TableFunction):
    """Virtual table function ranking training documents against a phrase.

    Exposed to SQLite as ``doc2vec_model_search(query[, threshold])``;
    each returned row is ``(book, title, score)``. The tag stored in the
    model encodes book and title joined by ``::``.
    """
    params = ['query', 'threshold']
    columns = ['book', 'title', 'score']
    name = 'doc2vec_model_search'
    # Load the trained model once, at class-definition time.
    model = Doc2Vec.load("lang_model.gensim")

    def initialize(self, query=None, threshold=None):
        # FIX: removed a stray `num` parameter that did not appear in
        # `params` and was never used.
        tokens = clean_text(query)
        # FIX: use the model held on the class (previously the class
        # attribute was loaded but ignored in favour of a module-level
        # `model` global).
        search_vector = self.model.infer_vector(tokens, alpha=0.01, steps=50)
        # Score every document in the training corpus.
        scores = self.model.docvecs.most_similar([search_vector],
                                                 topn=self.model.docvecs.count)
        # FIX: compare against None so an explicit threshold of 0 still
        # filters out negative-similarity matches (cosine similarity
        # ranges over [-1, 1]).
        if threshold is not None:
            scores = [(t, s) for (t, s) in scores if s >= threshold]
        self._iter = iter(scores)

    def iterate(self, idx):
        # Emit one (book, title, score) row per call.
        (tag, score) = next(self._iter)
        items = tag.split("::")
        return (items[0], items[1], score,)


# And register the function
SQLite_doc2Vec_Model_Search.register(db.conn)
# -

# The query searches over the model and can take various forms:
#
# - `doc2vec_model_search("search phrase")`
# - `doc2vec_model_search("search phrase", MIN_SCORE)`
#
# For example:

# +
model_query = f""" SELECT * FROM doc2vec_model_search('''{search_phrase}''', 0.45 ); """

for i in db.execute(model_query):
    print(i)
# -

# ## Saving Model Vectors into the Database
#
# If we look at the object type of one of the model vectors, we see that it is a `numpy.ndarray`, which can be easily represented as a list:

type(model.docvecs[0])

# We can store this data in the SQLite database as a `BLOB`.
# To simplify the process of converting the array into and out of the appropriate format for storage in the database compared to its use as a gensim vector, we can register a custom handler for the `numpy.ndarray` object:

# +
# Via https://stackoverflow.com/a/18622264/454773
# See also: https://github.com/joosephook/sqlite3-numpy
import sqlite3
import numpy as np
import io


def adapt_array(arr):
    """Serialise a numpy array to a SQLite BLOB (``.npy`` byte stream).

    http://stackoverflow.com/a/31312102/190597 (SoulNibbler)
    """
    out = io.BytesIO()
    np.save(out, arr)
    out.seek(0)
    return sqlite3.Binary(out.read())


def convert_array(text):
    """Deserialise a SQLite BLOB (``.npy`` bytes) back to a numpy array."""
    out = io.BytesIO(text)
    out.seek(0)
    return np.load(out)


# Converts np.ndarray to a BLOB when inserting
sqlite3.register_adapter(np.ndarray, adapt_array)

# Converts a BLOB back to np.ndarray when selecting from an "array" column
sqlite3.register_converter("array", convert_array)
# -

# Now we need to reset the database to a connection that supports the custom handler we have just registered:

# Reset the database connection to use the parsed datatype
# (PARSE_DECLTYPES makes sqlite3 consult the registered converters).
db.conn = sqlite3.connect(db_name, detect_types=sqlite3.PARSE_DECLTYPES)

# We can now create a table with a custom "array" datatype:

# +
# Give ourselves a clean slate
db["story_vectors"].drop(ignore=True)

# sqlite_utils doesn't appear to support custom types (yet?!)
# The following errors on the "array" datatype
"""
db["story_vectors"].create({
    "book": str,
    "title": str,
    "tag": str, # a unique key derived from book and title
    "vector": "array",
}, pk=("book", "title"),
# The following is not currently supported by sqlite_utils
#foreign_keys=[ (("book", "title"), "books", ("book", "title"))] # local-table-id, foreign-table, foreign-table-id]
)
"""

# so we can create a table the old fashioned way...
# The "array" declared type below triggers the converter registered above.
vector_table_create = """
CREATE TABLE story_vectors
    (tag TEXT PRIMARY KEY,
     vector array,
     book TEXT,
     title TEXT );
"""

cur = db.conn.cursor()
cur.execute(vector_table_create)
# -

# We can generate a list of dictionaries, one per record used to train the model, that can then be added directly to the `story_vectors` database table:

# +
xrecords = []

# Each doctag is "book::title"; split it back out into separate columns.
for tag in model.docvecs.doctags:
    xrecords.append({'book': tag.split('::')[0],
                     'title': tag.split('::')[1],
                     'tag': tag,
                     'vector':model.docvecs[tag]})
# -

# And add the records directly to the database:

db["story_vectors"].insert_all(xrecords)

# Let's pull an example record back showing just the first few elements of the vector associated with the record:

# +
_q = f'SELECT * FROM story_vectors LIMIT 1;'

for row in db.query(_q):
    # row['vector'] round-trips through convert_array back to np.ndarray
    print(row['tag'], row['book'], row['title'], row['vector'][:10])

# +
import pandas as pd

_q = f'SELECT * FROM story_vectors;'
df = pd.read_sql(_q, db.conn)
df.head(3)
# -

# We can get the cosine similarity for each row relative to a search vector using the `sklearn.metrics.pairwise.cosine_similarity` applied to a dataframe of vectors we want to match against.
#
# The `cosine_similarity()` function will happily accept two `pandas` dataframes, such as an N x M matrix of vectors we want to score against, and a 1 x M search vector matrix:

# +
from sklearn.metrics.pairwise import cosine_similarity

# Grab the vectors and expand each one across columns
match_vectors_df = df['vector'].apply(pd.Series)

search_vector_df = pd.DataFrame(search_vector).T

df['score'] = cosine_similarity(match_vectors_df, search_vector_df)
df[df['score']>0.45].sort_values("score", ascending=False).head()
# -

# So it's easy enough to create a custom function to search over the vectors table rather than the model:

# +
class SQLite_doc2Vec_Table_Search(TableFunction):
    """Virtual table function scoring the story_vectors table by cosine
    similarity to an inferred search-phrase vector."""
    params = ['query', 'threshold']
    columns = ['book', 'title', 'score']
    name = 'doc2vec_search'

    # If we move this into the body, we can update the database
    # and respond to new rows added to story_vectors table
    _q = f'SELECT book, title, vector FROM story_vectors;'
    df = pd.read_sql(_q, db.conn)
    match_vectors_df = df['vector'].apply(pd.Series)

    def initialize(self, query=None, threshold=None):
        df = self.df
        tokens = clean_text(query)
        # NOTE(review): relies on the module-level `model` global being loaded.
        search_vector = model.infer_vector(tokens, alpha=0.01, steps = 50)
        search_vector_df = pd.DataFrame(search_vector).T

        # Find cosine similarity
        df['score'] = cosine_similarity(self.match_vectors_df, search_vector_df)

        # Apply minimum threshold if set
        # NOTE(review): `if threshold` treats a threshold of 0 as unset;
        # negative-similarity rows then pass through — confirm intended.
        _iterator = df[df['score']>=threshold] if threshold else df
        self._iter = _iterator[self.columns].itertuples(index=False, name=None)

    def iterate(self, idx):
        # One (book, title, score) row per call.
        row = next(self._iter)
        return (row[0], row[1], row[2],)


# And register the function
SQLite_doc2Vec_Table_Search.register(db.conn)
# -

# Let's try it out:

# +
vector_query = f""" SELECT * FROM doc2vec_search("{search_phrase}") WHERE score>0.4 ORDER BY score DESC LIMIT 3; """

pd.read_sql(vector_query, db.conn)
# -

# Let's try another one:

# +
search_phrase2 = """ A girl lives with her grandmother and a wicked lord comes along and tells her she can't get married unless she spins a funeral shroud and a wedding gown made from nettles. As she spins, he gets ill, but can't die until she finishes it. """

vector_query = f""" SELECT * FROM doc2vec_search("{search_phrase2}") WHERE score>0.3 ORDER BY score DESC LIMIT 3; """

pd.read_sql(vector_query, db.conn)

# +
search_phrase3 = """ A girl in kitchen makes soup for king gets married. """

vector_query = f""" SELECT * FROM doc2vec_search("{search_phrase3}") WHERE score>0.3 ORDER BY score DESC LIMIT 10; """

pd.read_sql(vector_query, db.conn)
# -

# ## Add TF-IDF
#
# To improve the doc2vec performance, it might be worth exploring a model that has a stricter minimum frequency for words in the corpus, but that also mixes a TF-IDF (*term frequency, inverse document frequency*) component in the ranking score?
Lang_Doc2Vec.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + semantic_network = nx.DiGraph() relation_defs = {} with open("../data/umls/SRDEF") as fp: for line in fp: line = line.split("|") if line[0] == 'STY': semantic_network.add_node(line[2], description=line[4]) elif line[0] == 'RL': relation_defs[line[2]] = line[4] # - with open("../data/umls/SRSTR") as fp: for line in fp: src, rel, dst, st, _ = line.split("|") if src in semantic_network and dst in semantic_network: semantic_network.add_edge(src, dst, label=rel, status=st, description=relation_defs[rel]) # + from pydot import Dot, Node, Edge class JDot(Dot): def __init__(self, scale=1, rankdir='TB'): super().__init__(rankdir=rankdir) self.scale = scale def _repr_svg_(self): return self.create_svg().decode('utf8').replace("scale(1 1)", "scale(%f, %f)" % (self.scale, self.scale)) # + graph = JDot(scale=0.5, rankdir='TB') for node in semantic_network.nodes: graph.add_node(Node(node)) for u,v in semantic_network.edges: attrs = semantic_network.get_edge_data(u,v) if attrs['label'] == 'isa': graph.add_edge(Edge(src=u, dst=v)) # else: # graph.add_edge(Edge(src=u, dst=v, style='dashed', label=attrs['label'])) graph # -
notebooks/visualizing-umls.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <b>Traçar um esboço do gráfico e obter uma equação da parábola que satisfaça as condições dadas.</b> # <b>22. Foco: $F(4,-5)$; diretriz $d: y = 1$</b> # <b>Fazendo um esboço é possível perceber que a parábola é paralela ao eixo $y$, logo sua equação é dada por $(x-h)^2 = 2p(y-k)$</b><br><br> # <b>Sabendo que a distância da diretriz até o foco é $p$, podemos calcular sua distância para achar $\frac{p}{2}$ usando o ponto $P(4,1)$ da diretriz</b><br><br> # $p = \sqrt{(4-4)^2 + (-5-1)^2}$<br><br> # $p = \sqrt{0 + (-6)^2}$<br><br> # $p = \pm \sqrt{36}$<br><br> # $p = -6$<br><br> # $\frac{p}{2} = -3$<br><br> # <b>Somando $\frac{p}{2}$ no eixo $y$ da diretriz, obtemos as coordenadas do vértice</b><br><br> # <b>Vértice:</b> $V(4, -2)$<b><br><br> # <b>Substituindo agora os pontos dos vértice e o valor de $p$ na fórmula, temos que</b><br><br> # $(x-4)^2 = 2 \cdot -6 \cdot (y+2)$<br><br> # $(x-4)^2 = -12(y+2)$<br><br> # $x^2 - 8x + 16 = -12y - 24$<br><br> # $x^2 - 8x + 12y + 16 + 24$<br><br> # $x^2 - 8x + 12y + 40$<br><br> # <b>Gráfico da parábola</b> from sympy import * from sympy.plotting import plot_implicit x, y = symbols("x y") plot_implicit(Eq((x-4)**2, -12*(y+2)), (x,-20,20), (y,-20,20), title=u'Gráfico da parábola', xlabel='x', ylabel='y');
Problemas Propostos. Pag. 172 - 175/22.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: work3
#     language: python
#     name: work3
# ---

# # Imports

# +
from matplotlib import pyplot as plt
from matplotlib import rcParams
# %matplotlib inline

import numpy as np
# -

# # Default Parameters

# +
figsize = (6, 4) # in inches
fontsize = 10
save_dpi = 300
linewidth = 0.5

# NOTE(review): `save_dpi` and `linewidth` are defined above but the dict
# below hard-codes 300 and 0.5 instead of reusing them — confirm intended.
rcParams.update({
    'font.size' : fontsize,
    'savefig.dpi' : 300,
    'axes.linewidth' : 0.5,
})
# -

# # Data

# +
# data
x = np.linspace(-2*np.pi, 2*np.pi, 50)
y1 = np.sin(x)
y2 = np.cos(x)

# labeling
ylabel = 'amplitude'

# coloring
colors = ['#2b8cbe', '#f03b20',]
# -

# # Beginner

# ### plot this
# <img src="beginner-line.png" alt="beginner" width='40%' align='left'/>

# +
# create figure and axis
# (the bare triple-quoted strings below are exercise hints, not executed code)
''' hint - use the imported pyplot (plt) command to create a figure typically you can go about this using the subplots(rows, columns, figsize) function which gives you both the figure, and an array of axes or you can create the figure first using figure(), then add axes onto it using add_axes() or add_subplot() (not the lack of an 's') example fig, ax = plt.subplots(1, 1, figsize=figsize) '''

# plot waves
''' hint - use the X and Y (y1, y2) coordinates to plot the sine and cosine waves onto ax look at the ax.plot() documentation to see what arguments are necessary, and what can be customized https://matplotlib.org/api/_as_gen/matplotlib.pyplot.plot.html example - ax.plot(x, y) '''

# set labels
''' hint - set setters to set the x and y labels example - ax1.set_ylabel(<args>) '''
# -

# # Advanced
# <img src="advanced-line.png" alt="beginner" width='40%' align='left'/>

# +
# create figure and axis

# create labels for each curve
''' hint - something as simple as making a list of names example - labels = ['sine', 'cosine'] '''

# use a for loop to change both the first and second axis, without having to repeat code
''' example - for i, y, color, label in zip(np.arange(2), [y1, y2], colors, labels): # the zip function packages similarly sized arrays (or lists) and indexes through each one simultaneously # so the first iteration contains # i=0, y=y1, color=colors[0], label=labels[0] # second iteration contains # i=1, y=y2, color=colors[1], label=labels[1] '''

# within for loop, plot the sine and cosine waves
''' hint - when plotting, you can either plot the line, points, or both example - plotting lines: ax.plot(x, y, ...) plotting points: ax.plot(x, y, linestyle='none', marker='o') plotting both: ax.plot(x, y, linestyle='-', marker='o') look at the ax.plot() documentation to see what is customizable https://matplotlib.org/api/_as_gen/matplotlib.pyplot.plot.html '''

# outside the loop, annotate the axes (means, going back a tab)

# move the spines so they look less awkward
''' hint - use the ax.spines command to move around the axis borders (i.e. spines) example - ax.spines['bottom'].set_position('zero') ax.spines['right'].set_color('none') ax.spines['top'].set_color('none') '''

# modify the x and y tick labels
# y ticks are too cluttered, and change label
# change x ticks to radians (pi)
''' hint - use setters, but in this case, the useful command called ax.locator_params() read the documentation for more info: https://matplotlib.org/api/_as_gen/matplotlib.axes.Axes.locator_params.html hint - when adding custom labels, you need to input a list of strings rather than an array of values. How matplotlib does this is that it first needs the coordinates of each strings position, afterwards, the list of strings are then placed at said coordinates example - # the coordinates for the x tick labels are at plus/minus 2 pi ax.set_xticks([-2*pi, 2*pi]) # because the coordinates are known (plus/minus 2 pi), the actual strings can then be placed ax.set_xticklabels(['-2$\pi$', '2$\pi$']) '''

# create legend from labels
# -

# # Save
''' - example fig.savefig('title', dpi=300, transparent=True) '''
exercise1-line_chart/exerise-line_chart.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import os
import argparse
import datetime
import tensorflow as tf
import yolo.config as cfg
from yolo.yolo_net import YOLONet
from utils.timer import Timer
from utils.pascal_voc import pascal_voc
import easydict
import time

# TF1-style slim API (this notebook targets TensorFlow 1.x).
slim = tf.contrib.slim
print("ok")


# +
class Solver(object):
    """Training driver for YOLONet: owns the TF session, optimizer,
    checkpointing and summary writing. Configuration comes from
    `yolo.config` (cfg)."""

    def __init__(self, net, data):
        # net: YOLONet model providing `images`, `labels`, `total_loss`.
        # data: pascal_voc batch provider with a `.get()` method.
        print("Solver init")
        self.net = net
        self.data = data
        self.weights_file = cfg.WEIGHTS_FILE
        self.max_iter = cfg.MAX_ITER
        self.initial_learning_rate = cfg.LEARNING_RATE
        self.decay_steps = cfg.DECAY_STEPS
        self.decay_rate = cfg.DECAY_RATE
        self.staircase = cfg.STAIRCASE
        self.summary_iter = cfg.SUMMARY_ITER
        self.save_iter = cfg.SAVE_ITER
        # Time-stamped output directory for checkpoints / summaries / config.
        self.output_dir = os.path.join(
            cfg.OUTPUT_DIR, datetime.datetime.now().strftime('%Y_%m_%d_%H_%M'))
        if not os.path.exists(self.output_dir):
            os.makedirs(self.output_dir)
        self.save_cfg()

        self.variable_to_restore = tf.global_variables()
        self.saver = tf.train.Saver(self.variable_to_restore, max_to_keep=None)
        self.ckpt_file = os.path.join(self.output_dir, 'yolo')
        self.summary_op = tf.summary.merge_all()
        self.writer = tf.summary.FileWriter(self.output_dir, flush_secs=60)

        # Exponentially decayed learning rate driven by the global step.
        self.global_step = tf.train.create_global_step()
        self.learning_rate = tf.train.exponential_decay(
            self.initial_learning_rate, self.global_step, self.decay_steps,
            self.decay_rate, self.staircase, name='learning_rate')
        self.optimizer = tf.train.GradientDescentOptimizer(
            learning_rate=self.learning_rate)
        self.train_op = slim.learning.create_train_op(
            self.net.total_loss, self.optimizer, global_step=self.global_step)

        gpu_options = tf.GPUOptions()
        config = tf.ConfigProto(gpu_options=gpu_options)
        self.sess = tf.Session(config=config)
        self.sess.run(tf.global_variables_initializer())

        if self.weights_file is not None:
            print('Restoring weights from: ' + self.weights_file)
            self.saver.restore(self.sess, self.weights_file)

        self.writer.add_graph(self.sess.graph)
        print("Solver init done!")

    def train(self):
        """Run the training loop for cfg.MAX_ITER steps, periodically
        logging summaries and saving checkpoints."""
        print("Solver train() start")
        train_timer = Timer()
        load_timer = Timer()

        for step in range(1, self.max_iter + 1):
            print("step:%d"%step)
            # NOTE(review): this sleep throttles every step by 5s —
            # presumably for debugging; confirm before real training runs.
            time.sleep(5)
            load_timer.tic()
            images, labels = self.data.get()
            load_timer.toc()
            feed_dict = {self.net.images: images, self.net.labels: labels}

            if step % self.summary_iter == 0:
                if step % (self.summary_iter * 10) == 0:
                    # Full logging step: also fetch the loss for the console.
                    train_timer.tic()
                    summary_str, loss, _ = self.sess.run(
                        [self.summary_op, self.net.total_loss, self.train_op],
                        feed_dict=feed_dict)
                    train_timer.toc()

                    # NOTE(review): the three adjacent literals concatenate
                    # into one format string; the `''''` opener makes the
                    # third fragment start with a stray `'` character —
                    # looks like a typo (kept as-is here).
                    log_str = '''{} Epoch: {}, Step: {}, Learning rate: {},''' \
                        ''' Loss: {:5.3f}\nSpeed: {:.3f}s/iter,''' \
                        '''' Load: {:.3f}s/iter, Remain: {}'''.format(
                        datetime.datetime.now().strftime('%m-%d %H:%M:%S'),
                        self.data.epoch,
                        int(step),
                        round(self.learning_rate.eval(session=self.sess), 6),
                        loss,
                        train_timer.average_time,
                        load_timer.average_time,
                        train_timer.remain(step, self.max_iter))
                    print(log_str)

                else:
                    # Summary-only step: skip the extra loss fetch.
                    train_timer.tic()
                    summary_str, _ = self.sess.run(
                        [self.summary_op, self.train_op],
                        feed_dict=feed_dict)
                    train_timer.toc()

                self.writer.add_summary(summary_str, step)

            else:
                # Plain training step, no summaries.
                train_timer.tic()
                self.sess.run(self.train_op, feed_dict=feed_dict)
                train_timer.toc()

            if step % self.save_iter == 0:
                print('{} Saving checkpoint file to: {}'.format(
                    datetime.datetime.now().strftime('%m-%d %H:%M:%S'),
                    self.output_dir))
                self.saver.save(
                    self.sess, self.ckpt_file, global_step=self.global_step)

    def save_cfg(self):
        """Dump all upper-case cfg entries to <output_dir>/config.txt."""
        with open(os.path.join(self.output_dir, 'config.txt'), 'w') as f:
            cfg_dict = cfg.__dict__
            for key in sorted(cfg_dict.keys()):
                if key[0].isupper():
                    cfg_str = '{}: {}\n'.format(key, cfg_dict[key])
                    f.write(cfg_str)


# +
def update_config_paths(data_dir, weights_file):
    """Repoint the cfg path constants at `data_dir` and `weights_file`."""
    cfg.DATA_PATH = data_dir
    cfg.PASCAL_PATH = os.path.join(data_dir, 'pascal_voc')
    cfg.CACHE_PATH = os.path.join(cfg.PASCAL_PATH, 'cache')
    cfg.OUTPUT_DIR = os.path.join(cfg.PASCAL_PATH, 'output')
    cfg.WEIGHTS_DIR = os.path.join(cfg.PASCAL_PATH, 'weights')
    #cfg.WEIGHTS_DIR = "data/weights"
    cfg.WEIGHTS_FILE = os.path.join(cfg.WEIGHTS_DIR, weights_file)
    #cfg.WEIGHTS_FILE = "data/weights/YOLO_small.ckpt"

print("ok")


# +
def main():
    # NOTE(review): the entire argument-parsing / config-update section below
    # is inside a triple-quoted string, i.e. disabled — cfg defaults are used.
    '''
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', default="YOLO_small.ckpt", type=str)
    parser.add_argument('--data_dir', default="data", type=str)
    parser.add_argument('--threshold', default=0.2, type=float)
    parser.add_argument('--iou_threshold', default=0.5, type=float)
    parser.add_argument('--gpu', default='', type=str)
    args = parser.parse_args()

    args = easydict.EasyDict(
        {
            "weights":"YOLO_small.ckpt",
            "data_dir":"data",
            "threshold":0.2,
            "gpu":"",
            "iou_threshold":0.5
        })

    if args.gpu is not None:
        cfg.GPU = args.gpu
    if args.data_dir != cfg.DATA_PATH:
        update_config_paths(args.data_dir, args.weights)
    os.environ['CUDA_VISIBLE_DEVICES'] = cfg.GPU
    '''
    yolo = YOLONet()
    print("build yolo model done")
    pascal = pascal_voc('train')

    solver = Solver(yolo, pascal)

    print('Start training ...')
    #solver.train()
    print("假装已经训练玩啦")
    print('Done training.')

print("ok")

# python train.py --weights YOLO_small.ckpt --gpu 0
# -

main()
train_ipynb.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Futures

# +
# Import libraries
import xarray as xr
import numpy as np
import pandas as pd

import qnt.output as qnout
import qnt.ta as qnta
import qnt.data as qndata
import qnt.stepper as qnstepper
import qnt.stats as qnstats
import qnt.graph as qngraph
import datetime as dt
import plotly.graph_objs as go
import xarray.ufuncs as xruf
import time
# -

# #### Load futures data
# Quantnet provides data for 39 global derivatives.
# The underlying assets are currencies, cross-rates, indices, bonds, energy and metals from the world's futures exchanges.
#
# Suppose we want to download the data for the last 4 years. One can use the following function:

fut_data = qndata.futures.load_data(tail = dt.timedelta(days = 4*365),
                                    dims = ("time", "field", "asset"),
                                    forward_order = True)

# The complete list
fut_data.asset

# +
# we can see historical data on a chart
trend_fig = [
    go.Scatter(
        x = fut_data.sel(asset = '6B').sel(field = 'close').to_pandas().index,
        y = fut_data.sel(asset = '6B').sel(field = 'close'),
        line = dict(width=1,color='black'))]

# draw chart
fig = go.Figure(data = trend_fig)
fig.update_yaxes(fixedrange=False) # unlock vertical scrolling
fig.show()
# -

# ## Weights allocation
# Say we want to implement technical analysis to futures. We can select trade based strategy, described in details [here](https://quantnet.ai/referee/template/14261892/html).
# Thus we need at least two functions - calc_positions and calc_output_all:

def calc_positions(futures, ma_periods, roc_periods, sideways_threshold):
    """Calculates positions for given data(futures) and parameters.

    Signal logic: go long while the ROC of a linearly-weighted MA is above
    +sideways_threshold, short while below -sideways_threshold; a sideways
    phase inherits the preceding trend's sign and is used to flatten an
    open position in the opposite direction.
    """
    close = futures.sel(field='close')

    # calculate MA
    ma = qnta.lwma(close, ma_periods)
    # calculate ROC
    roc = qnta.roc(ma, roc_periods)

    # positive trend direction
    positive_trend = roc > sideways_threshold
    # negative trend direction
    negative_trend = roc < -sideways_threshold
    # sideways
    sideways_trend = abs(roc) <= sideways_threshold

    # We suppose that a sideways trend after a positive trend is also positive
    side_positive_trend = positive_trend.where(sideways_trend == False).ffill('time').fillna(False)
    # and a sideways trend after a negative trend is also negative
    side_negative_trend = negative_trend.where(sideways_trend == False).ffill('time').fillna(False)

    # define signals
    buy_signal = positive_trend
    buy_stop_signal = side_negative_trend

    sell_signal = negative_trend
    sell_stop_signal = side_positive_trend

    # calc positions: +1 long, -1 short, 0 flat; forward-filled over time
    position = close.copy(True)
    position[:] = np.nan
    position = xr.where(buy_signal, 1, position)
    position = xr.where(sell_signal, -1, position)
    position = xr.where(xruf.logical_and(buy_stop_signal, position.ffill('time') > 0), 0, position)
    position = xr.where(xruf.logical_and(sell_stop_signal, position.ffill('time') < 0), 0, position)
    position = position.ffill('time').fillna(0)

    return position


def calc_output_all(data, params):
    """Apply calc_positions per asset using that asset's parameters,
    assembling one positions DataArray covering all configured assets."""
    positions = data.sel(field='close').copy(True)
    positions[:] = np.nan
    i = 0
    st = time.time()
    for futures_name in params.keys():
        i += 1
        if i % 300 == 0:
            # lightweight progress trace for large asset lists
            print(i, "/", len(data.asset), time.time() - st)

        p = params[futures_name]
        futures_data = data.sel(asset=futures_name).dropna('time','any')
        p = calc_positions(futures_data, p['ma_periods'], p['roc_periods'], p['sideways_threshold'])
        positions.loc[{'asset':futures_name, 'time':p.time}] = p

    return positions


# +
# say we select futures and their parameters for technical algorithm
params = {'F2MX': {'ma_periods': 10, 'roc_periods': 26, 'sideways_threshold': 5.0},
          'FGBX': {'ma_periods': 138, 'roc_periods': 49, 'sideways_threshold': 2.0},
          'N1U': {'ma_periods': 138, 'roc_periods': 25, 'sideways_threshold': 1.25}}

futures_list = list(params.keys())

#form the output
output = calc_output_all(fut_data.sel(asset = futures_list), params)
# -

# ## Statistics

def print_stat(stat):
    """Prints selected statistical key indicators:

    - the global Sharpe ratio of the strategy;
    - the global mean profit;
    - the global volatility;
    - the maximum drawdown.

    Note that Sharpe ratio, mean profit and volatility
    apply to max simulation period, and not to the
    rolling basis of 3 years.
    """
    days = len(stat.coords["time"])

    returns = stat.loc[:, "relative_return"]
    equity = stat.loc[:, "equity"]

    sharpe_ratio = qnstats.calc_sharpe_ratio_annualized(
        returns,
        max_periods=days,
        min_periods=days).to_pandas().values[-1]

    profit = (qnstats.calc_mean_return_annualized(
        returns,
        max_periods=days,
        min_periods=days).to_pandas().values[-1])*100.0

    volatility = (qnstats.calc_volatility_annualized(
        returns,
        max_periods=days,
        min_periods=days).to_pandas().values[-1])*100.0

    max_ddown = (qnstats.calc_max_drawdown(
        qnstats.calc_underwater(equity)).to_pandas().values[-1])*100.0

    print("Sharpe Ratio : ", "{0:.3f}".format(sharpe_ratio))
    print("Mean Return [%] : ", "{0:.3f}".format(profit))
    print("Volatility [%] : ", "{0:.3f}".format(volatility))
    print("Maximum Drawdown [%] : ", "{0:.3f}".format(-max_ddown))


stat = qnstats.calc_stat(fut_data, output, slippage_factor=0.05)
print_stat(stat)

# show plot with profit and losses:
performance = stat.to_pandas()["equity"]
qngraph.make_plot_filled(performance.index, performance, name="PnL (Equity)", type="log")

qnout.write(output)
strategy.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# $$
# \newcommand{\mat}[1]{\boldsymbol {#1}}
# \newcommand{\mattr}[1]{\boldsymbol {#1}^\top}
# \newcommand{\matinv}[1]{\boldsymbol {#1}^{-1}}
# \newcommand{\vec}[1]{\boldsymbol {#1}}
# \newcommand{\vectr}[1]{\boldsymbol {#1}^\top}
# \newcommand{\rvar}[1]{\mathrm {#1}}
# \newcommand{\rvec}[1]{\boldsymbol{\mathrm{#1}}}
# \newcommand{\diag}{\mathop{\mathrm {diag}}}
# \newcommand{\set}[1]{\mathbb {#1}}
# \newcommand{\norm}[1]{\left\lVert#1\right\rVert}
# \newcommand{\pderiv}[2]{\frac{\partial #1}{\partial #2}}
# \newcommand{\bb}[1]{\boldsymbol{#1}}
# $$
#
# # ACISDetector With CSV
# <a id=ACISDetectorCSV></a>
# In this part, we will examine our algorithms to classify binaries. Additionally, we'll write training loops and implement a modular model trainer. We'll use a few configurations for classifying architecture and instruction set with [isadetect](https://github.com/kairis/isadetect) CSV Datasets.
#

# +
import os
import re
import sys
import glob

import numpy as np
import matplotlib.pyplot as plt
import torch

# %matplotlib inline
# %load_ext autoreload
# %autoreload 2
# -

plt.rcParams.update({'font.size': 12})
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('Using device:', device)

# ## Architecture and instruction set
# <a id=arch_is></a>
# <!-- isadetect @inproceedings{kairajarvi2020isadetect,author={<NAME>},
# title={{ISAdetect: Usable Automated Detection of CPU Architecture and Endianness for Executable Binary Files and Object Code}},
# booktitle={Proceedings of the Tenth ACM Conference on Data and Application Security and Privacy},
# yar={2020}, url="https://doi.org/10.1145/3374664.3375742"} -->

import isadetect.helpers as isa_api
import src.arch_classifier as arch_api
import src.hyperparams as hp
from src.binary_dataset import FeatureDataset,DatasetToTuple
import src.arch_trainer as training

# ## Preprocessing

# +
import pathlib
import urllib
import shutil

DOWNLOAD_URL = 'https://github.com/kfirgirstein/ACISDetector/releases/download/Dataset/ISAdetect_only_code_sections_features.csv'
DATA_DIR = pathlib.Path.home().joinpath('.pytorch-datasets')


def download_dataset(out_path=DATA_DIR, url=DOWNLOAD_URL, force=False):
    """Download the ISAdetect features CSV to `out_path` (skipped when the
    file already exists unless `force`); returns the local file path."""
    pathlib.Path(out_path).mkdir(exist_ok=True)
    out_filename = os.path.join(out_path, os.path.basename(url))

    if os.path.isfile(out_filename) and not force:
        print(f'Dataset file {out_filename} exists, skipping download.')
    else:
        print(f'Downloading {url}...')
        with urllib.request.urlopen(url) as response, open(out_filename, 'wb') as out_file:
            shutil.copyfileobj(response, out_file)
        print(f'Saved to {out_filename}.')
    return out_filename


DATASET_FILE = download_dataset()

# +
binary_dataset = FeatureDataset(DATASET_FILE)
N = len(binary_dataset)
batch_size = 32
print(f'features length: {N}')

# 70/30 train/test split
train_length = int(0.7* N)
test_length = N - train_length
ds_train,ds_test = torch.utils.data.random_split(binary_dataset,(train_length,test_length))
print(f'Train: {len(ds_train)} samples')
print(f'Test: {len(ds_test)} samples')

dl_train = torch.utils.data.DataLoader(ds_train,batch_size=batch_size, shuffle=True)
dl_test = torch.utils.data.DataLoader(ds_test,batch_size=batch_size, shuffle=True)

x0,y0 = ds_train[0]
dataset_shape = (x0.shape if x0.dim() > 0 else 1),(y0.shape if y0.dim() > 0 else 1)
print('input size =', dataset_shape[0], "X",dataset_shape[1] )
# -

# ## Training
# <a id=part2_3></a>
# in order to plot our result and to compare them, we will use plot.py
# and then we'll use the following function to load multiple experiment results and plot them together.

# +
from jupyter_utils.plot import plot_fit, plot_exp_results, plot_residuals

fig = None
fit_res = []
num_classes = 24
in_size = dataset_shape[0][0]
help(plot_fit)

# +
rf_hp = hp.random_forest_hp()
print(rf_hp)
# NOTE(review): in_max_depth is set to batch_size (32) — an odd coupling of
# a data-loading parameter to tree depth; confirm intended.
_randomForest = arch_api.RandomForest(in_estimators = rf_hp["estimators"] , in_max_depth = batch_size,random_state= rf_hp["random_state"] , n_jobs = rf_hp["n_jobs"])
print(_randomForest)

rf_dataset = binary_dataset[ds_train.indices]
_randomForest.fit(*rf_dataset)
print("RandomForest modle is fiting now!")
# -

loss_fn = torch.nn.MSELoss()
rf_ev = _randomForest.evaluate(dl_test,loss_fn)
print("Avg Loss: ",sum(rf_ev['losses'])/len(rf_ev['losses']),"Accuracy: ",rf_ev['accuracy'])

# +
_isa_mlp = arch_api.ISADetectLogisticRegression(in_size,num_classes)
optimizer = torch.optim.Adam(_isa_mlp.parameters(), lr=0.01)
loss_fn = torch.nn.CrossEntropyLoss()
trainer = training.ArchTrainer(_isa_mlp, loss_fn, optimizer, device)
print(_isa_mlp)
fit_res.append({"legend":"ISADetectLogisticRegression","result":trainer.fit(dl_train,dl_test,num_epochs = 10,print_every=100)})
print("ISADetectLogisticRegression modle is fiting now!")

# +
mlp_hp = hp.mlp_hp()
print(mlp_hp)
_mlp = arch_api.MLP(in_size,num_classes,mlp_hp['hidden_size'])
optimizer = torch.optim.Adam(_mlp.parameters(), lr=mlp_hp['lr'])
loss_fn = torch.nn.CrossEntropyLoss()
trainer = training.ArchTrainer(_mlp, loss_fn, optimizer, device)
print(_mlp)
fit_res.append({"legend":"MLP","result":trainer.fit(dl_train,dl_test,num_epochs = 10,print_every=2)})
print("MLP modle is fiting now!")
# -

# Overlay all recorded training curves on one figure.
for elem in fit_res:
    fig, axes = plot_fit(elem['result'], fig, legend = elem['legend'],log_loss=True)

fig = []
ACIS_CSV.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .r
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: R
#     language: R
#     name: ir
# ---

# # Law of Large Numbers
# ## Author: <NAME>
#
# Suppose an experiment has a finite discrete set of numerical outcomes, e.g., the roll of a die. Suppose further that each outcome has an associated probability, so that based on this information one can compute the `expected value` of the experiment. Suppose one repeats the experiment a certain number of times (say $n$) and computes the average value of the outcomes (denoted by $X_n$). Then the `Law of Large Numbers` in its weak form states that as $n$ tends to infinity, the value $X_n$ converges to the `expected value`. The term "convergence" has a precise mathematical formulation that I will skip here.
#
# I illustrated this phenomenon with the help of a simple experiment below.

# Experiment information - outcomes and the probability distribution
outcomes <- c(1:10) # the vector of outcomes
# Random (unnormalised) weights, normalised into a probability distribution.
vec <- sample(c(1:1000), size=length(outcomes), replace=TRUE)
prob_distn <- vec/sum(vec) # the associated probability distribution

# +
# Verify if the outcomes conform with the probability distribution
trialLength <- 10000 # the number of trials for the verification
distributionTable <- integer(trialLength)
for (i in c(1:trialLength)) {
    distributionTable[i] <- sample(outcomes, size=1, prob=prob_distn, replace=TRUE)
}
computed_prob <- round(table(distributionTable)/trialLength, 2)
computed_prob_df <- cbind("Outcome"=outcomes,
                          "Computed Probability"=as.numeric(computed_prob),
                          "Actual Probability"=round(prob_distn, 2))
computed_prob_df # there can be a slight discrepancy due to the rounding of the values
# -

# Compute the expected value
expected_value <- sum(outcomes*prob_distn)
expected_value

# Define a function to compute the average for a certain number of trials
compute_avg <- function(n_trials, outcomes, prob_distn) {
    # Draw n_trials samples from the distribution and return their mean.
    trials <- integer(n_trials)
    for (i in c(1:n_trials)) {
        trials[i] <- sample(outcomes, size=1, prob = prob_distn, replace=TRUE)
    }
    avg <- sum(trials)/length(trials)
    return(avg)
}

# First check - number of trials = 100
error1 <- abs(expected_value - compute_avg(100, outcomes, prob_distn))
error1

# Second check - number of trials = 10000
error2 <- abs(expected_value - compute_avg(10000, outcomes, prob_distn))
error2

# +
# Compute the absolute value of error as a function of the number of trials
range <- seq(from=100, to=10000, by=10)
error_vector <- numeric(length(range))
index <- 1
for (n in range) {
    error_vector[index] <- abs(expected_value - compute_avg(n, outcomes, prob_distn))
    index = index+1
}
# -

# Plot the error as a function of the number of trials
# NOTE(review): the main title looks truncated ("... as a Function") —
# probably meant "... as a Function of the Number of Trials"; left as-is.
plot(x = range, y = error_vector, type="l",
     main="Absolute Value of Error as a Function",
     xlab="Number of Trials", ylab="Absolute Value of Error")

# Observe how the absolute value of the error tends to decrease as the number of trials increases.
StatMathMethods/R/LawOfLargeNumbers.ipynb